//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "TfParser.hpp"

#include <armnn/TypesUtils.hpp>
#include <armnn/Descriptors.hpp>

#include <armnnUtils/Permute.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Transpose.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <GraphTopologicalSort.hpp>
#include <ParserHelper.hpp>

#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/text_format.h>

#include <tensorflow/core/framework/graph.pb.h>

#include <boost/format.hpp>
#include <fmt/core.h>
#include <numeric>

using namespace armnnUtils;
using namespace armnn;

namespace armnnTfParser
{
namespace
{

const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
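// A PermutationVector maps source dimension i to destination index vector[i], so applying
// NHWCToArmNN = { 0, 2, 3, 1 } to an NHWC shape [N, H, W, C] yields the ArmNN default NCHW
// layout [N, C, H, W]; ArmNNToNHWC is the inverse mapping.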

template <typename Callable>
void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
                                    const std::string& attribName,
                                    tensorflow::AttrValue::ValueCase expectedValueCase,
                                    Callable callable)
{
    auto iter = nodeDef.attr().find(attribName);
    if (iter != nodeDef.attr().end())
    {
        const auto& attrValue = iter->second;
        if (attrValue.value_case() == expectedValueCase)
        {
            callable(attrValue);
        }
        else
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
                        "but found %4% instead %5%")
                        % attribName
                        % nodeDef.name()
                        % static_cast<int>(expectedValueCase)
                        % static_cast<int>(attrValue.value_case())
                        % CHECK_LOCATION().AsString()));
        }
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Could not find required attribute %1% in node %2% %3%")
                    % attribName
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
}

template <typename Callable>
void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
                                   const std::string& attribName,
                                   tensorflow::AttrValue::ValueCase expectedValueCase,
                                   Callable callable)
{
    auto iter = nodeDef.attr().find(attribName);
    if (iter != nodeDef.attr().end())
    {
        const auto& attrValue = iter->second;
        if (attrValue.value_case() == expectedValueCase)
        {
            callable(attrValue);
        }
        else
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
                        "but found %4% instead %5%")
                        % attribName
                        % nodeDef.name()
                        % static_cast<int>(expectedValueCase)
                        % static_cast<int>(attrValue.value_case())
                        % CHECK_LOCATION().AsString()));
        }
    }
}

float ReadMandatoryNodeFloatAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    float attribValue = 0.0f;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
        [&attribValue](const tensorflow::AttrValue& attrValue)
        {
            attribValue = attrValue.f();
        });
    return attribValue;
}

int32_t ReadMandatoryNodeInt32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    int32_t attribValue = 0u;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
        [&attribValue](const tensorflow::AttrValue& attrValue)
        {
            attribValue = static_cast<int32_t>(attrValue.i());
        });
    return attribValue;
}

bool ReadMandatoryNodeBoolAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    bool attribValue = false;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
        [&attribValue](const tensorflow::AttrValue& attrValue)
        {
            attribValue = static_cast<bool>(attrValue.b());
        });
    return attribValue;
}

uint32_t ReadMandatoryNodeUint32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    uint32_t attribValue = 0u;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
        [&attribValue](const tensorflow::AttrValue& attrValue)
        {
            attribValue = static_cast<uint32_t>(attrValue.i());
        });
    return attribValue;
}

std::string ReadMandatoryNodeStringAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    std::string attribValue = "";
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
        [&attribValue](const tensorflow::AttrValue& attrValue)
        {
            attribValue = attrValue.s();
        });
    return attribValue;
}

std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
                                                           const std::string& name)
{
    std::vector<uint32_t> attriList;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
        [&attriList](const tensorflow::AttrValue& attrValue)
        {
            for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
            {
                attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
            }
        });

    return attriList;
}

std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
                                                          const std::string& name)
{
    std::vector<uint32_t> attriList;
    ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
        [&attriList](const tensorflow::AttrValue& attrValue)
        {
            for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
            {
                attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
            }
        });

    return attriList;
}

std::string ReadOptionalNodeStringAttribute(const tensorflow::NodeDef& nodeDef,
                                            const std::string& name,
                                            const std::string& defaultValue = "")
{
    std::string attribValue = defaultValue;
    ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
        [&attribValue](const tensorflow::AttrValue& attrValue)
        {
            attribValue = attrValue.s();
        });
    return attribValue;
}

bool ReadOptionalNodeBoolAttribute(const tensorflow::NodeDef& nodeDef,
                                   const std::string& name,
                                   bool defaultValue = false)
{
    bool attribValue = defaultValue;
    ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
        [&attribValue](const tensorflow::AttrValue& attrValue)
        {
            attribValue = attrValue.b();
        });
    return attribValue;
}

tensorflow::DataType ReadMandatoryNodeTypeAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    tensorflow::DataType attribValue = tensorflow::DT_INVALID;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
        [&attribValue](const tensorflow::AttrValue& attrValue)
        {
            attribValue = attrValue.type();
        });
    return attribValue;
}

TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& targetDims)
{
    std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
    const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);

    if (stretchDim != targetDims.end())
    {
        if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "At most one component of shape can be -1 %1%")
                        % CHECK_LOCATION().AsString()));
        }

        auto targetNumElements =
            armnn::numeric_cast<unsigned int>(
                std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
        auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
        outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
    }

    TensorInfo reshapeInfo = input;
    reshapeInfo.SetShape(TensorShape{ static_cast<unsigned int>(outDims.size()), outDims.data() });

    return reshapeInfo;
}
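// Example: reshaping an input of 8 elements with targetDims = { -1, 4 } resolves the stretch
// dimension to 8 / 4 = 2, giving an output shape of [2, 4].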

// We need the input0Slot to guide the reshape for input1Slot.
IOutputSlot* AddBroadcastReshapeLayer(IOutputSlot* input0Slot, IOutputSlot* input1Slot, bool isNHWC,
                                      INetwork& m_Network, const tensorflow::NodeDef& nodeDef)
{
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();
    const TensorInfo inputTensorInfo = input0Slot->GetTensorInfo();
    const unsigned int matchDim = inputTensorInfo.GetNumDimensions() - (isNHWC ? 1 : 3);
    std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
    std::fill_n(reshapedDimensions.begin(), inputTensorInfo.GetNumDimensions(), 1);
    reshapedDimensions[matchDim] = input1Info.GetShape()[0];

    armnn::TensorInfo reshapedInfo = input1Info;
    reshapedInfo.SetShape(TensorShape{ inputTensorInfo.GetNumDimensions(), reshapedDimensions.data() });

    const std::string reshapeLayerName = "reshape_for-" + nodeDef.name();
    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
    IConnectableLayer* const reshapeLayer = m_Network.AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());

    input1Slot->Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    input1Slot = &reshapeLayer->GetOutputSlot(0);

    return input1Slot;
}
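// Example: broadcasting a 1D tensor of shape [C] against a 4D NHWC input reshapes it to
// [1, 1, 1, C] (or to [1, C, 1, 1] for NCHW), so both inputs of the following elementwise
// layer have the same number of dimensions.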

OutputId ParseOutputId(const std::string& name)
{
    unsigned int outputNum = 0;
    size_t colonPos = name.find_last_of(":");
    if (colonPos != std::string::npos)
    {
        int n = std::stoi(name.substr(colonPos + 1));
        if (n < 0 || n > 100)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Output tensor id is out of range for %1% %2%")
                        % name
                        % CHECK_LOCATION().AsString()));
        }
        outputNum = static_cast<unsigned int>(n);
    }
    return OutputId(name.substr(0, colonPos), outputNum);
}
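// Example: "conv1:1" parses to OutputId("conv1", 1), while a plain node name such as "conv1"
// refers to output 0 of that node.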

#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \
    if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Unsupported data format %1% passed for %2% node %3%. " \
                    "Only NHWC and NCHW supported %4%") \
                    % FORMAT \
                    % NODE_TYPE \
                    % NODE_DEF.name() \
                    % CHECK_LOCATION().AsString())); \
    }

#define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
    if(PADDING != "SAME" && PADDING != "VALID" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Only 'SAME' and 'VALID' padding supported. Got %1% for %2% %3%") \
                    % PADDING \
                    % NODE_DEF.name() \
                    % CHECK_LOCATION().AsString())); \
    } \

} // namespace

const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
    { "Const", &TfParser::ParseConst },
    { "Add", &TfParser::ParseAdd },
    { "AddN", &TfParser::ParseAddN },
    { "BiasAdd", &TfParser::ParseBiasAdd },
    { "Identity", &TfParser::ParseIdentity },
    { "Conv2D", &TfParser::ParseConv2D },
    { "DepthwiseConv2dNative", &TfParser::ParseDepthwiseConv2D },
    { "ExpandDims", &TfParser::ParseExpandDims },
    { "FusedBatchNorm", &TfParser::ParseFusedBatchNorm },
    { "Gather", &TfParser::ParseGather },
    { "Greater", &TfParser::ParseGreater },
    { "ConcatV2", &TfParser::ParseConcat },
    { "LRN", &TfParser::ParseLrn },
    { "MatMul", &TfParser::ParseMatMul },
    { "Mean", &TfParser::ParseMean },
    { "Mul", &TfParser::ParseMul },
    { "Placeholder", &TfParser::ParsePlaceholder },
    { "RealDiv", &TfParser::ParseRealDiv },
    { "Relu", &TfParser::ParseRelu },
    { "Relu6", &TfParser::ParseRelu6 },
    { "Reshape", &TfParser::ParseReshape },
    { "ResizeBilinear", &TfParser::ParseResizeBilinear },
    { "Rsqrt", &TfParser::ParseRsqrt },
    { "Shape", &TfParser::ParseShape },
    { "Squeeze", &TfParser::ParseSqueeze },
    { "Sigmoid", &TfParser::ParseSigmoid },
    { "Softmax", &TfParser::ParseSoftmax },
    { "Softplus", &TfParser::ParseSoftplus },
    { "Split", &TfParser::ParseSplit },
    { "StridedSlice", &TfParser::ParseStridedSlice },
    { "Tanh", &TfParser::ParseTanh },
    { "MaxPool", &TfParser::ParseMaxPool },
    { "AvgPool", &TfParser::ParseAvgPool },
    { "Maximum", &TfParser::ParseMaximum },
    { "Minimum", &TfParser::ParseMinimum },
    { "Equal", &TfParser::ParseEqual },
    { "Pad", &TfParser::ParsePad },
    { "Sub", &TfParser::ParseSub },
    { "Pack", &TfParser::ParseStack },
    { "Stack", &TfParser::ParseStack },
    { "Transpose", &TfParser::ParseTranspose },
};

const std::list<std::string> TfParser::m_ControlInputs = {
    "Assert"
};

ITfParser* ITfParser::CreateRaw()
{
    return new TfParser();
}

ITfParserPtr ITfParser::Create()
{
    return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
}

void ITfParser::Destroy(ITfParser* parser)
{
    delete parser;
}

inline void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
                                 uint32_t filterSize, bool samePadding,
                                 uint32_t* paddingFront, uint32_t* paddingBack) {
    *paddingFront = 0;
    *paddingBack = 0;

    if (samePadding) {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t temp = (outputSize - 1) * stride + filterSize;
        if (temp > inputSize) {
            *paddingFront = (temp - inputSize) / 2;
            *paddingBack = (temp - inputSize) - *paddingFront;
        }
    }
}
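// Worked example of the SAME padding rule: inputSize = 7, stride = 2, filterSize = 3 gives
// outputSize = ceil(7 / 2) = 4 and temp = (4 - 1) * 2 + 3 = 9, so the 9 - 7 = 2 padding
// elements are split as paddingFront = 1 and paddingBack = 1.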

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 bool samePadding)
{
    CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
}

/// An abstract base class which represents a single TensorFlow operation (node)
/// that has been (potentially partially) converted to ArmNN.
/// It may not yet have been fully converted into actual ArmNN layers.
class ParsedTfOperation
{
public:
    ParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
    : m_Parser(parser)
    , m_Node(node)
    {
    }

    virtual ~ParsedTfOperation() {};

    const tensorflow::NodeDef& GetNode() const { return m_Node; }

    /// Gets the ArmNN IOutputSlot corresponding to the given output index of the TensorFlow operation.
    /// This may result in the creation of ArmNN layers if this was deferred (e.g. see ParsedConstTfOperation).
    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) = 0;

    /// If this operation is an Identity, this follows the chain and returns the 'parent' operation (recursively).
    virtual ParsedTfOperation* ResolveIdentityOperations()
    {
        return this;
    }

protected:
    TfParser* m_Parser;
    const tensorflow::NodeDef& m_Node;
};

/// A ParsedTfOperation where the ArmNN equivalent is a single layer,
/// with output slots that correspond directly to the Tf node outputs.
class SingleLayerParsedTfOperation : public ParsedTfOperation
{
public:
    SingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node, IConnectableLayer* layer)
    : ParsedTfOperation(parser, node)
    , m_Layer(layer)
    {
    }

    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        ARMNN_ASSERT(m_Layer);
        // Assumes one-to-one mapping between Tf and ArmNN output slots.
        unsigned int armnnOutputSlotIdx = tfOutputIndex;
        if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "The requested output slot #%1% "
                        "for %2% does not exist %3%")
                        % armnnOutputSlotIdx
                        % m_Layer->GetName()
                        % CHECK_LOCATION().AsString()));
        }
        return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
    }

protected:
    IConnectableLayer* m_Layer;
};

/// A SingleLayerParsedTfOperation for deferred layer creation.
class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
{
public:
    DeferredSingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
    : SingleLayerParsedTfOperation(parser, node, nullptr)
    {
    }

    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        if (!m_Layer)
        {
            CreateLayerDeferred();
        }
        return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
    }

private:
    virtual void CreateLayerDeferred() = 0;
};


TfParser::TfParser()
: m_Network(nullptr, nullptr)
{
}


const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
{
    if (nodeDef->op() != "Identity")
    {
        return nodeDef;
    }

    if (nodeDef->input_size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Identity node should have a single input! %1% has %2% inputs %3%")
                    % nodeDef->name()
                    % nodeDef->input_size()
                    % CHECK_LOCATION().AsString()));
    }

    auto it = m_NodesByName.find(nodeDef->input(0));
    if (it != m_NodesByName.end())
    {
        const tensorflow::NodeDef* inputNode = it->second;
        return ResolveIdentityNode(inputNode);
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Cannot find what the Identity node %1% is linked to! %2%")
                    % nodeDef->name()
                    % CHECK_LOCATION().AsString()));
    }
}

std::vector<OutputOfConstNodeDef>
TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
{
    std::vector<OutputOfConstNodeDef> ret;

    if (nodeDef.op() == "Const")
    {
        // For some reason a Const node can have "Control Inputs". We ignore them for now.
        return ret;
    }

    ret.reserve(armnn::numeric_cast<size_t>(nodeDef.input_size()));
    for (int j = 0; j < nodeDef.input_size(); ++j)
    {
        OutputId outputId = ParseOutputId(nodeDef.input(j));

        if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
        {
            // We currently allow control inputs in the TensorFlow graph but leave them out of the ArmNN graph.
            continue;
        }

        auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
        if (inputIt == m_NodesByName.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Can't find node '%1%', which is listed as an input of '%2%' %3%")
                        % nodeDef.input(j)
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
        ret.push_back(OutputOfConstNodeDef(inputIt->second, outputId.m_Index));
    }

    return ret;
}

std::vector<OutputOfParsedTfOperation>
TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
                                            std::size_t expectedNumInputs)
{
    // Fetches the TensorFlow nodes connected as inputs and validates the size.
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
    const std::size_t numInputs = nodes.size();
    if (numInputs != expectedNumInputs)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Unexpected number of inputs for node %1%. Expected %2%, found %3% %4%")
                    % nodeDef.name()
                    % expectedNumInputs
                    % numInputs
                    % CHECK_LOCATION().AsString()));
    }
    // Fetches the corresponding ParsedTfOperation operations.
    std::vector<OutputOfParsedTfOperation> result;
    for (auto&& node : nodes)
    {
        auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
        if (it == m_ParsedTfOperations.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Node with name '%1%' has not been parsed %2%")
                        % node.m_IndexedValue->name()
                        % CHECK_LOCATION().AsString()));
        }
        ParsedTfOperation* parsedOp = it->second.get();
        // Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
        parsedOp = parsedOp->ResolveIdentityOperations();
        result.push_back(OutputOfParsedTfOperation(parsedOp, node.m_Index));
    }
    return result;
}

IConnectableLayer* TfParser::CreateAdditionLayer(
    const tensorflow::NodeDef& nodeDef,
    IOutputSlot* input0Slot,
    IOutputSlot* input1Slot,
    const std::string& layerName)
{
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    const unsigned int input0Dim = input0Info.GetNumDimensions();
    const unsigned int input1Dim = input1Info.GetNumDimensions();
    if (input0Dim != input1Dim)
    {
        // Broadcasting where input0 and input1 have a different number of dimensions
        // is only supported for 1D/4D tensor pairs.
        if (input0Dim == 1 && input1Dim == 4)
        {
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
        }
        else if (input0Dim == 4 && input1Dim == 1)
        {
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
        }
        else
        {
            throw ParseException(
                boost::str(
                    boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
                        % layerName
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
    }
    IConnectableLayer* const layer = m_Network->AddAdditionLayer(layerName.c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // Ensure the output tensor has the correct dimensions even if a broadcast has been done.
    TensorInfo outputInfo = input0Slot->GetTensorInfo();
    std::vector<unsigned int> outputShape;

    const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
    const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();

    for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
    {
        outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
    }

    outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return layer;
}

IConnectableLayer* TfParser::CreateAdditionLayer(
    const tensorflow::NodeDef& nodeDef,
    IConnectableLayer* layerOne,
    IConnectableLayer* layerTwo,
    unsigned int numberOfAddition,
    unsigned long numberOfLayersToConnect,
    bool isOdd)
{
    IOutputSlot* input0Slot = &layerOne->GetOutputSlot(0);
    IOutputSlot* input1Slot = &layerTwo->GetOutputSlot(0);
    std::string layerName(nodeDef.name());
    if (isOdd || numberOfLayersToConnect != 2)
    {
        // We are not connecting the final layer.
        layerName.append("_addN_").append(std::to_string(numberOfAddition));
    }
    return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
}

IConnectableLayer* TfParser::CreateAdditionLayer(
    const tensorflow::NodeDef& nodeDef,
    const OutputOfParsedTfOperation& opOne,
    const OutputOfParsedTfOperation& opTwo,
    unsigned int numberOfAddition)
{
    IOutputSlot* input0Slot = &opOne.m_IndexedValue->ResolveArmnnOutputSlot(opOne.m_Index);
    IOutputSlot* input1Slot = &opTwo.m_IndexedValue->ResolveArmnnOutputSlot(opTwo.m_Index);
    std::string layerName(nodeDef.name());
    layerName.append("_addN_").append(std::to_string(numberOfAddition));
    return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
}

IConnectableLayer* TfParser::CreateAdditionLayer(
    const tensorflow::NodeDef& nodeDef,
    const OutputOfParsedTfOperation& op,
    IConnectableLayer* layer)
{
    IOutputSlot* input0Slot = &op.m_IndexedValue->ResolveArmnnOutputSlot(op.m_Index);
    IOutputSlot* input1Slot = &layer->GetOutputSlot(0);
    return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, nodeDef.name());
}

ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
    if (numberOfInputs < 2)
    {
        // Should never happen.
        throw ParseException(
            boost::str(
                boost::format(
                    "AddN Node with name '%1%' has less than two (%2%) inputs %3%")
                    % nodeDef.name()
                    % std::to_string(numberOfInputs)
                    % CHECK_LOCATION().AsString()));
    }
    else if (numberOfInputs == 2)
    {
        // This is the same as a simple Add operation.
        return AddAdditionLayer(nodeDef, false);
    }
    else
    {
        // Build a binary tree of Add layers and return the final Add as the return from the function.
        // If we have an odd number of inputs then the final Add will consist of a layer connecting to an
        // OutputOfParsedTfOperation, otherwise it will be two layers being added together.
        std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numberOfInputs);
        unsigned int numberOfAdditions = 0;
        std::vector<IConnectableLayer*> layers;
        // NOTE: at this point we will have a minimum of three inputs.
        for (unsigned int i = 0; i < numberOfInputs; ++i)
        {
            // Every time i is odd we have two inputs to process.
            bool onSecondItem = i % 2;
            if (onSecondItem)
            {
                ++numberOfAdditions;
                IConnectableLayer* newLayer = CreateAdditionLayer(
                    nodeDef, inputs[i - 1], inputs[i], numberOfAdditions);
                layers.push_back(newLayer);
            }
        }

        std::vector<IConnectableLayer*> layersToConnect(layers);
        unsigned long numberOfLayersToConnect = layersToConnect.size();
        bool isOdd = numberOfInputs % 2;

        while (numberOfLayersToConnect > 1)
        {
            layers.clear();
            for (unsigned long i = 0; i < numberOfLayersToConnect; ++i)
            {
                bool onSecondItem = i % 2;
                if (onSecondItem)
                {
                    ++numberOfAdditions;
                    IConnectableLayer* newLayer = CreateAdditionLayer(
                        nodeDef,
                        layersToConnect[i - 1],
                        layersToConnect[i],
                        numberOfAdditions,
                        numberOfLayersToConnect,
                        isOdd);
                    layers.push_back(newLayer);
                }
            }
            // OK... need to go again... maybe.
            layersToConnect = layers;
            numberOfLayersToConnect = layersToConnect.size();
        }
        IConnectableLayer* finalLayer = layersToConnect[0];
        // If we had an odd number of inputs we need to connect the final layer to the
        // last OutputOfParsedTfOperation in order to create the last Add layer we will
        // be handing back.
        if (isOdd)
        {
            // Connect the final layer to the last op.
            finalLayer = CreateAdditionLayer(nodeDef, inputs[numberOfInputs - 1], finalLayer);
        }
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, finalLayer);
    }
}

ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
    // together as FullyConnected.
    if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
        HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        IConnectableLayer* layer =
            AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
                                   &nodeDef, nodeDef.name().c_str());
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
    }
    else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
             inputs[1].m_IndexedValue->GetNode().op() == "MatMul")
    {
        IConnectableLayer* layer =
            AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
                                   &nodeDef, nodeDef.name().c_str());
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
    }
    else
    {
        // Otherwise it's just a regular addition.
        return AddAdditionLayer(nodeDef);
    }
}

ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    return AddAdditionLayer(nodeDef, true);
}

/// A ParsedTfOperation which forwards to another (used for Identity nodes).
class ParsedIdentityTfOperation : public ParsedTfOperation
{
public:
    ParsedIdentityTfOperation(TfParser* parser, const tensorflow::NodeDef& node, ParsedTfOperation* representative)
    : ParsedTfOperation(parser, node)
    , m_Representative(representative)
    {
    }

    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        ARMNN_ASSERT(m_Representative);
        return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
    }

    virtual ParsedTfOperation* ResolveIdentityOperations() override
    {
        return m_Representative->ResolveIdentityOperations();
    }

private:
    ParsedTfOperation* m_Representative;
};

ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
    // Any requests for the output slots of this node should be forwarded to the node connected as input.
    return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
}

/// A ParsedTfOperation for a Const node.
/// Creation of the ArmNN ConstLayer is deferred until it is actually needed, because Const nodes are mostly used
/// for weight inputs to MatMul/Conv2D nodes and in these cases ArmNN doesn't need a ConstLayer.
template <typename T>
class ParsedConstTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    ParsedConstTfOperation(TfParser* parser, const tensorflow::NodeDef& node,
                           const T* tensorData, const TensorInfo& tensorInfo)
    : DeferredSingleLayerParsedTfOperation(parser, node),
      m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
      m_TensorInfo(tensorInfo)
    {
        ARMNN_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
    }

    void CreateLayerDeferred() override
    {
        ARMNN_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
        m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
    }

    ConstTensor GetConstTensor(std::vector<T>& outputTensorData) const
    {
        outputTensorData.resize(m_TensorInfo.GetNumElements());

        memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.GetNumBytes());

        // Updates the result to point to the user provided storage.
        ConstTensor constTensor(m_TensorInfo, outputTensorData);
        return constTensor;
    }

    const T* GetStorage() const
    {
        return m_Storage.data();
    }

    const TensorInfo& GetTensorInfo() const
    {
        return m_TensorInfo;
    }

private:
    ///< Manages the lifetime of the tensor data.
    std::vector<T> m_Storage;
    ///< Describes the layout of the tensor and points to the data in m_Storage.
    TensorInfo m_TensorInfo;
};

DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
                                 const tensorflow::NodeDef& nodeDef)
{
    switch (tfDataType)
    {
    case tensorflow::DT_FLOAT:
        return DataType::Float32;
        break;
    case tensorflow::DT_INT32:
        return DataType::Signed32;
        break;
    default:
        throw ParseException(
            boost::str(
                boost::format(
                    "Unknown DataType %1% for node %2% %3%")
                    % tensorflow::DataType_Name(tfDataType)
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
}

struct ParseTfTensorValueList
{
    template<typename DataType>
    static void Parse(
        const tensorflow::TensorProto& tfTensor,
        unsigned int dstElements,
        std::vector<int8_t>& outputData);

    template <typename DataType>
    static void ReadData(const void* srcData, unsigned int numSrcElements,
                         std::vector<int8_t>& dstData, unsigned int numDstElements)
    {
        // If there are no entries in the list, perform no action.
        if (numSrcElements == 0)
        {
            return;
        }

        // If no size was provided, use the length of the value list.
        if (numDstElements == 0)
        {
            numDstElements = numSrcElements;
        }

        // Allocates memory.
        dstData.resize(std::max(numSrcElements, numDstElements) * sizeof(DataType));

        const DataType* srcTensor = reinterpret_cast<const DataType*>(srcData);
        DataType* dstTensor = reinterpret_cast<DataType*>(dstData.data());

        // Copies the value list entries into the destination.
        std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);

        if (numDstElements > numSrcElements)
        {
            // Uses the last element in the list to fill the remaining entries.
            std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
        }
    }

};

template <>
void ParseTfTensorValueList::Parse<float>(const tensorflow::TensorProto& tfTensor,
                                          unsigned int dstElements, std::vector<int8_t>& outputData)
{
    ReadData<float>(tfTensor.float_val().data(), static_cast<unsigned int>(tfTensor.float_val_size()),
                    outputData, dstElements);
}

template <>
void ParseTfTensorValueList::Parse<int32_t>(const tensorflow::TensorProto& tfTensor,
                                            unsigned int dstElements, std::vector<int8_t>& outputData)
{
    ReadData<int32_t>(tfTensor.int_val().data(), static_cast<unsigned int>(tfTensor.int_val_size()),
                      outputData, dstElements);
}

template <template<typename> class OperatorType, typename T = int8_t>
struct MakeTfOperation
{
    template<typename DataType, class... Args>
    inline static std::unique_ptr<OperatorType<DataType>> Parse(TfParser* parser, const tensorflow::NodeDef& node,
                                                                Args&&... args)
    {
        return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
    }
};

template <>
struct MakeTfOperation<ParsedConstTfOperation>
{
    template<typename DataType, class... Args>
    inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(TfParser* parser,
        const tensorflow::NodeDef& node, const std::vector<int8_t>& tensorData, const TensorInfo& tensorInfo)
    {
        return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
            reinterpret_cast<const DataType*>(tensorData.data()), tensorInfo);
    }
};

template <class FuncType>
struct InvokeParseFunction
{
    template<class ResType, class... Args>
    inline static ResType Result(DataType dataType, Args&&... args)
    {
        if (dataType == DataType::Float32)
        {
            return FuncType::template Parse<float>(std::forward<Args>(args)...);
        }
        else if (dataType == DataType::Signed32)
        {
            return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
        }

        return ResType();
    }

    template<class... Args>
    inline static void Result(DataType dataType, Args&&... args)
    {
        if (dataType == DataType::Float32)
        {
            FuncType::template Parse<float>(std::forward<Args>(args)...);
        }
        else if (dataType == DataType::Signed32)
        {
            FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
        }
    }
};

ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    ARMNN_ASSERT(nodeDef.op() == "Const");

    if (nodeDef.attr().count("value") == 0)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Value not found for Const node - %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
    const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
    const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "dtype");

    const auto GetDimensionSize = [](auto& d) { return d.size(); };

    std::vector<unsigned int> dimensionSizes;
    std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
                   std::back_inserter(dimensionSizes), GetDimensionSize);

    // Calculates number of elements.
    const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
    unsigned int numElements = 0U;

    if (!dimensionSizes.empty())
    {
        numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
                                      1U, std::multiplies<unsigned int>());
    }

    std::vector<int8_t> tensorData;

    // Get tensor data from the list of values attribute.
    if (tfTensor.tensor_content().empty())
    {
        InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);

        // If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
        // tensor of the provided number of elements.
        if (numElements == 0)
        {
            const unsigned int tfNumElements =
                static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
            dimensionSizes.push_back(tfNumElements);
        }
    }
    // Gets tensor data from tensor content attribute.
    else
    {
        tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());

        // Checks if a tensor shape is defined for the tensor content.
        if (numElements == 0)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "No tensor shape found for Const node - %1% %2%")
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
    }

    // Const node requires at least a list of values or a content attribute.
    if (tensorData.empty())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "No tensor data found for Const node - %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
                                dimensionSizes.data(),
                                dataType);

    // If we have a list of values, then the length of the list must be
    // less than or equal to the number of elements implied by the shape argument.
    if (tensorData.size() > tensorInfo.GetNumBytes())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Number of elements (%1%) should be less than or equal "
                    "to the number of elements implied by the shape argument (%2%) for Const node - %3% %4%")
                    % (tensorData.size() / GetDataTypeSize(dataType))
                    % tensorInfo.GetNumElements()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
        dataType, this, nodeDef, tensorData, tensorInfo);
}

template<typename Type>
bool TfParser::HasParsedConstTensor(const std::string& nodeName) const
{
    auto it = m_ParsedTfOperations.find(nodeName);
    if (it == m_ParsedTfOperations.end())
    {
        return false;
    }
    return dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) != nullptr;
}

template<typename Type>
bool TfParser::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const
{
    return dynamic_cast<ParsedConstTfOperation<Type>*>(parsedTfOpPtr) != nullptr;
}

unsigned int TfParser::GetConstInputIndex(const std::vector<OutputOfParsedTfOperation>& inputs)
{
    for (unsigned int i = 0; i < inputs.size(); i++)
    {
        if (HasParsedConstTensor<int32_t>(inputs[i].m_IndexedValue->GetNode().name()))
        {
            return i;
        }
    }
    throw ParseException(
        boost::str(
            boost::format(
                "ArmNN only supports operators with constant axis. %1%")
                % CHECK_LOCATION().AsString()));
}

ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Convolution layers with constant weights for %1%, input %2% %3%")
                    % nodeDef.name()
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* weightNode =
        PolymorphicDowncast<ParsedConstTfOperation<float>*>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    // Read the dilations, if present - only [1,1,1,1] (the default) is supported.
    std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
    if (!dilations.empty())
    {
        for (auto dilation : dilations)
        {
            if (dilation != 1u)
            {
                throw ParseException(
                    boost::str(
                        boost::format(
                            "ArmNN only supports Convolution layers with dilations [1,1,1,1] for %1% %2%")
                            % nodeDef.name()
                            % CHECK_LOCATION().AsString()));
            }
        }
    }

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth  = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // TensorFlow weights are [H, W, In, Out].
    // ArmNN weights have to be [Out, H, W, In] when the data layout is NHWC,
    // and [Out, In, H, W] when the data layout is NCHW.
    PermutationVector permutationVector =
        dataLayout == DataLayout::NHWC ?
            std::initializer_list<unsigned int>{ 1, 2, 3, 0 } : // NHWC: [H, W, In, Out] -> [Out, H, W, In]
            std::initializer_list<unsigned int>{ 2, 3, 1, 0 };  // NCHW: [H, W, In, Out] -> [Out, In, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    uint32_t weightHeight = weightTensor.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t weightWidth  = weightTensor.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth  = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    if (paddingString == "SAME")
    {
        padding = true;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth  = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                  static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth  = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                  static_cast<float>(desc.m_StrideX)));
    }

    switch (dataLayout)
    {
    case DataLayout::NHWC:
        outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                  outputHeight,
                                  outputWidth,
                                  weightTensor.GetShape()[0] },
                                DataType::Float32);
        break;
    case DataLayout::NCHW:
    default:
        outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                  weightTensor.GetShape()[0],
                                  outputHeight,
                                  outputWidth },
                                DataType::Float32);
        break;
    }

    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc,
                                                                weightTensor,
                                                                EmptyOptional(),
                                                                nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1344
1345ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
telsoa01c577f2c2018-08-31 09:22:23 +01001346 const tensorflow::GraphDef& graphDef)
surmeh01bceff2f2018-03-29 16:29:27 +01001347{
Jan Eilers8eb25602020-03-09 12:13:48 +00001348 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01001349 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1350 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1351 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
1352
1353 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1354 {
telsoa01c577f2c2018-08-31 09:22:23 +01001355 throw ParseException(
1356 boost::str(
1357 boost::format(
1358 "ArmNN only supports Depthwise Convolution layer with constant weights. "
1359 "Non const input found %1% for node %2% %3%")
1360 % inputs[1].m_IndexedValue->GetNode().name()
1361 % nodeDef.name()
1362 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001363 }
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001364
surmeh01bceff2f2018-03-29 16:29:27 +01001365 ParsedConstTfOperation<float>* weightNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01001366 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01001367
surmeh01bceff2f2018-03-29 16:29:27 +01001368 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
1369 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
1370 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
1371
1372 DepthwiseConvolution2dDescriptor desc;
1373 desc.m_BiasEnabled = false;
1374
telsoa01c577f2c2018-08-31 09:22:23 +01001375 CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");
1376
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001377 DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
surmeh01bceff2f2018-03-29 16:29:27 +01001378
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001379 desc.m_DataLayout = dataLayout;
surmeh01bceff2f2018-03-29 16:29:27 +01001380
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001381 DataLayoutIndexed dataLayoutIndexed(dataLayout);
surmeh01bceff2f2018-03-29 16:29:27 +01001382
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001383 desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
1384 desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01001385
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001386 uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
1387 uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
1388
1389 // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
Matteo Martincigh747ef822018-12-18 09:26:39 +00001390 // Tensorflow weights come in the format [H, W, I, M].
1391 // ArmNN weights have to be [M, I, H, W].
1392 PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]
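    // For example (illustrative shapes): a TensorFlow depthwise filter of shape [3, 3, 16, 1]
    // (H = 3, W = 3, I = 16, M = 1) is permuted into an ArmNN filter of shape [1, 16, 3, 3].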
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001393
1394 // Swizzle the tensor using the given permutation vector.
1395 const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
1396 const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);
1397
1398 // Swizzles the content of the tensor's permanent storage into a local storage.
1399 std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
1400 armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
Matteo Martincighd5b9e642019-01-04 18:01:21 +00001401 weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001402
1403 // Create a weight tensor with the newly swizzled data.
1404 ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);
1405
Matteo Martincigh747ef822018-12-18 09:26:39 +00001406 uint32_t weightHeight = weightTensor.GetShape()[2];
1407 uint32_t weightWidth = weightTensor.GetShape()[3];
surmeh01bceff2f2018-03-29 16:29:27 +01001408
1409 bool padding = false;
1410 TensorInfo outputInfo;
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001411 unsigned int outputHeight = 0;
1412 unsigned int outputWidth = 0;
telsoa01c577f2c2018-08-31 09:22:23 +01001413
1414 CHECK_PADDING_TYPE(nodeDef, paddingString);
1415
surmeh01bceff2f2018-03-29 16:29:27 +01001416 if (paddingString == "SAME")
1417 {
1418 padding = true;
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001419
1420 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
1421 static_cast<float>(desc.m_StrideY)));
1422 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
1423 static_cast<float>(desc.m_StrideX)));
surmeh01bceff2f2018-03-29 16:29:27 +01001424 }
1425 else if (paddingString == "VALID")
1426 {
1427 padding = false;
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001428
1429 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
1430 static_cast<float>(desc.m_StrideY)));
1431 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
1432 static_cast<float>(desc.m_StrideX)));
1433 }
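    // Worked example (illustrative values): with inputHeight = 224, weightHeight = 3 and
    // strideY = 2, SAME padding gives outputHeight = ceil(224 / 2) = 112, while VALID padding
    // gives outputHeight = ceil((224 - 3 + 1) / 2) = 111.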
1434
1435 switch (dataLayout)
1436 {
1437 case DataLayout::NHWC:
1438 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1439 outputHeight,
1440 outputWidth,
Matteo Martincigh747ef822018-12-18 09:26:39 +00001441 weightTensor.GetShape()[0] * weightTensor.GetShape()[1]},
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001442 DataType::Float32);
1443 break;
1444 case DataLayout::NCHW:
1445 default:
1446 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1447 weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
1448 outputHeight,
1449 outputWidth },
1450 DataType::Float32);
1451 break;
surmeh01bceff2f2018-03-29 16:29:27 +01001452 }
surmeh01bceff2f2018-03-29 16:29:27 +01001453
1454 CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
1455 CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);
1456
Matteo Martincighfc598e12019-05-14 10:36:13 +01001457 IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
1458 weightTensor,
1459 EmptyOptional(),
1460 nodeDef.name().c_str());
surmeh01bceff2f2018-03-29 16:29:27 +01001461 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001462 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01001463
1464 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1465}
1466
Jan Eilers1f3b49b2020-09-08 08:57:40 +01001467TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef,
1468 TensorInfo inputTensorInfo,
1469 std::int32_t expandDim)
Conor Kennedyc2130a02018-12-05 11:05:54 +00001470{
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001471 ARMNN_ASSERT(nodeDef.op() == "ExpandDims");
Conor Kennedyc2130a02018-12-05 11:05:54 +00001472
1473 if (inputTensorInfo.GetNumDimensions() > 4) {
1474 throw ParseException(
1475 boost::str(
1476 boost::format(
1477 "Unsupported number of dimensions: %1% for input shape for ExpandDims %2% %3%")
1478 % inputTensorInfo.GetNumDimensions()
1479 % nodeDef.name()
1480 % CHECK_LOCATION().AsString()));
1481 }
1482
Matthew Sloyan589e3e82020-09-11 16:17:48 +01001483 std::int32_t inputDimSize = armnn::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
Conor Kennedyc2130a02018-12-05 11:05:54 +00001484 std::vector<uint32_t> outputDims;
1485
1486 // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
1487 if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1488 {
1489 // add current input shape to outputDims
1490 for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
1491 auto currentDimension = inputTensorInfo.GetShape()[i];
1492 outputDims.push_back(currentDimension);
1493 }
1494
1495 // insert a dimension of 1 at index 'expandDim' of inputs shape
1496 if (expandDim >= 0)
1497 {
1498 auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1499 outputDims.insert(getPosition, 1);
1500 }
1501
1502 // if negative number for 'expandDim' then count backwards from the last element
1503 // and insert 1 dimension at index 'expandDim'
1504 if (expandDim < 0)
1505 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01001506 int outputDimSize = armnn::numeric_cast<int>(outputDims.size() + 1);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001507 auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1508 outputDims.insert(getPosition, 1);
1509 }
1510 }
1511 else
1512 {
1513 throw InvalidArgumentException(
1514 boost::str(
1515 boost::format(
1516                    "Cannot expand dimension %1% in input tensor with %2% dimensions %3%")
1517 % expandDim
1518 % inputDimSize
1519 % CHECK_LOCATION().AsString()));
1520 }
1521
1522 if (outputDims.size() > 4)
1523 {
1524 throw ParseException(
1525 boost::str(
1526 boost::format(
1527 "Unsupported number of dimensions: %1% for output shape for ExpandDims %2% %3%")
1528 % outputDims.size()
1529 % nodeDef.name()
1530 % CHECK_LOCATION().AsString()));
1531 }
1532
1533 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1534 outputDims.data());
1535
1536 TensorInfo outTensorInfo = inputTensorInfo;
1537 outTensorInfo.SetShape(outShape);
1538
1539 return outTensorInfo;
1540}
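// Illustrative behaviour of OutputShapeOfExpandDims (example values only): for an input of
// shape [2, 3], expandDim = 0 yields [1, 2, 3], expandDim = 2 yields [2, 3, 1] and
// expandDim = -1 also yields [2, 3, 1], matching tf.expand_dims semantics.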
1541
1542ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1543{
Jan Eilers8eb25602020-03-09 12:13:48 +00001544 IgnoreUnused(graphDef);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001545
Jan Eilers1f3b49b2020-09-08 08:57:40 +01001546    // The number of inputs is either:
1547    // 1 - the axis parameter is passed as an attribute of the operation, or
1548    // 2 - the axis parameter is passed as a second input
1549 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
1550 const std::size_t numInputs = nodes.size();
1551 std::vector<OutputOfParsedTfOperation> inputs;
1552 std::int32_t expandDim; // axis or dim parameter. Describes which dimension to expand.
1553 if (numInputs == 1)
1554 {
1555 inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1556 expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");
1557 }
1558 else
1559 {
1560 inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1561
1562 // make sure data type is int32
1563 IOutputSlot& prevLayerOutputSlot = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1564 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1565
1566 if (inputTensorInfo.GetDataType()!=armnn::DataType::Signed32)
1567 {
1568 throw ParseException(
1569 fmt::format(
1570 "The axis parameter of ExpandDims operation given as second input is not of type int32. "
1571 "Input {0} Node {1} {2}",
1572 inputs[1].m_IndexedValue->GetNode().name(),
1573 nodeDef.name(),
1574 CHECK_LOCATION().AsString()));
1575 }
1576
1577 // ensure the second input is a constant value
1578 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
1579 {
1580 throw ParseException(
1581 fmt::format(
1582 "ArmNN only supports ExpandDims layers with constant axis/dim parameter. "
1583 "Input {0} Node {1} {2}",
1584 inputs[1].m_IndexedValue->GetNode().name(),
1585 nodeDef.name(),
1586 CHECK_LOCATION().AsString()));
1587 }
1588
1589        // Make sure the second input is a scalar or contains only a single value
1590        // (we don't support ExpandDims for multiple axes, but we don't care what shape the
1591        // given tensor has as long as it contains exactly one value;
1592        // e.g. a tensor like [[[1]]] is completely fine)
1593 if (inputTensorInfo.GetNumElements() != 1)
1594 {
1595 throw ParseException(
1596 fmt::format(
1597 "The axis parameter of ExpandDims operation given as second input is not "
1598 "allowed to hold more than one value. "
1599 "Input {0} Node {1} {2}",
1600 inputs[1].m_IndexedValue->GetNode().name(),
1601 nodeDef.name(),
1602 CHECK_LOCATION().AsString()));
1603 }
1604
1605 ParsedConstTfOperation<int32_t>* expandDimsNode =
1606 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1607
1608 memcpy(&expandDim, expandDimsNode->GetStorage(), sizeof(expandDim));
1609 }
1610
1611 // First input is the vector that should be expanded by another dimension
Conor Kennedyc2130a02018-12-05 11:05:54 +00001612 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1613 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1614
1615 TensorInfo outputInfo;
Jan Eilers1f3b49b2020-09-08 08:57:40 +01001616 outputInfo = OutputShapeOfExpandDims(nodeDef, inputTensorInfo, expandDim);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001617
1618 ReshapeDescriptor reshapeDesc;
1619 reshapeDesc.m_TargetShape = outputInfo.GetShape();
1620 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1621 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1622 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1623
1624 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1625}
1626
surmeh01bceff2f2018-03-29 16:29:27 +01001627ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
1628 const tensorflow::GraphDef& graphDef)
1629{
Jan Eilers8eb25602020-03-09 12:13:48 +00001630 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01001631 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
1632
1633 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1634 {
telsoa01c577f2c2018-08-31 09:22:23 +01001635 throw ParseException(
1636 boost::str(
1637 boost::format(
1638 "ArmNN only supports FusedBatchNormalization layers with constant scale. "
1639 "Input %1%. Node %2% %3%")
1640 % inputs[1].m_IndexedValue->GetNode().name()
1641 % nodeDef.name()
1642 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001643 }
1644 ParsedConstTfOperation<float>* scaleNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01001645 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01001646
1647 if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
1648 {
telsoa01c577f2c2018-08-31 09:22:23 +01001649 throw ParseException(
1650 boost::str(
1651 boost::format(
1652 "ArmNN only supports FusedBatchNormalization layers with constant offset. "
1653 "Input %1%. Node %2% %3%")
1654 % inputs[2].m_IndexedValue->GetNode().name()
1655 % nodeDef.name()
1656 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001657 }
1658 ParsedConstTfOperation<float>* offsetNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01001659 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01001660
1661 if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
1662 {
telsoa01c577f2c2018-08-31 09:22:23 +01001663 throw ParseException(
1664 boost::str(
1665 boost::format(
1666 "ArmNN only supports FusedBatchNormalization layers with constant mean. "
1667 "Input %1%. Node %2% %3%")
1668 % inputs[3].m_IndexedValue->GetNode().name()
1669 % nodeDef.name()
1670 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001671 }
1672 ParsedConstTfOperation<float>* meanNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01001673 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01001674
1675 if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
1676 {
telsoa01c577f2c2018-08-31 09:22:23 +01001677 throw ParseException(
1678 boost::str(
1679 boost::format(
1680 "ArmNN only supports FusedBatchNormalization layers with constant variance. "
1681 "Input %1%. Node %2% %3%")
1682 % inputs[4].m_IndexedValue->GetNode().name()
1683 % nodeDef.name()
1684 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001685 }
1686 ParsedConstTfOperation<float>* varianceNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01001687 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01001688
Aron Virginas-Tar2e259272019-11-27 13:29:51 +00001689 const std::string dataFormat = ReadOptionalNodeStringAttribute(nodeDef, "data_format", "NHWC");
Matteo Martincigh075c7502018-12-05 13:10:45 +00001690 CHECK_DATA_FORMAT(nodeDef, dataFormat, "FusedBatchNorm");
1691
telsoa01c577f2c2018-08-31 09:22:23 +01001692 // The descriptor only has the epsilon attribute.
surmeh01bceff2f2018-03-29 16:29:27 +01001693 BatchNormalizationDescriptor desc;
1694 desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
Matteo Martincigh075c7502018-12-05 13:10:45 +00001695 desc.m_DataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
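    // For reference, the standard FusedBatchNorm inference formula (assumed here, not restated
    // elsewhere in this file): output = scale * (input - mean) / sqrt(variance + epsilon) + offset,
    // applied per channel.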
surmeh01bceff2f2018-03-29 16:29:27 +01001696
telsoa01c577f2c2018-08-31 09:22:23 +01001697 // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
1698 // locally until the layer is added.
surmeh01bceff2f2018-03-29 16:29:27 +01001699 std::vector<float> scaleTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001700 ConstTensor scaleTensor = scaleNode->GetConstTensor(scaleTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001701
1702 std::vector<float> offsetTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001703 ConstTensor offsetTensor = offsetNode->GetConstTensor(offsetTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001704
1705 std::vector<float> meanTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001706 ConstTensor meanTensor = meanNode->GetConstTensor(meanTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001707
1708 std::vector<float> varianceTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001709 ConstTensor varianceTensor = varianceNode->GetConstTensor(varianceTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001710
1711 IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
1712 meanTensor,
1713 varianceTensor,
1714 offsetTensor,
1715 scaleTensor,
1716 nodeDef.name().c_str());
1717
1718 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1719
Matteo Martincigh075c7502018-12-05 13:10:45 +00001720 layer->GetOutputSlot(0).SetTensorInfo(inputSlot.GetTensorInfo());
1721 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01001722
1723 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1724}
1725
telsoa01c577f2c2018-08-31 09:22:23 +01001726bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
1727 size_t alphaLayerIndex,
1728 const OutputOfParsedTfOperation& otherOp,
1729 armnn::IOutputSlot** outputOfLeakyRelu,
1730 armnn::ActivationDescriptor & desc)
1731{
1732 const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();
1733
1734 // Verifying all these assumptions hold:
1735 //
1736 // 1, the mulNodeDef is an elementwise multiplication node "Mul"
1737 // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
1738    // 3, the other (non-constant) input of the "Mul" node resolves to a layer with the same name as otherNodeDef
1739 //
1740
1741 if (mulNodeDef.op() == "Mul")
1742 {
1743 size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
1744 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
1745
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001746 ARMNN_ASSERT(inputs.size() == 2);
1747 ARMNN_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
1748 ARMNN_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
1749 ARMNN_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
telsoa01c577f2c2018-08-31 09:22:23 +01001750
1751 if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
1752 {
1753 if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
1754 {
1755 ParsedConstTfOperation<float>* alpha =
Jan Eilersbb446e52020-04-02 13:56:54 +01001756 PolymorphicDowncast<ParsedConstTfOperation<float> *>(
telsoa01c577f2c2018-08-31 09:22:23 +01001757 inputs[alphaLayerIndex].m_IndexedValue);
1758
1759 std::vector<float> const_data;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001760 ConstTensor const_tensor = alpha->GetConstTensor(const_data);
telsoa01c577f2c2018-08-31 09:22:23 +01001761
1762 if (const_data.size() == 1)
1763 {
1764 desc.m_Function = ActivationFunction::LeakyReLu;
1765 desc.m_A = const_data[0];
1766
1767 *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
1768 return true;
1769 }
1770 }
1771 }
1772 }
1773 return false;
1774}
1775
telsoa01c577f2c2018-08-31 09:22:23 +01001776ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
1777 const tensorflow::GraphDef& graphDef)
1778{
Jan Eilers8eb25602020-03-09 12:13:48 +00001779 IgnoreUnused(graphDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001780 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
Sadik Armagan975c09a2018-12-04 10:02:08 +00001781 if (inputs.size() != 2)
1782 {
1783 throw ParseException(
1784 boost::str(
1785 boost::format(
1786                        "Maximum expects two inputs. Got %1% for Node %2% %3%")
1787 % inputs.size()
1788 % nodeDef.name()
1789 % CHECK_LOCATION().AsString()));
1790 }
1791
telsoa01c577f2c2018-08-31 09:22:23 +01001792 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1793 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1794 IOutputSlot* outputOfLeakyRelu = nullptr;
1795
1796 ActivationDescriptor desc;
1797
Sadik Armagan975c09a2018-12-04 10:02:08 +00001798 // A max node may be part of a LeakyRelu, with one input as a multiplication with a scalar constant,
1799 // i.e. one of the four possible scenarios:
1800 // 1, max(mul(a, x), x)
1801 // 2, max(mul(x, a), x)
1802 // 3, max(x, mul(a, x))
1803 // 4, max(x, mul(x, a))
1804 // These are handled by an activation layer.
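    // Illustrative example: max(0.2f * x, x) is recognised as LeakyReLu with alpha (m_A) = 0.2f,
    // since for 0 < alpha < 1 the expression equals x for x >= 0 and alpha * x otherwise.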
telsoa01c577f2c2018-08-31 09:22:23 +01001805
1806 if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1807 IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1808 IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1809 IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1810 {
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001811 ARMNN_ASSERT(outputOfLeakyRelu != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001812
1813 IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
1814 outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
1815 layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
1816 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1817 }
1818 else
1819 {
Sadik Armagan975c09a2018-12-04 10:02:08 +00001820 // Anything else is just a maximum layer.
1821
1822 return AddMaximumLayer(nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001823 }
1824}
1825
jimfly0184c70e62018-12-19 13:14:46 +00001826std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> TfParser::ProcessElementwiseInputSlots(
1827 const tensorflow::NodeDef& nodeDef, const std::string& layerName)
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001828{
1829 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1830
1831 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1832 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1833 const unsigned int input0Dim = input0Slot->GetTensorInfo().GetNumDimensions();
1834 const unsigned int input1Dim = input1Slot->GetTensorInfo().GetNumDimensions();
1835
1836 if (input0Dim != input1Dim)
1837 {
1838 // broadcasting where input0 and input1 have different number of dimensions
1839        // is only supported for a 1D and 4D tensor pair
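        // e.g. (illustrative shapes) a per-channel constant of shape [64] broadcast against an
        // NHWC tensor of shape [1, 7, 7, 64]; the 1D input is reshaped so both operands are 4D.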
1840 if (input0Dim == 1 && input1Dim == 4)
1841 {
1842 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
1843 }
1844 else if (input0Dim == 4 && input1Dim == 1)
1845 {
1846 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
1847 }
1848 else
1849 {
1850 throw ParseException(
jimfly0184c70e62018-12-19 13:14:46 +00001851 boost::str(
1852 boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
1853 % layerName
1854 % nodeDef.name()
1855 % CHECK_LOCATION().AsString()));
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001856 }
1857 }
jimfly0184c70e62018-12-19 13:14:46 +00001858 return {input0Slot, input1Slot};
1859}
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001860
kevmay012b4d88e2019-01-24 14:05:09 +00001861ParsedTfOperationPtr TfParser::ProcessComparisonLayer(
1862 IOutputSlot* input0Slot,
1863 IOutputSlot* input1Slot,
1864 IConnectableLayer* const layer,
1865 const tensorflow::NodeDef& nodeDef)
1866{
1867 input0Slot->Connect(layer->GetInputSlot(0));
1868 input1Slot->Connect(layer->GetInputSlot(1));
1869
1870 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1871 outputInfo.SetDataType(DataType::Boolean);
1872 std::vector<unsigned int> outputShape;
1873
1874 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1875 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1876
1877 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1878 {
1879 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1880 }
1881
1882 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1883 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1884
1885 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1886}
1887
jimfly0184c70e62018-12-19 13:14:46 +00001888ParsedTfOperationPtr TfParser::ProcessElementwiseLayer(
1889 IOutputSlot* input0Slot,
1890 IOutputSlot* input1Slot,
1891 IConnectableLayer* const layer,
1892 const tensorflow::NodeDef& nodeDef)
1893{
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001894 input0Slot->Connect(layer->GetInputSlot(0));
1895 input1Slot->Connect(layer->GetInputSlot(1));
1896
1897 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1898 std::vector<unsigned int> outputShape;
1899
1900 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1901 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1902
1903 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1904 {
1905 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1906 }
1907
1908 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1909 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1910
1911 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1912}
1913
FrancisMurtagh94412af2019-01-24 10:53:39 +00001914ParsedTfOperationPtr TfParser::ParseGather(const tensorflow::NodeDef& nodeDef,
1915 const tensorflow::GraphDef& graphDef)
1916{
Jan Eilers8eb25602020-03-09 12:13:48 +00001917 IgnoreUnused(graphDef);
FrancisMurtagh94412af2019-01-24 10:53:39 +00001918 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1919 IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1920 IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
Teresa Charlin52664732020-06-29 16:27:03 +01001921 GatherDescriptor descriptor;
1922 descriptor.m_Axis = ReadMandatoryNodeInt32Attribute(nodeDef, "axis");
FrancisMurtagh94412af2019-01-24 10:53:39 +00001923
1924 // Infer shape of output tensor
1925 unsigned int paramsDim = params.GetTensorInfo().GetNumDimensions();
1926 unsigned int indicesDim = indices.GetTensorInfo().GetNumDimensions();
1927 unsigned int outputDim = paramsDim - 1 + indicesDim;
1928
1929 std::vector<unsigned int> dimSizes;
1930
1931 for (unsigned int i = 0; i < indicesDim; ++i)
1932 {
1933 dimSizes.push_back(indices.GetTensorInfo().GetShape()[i]);
1934 }
1935 for (unsigned int i = 1; i < paramsDim; ++i)
1936 {
1937 dimSizes.push_back(params.GetTensorInfo().GetShape()[i]);
1938 }
1939
1940 const TensorShape& inferredShape = TensorShape(outputDim, dimSizes.data());
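    // Example (illustrative, assuming axis 0): params of shape [5, 8] gathered with indices of
    // shape [3] produce an output of shape [3, 8].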
1941
1942 const TensorInfo inferredOutputInfo(inferredShape, params.GetTensorInfo().GetDataType());
1943
Teresa Charlin52664732020-06-29 16:27:03 +01001944 IConnectableLayer* const layer = m_Network->AddGatherLayer(descriptor, nodeDef.name().c_str());
FrancisMurtagh94412af2019-01-24 10:53:39 +00001945 layer->GetOutputSlot(0).SetTensorInfo(inferredOutputInfo);
1946
1947 params.Connect(layer->GetInputSlot(0));
1948 indices.Connect(layer->GetInputSlot(1));
1949
1950 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1951}
1952
jimfly01a06bf312018-12-18 16:24:51 +00001953ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
1954 const tensorflow::GraphDef& graphDef)
1955{
Jan Eilers8eb25602020-03-09 12:13:48 +00001956 IgnoreUnused(graphDef);
jimfly01a06bf312018-12-18 16:24:51 +00001957 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
1958 IOutputSlot* input0Slot = inputLayers.first;
1959 IOutputSlot* input1Slot = inputLayers.second;
1960
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001961 ComparisonDescriptor descriptor(ComparisonOperation::Greater);
1962 IConnectableLayer* const layer = m_Network->AddComparisonLayer(descriptor, nodeDef.name().c_str());
jimfly01a06bf312018-12-18 16:24:51 +00001963
kevmay012b4d88e2019-01-24 14:05:09 +00001964 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
jimfly01a06bf312018-12-18 16:24:51 +00001965}
1966
jimfly0184c70e62018-12-19 13:14:46 +00001967ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
1968 const tensorflow::GraphDef& graphDef)
1969{
Jan Eilers8eb25602020-03-09 12:13:48 +00001970 IgnoreUnused(graphDef);
jimfly0184c70e62018-12-19 13:14:46 +00001971 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
1972 IOutputSlot* input0Slot = inputLayers.first;
1973 IOutputSlot* input1Slot = inputLayers.second;
1974
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001975 ComparisonDescriptor descriptor(ComparisonOperation::Equal);
1976 IConnectableLayer* const layer = m_Network->AddComparisonLayer(descriptor, nodeDef.name().c_str());
jimfly0184c70e62018-12-19 13:14:46 +00001977
kevmay012b4d88e2019-01-24 14:05:09 +00001978 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
jimfly0184c70e62018-12-19 13:14:46 +00001979}
1980
1981ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
1982 const tensorflow::GraphDef& graphDef)
1983{
Jan Eilers8eb25602020-03-09 12:13:48 +00001984 IgnoreUnused(graphDef);
jimfly0184c70e62018-12-19 13:14:46 +00001985 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
1986 IOutputSlot* input0Slot = inputLayers.first;
1987 IOutputSlot* input1Slot = inputLayers.second;
1988
1989 IConnectableLayer* const layer = m_Network->AddMinimumLayer(nodeDef.name().c_str());
1990
1991 return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
1992}
1993
jimfly0123be07e2018-12-04 17:47:22 +00001994ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1995{
Jan Eilers8eb25602020-03-09 12:13:48 +00001996 IgnoreUnused(graphDef);
jimfly0123be07e2018-12-04 17:47:22 +00001997 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1998
1999 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2000 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2001
2002 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
2003 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
2004
2005 if (input0Info.GetNumDimensions() == 1)
2006 {
2007 const bool isNHWC = true;
2008 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
2009 }
2010
2011 if (input1Info.GetNumDimensions() == 1)
2012 {
2013 const bool isNHWC = true;
2014 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
2015 }
2016
2017 IConnectableLayer* const layer = m_Network->AddSubtractionLayer(nodeDef.name().c_str());
2018
2019 input0Slot->Connect(layer->GetInputSlot(0));
2020 input1Slot->Connect(layer->GetInputSlot(1));
2021
2022 if (input0Info.GetNumDimensions() == 1)
2023 {
2024 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2025 }
2026 else
2027 {
2028 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2029 }
2030
2031 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2032}
2033
Sadik Armagan48d70932020-02-18 15:18:27 +00002034ParsedTfOperationPtr TfParser::ParseStack(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2035{
Jan Eilers8eb25602020-03-09 12:13:48 +00002036 IgnoreUnused(graphDef);
Sadik Armagan48d70932020-02-18 15:18:27 +00002037 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2038
2039 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2040 if (numInputs < 1)
2041 {
2042 throw ParseException(
2043 boost::str(
2044 boost::format(
2045 "Pack/Stack expects at least one input. Got %1% for Node %2% %3%")
2046 % numInputs
2047 % nodeDef.name()
2048 % CHECK_LOCATION().AsString()));
2049 }
2050
2051 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2052 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2053 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2054 const TensorInfo& inputTensorInfo = input0Slot->GetTensorInfo();
2055 auto numDimensions = inputTensorInfo.GetShape().GetNumDimensions();
2056
2057 // validate axis
2058 int32_t axis = ReadMandatoryNodeInt32Attribute(nodeDef, "axis");
2059 const int sNumDimensions = (static_cast<int>(numDimensions) + 1);
2060 if (!(axis < sNumDimensions && axis >= -sNumDimensions))
2061 {
2062 throw ParseException(
2063 boost::str(
2064 boost::format(
2065 "Axis index is not in range. Got %1% for Node %2% %3%")
2066 % axis
2067 % nodeDef.name()
2068 % CHECK_LOCATION().AsString()));
2069 }
2070
2071 if (axis < 0)
2072 {
2073 axis = static_cast<int32_t>(numDimensions) + axis + 1;
2074 }
2075
2076 StackDescriptor stackDescriptor;
2077 stackDescriptor.m_Axis = static_cast<uint32_t>(axis);
2078 stackDescriptor.m_NumInputs = static_cast<uint32_t>(numInputs);
2079 stackDescriptor.m_InputShape = inputTensorInfo.GetShape();
2080
2081 const unsigned int supportedNumDims = 4;
2082 for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
2083 {
2084 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2085 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2086
2087 // Double check dimensions of the tensors
2088 if (inputTensorInfo.GetNumDimensions() >= supportedNumDims)
2089 {
2090 throw armnn::ParseException(
2091 boost::str(
2092 boost::format(
2093                    "Unsupported number of dimensions: %1% for the input tensors of the "
2094                    "Pack/Stack op. The number of dimensions should be less than %2% %3%")
2095 % inputTensorInfo.GetNumDimensions()
2096 % supportedNumDims
2097 % CHECK_LOCATION().AsString()));
2098 }
2099 }
2100
2101 std::vector<unsigned int> outputDimensions;
2102 for (unsigned int i = 0; i < stackDescriptor.m_InputShape.GetNumDimensions(); ++i)
2103 {
2104 outputDimensions.push_back(stackDescriptor.m_InputShape[i]);
2105 }
2106 outputDimensions.insert(outputDimensions.begin() + axis, numInputs);
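    // Example (illustrative values): stacking three inputs of shape [3, 4] with axis = 1
    // produces an output of shape [3, 3, 4].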
2107
2108 // add Stack Layer
2109 IConnectableLayer* const layer = m_Network->AddStackLayer(stackDescriptor, nodeDef.name().c_str());
2110
2111 for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
2112 {
2113 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2114 inputSlot.Connect(layer->GetInputSlot(viewIndex));
2115 }
2116
2117 layer->GetOutputSlot(0).SetTensorInfo(
2118 armnn::TensorInfo(static_cast<uint32_t>(outputDimensions.size()),
2119 outputDimensions.data(),
2120 inputTensorInfo.GetDataType()));
2121
2122 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2123}
2124
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002125ParsedTfOperationPtr TfParser::ParseTranspose(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2126{
Jan Eilers8eb25602020-03-09 12:13:48 +00002127 IgnoreUnused(graphDef);
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002128
2129 auto inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2130 const auto inputCount = inputs.size();
2131
2132 if (inputCount != 2)
2133 {
2134 throw ParseException(
2135 boost::str(
2136 boost::format(
2137                        "The number of given inputs is %1%. It should be two for the Transpose op. "
2138 "Node %2% %3%")
2139 % inputCount
2140 % nodeDef.name()
2141 % CHECK_LOCATION().AsString()));
2142 }
2143
2144 auto* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2145
2146 const auto constInput = inputs[GetConstInputIndex(inputs)];
2147 auto* permuteVectorInput =
Jan Eilersbb446e52020-04-02 13:56:54 +01002148 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(constInput.m_IndexedValue);
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002149 const auto& permuteVectorInfo = permuteVectorInput->GetTensorInfo();
2150
2151 std::vector<int32_t> permuteVectorData;
2152 permuteVectorInput->GetConstTensor(permuteVectorData);
2153
Mike Kelly08759e22020-03-02 11:41:31 +00002154 std::vector<unsigned int> armnnPermuteVectorData(permuteVectorData.begin(), permuteVectorData.end());
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002155
2156 const auto permutationVector = PermutationVector(armnnPermuteVectorData.data(), permuteVectorInfo.GetNumElements());
Mike Kelly08759e22020-03-02 11:41:31 +00002157 const auto desc = TransposeDescriptor(permutationVector);
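    // Example (illustrative values): a permutation vector of [0, 3, 1, 2] applied to an NHWC
    // input of shape [1, 224, 224, 3] should yield an NCHW-ordered output of shape [1, 3, 224, 224].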
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002158
Mike Kelly08759e22020-03-02 11:41:31 +00002159 auto* layer = m_Network->AddTransposeLayer(desc, nodeDef.name().c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002160 ARMNN_ASSERT(layer);
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002161
2162 input0Slot->Connect(layer->GetInputSlot(0));
2163
2164 const auto& input0Info = input0Slot->GetTensorInfo();
2165 armnn::TensorInfo outputInfo {input0Info};
Mike Kelly08759e22020-03-02 11:41:31 +00002166 outputInfo.SetShape(armnnUtils::TransposeTensorShape(input0Info.GetShape(), desc.m_DimMappings));
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002167 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2168
2169 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2170}
2171
jimfly01f6ba7472018-12-04 10:09:52 +00002172unsigned int CheckPaddingTensor(const ConstTensor& paddingTensor,
2173 const TensorInfo& inputTensorInfo,
2174 const std::string& nodeName)
2175{
2176 unsigned int rank = paddingTensor.GetShape()[0];
2177 unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
2178 if (rank != expectedRank)
2179 {
2180 throw ParseException(
2181 boost::str(
2182 boost::format(
2183                    "Expected the padding tensor to be of rank %1% not %2% on Node %3% %4%.")
2184 % expectedRank
2185 % rank
2186 % nodeName
2187 % CHECK_LOCATION().AsString()));
2188 }
2189 unsigned int second = paddingTensor.GetShape()[1];
2190 if (second != 2)
2191 {
2192 throw ParseException(
2193 boost::str(
2194 boost::format(
2195                    "Expected the padding tensor to be of dimensions [%1%, 2] not [%1%, %2%] on Node %3% %4%.")
2196 % rank
2197 % second
2198 % nodeName
2199 % CHECK_LOCATION().AsString()));
2200 }
2201 return rank;
2202}
2203
2204TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
2205 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
2206{
2207 unsigned int numDims = inputTensorInfo.GetNumDimensions();
2208 std::vector<unsigned int> outDims;
2209 for (unsigned int i = 0; i < numDims; ++i)
2210 {
2211 unsigned int dimSize = inputTensorInfo.GetShape()[i];
2212 const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
2213 dimSize += dimPadding.first;
2214 dimSize += dimPadding.second;
2215 outDims.push_back(dimSize);
2216 }
2217 TensorInfo paddedTensorInfo = inputTensorInfo;
2218 unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
2219 paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
2220 return paddedTensorInfo;
2221}
2222
2223ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
2224 const tensorflow::GraphDef& graphDef)
2225{
Jan Eilers8eb25602020-03-09 12:13:48 +00002226 IgnoreUnused(graphDef);
jimfly01f6ba7472018-12-04 10:09:52 +00002227 // input consists of:
2228 // input[0] the tensor which will be padded
2229 // input[1] the tensor holding the padding values
2230 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2231 IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2232 TensorInfo inputTensorInfo = previousLayerOutputSlot.GetTensorInfo();
2233 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
2234 {
2235 throw ParseException(
2236 boost::str(
2237 boost::format(
2238 "ArmNN only supports Pad with constant padding. "
2239 "Input %1%. Node %2% %3%")
2240 % inputs[1].m_IndexedValue->GetNode().name()
2241 % nodeDef.name()
2242 % CHECK_LOCATION().AsString()));
2243
2244 }
2245 ParsedConstTfOperation<int32_t>* paddingTensorOp =
Jan Eilersbb446e52020-04-02 13:56:54 +01002246 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
jimfly01f6ba7472018-12-04 10:09:52 +00002247
2248 std::vector<int32_t> paddingTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002249 ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(paddingTensorData);
jimfly01f6ba7472018-12-04 10:09:52 +00002250 // paddings is an integer tensor with shape [n, 2], where n is the rank of tensor
2251 // and should match the rank of the input tensor that is being padded.
2252 // For each dimension D of input, paddings[D, 0] indicates how many values to add
2253 // before the contents of tensor in that dimension, and paddings[D, 1] indicates how
2254 // many values to add after the contents of tensor in that dimension
2255 // This needs to be translated into a padList for ACL
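    // Worked example (illustrative values): a paddings tensor of [[1, 1], [2, 2]] applied to an
    // input of shape [3, 4] becomes padList {{1, 1}, {2, 2}} and a padded output of shape [5, 8].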
2256 std::vector<std::pair<unsigned int, unsigned int>> padList;
2257 unsigned int rank = CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
2258 for (unsigned int i = 0; i < rank; ++i)
2259 {
2260 std::pair<unsigned int, unsigned int> paddingForDim;
2261 for (unsigned int j = 0; j < 2; j++)
2262 {
2263 unsigned int index = (i * 2) + j;
2264 int paddingAmount = paddingTensorData[index];
2265 // make sure we can cast to an unsigned value
2266 if (paddingAmount < 0)
2267 {
2268 throw ParseException(
2269 boost::str(
2270 boost::format(
2271                        "Negative amount %1% specified at [%2%, %3%] of padding tensor on Node %4% %5%.")
2272 % paddingAmount
2273 % i
2274 % j
2275 % nodeDef.name()
2276 % CHECK_LOCATION().AsString()));
2277 }
2278 if (j == 0)
2279 {
2280 paddingForDim.first = static_cast<unsigned int>(paddingAmount);
2281 }
2282 else
2283 {
2284 paddingForDim.second = static_cast<unsigned int>(paddingAmount);
2285 }
2286 }
2287 padList.push_back(paddingForDim);
2288 }
2289 PadDescriptor padDescriptor(padList);
2290 IConnectableLayer* layer = m_Network->AddPadLayer(padDescriptor, nodeDef.name().c_str());
2291 previousLayerOutputSlot.Connect(layer->GetInputSlot(0));
2292 // Use the padding to calculate the new output tensor shape
2293 TensorInfo outputTensorInfo = CalculatePaddedOutputTensorInfo(inputTensorInfo, padList);
2294 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2295 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2296}
2297
surmeh01bceff2f2018-03-29 16:29:27 +01002298ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
2299 const tensorflow::GraphDef& graphDef)
2300{
Jan Eilers8eb25602020-03-09 12:13:48 +00002301 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002302 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002303
telsoa01c577f2c2018-08-31 09:22:23 +01002304 // In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
surmeh01bceff2f2018-03-29 16:29:27 +01002305 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
surmeh01bceff2f2018-03-29 16:29:27 +01002306
surmeh01bceff2f2018-03-29 16:29:27 +01002307 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2308
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002309 // Constant tensor index
2310 unsigned int index = GetConstInputIndex(inputs);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002311 // Get the axis tensor data
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002312 ParsedConstTfOperation<int32_t>* shapeNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002313 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002314
surmeh01bceff2f2018-03-29 16:29:27 +01002315 std::vector<int32_t> axisTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002316 shapeNode->GetConstTensor(axisTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002317
telsoa01c577f2c2018-08-31 09:22:23 +01002318    // This concatDim indicates the data format: 3 corresponds to NHWC, 1 corresponds to NCHW.
Matteo Martincighf9afc792018-12-06 12:03:17 +00002319 const unsigned int concatDim = static_cast<unsigned int>(axisTensorData[0]);
surmeh01bceff2f2018-03-29 16:29:27 +01002320
telsoa01c577f2c2018-08-31 09:22:23 +01002321 // Armnn supports concatenation along the channel dimension for data formats NHWC and NCHW.
Matteo Martincighf9afc792018-12-06 12:03:17 +00002322 if (concatDim == 0 || concatDim == 2)
surmeh01bceff2f2018-03-29 16:29:27 +01002323 {
telsoa01c577f2c2018-08-31 09:22:23 +01002324 throw ParseException(
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002325 boost::str(
2326 boost::format(
telsoa01c577f2c2018-08-31 09:22:23 +01002327 "Dimension %1% for concatenation is not supported by Armnn. "
2328 "Node %2% %3%")
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002329 % concatDim
2330 % nodeDef.name()
2331 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002332 }
2333
Matthew Jacksondba634f2019-08-15 15:14:18 +01002334 const unsigned int supportedNumDims = 4;
Matteo Martincighf9afc792018-12-06 12:03:17 +00002335 unsigned int numConcatViews = numInputs - 1;
Matthew Jacksondba634f2019-08-15 15:14:18 +01002336 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatViews), supportedNumDims);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002337 concatDescriptor.SetConcatAxis(concatDim);
Matthew Jacksondba634f2019-08-15 15:14:18 +01002338 TensorShape mergeDims(supportedNumDims);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002339 unsigned int mergeDim = 0;
2340 for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
surmeh01bceff2f2018-03-29 16:29:27 +01002341 {
telsoa01c577f2c2018-08-31 09:22:23 +01002342        // Resolve the output slot of the current input and read its tensor info.
Matteo Martincighf9afc792018-12-06 12:03:17 +00002343 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002344 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2345
Matteo Martincighf9afc792018-12-06 12:03:17 +00002346 // Double check dimensions of the tensors
Matthew Jacksondba634f2019-08-15 15:14:18 +01002347 if (inputTensorInfo.GetNumDimensions() != supportedNumDims)
Matteo Martincighf9afc792018-12-06 12:03:17 +00002348 {
2349 throw armnn::ParseException(
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002350 boost::str(
2351 boost::format(
Matteo Martincighf9afc792018-12-06 12:03:17 +00002352 "The number of dimensions: %1% for input tensors of the "
2353 "concatenation op should be %2% %3%")
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002354 % inputTensorInfo.GetNumDimensions()
Matthew Jacksondba634f2019-08-15 15:14:18 +01002355 % supportedNumDims
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002356 % CHECK_LOCATION().AsString()));
Matteo Martincighf9afc792018-12-06 12:03:17 +00002357 }
2358
2359 // Copy the input tensor shape to mergeDimSizes and initialize the view origin coordinates for the current input
2360 mergeDims = inputTensorInfo.GetShape();
2361 unsigned int* viewOrigin = const_cast<unsigned int*>(concatDescriptor.GetViewOrigin(viewIndex));
Matthew Jacksondba634f2019-08-15 15:14:18 +01002362 std::fill(viewOrigin, viewOrigin + supportedNumDims, 0);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002363
2364 // Update the view origin coordinates and the merge dimension value
2365 concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
2366 mergeDim += mergeDims[concatDim];
surmeh01bceff2f2018-03-29 16:29:27 +01002367 }
2368
Matteo Martincighf9afc792018-12-06 12:03:17 +00002369 // Update the output shape
2370 mergeDims[concatDim] = mergeDim;
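    // Example (illustrative shapes): concatenating NHWC inputs [1, 7, 7, 64] and [1, 7, 7, 32]
    // along concatDim = 3 gives an output shape of [1, 7, 7, 96].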
Jim Flynn906f9462019-05-10 13:55:21 +01002371 armnn::IConnectableLayer *layer = m_Network->AddConcatLayer(concatDescriptor, nodeDef.name().c_str());
surmeh01bceff2f2018-03-29 16:29:27 +01002372
Matteo Martincighf9afc792018-12-06 12:03:17 +00002373 layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(mergeDims, DataType::Float32));
surmeh01bceff2f2018-03-29 16:29:27 +01002374
Matteo Martincighf9afc792018-12-06 12:03:17 +00002375 for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
surmeh01bceff2f2018-03-29 16:29:27 +01002376 {
Matteo Martincighf9afc792018-12-06 12:03:17 +00002377 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2378 inputSlot.Connect(layer->GetInputSlot(viewIndex));
surmeh01bceff2f2018-03-29 16:29:27 +01002379 }
2380
2381 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2382}
2383
2384ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
2385 const tensorflow::GraphDef& graphDef)
2386{
Jan Eilers8eb25602020-03-09 12:13:48 +00002387 IgnoreUnused(graphDef);
telsoa01c577f2c2018-08-31 09:22:23 +01002388 // Note: the Shape layer is handled in a special way, because:
2389    // 1. ARMNN doesn't support the int32 tensors that Shape outputs.
2390 // 2. ARMNN works with statically shaped tensors which are known at parse time.
surmeh01bceff2f2018-03-29 16:29:27 +01002391 // 3. because of 1. and 2. we treat the output of Shape as a temporary const int32
telsoa01c577f2c2018-08-31 09:22:23 +01002392 // tensor which may be used as an input to other ops, most likely a Reshape.
surmeh01bceff2f2018-03-29 16:29:27 +01002393
2394 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
2395 if (tfDataType != tensorflow::DT_INT32)
2396 {
telsoa01c577f2c2018-08-31 09:22:23 +01002397 throw ParseException(
2398 boost::str(
2399 boost::format(
2400 "Armnn only supports DT_INT32 as out_type. Got %1% for Node %2% %3%")
2401 % tensorflow::DataType_Name(tfDataType)
2402 % nodeDef.name()
2403 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002404 }
2405
2406 const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2407 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2408 const TensorInfo& prevLayerTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2409 unsigned int prevLayerDimensions = prevLayerTensorInfo.GetNumDimensions();
2410
2411 std::vector<int32_t> shapeTensorData;
2412 shapeTensorData.reserve(prevLayerDimensions);
2413
2414 for (unsigned int i=0; i<prevLayerDimensions; ++i)
2415 {
2416 shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.GetShape()[i]));
2417 }
2418
2419 TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
2420
2421 return std::make_unique<ParsedConstTfOperation<int32_t>>(this,
2422 nodeDef,
2423 &shapeTensorData[0],
2424 shapeTensorInfo);
2425}
2426
2427ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
2428 const tensorflow::GraphDef& graphDef)
2429{
Jan Eilers8eb25602020-03-09 12:13:48 +00002430 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002431 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2432 ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
2433
2434 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2435 {
telsoa01c577f2c2018-08-31 09:22:23 +01002436 throw ParseException(
2437 boost::str(
2438 boost::format(
2439 "ArmNN only supports Reshape layers with constant shapes. "
2440 "Input %1% Node %2% %3%")
2441 % inputs[1].m_IndexedValue->GetNode().name()
2442 % nodeDef.name()
2443 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002444 }
2445 ParsedConstTfOperation<int32_t>* shapeNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002446 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01002447
2448 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
2449 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2450
2451 std::vector<int32_t> shapeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002452 ConstTensor shapeTensor = shapeNode->GetConstTensor(shapeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002453 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
2454
2455 TensorShape targetShape = outputTensorInfo.GetShape();
2456 ReshapeDescriptor reshapeDesc;
2457 reshapeDesc.m_TargetShape = targetShape;
2458
2459 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2460 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2461 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2462
2463 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2464}
2465
2466ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
2467 const tensorflow::GraphDef& graphDef)
2468{
Jan Eilers8eb25602020-03-09 12:13:48 +00002469 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002470 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2471
2472 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2473 {
telsoa01c577f2c2018-08-31 09:22:23 +01002474 throw ParseException(
2475 boost::str(
2476 boost::format(
2477 "ArmNN only supports ResizeBilinear layers with constant sizes. "
2478 "Input %1%. Node %2% %3%")
2479 % inputs[1].m_IndexedValue->GetNode().name()
2480 % nodeDef.name()
2481 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002482 }
2483 ParsedConstTfOperation<int32_t>* sizeNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002484 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01002485
telsoa01c577f2c2018-08-31 09:22:23 +01002486 // Checks the align_corners attribute is not set.
surmeh01bceff2f2018-03-29 16:29:27 +01002487 if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
2488 {
telsoa01c577f2c2018-08-31 09:22:23 +01002489 throw ParseException(
2490 boost::str(
2491 boost::format(
2492 "ArmNN only supports ResizeBilinear layers with align_corners set to false. "
2493 "Node %1% %2%")
2494 % nodeDef.name()
2495 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002496 }
2497
telsoa01c577f2c2018-08-31 09:22:23 +01002498 // Data for the parsed tensor args (size) must be stored locally.
surmeh01bceff2f2018-03-29 16:29:27 +01002499 std::vector<int32_t> sizeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002500 ConstTensor sizeTensor = sizeNode->GetConstTensor(sizeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002501
telsoa01c577f2c2018-08-31 09:22:23 +01002502 // The descriptor only has target height and width attributes, which we get from the size tensor.
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002503 ResizeDescriptor desc;
2504 desc.m_Method = armnn::ResizeMethod::Bilinear;
surmeh01bceff2f2018-03-29 16:29:27 +01002505 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002506 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2507 desc.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002508
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002509 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, nodeDef.name().c_str());
surmeh01bceff2f2018-03-29 16:29:27 +01002510
2511 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2512 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
telsoa01c577f2c2018-08-31 09:22:23 +01002513    // The input shape is always in NHWC format; get the batch and channels from it
2514    // to make up the ArmNN output shape together with the target size.
surmeh01bceff2f2018-03-29 16:29:27 +01002515 unsigned int outBatch = inputTensorInfo.GetShape()[0];
2516 unsigned int outChannels = inputTensorInfo.GetShape()[3];
2517 unsigned int outHeight = desc.m_TargetHeight;
2518 unsigned int outWidth = desc.m_TargetWidth;
jimfly018a121502018-12-06 16:19:52 +00002519 TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
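    // Example (illustrative values): a size tensor of [300, 300] on an input of shape
    // [1, 224, 224, 3] gives an output shape of [1, 300, 300, 3].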
telsoa01c577f2c2018-08-31 09:22:23 +01002520 // The output DataType is always Float32, regardless of the input DataType.
surmeh01bceff2f2018-03-29 16:29:27 +01002521 const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
2522 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2523
jimfly018a121502018-12-06 16:19:52 +00002524 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01002525
2526 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2527}
2528
2529TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
2530{
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002531 ARMNN_ASSERT(nodeDef.op() == "Squeeze");
surmeh01bceff2f2018-03-29 16:29:27 +01002532 tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
2533
2534 DataType type;
2535 if (tfDataType == tensorflow::DT_FLOAT)
2536 {
2537 type = DataType::Float32;
2538 }
2539 else if (tfDataType == tensorflow::DT_INT32)
2540 {
2541 type = DataType::Signed32;
2542 }
2543 else
2544 {
telsoa01c577f2c2018-08-31 09:22:23 +01002545 throw ParseException(
2546 boost::str(
2547 boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
2548 % tensorflow::DataType_Name(tfDataType)
2549 % nodeDef.name()
2550 % CHECK_LOCATION().AsString()));
2551 }
2552
2553
2554 if (inputTensorInfo.GetNumDimensions() > 4)
2555 {
2556 throw ParseException(
2557 boost::str(
2558 boost::format(
2559 "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
2560 % inputTensorInfo.GetNumDimensions()
2561 % nodeDef.name()
2562 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002563 }
2564
2565 std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
telsoa01c577f2c2018-08-31 09:22:23 +01002566 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2567
surmeh01bceff2f2018-03-29 16:29:27 +01002568 if (squeezeDims.empty())
2569 {
telsoa01c577f2c2018-08-31 09:22:23 +01002570 squeezeDims.assign(dimensionSequence,
2571 dimensionSequence+inputTensorInfo.GetNumDimensions());
surmeh01bceff2f2018-03-29 16:29:27 +01002572 }
2573
2574 std::vector<uint32_t> outputDims;
2575 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2576 {
telsoa01c577f2c2018-08-31 09:22:23 +01002577 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2578 auto currentDimension = inputTensorInfo.GetShape()[i];
2579 if (skipSqueeze || currentDimension != 1)
surmeh01bceff2f2018-03-29 16:29:27 +01002580 {
telsoa01c577f2c2018-08-31 09:22:23 +01002581 outputDims.push_back(currentDimension);
surmeh01bceff2f2018-03-29 16:29:27 +01002582 }
2583 }
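    // Illustrative example (values chosen for illustration only): an input shape of { 1, 2, 1, 3 }
    // with squeeze_dims { 0, 2 } yields outputDims { 2, 3 }; if no squeeze_dims are given,
    // every dimension equal to 1 is removed.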
2584
2585 if (outputDims.size() > 4)
2586 {
telsoa01c577f2c2018-08-31 09:22:23 +01002587 throw ParseException(
2588 boost::str(
2589 boost::format(
2590 "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
2591 % outputDims.size()
2592 % nodeDef.name()
2593 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002594 }
2595
telsoa01c577f2c2018-08-31 09:22:23 +01002596 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2597 outputDims.data());
2598
2599 TensorInfo outTensorInfo = inputTensorInfo;
2600 outTensorInfo.SetShape(outShape);
2601 outTensorInfo.SetDataType(type);
surmeh01bceff2f2018-03-29 16:29:27 +01002602
2603 return outTensorInfo;
2604}
2605
2606ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2607{
Jan Eilers8eb25602020-03-09 12:13:48 +00002608 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002609 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2610
2611 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2612 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2613
2614 TensorInfo outputInfo;
2615 outputInfo = OutputShapeOfSqueeze(nodeDef, inputTensorInfo);
2616
2617 ReshapeDescriptor reshapeDesc;
2618 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2619 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2620 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2621 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2622
2623 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2624}
2625
2626ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2627{
Jan Eilers8eb25602020-03-09 12:13:48 +00002628 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002629 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2630
2631 NormalizationDescriptor normalizationDescriptor;
2632 normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2633 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
2634 normalizationDescriptor.m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef, "alpha");
2635 normalizationDescriptor.m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef, "beta");
2636 normalizationDescriptor.m_K = ReadMandatoryNodeFloatAttribute(nodeDef, "bias");
2637 normalizationDescriptor.m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef, "depth_radius");
ruoyan018174f362018-12-04 18:24:08 +00002638 normalizationDescriptor.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002639
2640 // The window size must be an odd value. For a window size of (2 * n + 1), TensorFlow defines depth_radius = n.
2641 normalizationDescriptor.m_NormSize = normalizationDescriptor.m_NormSize * 2 + 1;
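    // For example, a TensorFlow depth_radius of 2 corresponds to an ArmNN m_NormSize of 2 * 2 + 1 = 5.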
2642
2643 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002644 IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor,
2645 nodeDef.name().c_str());
ruoyan018174f362018-12-04 18:24:08 +00002646 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2647 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
surmeh01bceff2f2018-03-29 16:29:27 +01002648
2649 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2650}
2651
/// A ParsedTfOperation for a MatMul node.
telsoa01c577f2c2018-08-31 09:22:23 +01002653/// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because
2654/// MatMul nodes are often used for the first part of a biased FullyConnected (MatMul followed
2655/// by Add) and in these cases armnn doesn't need a separate layer for the MatMul.
2656///
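/// For example, a TensorFlow pattern of MatMul(x, W) followed by Add(., b) can be fused into a single
/// ArmNN FullyConnected layer with weights W and bias b (see AddFullyConnectedLayer below).
///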
surmeh01bceff2f2018-03-29 16:29:27 +01002657class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
2658{
2659public:
2660 ParsedMatMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
2661 : DeferredSingleLayerParsedTfOperation(parser, node)
2662 {
2663 }
2664
2665 void CreateLayerDeferred() override
2666 {
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002667 ARMNN_ASSERT(m_Layer == nullptr);
surmeh01bceff2f2018-03-29 16:29:27 +01002668 m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
2669 }
2670};
2671
2672ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2673{
Jan Eilers8eb25602020-03-09 12:13:48 +00002674 IgnoreUnused(graphDef);
Derek Lambertibaa177f2019-12-10 22:00:43 +00002675
telsoa01c577f2c2018-08-31 09:22:23 +01002676 // Defers the creation of the layer (see ParsedMatMulTfOperation).
surmeh01bceff2f2018-03-29 16:29:27 +01002677 return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
2678}
2679
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002680ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2681{
Jan Eilers8eb25602020-03-09 12:13:48 +00002682 IgnoreUnused(graphDef);
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002683 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2684 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2685 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2686
2687 if (inputs.size() != 2)
2688 {
2689 throw ParseException(
            boost::str(boost::format("Mean expects two inputs. Got %1% for Node %2% %3%")
2691 % inputs.size()
2692 % nodeDef.name()
2693 % CHECK_LOCATION().AsString()));
2694 }
2695
2696 bool keepDims = ReadMandatoryNodeBoolAttribute(nodeDef, "keep_dims");
2697
2698 ParsedConstTfOperation<int32_t>* axisNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002699 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002700
2701 const TensorInfo& axisTensorInfo = axisNode->GetTensorInfo();
2702
2703 ConstTensor axisTensor(axisTensorInfo, axisNode->GetStorage());
2704 const int* axisData = static_cast<const int*>(axisTensor.GetMemoryArea());
2705
2706 TensorInfo outputTensorInfo;
2707 MeanDescriptor meanDescriptor;
2708 meanDescriptor.m_KeepDims = keepDims;
2709
    // Negative axis values are supported; they are converted into the corresponding positive ones.
    // Duplicate values are also removed.
2713 std::vector<int> rawAxisVector(axisData, axisData + axisTensorInfo.GetNumElements());
2714 std::set<unsigned int> positiveAxisSet;
2715 int rank = static_cast<int>(inputTensorInfo.GetNumDimensions());
2716
2717 std::transform(rawAxisVector.begin(), rawAxisVector.end(),
2718 std::inserter(positiveAxisSet, positiveAxisSet.begin()),
2719 [rank](int i) -> unsigned int { return static_cast<unsigned int>((i + rank) % rank); });
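    // Illustrative example (values chosen for illustration only): for a rank-4 input, a raw axis list
    // of { -1, 3 } maps to the single positive axis 3, since (-1 + 4) % 4 == 3 and the set discards
    // the duplicate.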
2720
Derek Lambertibaa177f2019-12-10 22:00:43 +00002721 CalculateReducedOutputTensoInfo(inputTensorInfo, positiveAxisSet, keepDims, outputTensorInfo);
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002722
2723 if (inputTensorInfo.GetNumDimensions() > positiveAxisSet.size())
2724 {
2725 meanDescriptor.m_Axis.assign(positiveAxisSet.begin(), positiveAxisSet.end());
2726 }
2727
2728 IConnectableLayer* layer = m_Network->AddMeanLayer(meanDescriptor, nodeDef.name().c_str());
2729 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2730 inputSlot.Connect(layer->GetInputSlot(0));
2731
2732 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2733}
2734
/// A ParsedTfOperation for a Mul node.
2736/// Creation of the armnn Mul layer is deferred until it is actually needed, because Mul nodes
2737/// are also used for the first part of a leaky relu activation function (Mul followed by Maximum)
2738/// and in these cases armnn doesn't need a separate layer for the Mul.
2739///
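/// For example, the TensorFlow pattern Maximum(Mul(alpha, x), x) is typically recognised as a leaky
/// ReLU activation when the Maximum node is parsed, so the Mul on its own does not become a layer.
///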
2740class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
2741{
2742public:
2743 ParsedMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
2744 : DeferredSingleLayerParsedTfOperation(parser, node)
2745 {
2746 }
2747
2748 void CreateLayerDeferred() override
2749 {
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002750 ARMNN_ASSERT(m_Layer == nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01002751 m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
2752 }
2753};
2754
surmeh01bceff2f2018-03-29 16:29:27 +01002755ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2756{
Jan Eilers8eb25602020-03-09 12:13:48 +00002757 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002758
telsoa01c577f2c2018-08-31 09:22:23 +01002759 return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002760}
2761
2762ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
2763 const tensorflow::GraphDef& graphDef)
2764{
Jan Eilers8eb25602020-03-09 12:13:48 +00002765 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002766
2767 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
2768
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002769 const LayerBindingId layerId = armnn::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
surmeh01bceff2f2018-03-29 16:29:27 +01002770
2771 auto it = m_InputShapes.find(nodeDef.name());
2772 if (it == m_InputShapes.end())
2773 {
telsoa01c577f2c2018-08-31 09:22:23 +01002774 throw ParseException(
2775 boost::str(
2776 boost::format(
2777 "Missing input shape for Placeholder '%1%' %2%")
2778 % nodeDef.name()
2779 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002780 }
2781 TensorInfo tensorInfo(it->second, DataType::Float32);
2782
2783 IConnectableLayer* const layer = m_Network->AddInputLayer(layerId, nodeDef.name().c_str());
2784
2785 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2786
2787 TrackInputBinding(layer, layerId, tensorInfo);
2788
2789 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2790}
2791
saoste01bbd40612018-08-28 15:41:51 +01002792ParsedTfOperationPtr TfParser::ParseRealDiv(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2793{
Jan Eilers8eb25602020-03-09 12:13:48 +00002794 IgnoreUnused(graphDef);
saoste01bbd40612018-08-28 15:41:51 +01002795 return AddRealDivLayer(nodeDef);
2796}
2797
surmeh01bceff2f2018-03-29 16:29:27 +01002798ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
2799 const tensorflow::GraphDef& graphDef)
2800{
Jan Eilers8eb25602020-03-09 12:13:48 +00002801 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002802
2803 ActivationDescriptor activationDesc;
2804 activationDesc.m_Function = ActivationFunction::ReLu;
2805 return AddActivationLayer(nodeDef, activationDesc);
2806}
2807
2808ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
2809 const tensorflow::GraphDef& graphDef)
2810{
Jan Eilers8eb25602020-03-09 12:13:48 +00002811 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002812
2813 ActivationDescriptor activationDesc;
2814 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2815 activationDesc.m_A = 6.0f;
2816 activationDesc.m_B = 0.0f;
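    // Assuming ArmNN's BoundedReLu clamps the input to [m_B, m_A], these settings implement
    // relu6(x) = min(max(x, 0), 6).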
2817
2818 return AddActivationLayer(nodeDef, activationDesc);
2819}
2820
2821ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
2822 const tensorflow::GraphDef& graphDef)
2823{
Jan Eilers8eb25602020-03-09 12:13:48 +00002824 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002825
2826 ActivationDescriptor activationDesc;
2827 activationDesc.m_Function = ActivationFunction::Sigmoid;
2828
2829 return AddActivationLayer(nodeDef, activationDesc);
2830}
2831
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +00002832ParsedTfOperationPtr TfParser::ParseRsqrt(const tensorflow::NodeDef &nodeDef,
2833 const tensorflow::GraphDef &graphDef)
2834{
Jan Eilers8eb25602020-03-09 12:13:48 +00002835 IgnoreUnused(graphDef);
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +00002836
2837 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2838
josh minor4a3c6102020-01-06 16:40:46 -06002839 ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
2840 IConnectableLayer* const layer = m_Network->AddElementwiseUnaryLayer(descriptor, nodeDef.name().c_str());
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +00002841
2842 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2843 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2844 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2845
2846 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2847}
2848
surmeh01bceff2f2018-03-29 16:29:27 +01002849ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
2850 const tensorflow::GraphDef& graphDef)
2851{
Jan Eilers8eb25602020-03-09 12:13:48 +00002852 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002853
2854 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2855
2856 SoftmaxDescriptor softmaxDescriptor;
2857 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(softmaxDescriptor, nodeDef.name().c_str());
2858
2859 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2860 prevLayerSlot.Connect(layer->GetInputSlot(0));
2861 layer->GetOutputSlot(0).SetTensorInfo(prevLayerSlot.GetTensorInfo());
2862
2863 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2864}
2865
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002866ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef,
2867 const tensorflow::GraphDef& graphDef)
2868{
Jan Eilers8eb25602020-03-09 12:13:48 +00002869 IgnoreUnused(graphDef);
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002870
2871 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2872 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2873 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2874
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002875 // Constant tensor index
2876 unsigned int index = GetConstInputIndex(inputs);
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002877 // Get the axis tensor data
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002878 ParsedConstTfOperation<int32_t>* shapeNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002879 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002880
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002881 std::vector<int32_t> axisTensorData;
2882 shapeNode->GetConstTensor(axisTensorData);
2883
    // The splitDim indicates the data format: 3 is NHWC, 1 is NCHW.
2885 const unsigned int splitDim = static_cast<unsigned int>(axisTensorData[0]);
2886
2887 // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
2888 if (splitDim == 0 || splitDim == 2)
2889 {
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002890 throw armnn::ParseException(
2891 boost::str(
2892 boost::format(
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002893 "Dimension %1% for split is not supported by Armnn. "
2894 "Node %2% %3%")
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002895 % splitDim
2896 % nodeDef.name()
2897 % CHECK_LOCATION().AsString()));
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002898 }
2899
    // As ArmNN only supports splitter outputs of the same shape, num_split is limited to an integer.
2901 uint32_t num_split = ReadMandatoryNodeUint32Attribute(nodeDef, "num_split");
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002902
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002903 IOutputSlot& inputSlot = inputs[1 - index].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1 - index].m_Index);
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002904 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2905
Matthew Jacksondba634f2019-08-15 15:14:18 +01002906 const unsigned int supportedNumDims = 4;
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002907 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2908
Matthew Jacksondba634f2019-08-15 15:14:18 +01002909 if (inputDimSize != supportedNumDims)
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002910 {
2911 throw armnn::ParseException(
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002912 boost::str(
2913 boost::format(
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002914 "The number of dimensions: %1% for input tensors of the "
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002915 "split op should be %2% %3%")
2916 % inputTensorInfo.GetNumDimensions()
Matthew Jacksondba634f2019-08-15 15:14:18 +01002917 % supportedNumDims
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002918 % CHECK_LOCATION().AsString()));
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002919 }
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002920
2921 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2922
2923 // Add current input shape to splitterDimSizes
2924 for (unsigned int i = 0; i < inputDimSize; ++i)
2925 {
2926 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2927 }
2928
2929 if (splitterDimSizes[splitDim] % num_split != 0)
2930 {
2931 throw ParseException("Number of splits must evenly divide the dimension");
2932 }
2933 splitterDimSizes[splitDim] /= num_split;
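    // Illustrative example (values chosen for illustration only): an NHWC input of shape { 1, 4, 4, 6 }
    // split along dimension 3 with num_split = 3 gives three views of shape { 1, 4, 4, 2 }.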
2934
2935 SplitterDescriptor splitDesc(num_split);
2936 for (unsigned int g = 0; g < num_split; ++g)
2937 {
2938 // Set the size of the views.
2939 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2940 {
2941 splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
2942 }
2943 splitDesc.SetViewOriginCoord(g, splitDim, splitterDimSizes[splitDim] * g);
2944 }
2945
2946 IConnectableLayer *layer = m_Network->AddSplitterLayer(splitDesc, nodeDef.name().c_str());
2947
2948 inputSlot.Connect(layer->GetInputSlot(0));
2949
2950 TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
2951 splitterDimSizes.data());
2952
2953 for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
2954 {
2955 layer->GetOutputSlot(i).SetTensorInfo(armnn::TensorInfo(outShape, inputTensorInfo.GetDataType()));
2956 }
2957
2958 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2959}
2960
surmeh01bceff2f2018-03-29 16:29:27 +01002961ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
2962 const tensorflow::GraphDef& graphDef)
2963{
Jan Eilers8eb25602020-03-09 12:13:48 +00002964 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002965
2966 ActivationDescriptor activationDesc;
2967 activationDesc.m_Function = ActivationFunction::SoftReLu;
2968
2969 return AddActivationLayer(nodeDef, activationDesc);
2970}
2971
Georgios Pinitas5e90aab2020-02-14 14:46:51 +00002972ParsedTfOperationPtr TfParser::ParseStridedSlice(const tensorflow::NodeDef& nodeDef,
2973 const tensorflow::GraphDef& graphDef)
2974{
Jan Eilers8eb25602020-03-09 12:13:48 +00002975 IgnoreUnused(graphDef);
Georgios Pinitas5e90aab2020-02-14 14:46:51 +00002976
2977 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2978 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2979 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2980
2981 ParsedConstTfOperation<int32_t>* beginNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002982 PolymorphicDowncast<ParsedConstTfOperation<int32_t> *>(inputs[1].m_IndexedValue);
Georgios Pinitas5e90aab2020-02-14 14:46:51 +00002983 std::vector<int32_t> beginTensorData;
2984 beginNode->GetConstTensor(beginTensorData);
2985
2986 ParsedConstTfOperation<int32_t>* endNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002987 PolymorphicDowncast<ParsedConstTfOperation<int32_t> *>(inputs[2].m_IndexedValue);
Georgios Pinitas5e90aab2020-02-14 14:46:51 +00002988 std::vector<int32_t> endTensorData;
2989 endNode->GetConstTensor(endTensorData);
2990
2991 ParsedConstTfOperation<int32_t>* stridesNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002992 PolymorphicDowncast<ParsedConstTfOperation<int32_t> *>(inputs[3].m_IndexedValue);
Georgios Pinitas5e90aab2020-02-14 14:46:51 +00002993 std::vector<int32_t> stridesTensorData;
2994 stridesNode->GetConstTensor(stridesTensorData);
2995
2996 StridedSliceDescriptor desc;
2997 desc.m_Begin = beginTensorData;
2998 desc.m_End = endTensorData;
2999 desc.m_Stride = stridesTensorData;
3000 desc.m_BeginMask = ReadMandatoryNodeInt32Attribute(nodeDef, "begin_mask");
3001 desc.m_EndMask = ReadMandatoryNodeInt32Attribute(nodeDef, "end_mask");
3002 desc.m_EllipsisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "ellipsis_mask");
3003 desc.m_NewAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "new_axis_mask");
3004 desc.m_ShrinkAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "shrink_axis_mask");
3005 desc.m_DataLayout = armnn::DataLayout::NHWC;
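    // Illustrative example (values chosen for illustration only): on an input of shape { 1, 4, 4, 3 },
    // begin = { 0, 0, 0, 0 }, end = { 1, 2, 2, 3 } and strides = { 1, 1, 1, 1 } select a { 1, 2, 2, 3 }
    // slice; set bits in shrink_axis_mask additionally drop the corresponding output dimensions.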
3006 IConnectableLayer* const layer = m_Network->AddStridedSliceLayer(desc, nodeDef.name().c_str());
3007
3008 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3009 TensorInfo inputTensorInfo = prevLayerSlot.GetTensorInfo();
3010
3011 TensorInfo outputTensorInfo;
3012 CalculateStridedSliceOutputTensorInfo(inputTensorInfo, desc, outputTensorInfo);
3013
3014 prevLayerSlot.Connect(layer->GetInputSlot(0));
3015 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3016
3017 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3018}
3019
surmeh01bceff2f2018-03-29 16:29:27 +01003020ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
3021{
Jan Eilers8eb25602020-03-09 12:13:48 +00003022 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01003023
3024 ActivationDescriptor activationDesc;
3025 activationDesc.m_Function = ActivationFunction::TanH;
3026 activationDesc.m_A = 1.0f;
3027 activationDesc.m_B = 1.0f;
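    // Assuming ArmNN's TanH activation computes m_A * tanh(m_B * x), these settings give the plain
    // tanh(x) used by TensorFlow's Tanh op.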
3028
3029 return AddActivationLayer(nodeDef, activationDesc);
3030}
3031
3032ParsedTfOperationPtr TfParser::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
3033 ActivationDescriptor& activationDesc)
3034{
3035 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
3036
3037 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, nodeDef.name().c_str());
3038
3039 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3040 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
3041 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
3042 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3043}
3044
3045ParsedTfOperationPtr TfParser::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
3046 const tensorflow::GraphDef& graphDef)
3047{
3048 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
3049}
3050
3051ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
3052 const tensorflow::GraphDef& graphDef)
3053{
3054 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
3055}
3056
3057ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
3058 const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
3059{
Jan Eilers8eb25602020-03-09 12:13:48 +00003060 IgnoreUnused(graphDef);
Derek Lambertibaa177f2019-12-10 22:00:43 +00003061
surmeh01bceff2f2018-03-29 16:29:27 +01003062 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
3063 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3064 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
3065
3066 if (inputs.size() != 1)
3067 {
telsoa01c577f2c2018-08-31 09:22:23 +01003068 throw ParseException(
3069 boost::str(
3070 boost::format(
3071 "2D Pooling expects one input!. Got %1% for Node %2% %3%")
3072 % inputs.size()
3073 % nodeDef.name()
3074 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003075 }
3076
3077 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
3078 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
3079 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
3080 std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef, "ksize"); // size of pool windows
3081
3082 Pooling2dDescriptor pooling2dDescriptor;
FrancisMurtaghf005e312018-12-06 15:26:04 +00003083 pooling2dDescriptor.m_PoolType = pooltype;
3084 pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
surmeh01bceff2f2018-03-29 16:29:27 +01003085 pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;
3086
telsoa01c577f2c2018-08-31 09:22:23 +01003087 CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
FrancisMurtaghf005e312018-12-06 15:26:04 +00003088 DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
3089 pooling2dDescriptor.m_DataLayout = dataLayout;
3090 DataLayoutIndexed dataLayoutIndexed(dataLayout);
telsoa01c577f2c2018-08-31 09:22:23 +01003091
FrancisMurtaghf005e312018-12-06 15:26:04 +00003092 pooling2dDescriptor.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
3093 pooling2dDescriptor.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
3094 pooling2dDescriptor.m_PoolWidth = ksize[dataLayoutIndexed.GetWidthIndex()];
3095 pooling2dDescriptor.m_PoolHeight = ksize[dataLayoutIndexed.GetHeightIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01003096
FrancisMurtaghf005e312018-12-06 15:26:04 +00003097 uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
3098 uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01003099
3100 bool padding = false;
3101 TensorInfo outputInfo;
FrancisMurtaghf005e312018-12-06 15:26:04 +00003102 unsigned int outputHeight = 0;
3103 unsigned int outputWidth = 0;
telsoa01c577f2c2018-08-31 09:22:23 +01003104
3105 CHECK_PADDING_TYPE(nodeDef, paddingString);
3106
surmeh01bceff2f2018-03-29 16:29:27 +01003107 if (paddingString == "SAME")
3108 {
3109 padding = true;
FrancisMurtaghf005e312018-12-06 15:26:04 +00003110
3111 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
3112 static_cast<float>(pooling2dDescriptor.m_StrideY)));
3113 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
3114 static_cast<float>(pooling2dDescriptor.m_StrideX)));
surmeh01bceff2f2018-03-29 16:29:27 +01003115 }
3116 else if (paddingString == "VALID")
3117 {
3118 padding = false;
FrancisMurtaghf005e312018-12-06 15:26:04 +00003119
3120 outputHeight = static_cast<uint32_t>(ceil(
3121 static_cast<float>(inputHeight - pooling2dDescriptor.m_PoolHeight + 1) /
3122 static_cast<float>(pooling2dDescriptor.m_StrideY)));
3123 outputWidth = static_cast<uint32_t>(ceil(
3124 static_cast<float>(inputWidth - pooling2dDescriptor.m_PoolWidth + 1) /
3125 static_cast<float>(pooling2dDescriptor.m_StrideX)));
3126 }
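    // Illustrative example (values chosen for illustration only): with inputHeight = 7,
    // m_PoolHeight = 3 and m_StrideY = 2, "SAME" padding gives outputHeight = ceil(7 / 2) = 4,
    // whereas "VALID" gives outputHeight = ceil((7 - 3 + 1) / 2) = 3.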
3127
3128 switch (dataLayout)
3129 {
3130 case DataLayout::NHWC:
3131 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
3132 outputHeight,
3133 outputWidth,
3134 inputTensorInfo.GetShape()[3] },
3135 DataType::Float32);
3136 break;
3137 case DataLayout::NCHW:
3138 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
3139 inputTensorInfo.GetShape()[1],
3140 outputHeight,
3141 outputWidth },
3142 DataType::Float32);
3143 break;
surmeh01bceff2f2018-03-29 16:29:27 +01003144 }
surmeh01bceff2f2018-03-29 16:29:27 +01003145
3146 CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX,
FrancisMurtaghf005e312018-12-06 15:26:04 +00003147 pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01003148 CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY,
FrancisMurtaghf005e312018-12-06 15:26:04 +00003149 pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01003150
3151
3152 IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
3153 if (layer == nullptr)
3154 {
telsoa01c577f2c2018-08-31 09:22:23 +01003155 throw ParseException(
3156 boost::str(
3157 boost::format(
3158 "Failed to add pooling2d layer for %1% %2%")
3159 % nodeDef.name()
3160 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003161 }
3162
3163 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3164
FrancisMurtaghf005e312018-12-06 15:26:04 +00003165 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01003166
3167 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3168}
3169
3170ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
3171{
3172 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3173
3174 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3175 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3176
3177 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
3178 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
3179
3180 if (isBiasAdd)
3181 {
        // BiasAdd takes the bias as a 1D tensor. We need to add a reshape layer to create a 4D tensor
        // with the same data in the correct dimensions for broadcasting in the addition.
3184 if(input1Info.GetNumDimensions() != 1)
3185 {
telsoa01c577f2c2018-08-31 09:22:23 +01003186 throw ParseException(
3187 boost::str(
3188 boost::format(
3189 "Unsupported bias for BiasAdd. It should be a 1D vector. "
3190 "Got %1% dimensions for input %2%. Node %3% %4%")
3191 % input1Info.GetNumDimensions()
3192 % inputs[1].m_IndexedValue->GetNode().name()
3193 % nodeDef.name()
3194 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003195 }
3196
3197 const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
surmeh01bceff2f2018-03-29 16:29:27 +01003198
telsoa01c577f2c2018-08-31 09:22:23 +01003199 CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
saoste01bbd40612018-08-28 15:41:51 +01003200 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
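        // Illustrative note (assumed behaviour of AddBroadcastReshapeLayer, defined elsewhere in this
        // file): for an NHWC input of shape { 1, 7, 7, 64 }, a bias of shape { 64 } is expanded to a
        // 4D shape such as { 1, 1, 1, 64 } so that it broadcasts across the addition.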
surmeh01bceff2f2018-03-29 16:29:27 +01003201 }
3202 else
3203 {
3204 if (input0Info.GetNumDimensions() == 1)
3205 {
3206 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003207 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01003208 }
3209
3210 if (input1Info.GetNumDimensions() == 1)
3211 {
3212 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003213 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01003214 }
3215 }
3216
3217 IConnectableLayer* const layer = m_Network->AddAdditionLayer(nodeDef.name().c_str());
3218
3219 input0Slot->Connect(layer->GetInputSlot(0));
3220 input1Slot->Connect(layer->GetInputSlot(1));
3221
Nattapat Chaimanowongfab64f02019-02-15 16:46:24 +00003222 if (input0Info.GetNumDimensions() == input1Info.GetNumDimensions())
3223 {
3224 const TensorShape& input0Shape = input0Info.GetShape();
3225 const TensorShape& input1Shape = input1Info.GetShape();
3226
3227 std::vector<unsigned int> outputShape;
3228 outputShape.reserve(input0Shape.GetNumDimensions());
3229 TensorInfo outputInfo(input0Info);
3230
3231 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
3232 {
3233 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3234 }
3235
3236 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
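        // Illustrative example (values chosen for illustration only): shapes { 1, 4, 1, 2 } and
        // { 1, 1, 3, 2 } give a broadcast output shape of { 1, 4, 3, 2 }, taking the maximum of each
        // pair of dimensions.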
3237
3238 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3239 }
3240 else if (input0Info.GetNumDimensions() == 1 && isBiasAdd == false)
surmeh01bceff2f2018-03-29 16:29:27 +01003241 {
3242 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3243 }
3244 else
3245 {
3246 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3247 }
3248
3249 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3250}
3251
saoste01bbd40612018-08-28 15:41:51 +01003252ParsedTfOperationPtr TfParser::AddRealDivLayer(const tensorflow::NodeDef& nodeDef)
3253{
3254 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3255
3256 IConnectableLayer* const layer = m_Network->AddDivisionLayer(nodeDef.name().c_str());
3257 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3258 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3259
3260 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3261 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3262
3263
3264 if (input0NumDims < input1NumDims)
3265 {
3266 const bool isNHWC = true;
3267 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3268 }
3269 if (input1NumDims < input0NumDims)
3270 {
3271 const bool isNHWC = true;
3272 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3273 }
3274
3275 input0Slot->Connect(layer->GetInputSlot(0));
3276 input1Slot->Connect(layer->GetInputSlot(1));
3277
3278 if (input0NumDims < input1NumDims)
3279 {
3280 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3281 }
3282 else
3283 {
3284 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3285
3286 }
3287 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3288}
3289
Sadik Armagan975c09a2018-12-04 10:02:08 +00003290ParsedTfOperationPtr TfParser::AddMaximumLayer(const tensorflow::NodeDef& nodeDef)
3291{
3292 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3293
3294 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3295 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3296
3297 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3298 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3299
3300 if (input0NumDims < input1NumDims)
3301 {
3302 const bool isNHWC = true;
3303 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3304 }
3305 if (input1NumDims < input0NumDims)
3306 {
3307 const bool isNHWC = true;
3308 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3309 }
3310
3311 IConnectableLayer* const layer = m_Network->AddMaximumLayer(nodeDef.name().c_str());
3312
3313 input0Slot->Connect(layer->GetInputSlot(0));
3314 input1Slot->Connect(layer->GetInputSlot(1));
3315
3316 TensorInfo outputInfo = input0Slot->GetTensorInfo();
3317 std::vector<unsigned int> outputShape;
3318
3319 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
3320 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
3321
3322 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
3323 {
3324 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3325 }
3326
3327 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
3328 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3329
3330 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3331}
3332
telsoa01c577f2c2018-08-31 09:22:23 +01003333IConnectableLayer* TfParser::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
3334{
3335 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3336
3337 IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
3338 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3339 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3340
3341 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3342 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3343
3344 if (input0NumDims < input1NumDims)
3345 {
3346 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003347 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01003348 }
3349 if (input1NumDims < input0NumDims)
3350 {
3351 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003352 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01003353 }
3354
3355 input0Slot->Connect(layer->GetInputSlot(0));
3356 input1Slot->Connect(layer->GetInputSlot(1));
3357
3358 if (input0NumDims < input1NumDims)
3359 {
3360 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3361 }
3362 else
3363 {
3364 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3365 }
3366 return layer;
3367}
3368
surmeh01bceff2f2018-03-29 16:29:27 +01003369IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
3370 const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
3371{
telsoa01c577f2c2018-08-31 09:22:23 +01003372 // Finds bias const (if applicable).
surmeh01bceff2f2018-03-29 16:29:27 +01003373 ParsedConstTfOperation<float>* biasNode = nullptr;
3374 if (addNodeDef != nullptr)
3375 {
3376 std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
telsoa01c577f2c2018-08-31 09:22:23 +01003377 // Finds our inputs.
surmeh01bceff2f2018-03-29 16:29:27 +01003378 if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
3379 {
Jan Eilersbb446e52020-04-02 13:56:54 +01003380 biasNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01003381 }
3382 else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
3383 {
Jan Eilersbb446e52020-04-02 13:56:54 +01003384 biasNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01003385 }
3386 else
3387 {
telsoa01c577f2c2018-08-31 09:22:23 +01003388 throw ParseException(
3389 boost::str(
3390 boost::format(
3391 "ArmNN only supports fully connected layers with constant bias. "
3392 "Inputs %1% and %2%. AddNode %3%. MatMulNode %4% %5%")
3393 % addInputs[0].m_IndexedValue->GetNode().name()
3394 % addInputs[1].m_IndexedValue->GetNode().name()
3395 % addNodeDef->name()
3396 % matMulNodeDef.name()
3397 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003398 }
3399 }
3400
telsoa01c577f2c2018-08-31 09:22:23 +01003401 // Finds matmul inputs.
surmeh01bceff2f2018-03-29 16:29:27 +01003402 ParsedConstTfOperation<float>* weightNode = nullptr;
3403 ParsedTfOperation* inputNode = nullptr;
3404 unsigned int inputIdx = 0;
3405 std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
3406 if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
3407 {
Jan Eilersbb446e52020-04-02 13:56:54 +01003408 weightNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01003409 inputNode = mulInputs[1].m_IndexedValue;
3410 inputIdx = mulInputs[1].m_Index;
3411 }
3412 else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
3413 {
Jan Eilersbb446e52020-04-02 13:56:54 +01003414 weightNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01003415 inputNode = mulInputs[0].m_IndexedValue;
3416 inputIdx = mulInputs[0].m_Index;
3417 }
3418 else
3419 {
telsoa01c577f2c2018-08-31 09:22:23 +01003420 throw ParseException(
3421 boost::str(
3422 boost::format(
3423 "ArmNN only supports fully connected layers with constant weights. "
3424 "Inputs %1% and %2%. MatMulNode %3% %4%")
3425 % mulInputs[0].m_IndexedValue->GetNode().name()
3426 % mulInputs[1].m_IndexedValue->GetNode().name()
3427 % matMulNodeDef.name()
3428 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003429 }
3430
3431 std::vector<float> weightTensorData;
telsoa01c577f2c2018-08-31 09:22:23 +01003432 // Handles weight.
Matteo Martincigh482ca852018-12-12 09:20:55 +00003433 ConstTensor weights = weightNode->GetConstTensor(weightTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01003434
3435 FullyConnectedDescriptor desc;
3436 desc.m_BiasEnabled = addNodeDef != nullptr;
3437
3438 IConnectableLayer* layer = nullptr;
Matteo Martincighfc598e12019-05-14 10:36:13 +01003439 Optional<ConstTensor> optionalBiases;
3440 std::vector<float> biasTensorData;
telsoa01c577f2c2018-08-31 09:22:23 +01003441 // Makes the layer.
surmeh01bceff2f2018-03-29 16:29:27 +01003442 if (addNodeDef != nullptr)
3443 {
Matteo Martincigh482ca852018-12-12 09:20:55 +00003444 ConstTensor biases = biasNode->GetConstTensor(biasTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01003445
3446 if (weights.GetShape()[1] != biases.GetShape()[0])
3447 {
telsoa01c577f2c2018-08-31 09:22:23 +01003448 throw ParseException(
3449 boost::str(
3450 boost::format(
3451 "Shape of matmul weights and bias do not match. "
3452 "AddNode %1%. MatMulNode %2% %3%")
3453 % addNodeDef->name()
3454 % matMulNodeDef.name()
3455 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003456 }
3457
Matteo Martincighfc598e12019-05-14 10:36:13 +01003458 optionalBiases = Optional<ConstTensor>(biases);
surmeh01bceff2f2018-03-29 16:29:27 +01003459 }
Matteo Martincighfc598e12019-05-14 10:36:13 +01003460 layer = m_Network->AddFullyConnectedLayer(desc, weights, optionalBiases, armnnLayerName);
surmeh01bceff2f2018-03-29 16:29:27 +01003461
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003462 ARMNN_ASSERT(layer != nullptr);
surmeh01bceff2f2018-03-29 16:29:27 +01003463
3464 inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
3465 unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];
3466
telsoa01c577f2c2018-08-31 09:22:23 +01003467 // Handles output.
surmeh01bceff2f2018-03-29 16:29:27 +01003468 TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
3469 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3470 return layer;
3471}
3472
3473void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
3474{
telsoa01c577f2c2018-08-31 09:22:23 +01003475 // Gets the type of the node (assume float).
surmeh01bceff2f2018-03-29 16:29:27 +01003476 tensorflow::DataType type = tensorflow::DT_FLOAT;
3477 if (nodeDef.attr().count("T") != 0)
3478 {
3479 auto attr = nodeDef.attr().at("T");
3480 type = attr.type();
3481 }
3482 else if (nodeDef.attr().count("dtype") != 0)
3483 {
3484 auto attr = nodeDef.attr().at("dtype");
3485 type = attr.type();
3486 }
3487
Ferran Balaguerc602f292019-02-08 17:09:55 +00003488 if ((type != tensorflow::DT_FLOAT && type != tensorflow::DT_INT32) && nodeDef.op() != "Const")
surmeh01bceff2f2018-03-29 16:29:27 +01003489 {
telsoa01c577f2c2018-08-31 09:22:23 +01003490 throw ParseException(
3491 boost::str(
3492 boost::format(
Ferran Balaguerc602f292019-02-08 17:09:55 +00003493 "Currently only FLOAT and INT32 are supported for tensorflow nodes (apart from Const). "
telsoa01c577f2c2018-08-31 09:22:23 +01003494 "Got %1% for Node %2% %3%")
3495 % tensorflow::DataType_Name(type)
3496 % nodeDef.name()
3497 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003498 }
3499
3500 const std::string& operation = nodeDef.op();
narpra016f37f832018-12-21 18:30:00 +00003501 auto itControlInput = std::find(m_ControlInputs.begin(), m_ControlInputs.end(), operation);
3502 if (itControlInput != m_ControlInputs.end())
3503 {
        // We currently allow control inputs from the TensorFlow graph but ignore them in the ArmNN graph.
3505 return;
3506 }
surmeh01bceff2f2018-03-29 16:29:27 +01003507 auto it = ms_OperationNameToParsingFunctions.find(operation);
3508 if (it != ms_OperationNameToParsingFunctions.end())
3509 {
3510 auto func = it->second;
3511 ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
3512 ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
3513
telsoa01c577f2c2018-08-31 09:22:23 +01003514 // Stores the parsed operation so that dependent layers can connect to it.
surmeh01bceff2f2018-03-29 16:29:27 +01003515 auto it = m_ParsedTfOperations.find(nodeDef.name());
3516 if (it != m_ParsedTfOperations.end())
3517 {
3518 throw ParseException(boost::str(boost::format("Name %1% used by more than one node") % nodeDef.name()));
3519 }
3520 m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
3521
telsoa01c577f2c2018-08-31 09:22:23 +01003522 // If this node was requested as an output from the network, then adds an ArmNN output layer.
surmeh01bceff2f2018-03-29 16:29:27 +01003523 if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
3524 m_RequestedOutputs.end())
3525 {
3526 auto outId = ParseOutputId(nodeDef.name());
Matthew Sloyan589e3e82020-09-11 16:17:48 +01003527 const LayerBindingId layerId = armnn::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
surmeh01bceff2f2018-03-29 16:29:27 +01003528 IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
3529
3530 TensorInfo tensorInfo = prevSlot.GetTensorInfo();
3531
3532 IConnectableLayer* outputLayer = m_Network->AddOutputLayer(layerId, nodeDef.name().c_str());
3533
3534 prevSlot.Connect(outputLayer->GetInputSlot(0));
3535
3536 TrackOutputBinding(outputLayer, layerId, tensorInfo);
3537 }
3538 }
3539 else
3540 {
telsoa01c577f2c2018-08-31 09:22:23 +01003541 throw ParseException(
3542 boost::str(
3543 boost::format(
3544 "Unsupported operation %1% in tensorflow::GraphDef %2%")
3545 % operation
3546 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003547 }
3548}
3549
3550void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
3551{
telsoa01c577f2c2018-08-31 09:22:23 +01003552 // Adds all nodes to our map.
surmeh01bceff2f2018-03-29 16:29:27 +01003553 m_NodesByName.clear();
3554 m_NetworkInputsBindingInfo.clear();
3555 m_NetworkOutputsBindingInfo.clear();
3556
3557 for (int i = 0; i < graphDef.node_size(); ++i)
3558 {
3559 const tensorflow::NodeDef& node = graphDef.node(i);
3560 m_NodesByName[node.name()] = &node;
3561 }
3562
Francis Murtaghbb190a62019-04-04 11:16:29 +01003563 // Checks that the input nodes the user has requested exist.
3564 for (const auto& pair : m_InputShapes)
3565 {
3566 const std::string& requestedInputName = pair.first;
3567 auto nodeIt = m_NodesByName.find(requestedInputName);
3568 if (nodeIt == m_NodesByName.end())
3569 {
3570 throw ParseException(
3571 boost::str(
3572 boost::format(
3573 "Couldn't find requested input node '%1%' in graph %2%")
3574 % requestedInputName
3575 % CHECK_LOCATION().AsString()));
3576 }
3577 }
3578
telsoa01c577f2c2018-08-31 09:22:23 +01003579 // Finds the output nodes the user requested.
surmeh01bceff2f2018-03-29 16:29:27 +01003580 std::vector<const tensorflow::NodeDef*> targetNodes;
3581 for (const std::string& requestedOutputName : m_RequestedOutputs)
3582 {
3583 auto nodeIt = m_NodesByName.find(requestedOutputName);
3584 if (nodeIt == m_NodesByName.end())
3585 {
telsoa01c577f2c2018-08-31 09:22:23 +01003586 throw ParseException(
3587 boost::str(
3588 boost::format(
3589 "Couldn't find requested output node '%1%' in graph %2%")
3590 % requestedOutputName
3591 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003592 }
3593 targetNodes.push_back(nodeIt->second);
3594 }
3595
telsoa01c577f2c2018-08-31 09:22:23 +01003596 // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01003597 std::vector<const tensorflow::NodeDef*> sortedNodes;
3598 if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
3599 targetNodes,
3600 [this](const tensorflow::NodeDef* node)
3601 {
3602 auto outputs = GetTfInputNodes(*node);
3603 std::vector<const tensorflow::NodeDef*> nodesOnly;
3604 for (const auto & o : outputs) {
3605 nodesOnly.push_back(o.m_IndexedValue);
3606 }
3607 return nodesOnly;
3608 },
3609 sortedNodes))
3610 {
telsoa01c577f2c2018-08-31 09:22:23 +01003611 throw ParseException(
3612 boost::str(
3613 boost::format(
3614 "Cycle detected in graph %1%")
3615 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003616 }
3617
telsoa01c577f2c2018-08-31 09:22:23 +01003618 // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01003619 for (const auto& it : sortedNodes)
3620 {
3621 const tensorflow::NodeDef& currentNode = *it;
3622 LoadNodeDef(currentNode, graphDef);
3623 }
3624}
3625
3626INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
3627 const std::map<std::string, TensorShape>& inputShapes,
3628 const std::vector<std::string>& requestedOutputs)
3629{
3630 FILE* fd = fopen(graphFile, "r");
3631
3632 if (fd == nullptr)
3633 {
telsoa01c577f2c2018-08-31 09:22:23 +01003634 throw FileNotFoundException(
3635 boost::str(
3636 boost::format(
3637 "Graph file %1% failed to open %2%")
3638 % graphFile
3639 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003640 }
3641
telsoa01c577f2c2018-08-31 09:22:23 +01003642 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003643 tensorflow::GraphDef graphDef;
3644 auto input = new google::protobuf::io::FileInputStream(fileno(fd));
3645 bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
3646 delete input;
3647 fclose(fd);
3648
3649 if (!success)
3650 {
telsoa01c577f2c2018-08-31 09:22:23 +01003651 throw ParseException(
3652 boost::str(
3653 boost::format(
3654 "Failed to parse graph file %1%")
3655 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003656 }
3657
3658 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3659}
3660
3661INetworkPtr TfParser::CreateNetworkFromString(const char* protoText,
3662 const std::map<std::string, TensorShape>& inputShapes,
3663 const std::vector<std::string>& requestedOutputs)
3664{
telsoa01c577f2c2018-08-31 09:22:23 +01003665 // Parses the string into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003666 tensorflow::GraphDef graphDef;
3667 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
3668
3669 if (!success)
3670 {
telsoa01c577f2c2018-08-31 09:22:23 +01003671 throw ParseException(
3672 boost::str(
3673 boost::format(
3674 "Failed to parse graph file %1%")
3675 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003676 }
3677
3678 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3679}
3680
3681INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
3682 const std::map<std::string, TensorShape>& inputShapes,
3683 const std::vector<std::string>& requestedOutputs)
3684{
3685 FILE* fd = fopen(graphFile, "rb");
3686
3687 if (fd == nullptr)
3688 {
telsoa01c577f2c2018-08-31 09:22:23 +01003689 throw FileNotFoundException(
3690 boost::str(
3691 boost::format(
3692 "Graph file %1% failed to open %2%")
3693 % graphFile
3694 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003695 }
3696
telsoa01c577f2c2018-08-31 09:22:23 +01003697 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003698 tensorflow::GraphDef graphDef;
3699
3700 google::protobuf::io::FileInputStream inStream(fileno(fd));
3701 google::protobuf::io::CodedInputStream codedStream(&inStream);
3702 codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
3703 bool success = graphDef.ParseFromCodedStream(&codedStream);
3704 fclose(fd);
3705
3706 if (!success)
3707 {
telsoa01c577f2c2018-08-31 09:22:23 +01003708 throw ParseException(
3709 boost::str(
3710 boost::format(
3711 "Failed to parse protobuf file %1% %2%")
3712 % graphFile
3713 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003714 }
3715
3716 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3717}
3718
3719INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
3720 const std::map<std::string, TensorShape>& inputShapes,
3721 const std::vector<std::string>& requestedOutputs)
3722{
3723 m_Network = INetwork::Create();
3724
3725 m_InputShapes = inputShapes;
3726 if (requestedOutputs.size() == 0)
3727 {
telsoa01c577f2c2018-08-31 09:22:23 +01003728 throw ParseException(
3729 boost::str(
3730 boost::format(
3731 "requestedOutputs must have at least one entry %1%")
3732 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003733 }
3734 m_RequestedOutputs = requestedOutputs;
3735
3736 try
3737 {
3738 LoadGraphDef(graphDef);
3739 }
    catch (const ParseException&)
    {
        Cleanup();
        throw;
    }
3745
3746 Cleanup();
3747
3748 return std::move(m_Network);
3749}
3750
3751void TfParser::Cleanup()
3752{
telsoa01c577f2c2018-08-31 09:22:23 +01003753 // Cleanup, in case we reuse this parser.
surmeh01bceff2f2018-03-29 16:29:27 +01003754 m_InputShapes.clear();
3755 m_RequestedOutputs.clear();
3756 m_NodesByName.clear();
3757 m_ParsedTfOperations.clear();
3758}
3759
3760BindingPointInfo TfParser::GetNetworkInputBindingInfo(const std::string& name) const
3761{
3762 return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
3763}
3764
3765BindingPointInfo TfParser::GetNetworkOutputBindingInfo(const std::string& name) const
3766{
3767 return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
3768}
3769
3770std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(const std::string& layerName,
3771 const char* bindingPointDesc,
3772 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3773{
3774 auto it = nameToBindingInfo.find(layerName);
3775 if (it == nameToBindingInfo.end())
3776 {
telsoa01c577f2c2018-08-31 09:22:23 +01003777 throw InvalidArgumentException(
3778 boost::str(
3779 boost::format(
3780 "Unknown %1% '%2%' %3%")
3781 % bindingPointDesc
3782 % layerName
3783 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003784 }
3785 return it->second;
3786}
3787
3788void TfParser::TrackInputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
3789{
3790 return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
3791}
3792
3793void TfParser::TrackOutputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
3794{
3795 return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
3796}
3797
3798void TfParser::TrackBindingPoint(IConnectableLayer* layer,
3799 LayerBindingId id,
3800 const TensorInfo& tensorInfo,
3801 const char* bindingPointDesc,
3802 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3803{
3804 const std::string layerName = layer->GetName();
3805 auto it = nameToBindingInfo.find(layerName);
3806 if (it == nameToBindingInfo.end())
3807 {
3808 nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
3809 }
3810 else
3811 {
telsoa01c577f2c2018-08-31 09:22:23 +01003812 throw ParseException(
3813 boost::str(
3814 boost::format(
3815 "Id %1% used by more than one %2% layer %3%")
3816 % id
3817 % bindingPointDesc
3818 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003819 }
3820}
3821
3822} // namespace armnnTfParser