blob: b5a421145a570a94b9eef972bf34122a250216e9 [file] [log] [blame]
surmeh01bceff2f2018-03-29 16:29:27 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
surmeh01bceff2f2018-03-29 16:29:27 +01004//
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00005
#include "TfParser.hpp"

#include <armnn/TypesUtils.hpp>
#include <armnn/Descriptors.hpp>

#include <armnnUtils/Permute.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>

#include <GraphTopologicalSort.hpp>
#include <ParserHelper.hpp>

#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/text_format.h>

#include <tensorflow/core/framework/graph.pb.h>

#include <boost/format.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/polymorphic_cast.hpp>

#include <exception>
#include <numeric>
surmeh01bceff2f2018-03-29 16:29:27 +010029
Matteo Martincigh46315822018-11-28 16:22:36 +000030using namespace armnnUtils;
surmeh01bceff2f2018-03-29 16:29:27 +010031using namespace armnn;
32
33namespace armnnTfParser
34{
35namespace
36{
37
// Dimension permutations between TensorFlow's default NHWC data layout and the
// NCHW layout ArmNN uses internally (applied via armnnUtils::Permute).
const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
40
surmeh01bceff2f2018-03-29 16:29:27 +010041
42template <typename Callable>
43void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
44 const std::string& attribName,
45 tensorflow::AttrValue::ValueCase expectedValueCase,
46 Callable callable)
47{
48 auto iter = nodeDef.attr().find(attribName);
49 if (iter != nodeDef.attr().end())
50 {
51 const auto& attrValue = iter->second;
52 if (attrValue.value_case() == expectedValueCase)
53 {
54 callable(attrValue);
55 }
56 else
57 {
telsoa01c577f2c2018-08-31 09:22:23 +010058 throw ParseException(
59 boost::str(
60 boost::format(
61 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
62 "but found %4% instead %5%")
63 % attribName
64 % nodeDef.name()
65 % static_cast<int>(expectedValueCase)
66 % static_cast<int>(attrValue.value_case())
67 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +010068 }
69 }
70 else
71 {
telsoa01c577f2c2018-08-31 09:22:23 +010072 throw ParseException(
73 boost::str(
74 boost::format(
75 "Could not find required attribute %1% in node %2% %3%")
76 % attribName
77 % nodeDef.name()
78 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +010079 }
80}
81
82template <typename Callable>
83void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
84 const std::string& attribName,
85 tensorflow::AttrValue::ValueCase expectedValueCase,
86 Callable callable)
87{
88 auto iter = nodeDef.attr().find(attribName);
89 if (iter != nodeDef.attr().end())
90 {
91 const auto& attrValue = iter->second;
92 if (attrValue.value_case() == expectedValueCase)
93 {
94 callable(attrValue);
95 }
96 else
97 {
telsoa01c577f2c2018-08-31 09:22:23 +010098 throw ParseException(
99 boost::str(
100 boost::format(
101 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
102 "but found %4% instead %5%")
103 % attribName
104 % nodeDef.name()
105 % static_cast<int>(expectedValueCase)
106 % static_cast<int>(attrValue.value_case())
107 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100108 }
109 }
110}
111
112float ReadMandatoryNodeFloatAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
113{
114 float attribValue = 0.0f;
115 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
116 [&attribValue](const tensorflow::AttrValue& attrValue)
117 {
118 attribValue = attrValue.f();
119 });
120 return attribValue;
121}
122
Conor Kennedyc2130a02018-12-05 11:05:54 +0000123int32_t ReadMandatoryNodeInt32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
124{
125 int32_t attribValue = 0u;
126 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
127 [&attribValue](const tensorflow::AttrValue& attrValue)
128 {
129 attribValue = static_cast<int32_t>(attrValue.i());
130 });
131 return attribValue;
132}
133
Ferran Balaguer51dd62f2019-01-11 19:29:18 +0000134bool ReadMandatoryNodeBoolAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
135{
136 bool attribValue = false;
137 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
138 [&attribValue](const tensorflow::AttrValue& attrValue)
139 {
140 attribValue = static_cast<bool>(attrValue.b());
141 });
142 return attribValue;
143}
144
surmeh01bceff2f2018-03-29 16:29:27 +0100145uint32_t ReadMandatoryNodeUint32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
146{
147 uint32_t attribValue = 0u;
148 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
149 [&attribValue](const tensorflow::AttrValue& attrValue)
150 {
151 attribValue = static_cast<uint32_t>(attrValue.i());
152 });
153 return attribValue;
154}
155
156std::string ReadMandatoryNodeStringAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
157{
158 std::string attribValue = "";
159 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
160 [&attribValue](const tensorflow::AttrValue& attrValue)
161 {
162 attribValue = attrValue.s();
163 });
164 return attribValue;
165}
166
167std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
168 const std::string& name)
169{
170 std::vector<uint32_t> attriList;
171 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
172 [&attriList](const tensorflow::AttrValue& attrValue)
173 {
174 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
175 {
176 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
177 }
178 });
179
180 return attriList;
181}
182
183std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
184 const std::string& name)
185{
186 std::vector<uint32_t> attriList;
187 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
188 [&attriList](const tensorflow::AttrValue& attrValue)
189 {
190 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
191 {
192 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
193 }
194 });
195
196 return attriList;
197}
198
Aron Virginas-Tar2e259272019-11-27 13:29:51 +0000199std::string ReadOptionalNodeStringAttribute(const tensorflow::NodeDef& nodeDef,
200 const std::string& name,
201 const std::string& defaultValue = "")
202{
203 std::string attribValue = defaultValue;
204 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
205 [&attribValue](const tensorflow::AttrValue& attrValue)
206 {
207 attribValue = attrValue.s();
208 });
209 return attribValue;
210}
211
surmeh01bceff2f2018-03-29 16:29:27 +0100212bool ReadOptionalNodeBoolAttribute(const tensorflow::NodeDef& nodeDef,
213 const std::string& name,
214 bool defaultValue = false)
215{
216 bool attribValue = defaultValue;
217 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
218 [&attribValue](const tensorflow::AttrValue& attrValue)
219 {
220 attribValue = attrValue.b();
221 });
222 return attribValue;
223}
224
225tensorflow::DataType ReadMandatoryNodeTypeAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
226{
227 tensorflow::DataType attribValue = tensorflow::DT_INVALID;
228 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
229 [&attribValue](const tensorflow::AttrValue& attrValue)
230 {
231 attribValue = attrValue.type();
232 });
233 return attribValue;
234}
235
/// Computes the TensorInfo that results from reshaping @p input to @p targetDims.
/// Exactly one target dimension may be -1 ("stretch"); it is then inferred from
/// the total element count of @p input. Throws ParseException if more than one
/// dimension is -1.
TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& targetDims)
{
    std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
    const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);

    if (stretchDim != targetDims.end())
    {
        // A second -1 after the first one is an error.
        if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "At most one component of shape can be -1 %1%")
                    % CHECK_LOCATION().AsString()));
        }

        // Seeding the product with -1 cancels the single -1 entry in targetDims,
        // leaving the product of the fixed dimensions only.
        auto targetNumElements =
            boost::numeric_cast<unsigned int>(
                std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
        auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
        // Infer the stretched dimension. NOTE(review): assumes the element count is
        // exactly divisible by the fixed dimensions' product — not validated here.
        outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
    }

    TensorInfo reshapeInfo = input;
    reshapeInfo.SetShape(TensorShape{ static_cast<unsigned int>(outDims.size()), outDims.data() });

    return reshapeInfo;
}
264
// We need the input0Slot to guide the reshape for input1Slot.
/// Inserts a Reshape layer so that the (1D) tensor on input1Slot can be broadcast
/// against the tensor on input0Slot: input1's single dimension is placed on the
/// channel axis (last axis for NHWC, axis 1 for NCHW) and all other axes are 1.
/// Returns the output slot of the newly added Reshape layer.
IOutputSlot* AddBroadcastReshapeLayer(IOutputSlot* input0Slot, IOutputSlot* input1Slot, bool isNHWC,
                                      INetwork& m_Network, const tensorflow::NodeDef& nodeDef)
{
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();
    const TensorInfo inputTensorInfo = input0Slot->GetTensorInfo();
    // Channel axis: last dimension for NHWC, third-from-last (i.e. axis 1 of NCHW) otherwise.
    const unsigned int matchDim = inputTensorInfo.GetNumDimensions() - (isNHWC ? 1 : 3);
    std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
    std::fill_n(reshapedDimensions.begin(), inputTensorInfo.GetNumDimensions(), 1);
    // NOTE(review): assumes input1 is 1D — only its first dimension is used.
    reshapedDimensions[matchDim] = input1Info.GetShape()[0];

    armnn::TensorInfo reshapedInfo = input1Info;
    reshapedInfo.SetShape(TensorShape{ inputTensorInfo.GetNumDimensions(), reshapedDimensions.data() });

    const std::string reshapeLayerName = "reshape_for-" + nodeDef.name();
    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
    IConnectableLayer* const reshapeLayer = m_Network.AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());

    // Wire the original input through the new Reshape layer.
    input1Slot->Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    input1Slot = &reshapeLayer->GetOutputSlot(0);

    return input1Slot;
}
291
292OutputId ParseOutputId(const std::string & name)
293{
294 unsigned int outputNum = 0;
295 size_t colonPos = name.find_last_of(":");
296 if (colonPos != std::string::npos)
297 {
298 int n = std::stoi(name.substr(colonPos+1));
299 if (n<0 || n>100)
300 {
telsoa01c577f2c2018-08-31 09:22:23 +0100301 throw ParseException(
302 boost::str(
303 boost::format(
304 "Output tensor id is out of range for %1% %2%")
305 % name
306 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100307 }
308 outputNum = static_cast<unsigned int>(n);
309 }
310 return OutputId(name.substr(0,colonPos),outputNum);
311}
312
/// Validates that FORMAT is one of the two layouts the parser understands
/// ("NHWC" or "NCHW"); otherwise throws ParseException naming the node and type.
#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \
    if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Unsupported data format %1% passed for %2% node %3%. " \
                    "Only NHWC and NCHW supported %4%") \
                % FORMAT \
                % NODE_TYPE \
                % NODE_DEF.name() \
                % CHECK_LOCATION().AsString())); \
    }
326
/// Validates that PADDING is "SAME" or "VALID"; otherwise throws ParseException
/// naming the offending node.
#define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
    if(PADDING != "SAME" && PADDING != "VALID" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Only 'SAME' and 'VALID' padding supported. Got %1% for %2% %3%") \
                % PADDING \
                % NODE_DEF.name() \
                % CHECK_LOCATION().AsString())); \
    } \
338
surmeh01bceff2f2018-03-29 16:29:27 +0100339} // namespace
340
/// Dispatch table mapping a TensorFlow op name to the TfParser member function
/// that converts it. Note: both "Pack" and "Stack" map to ParseStack, and
/// "ConcatV2" maps to ParseConcat.
const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
    { "Const", &TfParser::ParseConst },
    { "Add", &TfParser::ParseAdd },
    { "AddN", &TfParser::ParseAddN },
    { "BiasAdd", &TfParser::ParseBiasAdd },
    { "Identity", &TfParser::ParseIdentity },
    { "Conv2D", &TfParser::ParseConv2D },
    { "DepthwiseConv2dNative", &TfParser::ParseDepthwiseConv2D },
    { "ExpandDims", &TfParser::ParseExpandDims },
    { "FusedBatchNorm", &TfParser::ParseFusedBatchNorm },
    { "Gather", &TfParser::ParseGather},
    { "Greater", &TfParser::ParseGreater},
    { "ConcatV2", &TfParser::ParseConcat },
    { "LRN", &TfParser::ParseLrn },
    { "MatMul", &TfParser::ParseMatMul },
    { "Mean", &TfParser::ParseMean },
    { "Mul", &TfParser::ParseMul },
    { "Placeholder", &TfParser::ParsePlaceholder },
    { "RealDiv", &TfParser::ParseRealDiv },
    { "Relu", &TfParser::ParseRelu },
    { "Relu6", &TfParser::ParseRelu6 },
    { "Reshape", &TfParser::ParseReshape },
    { "ResizeBilinear", &TfParser::ParseResizeBilinear },
    { "Rsqrt", &TfParser::ParseRsqrt },
    { "Shape", &TfParser::ParseShape },
    { "Squeeze", &TfParser::ParseSqueeze },
    { "Sigmoid", &TfParser::ParseSigmoid },
    { "Softmax", &TfParser::ParseSoftmax },
    { "Softplus", &TfParser::ParseSoftplus },
    { "Split", &TfParser::ParseSplit },
    { "StridedSlice", &TfParser::ParseStridedSlice },
    { "Tanh", &TfParser::ParseTanh },
    { "MaxPool", &TfParser::ParseMaxPool },
    { "AvgPool", &TfParser::ParseAvgPool },
    { "Maximum", &TfParser::ParseMaximum },
    { "Minimum", &TfParser::ParseMinimum },
    { "Equal", &TfParser::ParseEqual },
    { "Pad", &TfParser::ParsePad },
    { "Sub", &TfParser::ParseSub },
    { "Pack" , &TfParser::ParseStack },
    { "Stack", &TfParser::ParseStack }
};
383
/// Op types that are accepted as control inputs in the TensorFlow graph but are
/// ignored when building the ArmNN graph.
const std::list<std::string> TfParser::m_ControlInputs = {
    "Assert"
};
387
388ITfParser* ITfParser::CreateRaw()
389{
390 return new TfParser();
391}
392
393ITfParserPtr ITfParser::Create()
394{
395 return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
396}
397
/// Deletes a parser created by CreateRaw()/Create(); used as the ITfParserPtr deleter.
void ITfParser::Destroy(ITfParser* parser)
{
    delete parser;
}
402
403inline void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
404 uint32_t filterSize, bool samePadding,
405 uint32_t* paddingFront, uint32_t* paddingBack) {
406 *paddingFront = 0;
407 *paddingBack = 0;
408
409 if (samePadding) {
410 uint32_t outputSize = (inputSize + stride - 1) / stride;
411 uint32_t temp = (outputSize - 1) * stride + filterSize;
412 if (temp > inputSize) {
413 *paddingFront = (temp - inputSize) / 2;
414 *paddingBack = (temp - inputSize) - *paddingFront;
415 }
416 }
417}
418
/// Convenience wrapper around CalculateSamePadding that returns the head/tail
/// padding through references instead of pointers.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 bool samePadding)
{
    CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
}
424
/// An Abstract base class which represents a single tensorflow operation (node)
/// that has been (potentially partially) converted to Armnn.
/// It may not yet have been fully converted into actual Armnn layers.
class ParsedTfOperation
{
public:
    /// @param parser The owning TfParser (not owned by this object).
    /// @param node   The TensorFlow node this operation wraps (reference must outlive this object).
    ParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
        : m_Parser(parser)
        , m_Node(node)
    {
    }

    virtual ~ParsedTfOperation() {};

    const tensorflow::NodeDef& GetNode() const { return m_Node; }

    /// Gets the ArmNN IOutputSlot corresponding to the given output index of the Tensorflow operation.
    /// This may result in the creation of Armnn layers if this was deferred (e.g. see ParsedConstTfOperation).
    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) = 0;

    /// If this operation is an Identity then this will follow and return the 'parent' operation (recursively).
    virtual ParsedTfOperation* ResolveIdentityOperations()
    {
        return this;
    }

protected:
    TfParser* m_Parser;                 // Owning parser (not owned here).
    const tensorflow::NodeDef& m_Node;  // The wrapped TensorFlow node.
};
455
/// An ParsedTfOperation where the Armnn equivalent is a single layer,
/// with output slots that correspond directly to the Tf node outputs.
class SingleLayerParsedTfOperation : public ParsedTfOperation
{
public:
    /// @param layer The ArmNN layer this node maps to; may be nullptr for
    ///              deferred subclasses (see DeferredSingleLayerParsedTfOperation).
    SingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node, IConnectableLayer* layer)
        : ParsedTfOperation(parser, node)
        , m_Layer(layer)
    {
    }

    /// Returns the layer's output slot with the same index as the Tf output.
    /// Throws ParseException if the index is out of range for the layer.
    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        BOOST_ASSERT(m_Layer);
        // Assumes one-to-one mapping between Tf and armnn output slots.
        unsigned int armnnOutputSlotIdx = tfOutputIndex;
        if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "The requested output slot #%1% "
                        "for %2% does not exist %3%")
                    % armnnOutputSlotIdx
                    % m_Layer->GetName()
                    % CHECK_LOCATION().AsString()));
        }
        return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
    }

protected:
    IConnectableLayer* m_Layer; // The single ArmNN layer for this node (not owned).
};
489
/// A SingleLayerParsedTfOperation for deferred layer creation.
class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
{
public:
    DeferredSingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
        : SingleLayerParsedTfOperation(parser, node, nullptr)
    {
    }

    /// Lazily creates the ArmNN layer on first use, then delegates to the base class.
    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        if (!m_Layer)
        {
            CreateLayerDeferred();
        }
        return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
    }

private:
    /// Subclasses implement the actual (deferred) creation of m_Layer.
    virtual void CreateLayerDeferred() = 0;
};
511
512
/// Constructs a parser with an empty network handle (null pointer and null
/// deleter); m_Network is presumably (re)assigned when a graph is parsed — the
/// creation site is outside this chunk.
TfParser::TfParser()
    : m_Network(nullptr, nullptr)
{
}
517
518
519const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
520{
521 if (nodeDef->op() != "Identity")
522 {
523 return nodeDef;
524 }
525
526 if (nodeDef->input_size() != 1)
527 {
telsoa01c577f2c2018-08-31 09:22:23 +0100528 throw ParseException(
529 boost::str(
530 boost::format(
531 "Identity node should have a single input! %1% has %2% inputs %3%")
532 % nodeDef->name()
533 % nodeDef->input_size()
534 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100535 }
536
537 auto it = m_NodesByName.find(nodeDef->input(0));
538 if (it != m_NodesByName.end())
539 {
540 const tensorflow::NodeDef* inputNode = it->second;
541 return ResolveIdentityNode(inputNode);
542 }
543 else
544 {
telsoa01c577f2c2018-08-31 09:22:23 +0100545 throw ParseException(
546 boost::str(
547 boost::format(
548 "Cannot find what the Identity node %1% is linked to! %2%")
549 % nodeDef->name()
550 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100551 }
552}
553
/// Returns the TensorFlow nodes feeding @p nodeDef, paired with the output index
/// consumed on each. Control inputs ("^name") are skipped, and Const nodes are
/// treated as having no inputs. Throws ParseException for unknown input names.
std::vector<OutputOfConstNodeDef>
TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
{
    std::vector<OutputOfConstNodeDef> ret;

    if (nodeDef.op() == "Const")
    {
        // For some reason const node can have "Control Inputs". We ignore them for now.
        return ret;
    }

    ret.reserve(boost::numeric_cast<size_t>(nodeDef.input_size()));
    for (int j = 0; j < nodeDef.input_size(); ++j)
    {
        OutputId outputId = ParseOutputId(nodeDef.input(j));

        // NOTE(review): assumes input names are never empty strings — indexing [0] is unchecked.
        if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
        {
            // We currently allow Control Input from TensorFlow graph but we ignore them from ArmNN graph.
            continue;
        }

        auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
        if (inputIt == m_NodesByName.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Can't find node '%1%', which is listed as an input of '%2%' %3%")
                    % nodeDef.input(j)
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }
        ret.push_back(OutputOfConstNodeDef(inputIt->second,outputId.m_Index));
    }

    return ret;
}
592
/// Returns the already-parsed operations feeding @p nodeDef, after validating
/// that exactly @p expectedNumInputs data inputs are present. Identity
/// operations are transparently resolved to the operation they forward to.
/// Throws ParseException on an input-count mismatch or an unparsed input.
std::vector<OutputOfParsedTfOperation>
TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
                                            std::size_t expectedNumInputs)
{
    // Fetches the tensorflow nodes connected as inputs and validate the size.
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
    const std::size_t numInputs = nodes.size();
    if (numInputs != expectedNumInputs)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Unexpected number of inputs for node %1%. Expected %2%, found %3% %4%")
                % nodeDef.name()
                % expectedNumInputs
                % numInputs
                % CHECK_LOCATION().AsString()));
    }
    // Fetches the corresponding ParsedTfOperation operations
    std::vector<OutputOfParsedTfOperation> result;
    for (auto&& node : nodes)
    {
        auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
        if (it == m_ParsedTfOperations.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Node with name '%1%' has not been parsed %2%")
                    % node.m_IndexedValue->name()
                    % CHECK_LOCATION().AsString()));
        }
        ParsedTfOperation* parsedOp = it->second.get();
        // Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
        parsedOp = parsedOp->ResolveIdentityOperations();
        result.push_back(OutputOfParsedTfOperation(parsedOp,node.m_Index));
    }
    return result;
}
632
/// Adds an Addition layer named @p layerName combining the two given output
/// slots. When the inputs' ranks differ, a 1D/4D pair is broadcast by inserting
/// a reshape (NHWC assumed); any other rank mismatch throws ParseException.
/// The output shape is the element-wise max of the (possibly reshaped) inputs.
IConnectableLayer* TfParser::CreateAdditionLayer(
    const tensorflow::NodeDef& nodeDef,
    IOutputSlot* input0Slot,
    IOutputSlot* input1Slot,
    const std::string& layerName)
{
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    const unsigned int input0Dim = input0Info.GetNumDimensions();
    const unsigned int input1Dim = input1Info.GetNumDimensions();
    if (input0Dim != input1Dim)
    {
        // broadcasting where input0 and input1 have different number of dimensions
        // is only supported for 1D and 4D tensors pair
        if (input0Dim == 1 && input1Dim == 4)
        {
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
        }
        else if (input0Dim == 4 && input1Dim == 1)
        {
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
        }
        else
        {
            throw ParseException(
                boost::str(
                    boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
                    % layerName
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }
    }
    IConnectableLayer* const layer = m_Network->AddAdditionLayer(layerName.c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // Ensure the output tensor has the correct dimensions even if a broadcast has been done
    TensorInfo outputInfo = input0Slot->GetTensorInfo();
    std::vector<unsigned int> outputShape;

    const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
    const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();

    // After the broadcast handling above, both inputs have equal rank; take the
    // larger extent on each axis as the broadcast output extent.
    for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
    {
        outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
    }

    outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return layer;
}
688
689IConnectableLayer* TfParser::CreateAdditionLayer(
690 const tensorflow::NodeDef& nodeDef,
691 IConnectableLayer* layerOne,
692 IConnectableLayer* layerTwo,
693 unsigned int numberOfAddition,
694 unsigned long numberOfLayersToConnect,
695 bool isOdd)
696{
697 IOutputSlot* input0Slot = &layerOne->GetOutputSlot(0);
698 IOutputSlot* input1Slot = &layerTwo->GetOutputSlot(0);
699 std::string layerName(nodeDef.name());
700 if (isOdd || numberOfLayersToConnect != 2)
701 {
702 // we are not connecting the final layer
703 layerName.append("_addN_").append(std::to_string(numberOfAddition));
704 }
705 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
706}
707
708IConnectableLayer* TfParser::CreateAdditionLayer(
709 const tensorflow::NodeDef& nodeDef,
710 const OutputOfParsedTfOperation& opOne,
711 const OutputOfParsedTfOperation& opTwo,
712 unsigned int numberOfAddition)
713{
714 IOutputSlot* input0Slot = &opOne.m_IndexedValue->ResolveArmnnOutputSlot(opOne.m_Index);
715 IOutputSlot* input1Slot = &opTwo.m_IndexedValue->ResolveArmnnOutputSlot(opTwo.m_Index);
716 std::string layerName(nodeDef.name());
717 layerName.append("_addN_").append(std::to_string(numberOfAddition));
718 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
719}
720
721IConnectableLayer* TfParser::CreateAdditionLayer(
722 const tensorflow::NodeDef& nodeDef,
723 const OutputOfParsedTfOperation& op,
724 IConnectableLayer* layer)
725{
726 IOutputSlot* input0Slot = &op.m_IndexedValue->ResolveArmnnOutputSlot(op.m_Index);
727 IOutputSlot* input1Slot = &layer->GetOutputSlot(0);
728 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, nodeDef.name());
729}
730
731ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
732{
Derek Lambertibaa177f2019-12-10 22:00:43 +0000733 boost::ignore_unused(graphDef);
Ferran Balaguerfbdad032018-12-28 18:15:24 +0000734 uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
735 if (numberOfInputs < 2)
736 {
737 // should never happen
738 throw ParseException(
739 boost::str(
740 boost::format(
741 "AddN Node with name '%1%' has less than two (%2) inputs %3%")
742 % nodeDef.name()
743 % std::to_string(numberOfInputs)
744 % CHECK_LOCATION().AsString()));
745 }
746 else if (numberOfInputs == 2)
747 {
748 //this is the same as a simple Add operation
749 return AddAdditionLayer(nodeDef, false);
750 }
751 else
752 {
753 // build a binary tree of Add layers and return the final Add as the return from the function
754 // if we have an odd number of inputs then the final Add will consist of a layer connecting to an
755 // OutputOfParsedTfOperation, otherwise it will be two layers being added together
756 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numberOfInputs);
757 unsigned int numberOfAdditions = 0;
758 std::vector<IConnectableLayer*> layers;
759 // NOTE: at this point we will have a minimum of three inputs
760 for (unsigned int i = 0; i < numberOfInputs; ++i)
761 {
762 // every time i is odd we have two inputs to process.
763 bool onSecondItem = i % 2;
764 if (onSecondItem)
765 {
766 ++numberOfAdditions;
767 IConnectableLayer* newLayer = CreateAdditionLayer(
768 nodeDef, inputs[ i - 1], inputs[i], numberOfAdditions);
769 layers.push_back(newLayer);
770 }
771 }
772
773 std::vector<IConnectableLayer*> layersToConnect(layers);
774 unsigned long numberOfLayersToConnect = layersToConnect.size();
775 bool isOdd = numberOfInputs % 2;
776
777 while (numberOfLayersToConnect > 1)
778 {
779 layers.clear();
780 for (unsigned long i = 0; i < numberOfLayersToConnect; ++i) {
781 bool onSecondItem = i % 2;
782 if (onSecondItem) {
783 ++numberOfAdditions;
784 IConnectableLayer* newLayer = CreateAdditionLayer(
785 nodeDef,
786 layersToConnect[i - 1],
787 layersToConnect[i],
788 numberOfAdditions,
789 numberOfLayersToConnect,
790 isOdd);
791 layers.push_back(newLayer);
792 }
793 }
794 //OK... need to go again... maybe
795 layersToConnect = layers;
796 numberOfLayersToConnect = layersToConnect.size();
797 }
798 IConnectableLayer* finalLayer = layersToConnect[0];
799 // if we had an odd number of inputs we need to connect the final layer to the
800 // last OutputOfParsedTfOperation in order to create the last Add layer we will
801 // be handing back.
802 if (isOdd)
803 {
804 // connect the final layer to the last op
805 finalLayer = CreateAdditionLayer(nodeDef, inputs[numberOfInputs - 1], finalLayer);
806 }
807 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, finalLayer);
808 }
809}
810
/// Parses an Add node. When one input is a MatMul and the other a parsed float
/// Const, the pair is fused into a single FullyConnected layer (MatMul weights +
/// Add bias); otherwise a plain Addition layer is created.
ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
    // together as FullyConnected.
    if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
        HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        IConnectableLayer* layer =
            AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
                                   &nodeDef,nodeDef.name().c_str());
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
    }
    else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
             inputs[1].m_IndexedValue->GetNode().op() == "MatMul")
    {
        // Same fusion with the operands swapped.
        IConnectableLayer* layer =
            AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
                                   &nodeDef,nodeDef.name().c_str());
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
    }
    else
    {
        // Otherwise it's just a regular addition.
        return AddAdditionLayer(nodeDef);
    }
}
840
/// Parses a BiasAdd node as an addition; the 'true' flag presumably tells
/// AddAdditionLayer to apply bias-add (broadcast) semantics — confirm against
/// AddAdditionLayer's definition, which is outside this chunk.
ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);
    return AddAdditionLayer(nodeDef, true);
}
846
/// An ParsedTfOperation which forwards to another (used for Identity nodes).
class ParsedIdentityTfOperation : public ParsedTfOperation
{
public:
    /// @param parser         Owning parser, forwarded to the base class.
    /// @param node           The TensorFlow Identity node this object represents.
    /// @param representative The parsed operation this Identity forwards to (not owned).
    ParsedIdentityTfOperation(TfParser* parser, const tensorflow::NodeDef& node, ParsedTfOperation* representative)
        : ParsedTfOperation(parser, node)
        , m_Representative(representative)
    {
    }

    /// Forwards output-slot resolution to the represented operation, so an
    /// Identity node never creates an ArmNN layer of its own.
    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        BOOST_ASSERT(m_Representative);
        return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
    }

    /// Recursively resolves chains of Identity nodes down to the first
    /// non-Identity operation.
    virtual ParsedTfOperation* ResolveIdentityOperations() override
    {
        return m_Representative->ResolveIdentityOperations();
    }

private:
    // Non-owning pointer to the operation whose outputs this node forwards.
    ParsedTfOperation* m_Representative;
};
871
872ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
873{
Derek Lambertibaa177f2019-12-10 22:00:43 +0000874 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +0100875 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
876 // Any requests for the output slots of this node should be forwarded to the node connected as input.
877 return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
878}
879
/// An ParsedTfOperation for a Const node.
/// Creation of the armnn ConstLayer is deferred until it is actually needed, because Const nodes are mostly used
/// for weight inputs to MatMul/Conv2D nodes and in these cases armnn doesn't need a ConstLayer.
template <typename T>
class ParsedConstTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    /// Copies the tensor values into m_Storage so their lifetime is bound to this object.
    /// @param parser     Owning parser, forwarded to the base class.
    /// @param node       The TensorFlow Const node this object represents.
    /// @param tensorData Raw tensor values; tensorInfo.GetNumElements() values are copied.
    /// @param tensorInfo Shape/type description of the constant tensor.
    ParsedConstTfOperation(TfParser* parser, const tensorflow::NodeDef& node,
        const T* tensorData, const TensorInfo& tensorInfo)
        : DeferredSingleLayerParsedTfOperation(parser, node),
        m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
        m_TensorInfo(tensorInfo)
    {
        // The template type must match the element size declared by the tensor info.
        BOOST_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
    }

    /// Creates the armnn ConstantLayer on first use (see class comment for why
    /// this is deferred rather than done in the constructor).
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
        m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
    }

    /// Copies the constant data into the caller-provided vector and returns a
    /// ConstTensor that points at that vector rather than at m_Storage.
    ConstTensor GetConstTensor(std::vector<T>& outputTensorData) const
    {
        outputTensorData.resize(m_TensorInfo.GetNumElements());

        memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.GetNumBytes());

        // Updates the result to point to the user provided storage.
        ConstTensor constTensor(m_TensorInfo, outputTensorData);
        return constTensor;
    }

    /// Direct, non-owning read access to the internally stored tensor data.
    const T* GetStorage() const
    {
        return m_Storage.data();
    }

    /// Shape/type description of the stored constant tensor.
    const TensorInfo& GetTensorInfo() const
    {
        return m_TensorInfo;
    }

private:
    ///< Manages the lifetime of the tensor data.
    std::vector<T> m_Storage;
    ///< Describes the layout of the tensor and points to the data in m_Storage.
    TensorInfo m_TensorInfo;
};
930
telsoa01c577f2c2018-08-31 09:22:23 +0100931DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
932 const tensorflow::NodeDef& nodeDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100933{
934 switch (tfDataType)
935 {
936 case tensorflow::DT_FLOAT:
937 return DataType::Float32;
938 break;
939 case tensorflow::DT_INT32:
940 return DataType::Signed32;
941 break;
942 default:
telsoa01c577f2c2018-08-31 09:22:23 +0100943 throw ParseException(
944 boost::str(
945 boost::format(
946 "Unknown DataType %1% for node %2% %3%")
947 % tensorflow::DataType_Name(tfDataType)
948 % nodeDef.name()
949 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100950 }
951}
952
953struct ParseTfTensorValueList
954{
955 template<typename DataType>
956 static void Parse(
957 const tensorflow::TensorProto& tfTensor,
958 unsigned int dstElements,
959 std::vector<int8_t>& outputData);
960
961 template <typename DataType>
962 static void ReadData(const void* srcData, unsigned int numSrcElements,
963 std::vector<int8_t>& dstData, unsigned int numDstElements)
964 {
telsoa01c577f2c2018-08-31 09:22:23 +0100965 // If there are no entries in the list, perform no action.
surmeh01bceff2f2018-03-29 16:29:27 +0100966 if (numSrcElements == 0)
967 {
968 return;
969 }
970
telsoa01c577f2c2018-08-31 09:22:23 +0100971 // If no size was provided, use the length of the value list.
surmeh01bceff2f2018-03-29 16:29:27 +0100972 if (numDstElements == 0)
973 {
974 numDstElements = numSrcElements;
975 }
976
telsoa01c577f2c2018-08-31 09:22:23 +0100977 // Allocates memory.
surmeh01bceff2f2018-03-29 16:29:27 +0100978 dstData.resize(std::max(numSrcElements, numDstElements) * sizeof(DataType));
979
980 const DataType* srcTensor = reinterpret_cast<const DataType*>(srcData);
981 DataType* dstTensor = reinterpret_cast<DataType*>(dstData.data());
982
telsoa01c577f2c2018-08-31 09:22:23 +0100983 // Copies the value list entries into the destination.
surmeh01bceff2f2018-03-29 16:29:27 +0100984 std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);
985
986 if (numDstElements > numSrcElements)
987 {
telsoa01c577f2c2018-08-31 09:22:23 +0100988 // Uses the last element in the list to fill the remaining entries.
surmeh01bceff2f2018-03-29 16:29:27 +0100989 std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
990 }
991 }
992
993};
994
995template <>
996void ParseTfTensorValueList::Parse<float>(const tensorflow::TensorProto& tfTensor,
997 unsigned int dstElements, std::vector<int8_t>& outputData)
998{
999 ReadData<float>(tfTensor.float_val().data(), static_cast<unsigned int>(tfTensor.float_val_size()),
1000 outputData, dstElements);
1001}
1002
1003template <>
1004void ParseTfTensorValueList::Parse<int32_t>(const tensorflow::TensorProto& tfTensor,
1005 unsigned int dstElements, std::vector<int8_t>& outputData)
1006{
1007 ReadData<int32_t>(tfTensor.int_val().data(), static_cast<unsigned int>(tfTensor.int_val_size()),
1008 outputData, dstElements);
1009}
1010
/// Factory that builds an OperatorType<DataType> ParsedTfOperation, used as the
/// FuncType argument of InvokeParseFunction for runtime DataType dispatch.
/// NOTE(review): the T parameter (default int8_t) is unused by this primary
/// template — presumably kept for signature uniformity; confirm before removing.
template <template<typename> class OperatorType, typename T = int8_t>
struct MakeTfOperation
{
    /// Constructs OperatorType<DataType> from the parser/node plus any extra arguments.
    template<typename DataType, class... Args>
    inline static std::unique_ptr<OperatorType<DataType>> Parse(TfParser* parser, const tensorflow::NodeDef& node,
        Args&&... args)
    {
        return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
    }
};
1021
/// Specialization for ParsedConstTfOperation: reinterprets the raw byte buffer
/// produced by ParseConst as the requested DataType before constructing the
/// operation (which then copies the data into its own storage).
template <>
struct MakeTfOperation<ParsedConstTfOperation>
{
    template<typename DataType, class... Args>
    inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(TfParser* parser,
        const tensorflow::NodeDef& node, const std::vector<int8_t>& tensorData, const TensorInfo& tensorInfo)
    {
        return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
            reinterpret_cast<const DataType*>(tensorData.data()), tensorInfo);
    }
};
1033
1034template <class FuncType>
1035struct InvokeParseFunction
1036{
1037 template<class ResType, class... Args>
1038 inline static ResType Result(DataType dataType, Args&&... args)
1039 {
1040 if (dataType == DataType::Float32)
1041 {
1042 return FuncType::template Parse<float>(std::forward<Args>(args)...);
1043 }
1044 else if (dataType == DataType::Signed32)
1045 {
1046 return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1047 }
1048
1049 return ResType();
1050 }
1051
1052 template<class... Args>
1053 inline static void Result(DataType dataType, Args&&... args)
1054 {
1055 if (dataType == DataType::Float32)
1056 {
1057 FuncType::template Parse<float>(std::forward<Args>(args)...);
1058 }
1059 else if (dataType == DataType::Signed32)
1060 {
1061 FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1062 }
1063 }
1064};
1065
/// Parses a TensorFlow Const node into a (deferred) ParsedConstTfOperation.
/// The tensor values may come either from the proto's packed tensor_content
/// bytes or from its typed value list; shape, dtype and data-size consistency
/// are validated along the way.
/// @throws ParseException on a missing value attribute, an unsupported dtype,
///         a content blob without a shape, no data at all, or more data than
///         the declared shape allows.
ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);
    BOOST_ASSERT(nodeDef.op() == "Const");

    if (nodeDef.attr().count("value") == 0)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Value not found for Const node - %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
    const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
    const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "dtype");

    // Collects the size of each declared dimension.
    const auto GetDimensionSize = [](auto& d) { return d.size(); };

    std::vector<unsigned int> dimensionSizes;
    std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
                   std::back_inserter(dimensionSizes), GetDimensionSize);

    // Calculates number of elements.
    const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
    unsigned int numElements = 0U;

    // numElements stays 0 when no shape was declared - handled per data source below.
    if (!dimensionSizes.empty())
    {
        numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
                                      1U, std::multiplies<unsigned int>());
    }

    std::vector<int8_t> tensorData;

    // Get tensor data from the list of values attribute.
    if (tfTensor.tensor_content().empty())
    {
        InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);

        // If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
        // tensor of the provided number of elements.
        if (numElements == 0)
        {
            const unsigned int tfNumElements =
                static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
            dimensionSizes.push_back(tfNumElements);
        }
    }
    // Gets tensor data from tensor content attribute.
    else
    {
        tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());

        // Checks if a tensor shape is defined for the tensor content.
        if (numElements == 0)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "No tensor shape found for Const node - %1% %2%")
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
    }

    // Const node requires at least a list of values or a content attribute.
    if (tensorData.empty())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "No tensor data found for Const node - %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
                                dimensionSizes.data(),
                                dataType);

    // If we have a list of values, then the length of the list must be
    // less than or equal to the number of elements implied by the shape argument.
    if (tensorData.size() > tensorInfo.GetNumBytes())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Number of elements (%1%) should be less than or equal "
                    "to the number of elements implied by the shape argument (%2%) for Const node - %3% %4%")
                    % (tensorData.size() / GetDataTypeSize(dataType))
                    % tensorInfo.GetNumElements()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    // Dispatch on dataType to build a ParsedConstTfOperation<float> or <int32_t>.
    return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
        dataType, this, nodeDef, tensorData, tensorInfo);
}
1167
1168template<typename Type>
1169bool TfParser::HasParsedConstTensor(const std::string & nodeName) const
1170{
1171 auto it = m_ParsedTfOperations.find(nodeName);
jimfly01f6ba7472018-12-04 10:09:52 +00001172 if (it == m_ParsedTfOperations.end())
surmeh01bceff2f2018-03-29 16:29:27 +01001173 {
1174 return false;
1175 }
jimfly01f6ba7472018-12-04 10:09:52 +00001176 return dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) != nullptr;
1177}
1178
1179template<typename Type>
1180bool TfParser::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const
1181{
1182 return dynamic_cast<ParsedConstTfOperation<Type>*>(parsedTfOpPtr) != nullptr;
surmeh01bceff2f2018-03-29 16:29:27 +01001183}
1184
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00001185unsigned int TfParser::GetConstInputIndex(const std::vector<OutputOfParsedTfOperation>& inputs)
1186{
1187 for (unsigned int i = 0; i < inputs.size(); i++)
1188 {
1189 if (HasParsedConstTensor<int32_t>(inputs[i].m_IndexedValue->GetNode().name()))
1190 {
1191 return i;
1192 }
1193 }
1194 throw ParseException(
1195 boost::str(
1196 boost::format(
1197 "ArmNN only supports operators with constant axis. %1%")
1198 % CHECK_LOCATION().AsString()));
1199
1200}
1201
/// Parses a TensorFlow Conv2D node into an armnn Convolution2d layer.
/// Requires constant float weights and dilations of [1,1,1,1] (the default);
/// supports NHWC and NCHW data formats and SAME/VALID padding.
ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    // Input 1 carries the weights; armnn needs them as a constant at parse time.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Convolution layers with constant weights for %1%, input %2% %3%")
                    % nodeDef.name()
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    // Read the dilations, if present - only [1,1,1,1] (the default) is supported.
    std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
    if (!dilations.empty())
    {
        for (auto dilation : dilations)
        {
            if (dilation != 1u)
            {
                throw ParseException(
                    boost::str(
                        boost::format(
                            "ArmNN only supports Convolution layers with dilations [1,1,1,1] for %1% %2%")
                            % nodeDef.name()
                            % CHECK_LOCATION().AsString()));
            }
        }
    }

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    // dataLayoutIndexed maps H/W/C positions for the chosen layout.
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // Tensorflow weights are [H, W, In, Out].
    // ArmNN weights have to be [Out, H, W, In] when the data layout is NHWC,
    // and [Out, In, H, W] when the data layout is NCHW.
    PermutationVector permutationVector =
        dataLayout == DataLayout::NHWC ?
            std::initializer_list<unsigned int>{ 1, 2, 3, 0 } : // NHWC: [H, W, In, Out] -> [Out, H, W, In]
            std::initializer_list<unsigned int>{ 2, 3, 1, 0 };  // NCHW: [H, W, In, Out] -> [Out, In, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    uint32_t weightHeight = weightTensor.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t weightWidth = weightTensor.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    // SAME: output spatial size = ceil(input / stride); VALID: no implicit padding.
    if (paddingString == "SAME")
    {
        padding = true;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                 static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                 static_cast<float>(desc.m_StrideX)));
    }

    // Output channel count is the weights' Out dimension (index 0 after swizzling).
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      weightTensor.GetShape()[0] },
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
        default:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      weightTensor.GetShape()[0],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc,
                                                                weightTensor,
                                                                EmptyOptional(),
                                                                nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1343
/// Parses a TensorFlow DepthwiseConv2dNative node into an armnn
/// DepthwiseConvolution2d layer. Requires constant float weights; supports
/// NHWC and NCHW data formats and SAME/VALID padding.
ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
                                                    const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    // Input 1 carries the weights; armnn needs them as a constant at parse time.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Depthwise Convolution layer with constant weights. "
                    "Non const input found %1% for node %2% %3%")
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    // dataLayoutIndexed maps H/W/C positions for the chosen layout.
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // Tensorflow weights come in the format [H, W, I, M].
    // ArmNN weights have to be [M, I, H, W].
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    // After swizzling the weights are [M, I, H, W], so H and W are at indices 2 and 3.
    uint32_t weightHeight = weightTensor.GetShape()[2];
    uint32_t weightWidth = weightTensor.GetShape()[3];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    // SAME: output spatial size = ceil(input / stride); VALID: no implicit padding.
    if (paddingString == "SAME")
    {
        padding = true;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                 static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                 static_cast<float>(desc.m_StrideX)));
    }

    // Output channel count is M * I (channel multiplier times input channels).
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      weightTensor.GetShape()[0] * weightTensor.GetShape()[1]},
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
        default:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                                         weightTensor,
                                                                         EmptyOptional(),
                                                                         nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1465
Conor Kennedyc2130a02018-12-05 11:05:54 +00001466TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
1467{
1468 BOOST_ASSERT(nodeDef.op() == "ExpandDims");
1469
1470 if (inputTensorInfo.GetNumDimensions() > 4) {
1471 throw ParseException(
1472 boost::str(
1473 boost::format(
1474 "Unsupported number of dimensions: %1% for input shape for ExpandDims %2% %3%")
1475 % inputTensorInfo.GetNumDimensions()
1476 % nodeDef.name()
1477 % CHECK_LOCATION().AsString()));
1478 }
1479
1480 std::int32_t expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");
1481
1482 std::int32_t inputDimSize = boost::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
1483 std::vector<uint32_t> outputDims;
1484
1485 // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
1486 if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1487 {
1488 // add current input shape to outputDims
1489 for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
1490 auto currentDimension = inputTensorInfo.GetShape()[i];
1491 outputDims.push_back(currentDimension);
1492 }
1493
1494 // insert a dimension of 1 at index 'expandDim' of inputs shape
1495 if (expandDim >= 0)
1496 {
1497 auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1498 outputDims.insert(getPosition, 1);
1499 }
1500
1501 // if negative number for 'expandDim' then count backwards from the last element
1502 // and insert 1 dimension at index 'expandDim'
1503 if (expandDim < 0)
1504 {
Matteo Martincighd7cceeb2018-12-06 09:06:29 +00001505 int outputDimSize = boost::numeric_cast<int>(outputDims.size() + 1);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001506 auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1507 outputDims.insert(getPosition, 1);
1508 }
1509 }
1510 else
1511 {
1512 throw InvalidArgumentException(
1513 boost::str(
1514 boost::format(
1515 "Cannot expand dimension %1% in input tensor with %2% dimension %3%")
1516 % expandDim
1517 % inputDimSize
1518 % CHECK_LOCATION().AsString()));
1519 }
1520
1521 if (outputDims.size() > 4)
1522 {
1523 throw ParseException(
1524 boost::str(
1525 boost::format(
1526 "Unsupported number of dimensions: %1% for output shape for ExpandDims %2% %3%")
1527 % outputDims.size()
1528 % nodeDef.name()
1529 % CHECK_LOCATION().AsString()));
1530 }
1531
1532 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1533 outputDims.data());
1534
1535 TensorInfo outTensorInfo = inputTensorInfo;
1536 outTensorInfo.SetShape(outShape);
1537
1538 return outTensorInfo;
1539}
1540
1541ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1542{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001543 boost::ignore_unused(graphDef);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001544 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1545
1546 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1547 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1548
1549 TensorInfo outputInfo;
1550 outputInfo = OutputShapeOfExpandDims(nodeDef, inputTensorInfo);
1551
1552 ReshapeDescriptor reshapeDesc;
1553 reshapeDesc.m_TargetShape = outputInfo.GetShape();
1554 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1555 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1556 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1557
1558 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1559}
1560
surmeh01bceff2f2018-03-29 16:29:27 +01001561ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
1562 const tensorflow::GraphDef& graphDef)
1563{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001564 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01001565 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
1566
1567 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1568 {
telsoa01c577f2c2018-08-31 09:22:23 +01001569 throw ParseException(
1570 boost::str(
1571 boost::format(
1572 "ArmNN only supports FusedBatchNormalization layers with constant scale. "
1573 "Input %1%. Node %2% %3%")
1574 % inputs[1].m_IndexedValue->GetNode().name()
1575 % nodeDef.name()
1576 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001577 }
1578 ParsedConstTfOperation<float>* scaleNode =
1579 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1580
1581 if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
1582 {
telsoa01c577f2c2018-08-31 09:22:23 +01001583 throw ParseException(
1584 boost::str(
1585 boost::format(
1586 "ArmNN only supports FusedBatchNormalization layers with constant offset. "
1587 "Input %1%. Node %2% %3%")
1588 % inputs[2].m_IndexedValue->GetNode().name()
1589 % nodeDef.name()
1590 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001591 }
1592 ParsedConstTfOperation<float>* offsetNode =
1593 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);
1594
1595 if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
1596 {
telsoa01c577f2c2018-08-31 09:22:23 +01001597 throw ParseException(
1598 boost::str(
1599 boost::format(
1600 "ArmNN only supports FusedBatchNormalization layers with constant mean. "
1601 "Input %1%. Node %2% %3%")
1602 % inputs[3].m_IndexedValue->GetNode().name()
1603 % nodeDef.name()
1604 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001605 }
1606 ParsedConstTfOperation<float>* meanNode =
1607 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);
1608
1609 if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
1610 {
telsoa01c577f2c2018-08-31 09:22:23 +01001611 throw ParseException(
1612 boost::str(
1613 boost::format(
1614 "ArmNN only supports FusedBatchNormalization layers with constant variance. "
1615 "Input %1%. Node %2% %3%")
1616 % inputs[4].m_IndexedValue->GetNode().name()
1617 % nodeDef.name()
1618 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001619 }
1620 ParsedConstTfOperation<float>* varianceNode =
1621 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);
1622
Aron Virginas-Tar2e259272019-11-27 13:29:51 +00001623 const std::string dataFormat = ReadOptionalNodeStringAttribute(nodeDef, "data_format", "NHWC");
Matteo Martincigh075c7502018-12-05 13:10:45 +00001624 CHECK_DATA_FORMAT(nodeDef, dataFormat, "FusedBatchNorm");
1625
telsoa01c577f2c2018-08-31 09:22:23 +01001626 // The descriptor only has the epsilon attribute.
surmeh01bceff2f2018-03-29 16:29:27 +01001627 BatchNormalizationDescriptor desc;
1628 desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
Matteo Martincigh075c7502018-12-05 13:10:45 +00001629 desc.m_DataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
surmeh01bceff2f2018-03-29 16:29:27 +01001630
telsoa01c577f2c2018-08-31 09:22:23 +01001631 // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
1632 // locally until the layer is added.
surmeh01bceff2f2018-03-29 16:29:27 +01001633 std::vector<float> scaleTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001634 ConstTensor scaleTensor = scaleNode->GetConstTensor(scaleTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001635
1636 std::vector<float> offsetTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001637 ConstTensor offsetTensor = offsetNode->GetConstTensor(offsetTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001638
1639 std::vector<float> meanTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001640 ConstTensor meanTensor = meanNode->GetConstTensor(meanTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001641
1642 std::vector<float> varianceTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001643 ConstTensor varianceTensor = varianceNode->GetConstTensor(varianceTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001644
1645 IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
1646 meanTensor,
1647 varianceTensor,
1648 offsetTensor,
1649 scaleTensor,
1650 nodeDef.name().c_str());
1651
1652 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1653
Matteo Martincigh075c7502018-12-05 13:10:45 +00001654 layer->GetOutputSlot(0).SetTensorInfo(inputSlot.GetTensorInfo());
1655 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01001656
1657 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1658}
1659
telsoa01c577f2c2018-08-31 09:22:23 +01001660bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
1661 size_t alphaLayerIndex,
1662 const OutputOfParsedTfOperation& otherOp,
1663 armnn::IOutputSlot** outputOfLeakyRelu,
1664 armnn::ActivationDescriptor & desc)
1665{
1666 const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();
1667
1668 // Verifying all these assumptions hold:
1669 //
1670 // 1, the mulNodeDef is an elementwise multiplication node "Mul"
1671 // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
1672 // 3, the inputLayerIndex selects a layer which has the same name as otherNodeDef
1673 //
1674
1675 if (mulNodeDef.op() == "Mul")
1676 {
1677 size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
1678 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
1679
1680 BOOST_ASSERT(inputs.size() == 2);
1681 BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
1682 BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
1683 BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
1684
1685 if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
1686 {
1687 if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
1688 {
1689 ParsedConstTfOperation<float>* alpha =
1690 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(
1691 inputs[alphaLayerIndex].m_IndexedValue);
1692
1693 std::vector<float> const_data;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001694 ConstTensor const_tensor = alpha->GetConstTensor(const_data);
telsoa01c577f2c2018-08-31 09:22:23 +01001695
1696 if (const_data.size() == 1)
1697 {
1698 desc.m_Function = ActivationFunction::LeakyReLu;
1699 desc.m_A = const_data[0];
1700
1701 *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
1702 return true;
1703 }
1704 }
1705 }
1706 }
1707 return false;
1708}
1709
telsoa01c577f2c2018-08-31 09:22:23 +01001710ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
1711 const tensorflow::GraphDef& graphDef)
1712{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001713 boost::ignore_unused(graphDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001714 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
Sadik Armagan975c09a2018-12-04 10:02:08 +00001715 if (inputs.size() != 2)
1716 {
1717 throw ParseException(
1718 boost::str(
1719 boost::format(
1720 "Maximum expects two inputs!. Got %1% for Node %2% %3%")
1721 % inputs.size()
1722 % nodeDef.name()
1723 % CHECK_LOCATION().AsString()));
1724 }
1725
telsoa01c577f2c2018-08-31 09:22:23 +01001726 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1727 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1728 IOutputSlot* outputOfLeakyRelu = nullptr;
1729
1730 ActivationDescriptor desc;
1731
Sadik Armagan975c09a2018-12-04 10:02:08 +00001732 // A max node may be part of a LeakyRelu, with one input as a multiplication with a scalar constant,
1733 // i.e. one of the four possible scenarios:
1734 // 1, max(mul(a, x), x)
1735 // 2, max(mul(x, a), x)
1736 // 3, max(x, mul(a, x))
1737 // 4, max(x, mul(x, a))
1738 // These are handled by an activation layer.
telsoa01c577f2c2018-08-31 09:22:23 +01001739
1740 if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1741 IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1742 IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1743 IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1744 {
1745 BOOST_ASSERT(outputOfLeakyRelu != nullptr);
1746
1747 IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
1748 outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
1749 layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
1750 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1751 }
1752 else
1753 {
Sadik Armagan975c09a2018-12-04 10:02:08 +00001754 // Anything else is just a maximum layer.
1755
1756 return AddMaximumLayer(nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001757 }
1758}
1759
jimfly0184c70e62018-12-19 13:14:46 +00001760std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> TfParser::ProcessElementwiseInputSlots(
1761 const tensorflow::NodeDef& nodeDef, const std::string& layerName)
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001762{
1763 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1764
1765 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1766 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1767 const unsigned int input0Dim = input0Slot->GetTensorInfo().GetNumDimensions();
1768 const unsigned int input1Dim = input1Slot->GetTensorInfo().GetNumDimensions();
1769
1770 if (input0Dim != input1Dim)
1771 {
1772 // broadcasting where input0 and input1 have different number of dimensions
1773 // is only supported for 1D and 4D tensors pair
1774 if (input0Dim == 1 && input1Dim == 4)
1775 {
1776 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
1777 }
1778 else if (input0Dim == 4 && input1Dim == 1)
1779 {
1780 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
1781 }
1782 else
1783 {
1784 throw ParseException(
jimfly0184c70e62018-12-19 13:14:46 +00001785 boost::str(
1786 boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
1787 % layerName
1788 % nodeDef.name()
1789 % CHECK_LOCATION().AsString()));
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001790 }
1791 }
jimfly0184c70e62018-12-19 13:14:46 +00001792 return {input0Slot, input1Slot};
1793}
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001794
kevmay012b4d88e2019-01-24 14:05:09 +00001795ParsedTfOperationPtr TfParser::ProcessComparisonLayer(
1796 IOutputSlot* input0Slot,
1797 IOutputSlot* input1Slot,
1798 IConnectableLayer* const layer,
1799 const tensorflow::NodeDef& nodeDef)
1800{
1801 input0Slot->Connect(layer->GetInputSlot(0));
1802 input1Slot->Connect(layer->GetInputSlot(1));
1803
1804 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1805 outputInfo.SetDataType(DataType::Boolean);
1806 std::vector<unsigned int> outputShape;
1807
1808 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1809 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1810
1811 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1812 {
1813 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1814 }
1815
1816 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1817 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1818
1819 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1820}
1821
jimfly0184c70e62018-12-19 13:14:46 +00001822ParsedTfOperationPtr TfParser::ProcessElementwiseLayer(
1823 IOutputSlot* input0Slot,
1824 IOutputSlot* input1Slot,
1825 IConnectableLayer* const layer,
1826 const tensorflow::NodeDef& nodeDef)
1827{
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001828 input0Slot->Connect(layer->GetInputSlot(0));
1829 input1Slot->Connect(layer->GetInputSlot(1));
1830
1831 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1832 std::vector<unsigned int> outputShape;
1833
1834 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1835 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1836
1837 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1838 {
1839 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1840 }
1841
1842 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1843 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1844
1845 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1846}
1847
FrancisMurtagh94412af2019-01-24 10:53:39 +00001848ParsedTfOperationPtr TfParser::ParseGather(const tensorflow::NodeDef& nodeDef,
1849 const tensorflow::GraphDef& graphDef)
1850{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001851 boost::ignore_unused(graphDef);
FrancisMurtagh94412af2019-01-24 10:53:39 +00001852 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1853 IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1854 IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1855
1856 // Infer shape of output tensor
1857 unsigned int paramsDim = params.GetTensorInfo().GetNumDimensions();
1858 unsigned int indicesDim = indices.GetTensorInfo().GetNumDimensions();
1859 unsigned int outputDim = paramsDim - 1 + indicesDim;
1860
1861 std::vector<unsigned int> dimSizes;
1862
1863 for (unsigned int i = 0; i < indicesDim; ++i)
1864 {
1865 dimSizes.push_back(indices.GetTensorInfo().GetShape()[i]);
1866 }
1867 for (unsigned int i = 1; i < paramsDim; ++i)
1868 {
1869 dimSizes.push_back(params.GetTensorInfo().GetShape()[i]);
1870 }
1871
1872 const TensorShape& inferredShape = TensorShape(outputDim, dimSizes.data());
1873
1874 const TensorInfo inferredOutputInfo(inferredShape, params.GetTensorInfo().GetDataType());
1875
1876 IConnectableLayer* const layer = m_Network->AddGatherLayer(nodeDef.name().c_str());
1877 layer->GetOutputSlot(0).SetTensorInfo(inferredOutputInfo);
1878
1879 params.Connect(layer->GetInputSlot(0));
1880 indices.Connect(layer->GetInputSlot(1));
1881
1882 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1883}
1884
jimfly01a06bf312018-12-18 16:24:51 +00001885ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
1886 const tensorflow::GraphDef& graphDef)
1887{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001888 boost::ignore_unused(graphDef);
jimfly01a06bf312018-12-18 16:24:51 +00001889 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
1890 IOutputSlot* input0Slot = inputLayers.first;
1891 IOutputSlot* input1Slot = inputLayers.second;
1892
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001893 ComparisonDescriptor descriptor(ComparisonOperation::Greater);
1894 IConnectableLayer* const layer = m_Network->AddComparisonLayer(descriptor, nodeDef.name().c_str());
jimfly01a06bf312018-12-18 16:24:51 +00001895
kevmay012b4d88e2019-01-24 14:05:09 +00001896 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
jimfly01a06bf312018-12-18 16:24:51 +00001897}
1898
jimfly0184c70e62018-12-19 13:14:46 +00001899ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
1900 const tensorflow::GraphDef& graphDef)
1901{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001902 boost::ignore_unused(graphDef);
jimfly0184c70e62018-12-19 13:14:46 +00001903 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
1904 IOutputSlot* input0Slot = inputLayers.first;
1905 IOutputSlot* input1Slot = inputLayers.second;
1906
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001907 ComparisonDescriptor descriptor(ComparisonOperation::Equal);
1908 IConnectableLayer* const layer = m_Network->AddComparisonLayer(descriptor, nodeDef.name().c_str());
jimfly0184c70e62018-12-19 13:14:46 +00001909
kevmay012b4d88e2019-01-24 14:05:09 +00001910 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
jimfly0184c70e62018-12-19 13:14:46 +00001911}
1912
1913ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
1914 const tensorflow::GraphDef& graphDef)
1915{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001916 boost::ignore_unused(graphDef);
jimfly0184c70e62018-12-19 13:14:46 +00001917 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
1918 IOutputSlot* input0Slot = inputLayers.first;
1919 IOutputSlot* input1Slot = inputLayers.second;
1920
1921 IConnectableLayer* const layer = m_Network->AddMinimumLayer(nodeDef.name().c_str());
1922
1923 return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
1924}
1925
jimfly0123be07e2018-12-04 17:47:22 +00001926ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1927{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001928 boost::ignore_unused(graphDef);
jimfly0123be07e2018-12-04 17:47:22 +00001929 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1930
1931 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1932 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1933
1934 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
1935 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
1936
1937 if (input0Info.GetNumDimensions() == 1)
1938 {
1939 const bool isNHWC = true;
1940 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
1941 }
1942
1943 if (input1Info.GetNumDimensions() == 1)
1944 {
1945 const bool isNHWC = true;
1946 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
1947 }
1948
1949 IConnectableLayer* const layer = m_Network->AddSubtractionLayer(nodeDef.name().c_str());
1950
1951 input0Slot->Connect(layer->GetInputSlot(0));
1952 input1Slot->Connect(layer->GetInputSlot(1));
1953
1954 if (input0Info.GetNumDimensions() == 1)
1955 {
1956 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
1957 }
1958 else
1959 {
1960 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
1961 }
1962
1963 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1964}
1965
Sadik Armagan48d70932020-02-18 15:18:27 +00001966ParsedTfOperationPtr TfParser::ParseStack(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1967{
1968 boost::ignore_unused(graphDef);
1969 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
1970
1971 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
1972 if (numInputs < 1)
1973 {
1974 throw ParseException(
1975 boost::str(
1976 boost::format(
1977 "Pack/Stack expects at least one input. Got %1% for Node %2% %3%")
1978 % numInputs
1979 % nodeDef.name()
1980 % CHECK_LOCATION().AsString()));
1981 }
1982
1983 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
1984 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
1985 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1986 const TensorInfo& inputTensorInfo = input0Slot->GetTensorInfo();
1987 auto numDimensions = inputTensorInfo.GetShape().GetNumDimensions();
1988
1989 // validate axis
1990 int32_t axis = ReadMandatoryNodeInt32Attribute(nodeDef, "axis");
1991 const int sNumDimensions = (static_cast<int>(numDimensions) + 1);
1992 if (!(axis < sNumDimensions && axis >= -sNumDimensions))
1993 {
1994 throw ParseException(
1995 boost::str(
1996 boost::format(
1997 "Axis index is not in range. Got %1% for Node %2% %3%")
1998 % axis
1999 % nodeDef.name()
2000 % CHECK_LOCATION().AsString()));
2001 }
2002
2003 if (axis < 0)
2004 {
2005 axis = static_cast<int32_t>(numDimensions) + axis + 1;
2006 }
2007
2008 StackDescriptor stackDescriptor;
2009 stackDescriptor.m_Axis = static_cast<uint32_t>(axis);
2010 stackDescriptor.m_NumInputs = static_cast<uint32_t>(numInputs);
2011 stackDescriptor.m_InputShape = inputTensorInfo.GetShape();
2012
2013 const unsigned int supportedNumDims = 4;
2014 for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
2015 {
2016 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2017 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2018
2019 // Double check dimensions of the tensors
2020 if (inputTensorInfo.GetNumDimensions() >= supportedNumDims)
2021 {
2022 throw armnn::ParseException(
2023 boost::str(
2024 boost::format(
2025 "The number of dimensions: %1% for input tensors of the "
2026 "Pack/Stack op. Number of dimensions should be less than %2% %3%")
2027 % inputTensorInfo.GetNumDimensions()
2028 % supportedNumDims
2029 % CHECK_LOCATION().AsString()));
2030 }
2031 }
2032
2033 std::vector<unsigned int> outputDimensions;
2034 for (unsigned int i = 0; i < stackDescriptor.m_InputShape.GetNumDimensions(); ++i)
2035 {
2036 outputDimensions.push_back(stackDescriptor.m_InputShape[i]);
2037 }
2038 outputDimensions.insert(outputDimensions.begin() + axis, numInputs);
2039
2040 // add Stack Layer
2041 IConnectableLayer* const layer = m_Network->AddStackLayer(stackDescriptor, nodeDef.name().c_str());
2042
2043 for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
2044 {
2045 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2046 inputSlot.Connect(layer->GetInputSlot(viewIndex));
2047 }
2048
2049 layer->GetOutputSlot(0).SetTensorInfo(
2050 armnn::TensorInfo(static_cast<uint32_t>(outputDimensions.size()),
2051 outputDimensions.data(),
2052 inputTensorInfo.GetDataType()));
2053
2054 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2055}
2056
jimfly01f6ba7472018-12-04 10:09:52 +00002057unsigned int CheckPaddingTensor(const ConstTensor& paddingTensor,
2058 const TensorInfo& inputTensorInfo,
2059 const std::string& nodeName)
2060{
2061 unsigned int rank = paddingTensor.GetShape()[0];
2062 unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
2063 if (rank != expectedRank)
2064 {
2065 throw ParseException(
2066 boost::str(
2067 boost::format(
2068 "Expected the padding tensor to be of rank %1 not %2 on Node %3 %4.")
2069 % expectedRank
2070 % rank
2071 % nodeName
2072 % CHECK_LOCATION().AsString()));
2073 }
2074 unsigned int second = paddingTensor.GetShape()[1];
2075 if (second != 2)
2076 {
2077 throw ParseException(
2078 boost::str(
2079 boost::format(
2080 "Expected the padding tensor to be of dimensions [%1, 2] not [%1, %2] on Node %3 %4.")
2081 % rank
2082 % second
2083 % nodeName
2084 % CHECK_LOCATION().AsString()));
2085 }
2086 return rank;
2087}
2088
2089TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
2090 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
2091{
2092 unsigned int numDims = inputTensorInfo.GetNumDimensions();
2093 std::vector<unsigned int> outDims;
2094 for (unsigned int i = 0; i < numDims; ++i)
2095 {
2096 unsigned int dimSize = inputTensorInfo.GetShape()[i];
2097 const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
2098 dimSize += dimPadding.first;
2099 dimSize += dimPadding.second;
2100 outDims.push_back(dimSize);
2101 }
2102 TensorInfo paddedTensorInfo = inputTensorInfo;
2103 unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
2104 paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
2105 return paddedTensorInfo;
2106}
2107
2108ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
2109 const tensorflow::GraphDef& graphDef)
2110{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002111 boost::ignore_unused(graphDef);
jimfly01f6ba7472018-12-04 10:09:52 +00002112 // input consists of:
2113 // input[0] the tensor which will be padded
2114 // input[1] the tensor holding the padding values
2115 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2116 IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2117 TensorInfo inputTensorInfo = previousLayerOutputSlot.GetTensorInfo();
2118 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
2119 {
2120 throw ParseException(
2121 boost::str(
2122 boost::format(
2123 "ArmNN only supports Pad with constant padding. "
2124 "Input %1%. Node %2% %3%")
2125 % inputs[1].m_IndexedValue->GetNode().name()
2126 % nodeDef.name()
2127 % CHECK_LOCATION().AsString()));
2128
2129 }
2130 ParsedConstTfOperation<int32_t>* paddingTensorOp =
2131 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2132
2133 std::vector<int32_t> paddingTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002134 ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(paddingTensorData);
jimfly01f6ba7472018-12-04 10:09:52 +00002135 // paddings is an integer tensor with shape [n, 2], where n is the rank of tensor
2136 // and should match the rank of the input tensor that is being padded.
2137 // For each dimension D of input, paddings[D, 0] indicates how many values to add
2138 // before the contents of tensor in that dimension, and paddings[D, 1] indicates how
2139 // many values to add after the contents of tensor in that dimension
2140 // This needs to be translated into a padList for ACL
2141 std::vector<std::pair<unsigned int, unsigned int>> padList;
2142 unsigned int rank = CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
2143 for (unsigned int i = 0; i < rank; ++i)
2144 {
2145 std::pair<unsigned int, unsigned int> paddingForDim;
2146 for (unsigned int j = 0; j < 2; j++)
2147 {
2148 unsigned int index = (i * 2) + j;
2149 int paddingAmount = paddingTensorData[index];
2150 // make sure we can cast to an unsigned value
2151 if (paddingAmount < 0)
2152 {
2153 throw ParseException(
2154 boost::str(
2155 boost::format(
2156 "Negative amount %1 specified at [%2, %3] of padding tensor on Node %4 %5.")
2157 % paddingAmount
2158 % i
2159 % j
2160 % nodeDef.name()
2161 % CHECK_LOCATION().AsString()));
2162 }
2163 if (j == 0)
2164 {
2165 paddingForDim.first = static_cast<unsigned int>(paddingAmount);
2166 }
2167 else
2168 {
2169 paddingForDim.second = static_cast<unsigned int>(paddingAmount);
2170 }
2171 }
2172 padList.push_back(paddingForDim);
2173 }
2174 PadDescriptor padDescriptor(padList);
2175 IConnectableLayer* layer = m_Network->AddPadLayer(padDescriptor, nodeDef.name().c_str());
2176 previousLayerOutputSlot.Connect(layer->GetInputSlot(0));
2177 // Use the padding to calculate the new output tensor shape
2178 TensorInfo outputTensorInfo = CalculatePaddedOutputTensorInfo(inputTensorInfo, padList);
2179 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2180 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2181}
2182
surmeh01bceff2f2018-03-29 16:29:27 +01002183ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
2184 const tensorflow::GraphDef& graphDef)
2185{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002186 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002187 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002188
telsoa01c577f2c2018-08-31 09:22:23 +01002189 // In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
surmeh01bceff2f2018-03-29 16:29:27 +01002190 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
surmeh01bceff2f2018-03-29 16:29:27 +01002191
surmeh01bceff2f2018-03-29 16:29:27 +01002192 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2193
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002194 // Constant tensor index
2195 unsigned int index = GetConstInputIndex(inputs);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002196 // Get the axis tensor data
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002197 ParsedConstTfOperation<int32_t>* shapeNode =
2198 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
2199
surmeh01bceff2f2018-03-29 16:29:27 +01002200 std::vector<int32_t> axisTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002201 shapeNode->GetConstTensor(axisTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002202
telsoa01c577f2c2018-08-31 09:22:23 +01002203 // This concatDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
Matteo Martincighf9afc792018-12-06 12:03:17 +00002204 const unsigned int concatDim = static_cast<unsigned int>(axisTensorData[0]);
surmeh01bceff2f2018-03-29 16:29:27 +01002205
telsoa01c577f2c2018-08-31 09:22:23 +01002206 // Armnn supports concatenation along the channel dimension for data formats NHWC and NCHW.
Matteo Martincighf9afc792018-12-06 12:03:17 +00002207 if (concatDim == 0 || concatDim == 2)
surmeh01bceff2f2018-03-29 16:29:27 +01002208 {
telsoa01c577f2c2018-08-31 09:22:23 +01002209 throw ParseException(
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002210 boost::str(
2211 boost::format(
telsoa01c577f2c2018-08-31 09:22:23 +01002212 "Dimension %1% for concatenation is not supported by Armnn. "
2213 "Node %2% %3%")
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002214 % concatDim
2215 % nodeDef.name()
2216 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002217 }
2218
Matthew Jacksondba634f2019-08-15 15:14:18 +01002219 const unsigned int supportedNumDims = 4;
Matteo Martincighf9afc792018-12-06 12:03:17 +00002220 unsigned int numConcatViews = numInputs - 1;
Matthew Jacksondba634f2019-08-15 15:14:18 +01002221 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatViews), supportedNumDims);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002222 concatDescriptor.SetConcatAxis(concatDim);
Matthew Jacksondba634f2019-08-15 15:14:18 +01002223 TensorShape mergeDims(supportedNumDims);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002224 unsigned int mergeDim = 0;
2225 for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
surmeh01bceff2f2018-03-29 16:29:27 +01002226 {
telsoa01c577f2c2018-08-31 09:22:23 +01002227 // Need to double check whether it should be
Matteo Martincighf9afc792018-12-06 12:03:17 +00002228 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002229 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2230
Matteo Martincighf9afc792018-12-06 12:03:17 +00002231 // Double check dimensions of the tensors
Matthew Jacksondba634f2019-08-15 15:14:18 +01002232 if (inputTensorInfo.GetNumDimensions() != supportedNumDims)
Matteo Martincighf9afc792018-12-06 12:03:17 +00002233 {
2234 throw armnn::ParseException(
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002235 boost::str(
2236 boost::format(
Matteo Martincighf9afc792018-12-06 12:03:17 +00002237 "The number of dimensions: %1% for input tensors of the "
2238 "concatenation op should be %2% %3%")
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002239 % inputTensorInfo.GetNumDimensions()
Matthew Jacksondba634f2019-08-15 15:14:18 +01002240 % supportedNumDims
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002241 % CHECK_LOCATION().AsString()));
Matteo Martincighf9afc792018-12-06 12:03:17 +00002242 }
2243
2244 // Copy the input tensor shape to mergeDimSizes and initialize the view origin coordinates for the current input
2245 mergeDims = inputTensorInfo.GetShape();
2246 unsigned int* viewOrigin = const_cast<unsigned int*>(concatDescriptor.GetViewOrigin(viewIndex));
Matthew Jacksondba634f2019-08-15 15:14:18 +01002247 std::fill(viewOrigin, viewOrigin + supportedNumDims, 0);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002248
2249 // Update the view origin coordinates and the merge dimension value
2250 concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
2251 mergeDim += mergeDims[concatDim];
surmeh01bceff2f2018-03-29 16:29:27 +01002252 }
2253
Matteo Martincighf9afc792018-12-06 12:03:17 +00002254 // Update the output shape
2255 mergeDims[concatDim] = mergeDim;
Jim Flynn906f9462019-05-10 13:55:21 +01002256 armnn::IConnectableLayer *layer = m_Network->AddConcatLayer(concatDescriptor, nodeDef.name().c_str());
surmeh01bceff2f2018-03-29 16:29:27 +01002257
Matteo Martincighf9afc792018-12-06 12:03:17 +00002258 layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(mergeDims, DataType::Float32));
surmeh01bceff2f2018-03-29 16:29:27 +01002259
Matteo Martincighf9afc792018-12-06 12:03:17 +00002260 for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
surmeh01bceff2f2018-03-29 16:29:27 +01002261 {
Matteo Martincighf9afc792018-12-06 12:03:17 +00002262 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2263 inputSlot.Connect(layer->GetInputSlot(viewIndex));
surmeh01bceff2f2018-03-29 16:29:27 +01002264 }
2265
2266 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2267}
2268
2269ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
2270 const tensorflow::GraphDef& graphDef)
2271{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002272 boost::ignore_unused(graphDef);
telsoa01c577f2c2018-08-31 09:22:23 +01002273 // Note: the Shape layer is handled in a special way, because:
2274 // 1. ARMNN doesn't support int32 tensors which it outputs.
2275 // 2. ARMNN works with statically shaped tensors which are known at parse time.
surmeh01bceff2f2018-03-29 16:29:27 +01002276 // 3. because of 1. and 2. we treat the output of Shape as a temporary const int32
telsoa01c577f2c2018-08-31 09:22:23 +01002277 // tensor which may be used as an input to other ops, most likely a Reshape.
surmeh01bceff2f2018-03-29 16:29:27 +01002278
2279 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
2280 if (tfDataType != tensorflow::DT_INT32)
2281 {
telsoa01c577f2c2018-08-31 09:22:23 +01002282 throw ParseException(
2283 boost::str(
2284 boost::format(
2285 "Armnn only supports DT_INT32 as out_type. Got %1% for Node %2% %3%")
2286 % tensorflow::DataType_Name(tfDataType)
2287 % nodeDef.name()
2288 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002289 }
2290
2291 const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2292 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2293 const TensorInfo& prevLayerTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2294 unsigned int prevLayerDimensions = prevLayerTensorInfo.GetNumDimensions();
2295
2296 std::vector<int32_t> shapeTensorData;
2297 shapeTensorData.reserve(prevLayerDimensions);
2298
2299 for (unsigned int i=0; i<prevLayerDimensions; ++i)
2300 {
2301 shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.GetShape()[i]));
2302 }
2303
2304 TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
2305
2306 return std::make_unique<ParsedConstTfOperation<int32_t>>(this,
2307 nodeDef,
2308 &shapeTensorData[0],
2309 shapeTensorInfo);
2310}
2311
2312ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
2313 const tensorflow::GraphDef& graphDef)
2314{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002315 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002316 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2317 ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
2318
2319 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2320 {
telsoa01c577f2c2018-08-31 09:22:23 +01002321 throw ParseException(
2322 boost::str(
2323 boost::format(
2324 "ArmNN only supports Reshape layers with constant shapes. "
2325 "Input %1% Node %2% %3%")
2326 % inputs[1].m_IndexedValue->GetNode().name()
2327 % nodeDef.name()
2328 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002329 }
2330 ParsedConstTfOperation<int32_t>* shapeNode =
2331 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2332
2333 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
2334 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2335
2336 std::vector<int32_t> shapeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002337 ConstTensor shapeTensor = shapeNode->GetConstTensor(shapeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002338 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
2339
2340 TensorShape targetShape = outputTensorInfo.GetShape();
2341 ReshapeDescriptor reshapeDesc;
2342 reshapeDesc.m_TargetShape = targetShape;
2343
2344 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2345 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2346 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2347
2348 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2349}
2350
2351ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
2352 const tensorflow::GraphDef& graphDef)
2353{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002354 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002355 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2356
2357 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2358 {
telsoa01c577f2c2018-08-31 09:22:23 +01002359 throw ParseException(
2360 boost::str(
2361 boost::format(
2362 "ArmNN only supports ResizeBilinear layers with constant sizes. "
2363 "Input %1%. Node %2% %3%")
2364 % inputs[1].m_IndexedValue->GetNode().name()
2365 % nodeDef.name()
2366 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002367 }
2368 ParsedConstTfOperation<int32_t>* sizeNode =
2369 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2370
telsoa01c577f2c2018-08-31 09:22:23 +01002371 // Checks the align_corners attribute is not set.
surmeh01bceff2f2018-03-29 16:29:27 +01002372 if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
2373 {
telsoa01c577f2c2018-08-31 09:22:23 +01002374 throw ParseException(
2375 boost::str(
2376 boost::format(
2377 "ArmNN only supports ResizeBilinear layers with align_corners set to false. "
2378 "Node %1% %2%")
2379 % nodeDef.name()
2380 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002381 }
2382
telsoa01c577f2c2018-08-31 09:22:23 +01002383 // Data for the parsed tensor args (size) must be stored locally.
surmeh01bceff2f2018-03-29 16:29:27 +01002384 std::vector<int32_t> sizeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002385 ConstTensor sizeTensor = sizeNode->GetConstTensor(sizeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002386
telsoa01c577f2c2018-08-31 09:22:23 +01002387 // The descriptor only has target height and width attributes, which we get from the size tensor.
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002388 ResizeDescriptor desc;
2389 desc.m_Method = armnn::ResizeMethod::Bilinear;
surmeh01bceff2f2018-03-29 16:29:27 +01002390 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002391 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2392 desc.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002393
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002394 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, nodeDef.name().c_str());
surmeh01bceff2f2018-03-29 16:29:27 +01002395
2396 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2397 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
telsoa01c577f2c2018-08-31 09:22:23 +01002398 // The input shape is always in BHWC format, this will be swizzled below; for now,
2399 // get the batch and channels to make up the ArmNN output shape with the target size.
surmeh01bceff2f2018-03-29 16:29:27 +01002400 unsigned int outBatch = inputTensorInfo.GetShape()[0];
2401 unsigned int outChannels = inputTensorInfo.GetShape()[3];
2402 unsigned int outHeight = desc.m_TargetHeight;
2403 unsigned int outWidth = desc.m_TargetWidth;
jimfly018a121502018-12-06 16:19:52 +00002404 TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
telsoa01c577f2c2018-08-31 09:22:23 +01002405 // The output DataType is always Float32, regardless of the input DataType.
surmeh01bceff2f2018-03-29 16:29:27 +01002406 const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
2407 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2408
jimfly018a121502018-12-06 16:19:52 +00002409 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01002410
2411 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2412}
2413
2414TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
2415{
2416 BOOST_ASSERT(nodeDef.op() == "Squeeze");
2417 tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
2418
2419 DataType type;
2420 if (tfDataType == tensorflow::DT_FLOAT)
2421 {
2422 type = DataType::Float32;
2423 }
2424 else if (tfDataType == tensorflow::DT_INT32)
2425 {
2426 type = DataType::Signed32;
2427 }
2428 else
2429 {
telsoa01c577f2c2018-08-31 09:22:23 +01002430 throw ParseException(
2431 boost::str(
2432 boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
2433 % tensorflow::DataType_Name(tfDataType)
2434 % nodeDef.name()
2435 % CHECK_LOCATION().AsString()));
2436 }
2437
2438
2439 if (inputTensorInfo.GetNumDimensions() > 4)
2440 {
2441 throw ParseException(
2442 boost::str(
2443 boost::format(
2444 "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
2445 % inputTensorInfo.GetNumDimensions()
2446 % nodeDef.name()
2447 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002448 }
2449
2450 std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
telsoa01c577f2c2018-08-31 09:22:23 +01002451 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2452
surmeh01bceff2f2018-03-29 16:29:27 +01002453 if (squeezeDims.empty())
2454 {
telsoa01c577f2c2018-08-31 09:22:23 +01002455 squeezeDims.assign(dimensionSequence,
2456 dimensionSequence+inputTensorInfo.GetNumDimensions());
surmeh01bceff2f2018-03-29 16:29:27 +01002457 }
2458
2459 std::vector<uint32_t> outputDims;
2460 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2461 {
telsoa01c577f2c2018-08-31 09:22:23 +01002462 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2463 auto currentDimension = inputTensorInfo.GetShape()[i];
2464 if (skipSqueeze || currentDimension != 1)
surmeh01bceff2f2018-03-29 16:29:27 +01002465 {
telsoa01c577f2c2018-08-31 09:22:23 +01002466 outputDims.push_back(currentDimension);
surmeh01bceff2f2018-03-29 16:29:27 +01002467 }
2468 }
2469
2470 if (outputDims.size() > 4)
2471 {
telsoa01c577f2c2018-08-31 09:22:23 +01002472 throw ParseException(
2473 boost::str(
2474 boost::format(
2475 "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
2476 % outputDims.size()
2477 % nodeDef.name()
2478 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002479 }
2480
telsoa01c577f2c2018-08-31 09:22:23 +01002481 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2482 outputDims.data());
2483
2484 TensorInfo outTensorInfo = inputTensorInfo;
2485 outTensorInfo.SetShape(outShape);
2486 outTensorInfo.SetDataType(type);
surmeh01bceff2f2018-03-29 16:29:27 +01002487
2488 return outTensorInfo;
2489}
2490
2491ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2492{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002493 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002494 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2495
2496 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2497 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2498
2499 TensorInfo outputInfo;
2500 outputInfo = OutputShapeOfSqueeze(nodeDef, inputTensorInfo);
2501
2502 ReshapeDescriptor reshapeDesc;
2503 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2504 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2505 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2506 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2507
2508 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2509}
2510
2511ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2512{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002513 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002514 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2515
2516 NormalizationDescriptor normalizationDescriptor;
2517 normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2518 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
2519 normalizationDescriptor.m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef, "alpha");
2520 normalizationDescriptor.m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef, "beta");
2521 normalizationDescriptor.m_K = ReadMandatoryNodeFloatAttribute(nodeDef, "bias");
2522 normalizationDescriptor.m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef, "depth_radius");
ruoyan018174f362018-12-04 18:24:08 +00002523 normalizationDescriptor.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002524
2525 // The window size must be an odd value. For a window size of (2 * n + 1), TensorFlow defines depth_radius = n.
2526 normalizationDescriptor.m_NormSize = normalizationDescriptor.m_NormSize * 2 + 1;
2527
2528 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002529 IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor,
2530 nodeDef.name().c_str());
ruoyan018174f362018-12-04 18:24:08 +00002531 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2532 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
surmeh01bceff2f2018-03-29 16:29:27 +01002533
2534 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2535}
2536
/// An ParsedTfOperation for a MatMul node.
/// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because
/// MatMul nodes are often used for the first part of a biased FullyConnected (MatMul followed
/// by Add) and in these cases armnn doesn't need a separate layer for the MatMul.
///
class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    // Only records the node; no ArmNN layer is created at construction time.
    ParsedMatMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
        : DeferredSingleLayerParsedTfOperation(parser, node)
    {
    }

    // Invoked when the deferred layer is actually required (see class comment):
    // materialises a FullyConnected layer with no bias (the nullptr argument).
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
    }
};
2556
2557ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2558{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002559 boost::ignore_unused(graphDef);
2560
telsoa01c577f2c2018-08-31 09:22:23 +01002561 // Defers the creation of the layer (see ParsedMatMulTfOperation).
surmeh01bceff2f2018-03-29 16:29:27 +01002562 return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
2563}
2564
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002565ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2566{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002567 boost::ignore_unused(graphDef);
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002568 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2569 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2570 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2571
2572 if (inputs.size() != 2)
2573 {
2574 throw ParseException(
2575 boost::str(boost::format("Mean expects two inputs!. Got %1% for Node %2% %3%")
2576 % inputs.size()
2577 % nodeDef.name()
2578 % CHECK_LOCATION().AsString()));
2579 }
2580
2581 bool keepDims = ReadMandatoryNodeBoolAttribute(nodeDef, "keep_dims");
2582
2583 ParsedConstTfOperation<int32_t>* axisNode =
2584 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2585
2586 const TensorInfo& axisTensorInfo = axisNode->GetTensorInfo();
2587
2588 ConstTensor axisTensor(axisTensorInfo, axisNode->GetStorage());
2589 const int* axisData = static_cast<const int*>(axisTensor.GetMemoryArea());
2590
2591 TensorInfo outputTensorInfo;
2592 MeanDescriptor meanDescriptor;
2593 meanDescriptor.m_KeepDims = keepDims;
2594
2595 // Negative axis values are supported so that the process requires
2596 // to convert them into the corresponding positive ones.
2597 // Duplicate values are also removed.
2598 std::vector<int> rawAxisVector(axisData, axisData + axisTensorInfo.GetNumElements());
2599 std::set<unsigned int> positiveAxisSet;
2600 int rank = static_cast<int>(inputTensorInfo.GetNumDimensions());
2601
2602 std::transform(rawAxisVector.begin(), rawAxisVector.end(),
2603 std::inserter(positiveAxisSet, positiveAxisSet.begin()),
2604 [rank](int i) -> unsigned int { return static_cast<unsigned int>((i + rank) % rank); });
2605
Derek Lambertibaa177f2019-12-10 22:00:43 +00002606 CalculateReducedOutputTensoInfo(inputTensorInfo, positiveAxisSet, keepDims, outputTensorInfo);
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002607
2608 if (inputTensorInfo.GetNumDimensions() > positiveAxisSet.size())
2609 {
2610 meanDescriptor.m_Axis.assign(positiveAxisSet.begin(), positiveAxisSet.end());
2611 }
2612
2613 IConnectableLayer* layer = m_Network->AddMeanLayer(meanDescriptor, nodeDef.name().c_str());
2614 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2615 inputSlot.Connect(layer->GetInputSlot(0));
2616
2617 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2618}
2619
/// An ParsedTfOperation for a Mul node.
/// Creation of the armnn Mul layer is deferred until it is actually needed, because Mul nodes
/// are also used for the first part of a leaky relu activation function (Mul followed by Maximum)
/// and in these cases armnn doesn't need a separate layer for the Mul.
///
class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    // Only records the node; no ArmNN layer is created at construction time.
    ParsedMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
        : DeferredSingleLayerParsedTfOperation(parser, node)
    {
    }

    // Invoked when the deferred layer is actually required (see class comment):
    // materialises a standalone multiplication layer.
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
    }
};
2639
surmeh01bceff2f2018-03-29 16:29:27 +01002640ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2641{
2642 boost::ignore_unused(graphDef);
2643
telsoa01c577f2c2018-08-31 09:22:23 +01002644 return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002645}
2646
2647ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
2648 const tensorflow::GraphDef& graphDef)
2649{
2650 boost::ignore_unused(graphDef);
2651
2652 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
2653
2654 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
2655
2656 auto it = m_InputShapes.find(nodeDef.name());
2657 if (it == m_InputShapes.end())
2658 {
telsoa01c577f2c2018-08-31 09:22:23 +01002659 throw ParseException(
2660 boost::str(
2661 boost::format(
2662 "Missing input shape for Placeholder '%1%' %2%")
2663 % nodeDef.name()
2664 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002665 }
2666 TensorInfo tensorInfo(it->second, DataType::Float32);
2667
2668 IConnectableLayer* const layer = m_Network->AddInputLayer(layerId, nodeDef.name().c_str());
2669
2670 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2671
2672 TrackInputBinding(layer, layerId, tensorInfo);
2673
2674 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2675}
2676
saoste01bbd40612018-08-28 15:41:51 +01002677ParsedTfOperationPtr TfParser::ParseRealDiv(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2678{
2679 boost::ignore_unused(graphDef);
2680 return AddRealDivLayer(nodeDef);
2681}
2682
surmeh01bceff2f2018-03-29 16:29:27 +01002683ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
2684 const tensorflow::GraphDef& graphDef)
2685{
2686 boost::ignore_unused(graphDef);
2687
2688 ActivationDescriptor activationDesc;
2689 activationDesc.m_Function = ActivationFunction::ReLu;
2690 return AddActivationLayer(nodeDef, activationDesc);
2691}
2692
2693ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
2694 const tensorflow::GraphDef& graphDef)
2695{
2696 boost::ignore_unused(graphDef);
2697
2698 ActivationDescriptor activationDesc;
2699 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2700 activationDesc.m_A = 6.0f;
2701 activationDesc.m_B = 0.0f;
2702
2703 return AddActivationLayer(nodeDef, activationDesc);
2704}
2705
2706ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
2707 const tensorflow::GraphDef& graphDef)
2708{
2709 boost::ignore_unused(graphDef);
2710
2711 ActivationDescriptor activationDesc;
2712 activationDesc.m_Function = ActivationFunction::Sigmoid;
2713
2714 return AddActivationLayer(nodeDef, activationDesc);
2715}
2716
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +00002717ParsedTfOperationPtr TfParser::ParseRsqrt(const tensorflow::NodeDef &nodeDef,
2718 const tensorflow::GraphDef &graphDef)
2719{
2720 boost::ignore_unused(graphDef);
2721
2722 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2723
josh minor4a3c6102020-01-06 16:40:46 -06002724 ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
2725 IConnectableLayer* const layer = m_Network->AddElementwiseUnaryLayer(descriptor, nodeDef.name().c_str());
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +00002726
2727 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2728 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2729 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2730
2731 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2732}
2733
surmeh01bceff2f2018-03-29 16:29:27 +01002734ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
2735 const tensorflow::GraphDef& graphDef)
2736{
2737 boost::ignore_unused(graphDef);
2738
2739 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2740
2741 SoftmaxDescriptor softmaxDescriptor;
2742 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(softmaxDescriptor, nodeDef.name().c_str());
2743
2744 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2745 prevLayerSlot.Connect(layer->GetInputSlot(0));
2746 layer->GetOutputSlot(0).SetTensorInfo(prevLayerSlot.GetTensorInfo());
2747
2748 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2749}
2750
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002751ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef,
2752 const tensorflow::GraphDef& graphDef)
2753{
2754 boost::ignore_unused(graphDef);
2755
2756 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2757 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2758 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2759
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002760 // Constant tensor index
2761 unsigned int index = GetConstInputIndex(inputs);
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002762 // Get the axis tensor data
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002763 ParsedConstTfOperation<int32_t>* shapeNode =
2764 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
2765
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002766 std::vector<int32_t> axisTensorData;
2767 shapeNode->GetConstTensor(axisTensorData);
2768
2769 // This splitDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
2770 const unsigned int splitDim = static_cast<unsigned int>(axisTensorData[0]);
2771
2772 // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
2773 if (splitDim == 0 || splitDim == 2)
2774 {
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002775 throw armnn::ParseException(
2776 boost::str(
2777 boost::format(
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002778 "Dimension %1% for split is not supported by Armnn. "
2779 "Node %2% %3%")
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002780 % splitDim
2781 % nodeDef.name()
2782 % CHECK_LOCATION().AsString()));
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002783 }
2784
Saoirse Stewart315258e2019-02-28 11:32:41 +00002785 // As Armnn only supports splitter outputs of the same shape, therefore num_split will be limited to an integer.
2786 uint32_t num_split = ReadMandatoryNodeUint32Attribute(nodeDef, "num_split");
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002787
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002788 IOutputSlot& inputSlot = inputs[1 - index].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1 - index].m_Index);
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002789 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2790
Matthew Jacksondba634f2019-08-15 15:14:18 +01002791 const unsigned int supportedNumDims = 4;
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002792 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2793
Matthew Jacksondba634f2019-08-15 15:14:18 +01002794 if (inputDimSize != supportedNumDims)
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002795 {
2796 throw armnn::ParseException(
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002797 boost::str(
2798 boost::format(
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002799 "The number of dimensions: %1% for input tensors of the "
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002800 "split op should be %2% %3%")
2801 % inputTensorInfo.GetNumDimensions()
Matthew Jacksondba634f2019-08-15 15:14:18 +01002802 % supportedNumDims
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002803 % CHECK_LOCATION().AsString()));
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002804 }
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002805
2806 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2807
2808 // Add current input shape to splitterDimSizes
2809 for (unsigned int i = 0; i < inputDimSize; ++i)
2810 {
2811 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2812 }
2813
2814 if (splitterDimSizes[splitDim] % num_split != 0)
2815 {
2816 throw ParseException("Number of splits must evenly divide the dimension");
2817 }
2818 splitterDimSizes[splitDim] /= num_split;
2819
2820 SplitterDescriptor splitDesc(num_split);
2821 for (unsigned int g = 0; g < num_split; ++g)
2822 {
2823 // Set the size of the views.
2824 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2825 {
2826 splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
2827 }
2828 splitDesc.SetViewOriginCoord(g, splitDim, splitterDimSizes[splitDim] * g);
2829 }
2830
2831 IConnectableLayer *layer = m_Network->AddSplitterLayer(splitDesc, nodeDef.name().c_str());
2832
2833 inputSlot.Connect(layer->GetInputSlot(0));
2834
2835 TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
2836 splitterDimSizes.data());
2837
2838 for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
2839 {
2840 layer->GetOutputSlot(i).SetTensorInfo(armnn::TensorInfo(outShape, inputTensorInfo.GetDataType()));
2841 }
2842
2843 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2844}
2845
surmeh01bceff2f2018-03-29 16:29:27 +01002846ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
2847 const tensorflow::GraphDef& graphDef)
2848{
2849 boost::ignore_unused(graphDef);
2850
2851 ActivationDescriptor activationDesc;
2852 activationDesc.m_Function = ActivationFunction::SoftReLu;
2853
2854 return AddActivationLayer(nodeDef, activationDesc);
2855}
2856
// Parses a TF StridedSlice node into an ArmNN StridedSlice layer.
// Input 0 is the data tensor; inputs 1-3 (begin, end, strides) are taken
// from parse-time constants.
// NOTE(review): the three downcasts below assume the begin/end/strides inputs
// are ParsedConstTfOperation<int32_t>; there is no HasParsedConstTensor check
// here, unlike e.g. ParseReshape — confirm upstream guarantees constness.
ParsedTfOperationPtr TfParser::ParseStridedSlice(const tensorflow::NodeDef& nodeDef,
                                                 const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);

    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
    unsigned int numInputs = static_cast<unsigned int>(nodes.size());
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);

    // Extract the constant begin indices (input 1).
    ParsedConstTfOperation<int32_t>* beginNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<int32_t> *>(inputs[1].m_IndexedValue);
    std::vector<int32_t> beginTensorData;
    beginNode->GetConstTensor(beginTensorData);

    // Extract the constant end indices (input 2).
    ParsedConstTfOperation<int32_t>* endNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<int32_t> *>(inputs[2].m_IndexedValue);
    std::vector<int32_t> endTensorData;
    endNode->GetConstTensor(endTensorData);

    // Extract the constant strides (input 3).
    ParsedConstTfOperation<int32_t>* stridesNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<int32_t> *>(inputs[3].m_IndexedValue);
    std::vector<int32_t> stridesTensorData;
    stridesNode->GetConstTensor(stridesTensorData);

    // Carry the TF masks over verbatim; they are all mandatory attributes.
    StridedSliceDescriptor desc;
    desc.m_Begin = beginTensorData;
    desc.m_End = endTensorData;
    desc.m_Stride = stridesTensorData;
    desc.m_BeginMask = ReadMandatoryNodeInt32Attribute(nodeDef, "begin_mask");
    desc.m_EndMask = ReadMandatoryNodeInt32Attribute(nodeDef, "end_mask");
    desc.m_EllipsisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "ellipsis_mask");
    desc.m_NewAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "new_axis_mask");
    desc.m_ShrinkAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "shrink_axis_mask");
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    IConnectableLayer* const layer = m_Network->AddStridedSliceLayer(desc, nodeDef.name().c_str());

    IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = prevLayerSlot.GetTensorInfo();

    // The output shape is derived statically from the input shape and the descriptor.
    TensorInfo outputTensorInfo;
    CalculateStridedSliceOutputTensorInfo(inputTensorInfo, desc, outputTensorInfo);

    prevLayerSlot.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2904
surmeh01bceff2f2018-03-29 16:29:27 +01002905ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2906{
2907 boost::ignore_unused(graphDef);
2908
2909 ActivationDescriptor activationDesc;
2910 activationDesc.m_Function = ActivationFunction::TanH;
2911 activationDesc.m_A = 1.0f;
2912 activationDesc.m_B = 1.0f;
2913
2914 return AddActivationLayer(nodeDef, activationDesc);
2915}
2916
2917ParsedTfOperationPtr TfParser::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
2918 ActivationDescriptor& activationDesc)
2919{
2920 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2921
2922 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, nodeDef.name().c_str());
2923
2924 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2925 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2926 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2927 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2928}
2929
2930ParsedTfOperationPtr TfParser::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
2931 const tensorflow::GraphDef& graphDef)
2932{
2933 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
2934}
2935
2936ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
2937 const tensorflow::GraphDef& graphDef)
2938{
2939 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
2940}
2941
2942ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
2943 const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
2944{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002945 boost::ignore_unused(graphDef);
2946
surmeh01bceff2f2018-03-29 16:29:27 +01002947 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2948 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2949 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2950
2951 if (inputs.size() != 1)
2952 {
telsoa01c577f2c2018-08-31 09:22:23 +01002953 throw ParseException(
2954 boost::str(
2955 boost::format(
2956 "2D Pooling expects one input!. Got %1% for Node %2% %3%")
2957 % inputs.size()
2958 % nodeDef.name()
2959 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002960 }
2961
2962 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
2963 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
2964 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
2965 std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef, "ksize"); // size of pool windows
2966
2967 Pooling2dDescriptor pooling2dDescriptor;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002968 pooling2dDescriptor.m_PoolType = pooltype;
2969 pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
surmeh01bceff2f2018-03-29 16:29:27 +01002970 pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;
2971
telsoa01c577f2c2018-08-31 09:22:23 +01002972 CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
FrancisMurtaghf005e312018-12-06 15:26:04 +00002973 DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
2974 pooling2dDescriptor.m_DataLayout = dataLayout;
2975 DataLayoutIndexed dataLayoutIndexed(dataLayout);
telsoa01c577f2c2018-08-31 09:22:23 +01002976
FrancisMurtaghf005e312018-12-06 15:26:04 +00002977 pooling2dDescriptor.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
2978 pooling2dDescriptor.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
2979 pooling2dDescriptor.m_PoolWidth = ksize[dataLayoutIndexed.GetWidthIndex()];
2980 pooling2dDescriptor.m_PoolHeight = ksize[dataLayoutIndexed.GetHeightIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01002981
FrancisMurtaghf005e312018-12-06 15:26:04 +00002982 uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
2983 uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01002984
2985 bool padding = false;
2986 TensorInfo outputInfo;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002987 unsigned int outputHeight = 0;
2988 unsigned int outputWidth = 0;
telsoa01c577f2c2018-08-31 09:22:23 +01002989
2990 CHECK_PADDING_TYPE(nodeDef, paddingString);
2991
surmeh01bceff2f2018-03-29 16:29:27 +01002992 if (paddingString == "SAME")
2993 {
2994 padding = true;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002995
2996 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
2997 static_cast<float>(pooling2dDescriptor.m_StrideY)));
2998 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
2999 static_cast<float>(pooling2dDescriptor.m_StrideX)));
surmeh01bceff2f2018-03-29 16:29:27 +01003000 }
3001 else if (paddingString == "VALID")
3002 {
3003 padding = false;
FrancisMurtaghf005e312018-12-06 15:26:04 +00003004
3005 outputHeight = static_cast<uint32_t>(ceil(
3006 static_cast<float>(inputHeight - pooling2dDescriptor.m_PoolHeight + 1) /
3007 static_cast<float>(pooling2dDescriptor.m_StrideY)));
3008 outputWidth = static_cast<uint32_t>(ceil(
3009 static_cast<float>(inputWidth - pooling2dDescriptor.m_PoolWidth + 1) /
3010 static_cast<float>(pooling2dDescriptor.m_StrideX)));
3011 }
3012
3013 switch (dataLayout)
3014 {
3015 case DataLayout::NHWC:
3016 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
3017 outputHeight,
3018 outputWidth,
3019 inputTensorInfo.GetShape()[3] },
3020 DataType::Float32);
3021 break;
3022 case DataLayout::NCHW:
3023 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
3024 inputTensorInfo.GetShape()[1],
3025 outputHeight,
3026 outputWidth },
3027 DataType::Float32);
3028 break;
surmeh01bceff2f2018-03-29 16:29:27 +01003029 }
surmeh01bceff2f2018-03-29 16:29:27 +01003030
3031 CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX,
FrancisMurtaghf005e312018-12-06 15:26:04 +00003032 pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01003033 CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY,
FrancisMurtaghf005e312018-12-06 15:26:04 +00003034 pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01003035
3036
3037 IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
3038 if (layer == nullptr)
3039 {
telsoa01c577f2c2018-08-31 09:22:23 +01003040 throw ParseException(
3041 boost::str(
3042 boost::format(
3043 "Failed to add pooling2d layer for %1% %2%")
3044 % nodeDef.name()
3045 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003046 }
3047
3048 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3049
FrancisMurtaghf005e312018-12-06 15:26:04 +00003050 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01003051
3052 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3053}
3054
// Creates an ArmNN Addition layer for a TF Add or BiasAdd node.
// When isBiasAdd is true, input 1 must be a 1D bias tensor and is reshaped so
// it broadcasts along the channel dimension given by the node's data_format.
// When isBiasAdd is false, either 1D input is reshaped for NHWC broadcasting.
// NOTE: input0Info/input1Info are bound to the ORIGINAL inputs' tensor infos
// before any broadcast reshape; the rank comparisons further down deliberately
// refer to the pre-reshape ranks.
ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);

    // References to the original (pre-reshape) tensor infos; used for the
    // output-shape decision after the slots may have been replaced below.
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    if (isBiasAdd)
    {
        // BiasAdd takes bias as a 1D tensor. We need to add a reshape layer to create a 4D tensor
        // with the same data in the correct dimension for broadcast in addition.
        if(input1Info.GetNumDimensions() != 1)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Unsupported bias for BiasAdd. It should be a 1D vector. "
                        "Got %1% dimensions for input %2%. Node %3% %4%")
                    % input1Info.GetNumDimensions()
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }

        const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");

        CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
        // Reshape the bias so it broadcasts against input 0; the layout flag
        // selects which axis the 1D bias is expanded along.
        input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
    }
    else
    {
        // Plain Add: reshape whichever input is 1D so it broadcasts (NHWC assumed).
        if (input0Info.GetNumDimensions() == 1)
        {
            const bool isNHWC = true;
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
        }

        if (input1Info.GetNumDimensions() == 1)
        {
            const bool isNHWC = true;
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
        }
    }

    IConnectableLayer* const layer = m_Network->AddAdditionLayer(nodeDef.name().c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // Output shape selection (based on the ORIGINAL ranks captured above):
    // equal ranks -> elementwise max of the two shapes (broadcast result).
    if (input0Info.GetNumDimensions() == input1Info.GetNumDimensions())
    {
        const TensorShape& input0Shape = input0Info.GetShape();
        const TensorShape& input1Shape = input1Info.GetShape();

        std::vector<unsigned int> outputShape;
        outputShape.reserve(input0Shape.GetNumDimensions());
        TensorInfo outputInfo(input0Info);

        for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
        {
            outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
        }

        outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));

        layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    }
    // Input 0 was the 1D (reshaped) side of a plain Add -> use the other side's info.
    else if (input0Info.GetNumDimensions() == 1 && isBiasAdd == false)
    {
        layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
    }
    // Otherwise (including BiasAdd) the output matches input 0.
    else
    {
        layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
3136
saoste01bbd40612018-08-28 15:41:51 +01003137ParsedTfOperationPtr TfParser::AddRealDivLayer(const tensorflow::NodeDef& nodeDef)
3138{
3139 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3140
3141 IConnectableLayer* const layer = m_Network->AddDivisionLayer(nodeDef.name().c_str());
3142 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3143 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3144
3145 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3146 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3147
3148
3149 if (input0NumDims < input1NumDims)
3150 {
3151 const bool isNHWC = true;
3152 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3153 }
3154 if (input1NumDims < input0NumDims)
3155 {
3156 const bool isNHWC = true;
3157 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3158 }
3159
3160 input0Slot->Connect(layer->GetInputSlot(0));
3161 input1Slot->Connect(layer->GetInputSlot(1));
3162
3163 if (input0NumDims < input1NumDims)
3164 {
3165 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3166 }
3167 else
3168 {
3169 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3170
3171 }
3172 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3173}
3174
Sadik Armagan975c09a2018-12-04 10:02:08 +00003175ParsedTfOperationPtr TfParser::AddMaximumLayer(const tensorflow::NodeDef& nodeDef)
3176{
3177 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3178
3179 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3180 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3181
3182 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3183 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3184
3185 if (input0NumDims < input1NumDims)
3186 {
3187 const bool isNHWC = true;
3188 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3189 }
3190 if (input1NumDims < input0NumDims)
3191 {
3192 const bool isNHWC = true;
3193 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3194 }
3195
3196 IConnectableLayer* const layer = m_Network->AddMaximumLayer(nodeDef.name().c_str());
3197
3198 input0Slot->Connect(layer->GetInputSlot(0));
3199 input1Slot->Connect(layer->GetInputSlot(1));
3200
3201 TensorInfo outputInfo = input0Slot->GetTensorInfo();
3202 std::vector<unsigned int> outputShape;
3203
3204 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
3205 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
3206
3207 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
3208 {
3209 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3210 }
3211
3212 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
3213 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3214
3215 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3216}
3217
telsoa01c577f2c2018-08-31 09:22:23 +01003218IConnectableLayer* TfParser::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
3219{
3220 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3221
3222 IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
3223 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3224 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3225
3226 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3227 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3228
3229 if (input0NumDims < input1NumDims)
3230 {
3231 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003232 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01003233 }
3234 if (input1NumDims < input0NumDims)
3235 {
3236 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003237 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01003238 }
3239
3240 input0Slot->Connect(layer->GetInputSlot(0));
3241 input1Slot->Connect(layer->GetInputSlot(1));
3242
3243 if (input0NumDims < input1NumDims)
3244 {
3245 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3246 }
3247 else
3248 {
3249 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3250 }
3251 return layer;
3252}
3253
// Fuses a TF MatMul node (plus an optional following Add node providing the
// bias) into a single ArmNN FullyConnected layer.
// - matMulNodeDef: the MatMul node; exactly one of its two inputs must be a
//   parsed float constant (the weights), the other is the data input.
// - addNodeDef: the bias Add node, or nullptr for a bias-free layer; exactly
//   one of its inputs must be a parsed float constant (the bias).
// - armnnLayerName: name given to the created ArmNN layer.
// Throws ParseException if weights/bias are not constant or their shapes
// don't match. Returns the created layer (never null; asserted below).
IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
    const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
{
    // Finds bias const (if applicable). The bias may be on either side of the Add.
    ParsedConstTfOperation<float>* biasNode = nullptr;
    if (addNodeDef != nullptr)
    {
        std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
        // Finds our inputs.
        if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
        {
            biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
        }
        else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
        {
            biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
        }
        else
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "ArmNN only supports fully connected layers with constant bias. "
                        "Inputs %1% and %2%. AddNode %3%. MatMulNode %4% %5%")
                    % addInputs[0].m_IndexedValue->GetNode().name()
                    % addInputs[1].m_IndexedValue->GetNode().name()
                    % addNodeDef->name()
                    % matMulNodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }
    }

    // Finds matmul inputs: the constant side becomes the weights, the other
    // side is the layer's data input.
    ParsedConstTfOperation<float>* weightNode = nullptr;
    ParsedTfOperation* inputNode = nullptr;
    unsigned int inputIdx = 0;
    std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
    if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
    {
        weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
        inputNode = mulInputs[1].m_IndexedValue;
        inputIdx = mulInputs[1].m_Index;
    }
    else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
    {
        weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
        inputNode = mulInputs[0].m_IndexedValue;
        inputIdx = mulInputs[0].m_Index;
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports fully connected layers with constant weights. "
                    "Inputs %1% and %2%. MatMulNode %3% %4%")
                % mulInputs[0].m_IndexedValue->GetNode().name()
                % mulInputs[1].m_IndexedValue->GetNode().name()
                % matMulNodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    std::vector<float> weightTensorData;
    // Handles weight. NOTE(review): the ConstTensor presumably references
    // weightTensorData's storage, so the vector must stay alive until the
    // layer is created below — confirm against ParsedConstTfOperation.
    ConstTensor weights = weightNode->GetConstTensor(weightTensorData);

    FullyConnectedDescriptor desc;
    desc.m_BiasEnabled = addNodeDef != nullptr;

    IConnectableLayer* layer = nullptr;
    Optional<ConstTensor> optionalBiases;
    std::vector<float> biasTensorData;
    // Makes the layer.
    if (addNodeDef != nullptr)
    {
        ConstTensor biases = biasNode->GetConstTensor(biasTensorData);

        // Weights are [inputSize, outputSize]; the bias length must equal outputSize.
        if (weights.GetShape()[1] != biases.GetShape()[0])
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Shape of matmul weights and bias do not match. "
                        "AddNode %1%. MatMulNode %2% %3%")
                    % addNodeDef->name()
                    % matMulNodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }

        optionalBiases = Optional<ConstTensor>(biases);
    }
    layer = m_Network->AddFullyConnectedLayer(desc, weights, optionalBiases, armnnLayerName);

    BOOST_ASSERT(layer != nullptr);

    inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
    unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];

    // Handles output: [batches, outputSize] float tensor.
    TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    return layer;
}
3357
3358void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
3359{
telsoa01c577f2c2018-08-31 09:22:23 +01003360 // Gets the type of the node (assume float).
surmeh01bceff2f2018-03-29 16:29:27 +01003361 tensorflow::DataType type = tensorflow::DT_FLOAT;
3362 if (nodeDef.attr().count("T") != 0)
3363 {
3364 auto attr = nodeDef.attr().at("T");
3365 type = attr.type();
3366 }
3367 else if (nodeDef.attr().count("dtype") != 0)
3368 {
3369 auto attr = nodeDef.attr().at("dtype");
3370 type = attr.type();
3371 }
3372
Ferran Balaguerc602f292019-02-08 17:09:55 +00003373 if ((type != tensorflow::DT_FLOAT && type != tensorflow::DT_INT32) && nodeDef.op() != "Const")
surmeh01bceff2f2018-03-29 16:29:27 +01003374 {
telsoa01c577f2c2018-08-31 09:22:23 +01003375 throw ParseException(
3376 boost::str(
3377 boost::format(
Ferran Balaguerc602f292019-02-08 17:09:55 +00003378 "Currently only FLOAT and INT32 are supported for tensorflow nodes (apart from Const). "
telsoa01c577f2c2018-08-31 09:22:23 +01003379 "Got %1% for Node %2% %3%")
3380 % tensorflow::DataType_Name(type)
3381 % nodeDef.name()
3382 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003383 }
3384
3385 const std::string& operation = nodeDef.op();
narpra016f37f832018-12-21 18:30:00 +00003386 auto itControlInput = std::find(m_ControlInputs.begin(), m_ControlInputs.end(), operation);
3387 if (itControlInput != m_ControlInputs.end())
3388 {
3389 // We currently allow Control Input from TensorFlow graph but we ignore them from ArmNN graph.
3390 return;
3391 }
surmeh01bceff2f2018-03-29 16:29:27 +01003392 auto it = ms_OperationNameToParsingFunctions.find(operation);
3393 if (it != ms_OperationNameToParsingFunctions.end())
3394 {
3395 auto func = it->second;
3396 ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
3397 ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
3398
telsoa01c577f2c2018-08-31 09:22:23 +01003399 // Stores the parsed operation so that dependent layers can connect to it.
surmeh01bceff2f2018-03-29 16:29:27 +01003400 auto it = m_ParsedTfOperations.find(nodeDef.name());
3401 if (it != m_ParsedTfOperations.end())
3402 {
3403 throw ParseException(boost::str(boost::format("Name %1% used by more than one node") % nodeDef.name()));
3404 }
3405 m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
3406
telsoa01c577f2c2018-08-31 09:22:23 +01003407 // If this node was requested as an output from the network, then adds an ArmNN output layer.
surmeh01bceff2f2018-03-29 16:29:27 +01003408 if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
3409 m_RequestedOutputs.end())
3410 {
3411 auto outId = ParseOutputId(nodeDef.name());
3412 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
3413 IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
3414
3415 TensorInfo tensorInfo = prevSlot.GetTensorInfo();
3416
3417 IConnectableLayer* outputLayer = m_Network->AddOutputLayer(layerId, nodeDef.name().c_str());
3418
3419 prevSlot.Connect(outputLayer->GetInputSlot(0));
3420
3421 TrackOutputBinding(outputLayer, layerId, tensorInfo);
3422 }
3423 }
3424 else
3425 {
telsoa01c577f2c2018-08-31 09:22:23 +01003426 throw ParseException(
3427 boost::str(
3428 boost::format(
3429 "Unsupported operation %1% in tensorflow::GraphDef %2%")
3430 % operation
3431 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003432 }
3433}
3434
// Loads a whole TF GraphDef: indexes nodes by name, validates the requested
// inputs/outputs exist, topologically sorts the subgraph reachable from the
// requested outputs, and parses each node in dependency order.
// Throws ParseException for missing input/output nodes or cyclic graphs.
void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
{
    // Adds all nodes to our map.
    m_NodesByName.clear();
    m_NetworkInputsBindingInfo.clear();
    m_NetworkOutputsBindingInfo.clear();

    for (int i = 0; i < graphDef.node_size(); ++i)
    {
        const tensorflow::NodeDef& node = graphDef.node(i);
        m_NodesByName[node.name()] = &node;
    }

    // Checks that the input nodes the user has requested exist.
    for (const auto& pair : m_InputShapes)
    {
        const std::string& requestedInputName = pair.first;
        auto nodeIt = m_NodesByName.find(requestedInputName);
        if (nodeIt == m_NodesByName.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Couldn't find requested input node '%1%' in graph %2%")
                    % requestedInputName
                    % CHECK_LOCATION().AsString()));
        }
    }

    // Finds the output nodes the user requested.
    std::vector<const tensorflow::NodeDef*> targetNodes;
    for (const std::string& requestedOutputName : m_RequestedOutputs)
    {
        auto nodeIt = m_NodesByName.find(requestedOutputName);
        if (nodeIt == m_NodesByName.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Couldn't find requested output node '%1%' in graph %2%")
                    % requestedOutputName
                    % CHECK_LOCATION().AsString()));
        }
        targetNodes.push_back(nodeIt->second);
    }

    // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
    // Only nodes reachable from the requested outputs end up in sortedNodes.
    std::vector<const tensorflow::NodeDef*> sortedNodes;
    if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
        targetNodes,
        // Edge callback: yields the TF input nodes of 'node' (indices dropped,
        // only the NodeDef pointers matter for the ordering).
        [this](const tensorflow::NodeDef* node)
        {
            auto outputs = GetTfInputNodes(*node);
            std::vector<const tensorflow::NodeDef*> nodesOnly;
            for (const auto & o : outputs) {
                nodesOnly.push_back(o.m_IndexedValue);
            }
            return nodesOnly;
        },
        sortedNodes))
    {
        // The sort fails only when the graph contains a cycle.
        throw ParseException(
            boost::str(
                boost::format(
                    "Cycle detected in graph %1%")
                % CHECK_LOCATION().AsString()));
    }

    // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
    for (const auto& it : sortedNodes)
    {
        const tensorflow::NodeDef& currentNode = *it;
        LoadNodeDef(currentNode, graphDef);
    }
}
3510
3511INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
3512 const std::map<std::string, TensorShape>& inputShapes,
3513 const std::vector<std::string>& requestedOutputs)
3514{
3515 FILE* fd = fopen(graphFile, "r");
3516
3517 if (fd == nullptr)
3518 {
telsoa01c577f2c2018-08-31 09:22:23 +01003519 throw FileNotFoundException(
3520 boost::str(
3521 boost::format(
3522 "Graph file %1% failed to open %2%")
3523 % graphFile
3524 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003525 }
3526
telsoa01c577f2c2018-08-31 09:22:23 +01003527 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003528 tensorflow::GraphDef graphDef;
3529 auto input = new google::protobuf::io::FileInputStream(fileno(fd));
3530 bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
3531 delete input;
3532 fclose(fd);
3533
3534 if (!success)
3535 {
telsoa01c577f2c2018-08-31 09:22:23 +01003536 throw ParseException(
3537 boost::str(
3538 boost::format(
3539 "Failed to parse graph file %1%")
3540 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003541 }
3542
3543 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3544}
3545
3546INetworkPtr TfParser::CreateNetworkFromString(const char* protoText,
3547 const std::map<std::string, TensorShape>& inputShapes,
3548 const std::vector<std::string>& requestedOutputs)
3549{
telsoa01c577f2c2018-08-31 09:22:23 +01003550 // Parses the string into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003551 tensorflow::GraphDef graphDef;
3552 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
3553
3554 if (!success)
3555 {
telsoa01c577f2c2018-08-31 09:22:23 +01003556 throw ParseException(
3557 boost::str(
3558 boost::format(
3559 "Failed to parse graph file %1%")
3560 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003561 }
3562
3563 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3564}
3565
// Creates an ArmNN network from a binary-format TF GraphDef (.pb) file.
// Throws FileNotFoundException if the file can't be opened and ParseException
// if the protobuf can't be parsed.
INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
    const std::map<std::string, TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    FILE* fd = fopen(graphFile, "rb");

    if (fd == nullptr)
    {
        throw FileNotFoundException(
            boost::str(
                boost::format(
                    "Graph file %1% failed to open %2%")
                % graphFile
                % CHECK_LOCATION().AsString()));
    }

    // Parses the file into a message.
    tensorflow::GraphDef graphDef;

    // The streams wrap the FILE's descriptor and stay in scope until the
    // function returns; fclose below releases the descriptor itself.
    // NOTE(review): FileInputStream is presumed not to close the fd on
    // destruction (SetCloseOnDelete is not used) — confirm against protobuf.
    google::protobuf::io::FileInputStream inStream(fileno(fd));
    google::protobuf::io::CodedInputStream codedStream(&inStream);
    // Raise the coded stream's message size limit to INT_MAX so large graph
    // files can be read in full.
    codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
    bool success = graphDef.ParseFromCodedStream(&codedStream);
    fclose(fd);

    if (!success)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Failed to parse protobuf file %1% %2%")
                % graphFile
                % CHECK_LOCATION().AsString()));
    }

    return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
}
3603
3604INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
3605 const std::map<std::string, TensorShape>& inputShapes,
3606 const std::vector<std::string>& requestedOutputs)
3607{
3608 m_Network = INetwork::Create();
3609
3610 m_InputShapes = inputShapes;
3611 if (requestedOutputs.size() == 0)
3612 {
telsoa01c577f2c2018-08-31 09:22:23 +01003613 throw ParseException(
3614 boost::str(
3615 boost::format(
3616 "requestedOutputs must have at least one entry %1%")
3617 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003618 }
3619 m_RequestedOutputs = requestedOutputs;
3620
3621 try
3622 {
3623 LoadGraphDef(graphDef);
3624 }
3625 catch (const ParseException& e)
3626 {
3627 Cleanup();
3628 throw e;
3629 }
3630
3631 Cleanup();
3632
3633 return std::move(m_Network);
3634}
3635
3636void TfParser::Cleanup()
3637{
telsoa01c577f2c2018-08-31 09:22:23 +01003638 // Cleanup, in case we reuse this parser.
surmeh01bceff2f2018-03-29 16:29:27 +01003639 m_InputShapes.clear();
3640 m_RequestedOutputs.clear();
3641 m_NodesByName.clear();
3642 m_ParsedTfOperations.clear();
3643}
3644
3645BindingPointInfo TfParser::GetNetworkInputBindingInfo(const std::string& name) const
3646{
3647 return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
3648}
3649
3650BindingPointInfo TfParser::GetNetworkOutputBindingInfo(const std::string& name) const
3651{
3652 return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
3653}
3654
3655std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(const std::string& layerName,
3656 const char* bindingPointDesc,
3657 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3658{
3659 auto it = nameToBindingInfo.find(layerName);
3660 if (it == nameToBindingInfo.end())
3661 {
telsoa01c577f2c2018-08-31 09:22:23 +01003662 throw InvalidArgumentException(
3663 boost::str(
3664 boost::format(
3665 "Unknown %1% '%2%' %3%")
3666 % bindingPointDesc
3667 % layerName
3668 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003669 }
3670 return it->second;
3671}
3672
3673void TfParser::TrackInputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
3674{
3675 return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
3676}
3677
3678void TfParser::TrackOutputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
3679{
3680 return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
3681}
3682
3683void TfParser::TrackBindingPoint(IConnectableLayer* layer,
3684 LayerBindingId id,
3685 const TensorInfo& tensorInfo,
3686 const char* bindingPointDesc,
3687 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3688{
3689 const std::string layerName = layer->GetName();
3690 auto it = nameToBindingInfo.find(layerName);
3691 if (it == nameToBindingInfo.end())
3692 {
3693 nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
3694 }
3695 else
3696 {
telsoa01c577f2c2018-08-31 09:22:23 +01003697 throw ParseException(
3698 boost::str(
3699 boost::format(
3700 "Id %1% used by more than one %2% layer %3%")
3701 % id
3702 % bindingPointDesc
3703 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003704 }
3705}
3706
3707} // namespace armnnTfParser