blob: af86619249d58416da0e5fbf4b906d4d996ac9f8 [file] [log] [blame]
surmeh01bceff2f2018-03-29 16:29:27 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
surmeh01bceff2f2018-03-29 16:29:27 +01004//
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00005
surmeh01bceff2f2018-03-29 16:29:27 +01006#include "TfParser.hpp"
7
surmeh01bceff2f2018-03-29 16:29:27 +01008#include <armnn/TypesUtils.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +01009#include <armnn/Descriptors.hpp>
10
Matteo Martincighe011d202019-11-28 11:35:47 +000011#include <armnnUtils/Permute.hpp>
12#include <armnnUtils/DataLayoutIndexed.hpp>
13
surmeh01bceff2f2018-03-29 16:29:27 +010014#include <GraphTopologicalSort.hpp>
Sadik Armagan479045b2018-10-01 11:51:37 +010015#include <ParserHelper.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010016
17#include <google/protobuf/io/zero_copy_stream_impl.h>
18#include <google/protobuf/text_format.h>
19
Derek Lambertibaa177f2019-12-10 22:00:43 +000020#include <tensorflow/core/framework/graph.pb.h>
surmeh01bceff2f2018-03-29 16:29:27 +010021
surmeh01bceff2f2018-03-29 16:29:27 +010022#include <boost/format.hpp>
23#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010024#include <boost/format.hpp>
25#include <boost/numeric/conversion/cast.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010026#include <boost/polymorphic_cast.hpp>
27
surmeh01bceff2f2018-03-29 16:29:27 +010028#include <numeric>
surmeh01bceff2f2018-03-29 16:29:27 +010029
Matteo Martincigh46315822018-11-28 16:22:36 +000030using namespace armnnUtils;
surmeh01bceff2f2018-03-29 16:29:27 +010031using namespace armnn;
32
33namespace armnnTfParser
34{
35namespace
36{
37
38const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
39const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
40
surmeh01bceff2f2018-03-29 16:29:27 +010041
42template <typename Callable>
43void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
44 const std::string& attribName,
45 tensorflow::AttrValue::ValueCase expectedValueCase,
46 Callable callable)
47{
48 auto iter = nodeDef.attr().find(attribName);
49 if (iter != nodeDef.attr().end())
50 {
51 const auto& attrValue = iter->second;
52 if (attrValue.value_case() == expectedValueCase)
53 {
54 callable(attrValue);
55 }
56 else
57 {
telsoa01c577f2c2018-08-31 09:22:23 +010058 throw ParseException(
59 boost::str(
60 boost::format(
61 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
62 "but found %4% instead %5%")
63 % attribName
64 % nodeDef.name()
65 % static_cast<int>(expectedValueCase)
66 % static_cast<int>(attrValue.value_case())
67 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +010068 }
69 }
70 else
71 {
telsoa01c577f2c2018-08-31 09:22:23 +010072 throw ParseException(
73 boost::str(
74 boost::format(
75 "Could not find required attribute %1% in node %2% %3%")
76 % attribName
77 % nodeDef.name()
78 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +010079 }
80}
81
82template <typename Callable>
83void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
84 const std::string& attribName,
85 tensorflow::AttrValue::ValueCase expectedValueCase,
86 Callable callable)
87{
88 auto iter = nodeDef.attr().find(attribName);
89 if (iter != nodeDef.attr().end())
90 {
91 const auto& attrValue = iter->second;
92 if (attrValue.value_case() == expectedValueCase)
93 {
94 callable(attrValue);
95 }
96 else
97 {
telsoa01c577f2c2018-08-31 09:22:23 +010098 throw ParseException(
99 boost::str(
100 boost::format(
101 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
102 "but found %4% instead %5%")
103 % attribName
104 % nodeDef.name()
105 % static_cast<int>(expectedValueCase)
106 % static_cast<int>(attrValue.value_case())
107 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100108 }
109 }
110}
111
112float ReadMandatoryNodeFloatAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
113{
114 float attribValue = 0.0f;
115 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
116 [&attribValue](const tensorflow::AttrValue& attrValue)
117 {
118 attribValue = attrValue.f();
119 });
120 return attribValue;
121}
122
Conor Kennedyc2130a02018-12-05 11:05:54 +0000123int32_t ReadMandatoryNodeInt32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
124{
125 int32_t attribValue = 0u;
126 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
127 [&attribValue](const tensorflow::AttrValue& attrValue)
128 {
129 attribValue = static_cast<int32_t>(attrValue.i());
130 });
131 return attribValue;
132}
133
Ferran Balaguer51dd62f2019-01-11 19:29:18 +0000134bool ReadMandatoryNodeBoolAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
135{
136 bool attribValue = false;
137 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
138 [&attribValue](const tensorflow::AttrValue& attrValue)
139 {
140 attribValue = static_cast<bool>(attrValue.b());
141 });
142 return attribValue;
143}
144
surmeh01bceff2f2018-03-29 16:29:27 +0100145uint32_t ReadMandatoryNodeUint32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
146{
147 uint32_t attribValue = 0u;
148 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
149 [&attribValue](const tensorflow::AttrValue& attrValue)
150 {
151 attribValue = static_cast<uint32_t>(attrValue.i());
152 });
153 return attribValue;
154}
155
156std::string ReadMandatoryNodeStringAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
157{
158 std::string attribValue = "";
159 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
160 [&attribValue](const tensorflow::AttrValue& attrValue)
161 {
162 attribValue = attrValue.s();
163 });
164 return attribValue;
165}
166
167std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
168 const std::string& name)
169{
170 std::vector<uint32_t> attriList;
171 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
172 [&attriList](const tensorflow::AttrValue& attrValue)
173 {
174 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
175 {
176 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
177 }
178 });
179
180 return attriList;
181}
182
183std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
184 const std::string& name)
185{
186 std::vector<uint32_t> attriList;
187 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
188 [&attriList](const tensorflow::AttrValue& attrValue)
189 {
190 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
191 {
192 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
193 }
194 });
195
196 return attriList;
197}
198
Aron Virginas-Tar2e259272019-11-27 13:29:51 +0000199std::string ReadOptionalNodeStringAttribute(const tensorflow::NodeDef& nodeDef,
200 const std::string& name,
201 const std::string& defaultValue = "")
202{
203 std::string attribValue = defaultValue;
204 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
205 [&attribValue](const tensorflow::AttrValue& attrValue)
206 {
207 attribValue = attrValue.s();
208 });
209 return attribValue;
210}
211
surmeh01bceff2f2018-03-29 16:29:27 +0100212bool ReadOptionalNodeBoolAttribute(const tensorflow::NodeDef& nodeDef,
213 const std::string& name,
214 bool defaultValue = false)
215{
216 bool attribValue = defaultValue;
217 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
218 [&attribValue](const tensorflow::AttrValue& attrValue)
219 {
220 attribValue = attrValue.b();
221 });
222 return attribValue;
223}
224
225tensorflow::DataType ReadMandatoryNodeTypeAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
226{
227 tensorflow::DataType attribValue = tensorflow::DT_INVALID;
228 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
229 [&attribValue](const tensorflow::AttrValue& attrValue)
230 {
231 attribValue = attrValue.type();
232 });
233 return attribValue;
234}
235
236TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& targetDims)
237{
238 std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
239 const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);
240
241 if (stretchDim != targetDims.end())
242 {
243 if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
244 {
telsoa01c577f2c2018-08-31 09:22:23 +0100245 throw ParseException(
246 boost::str(
247 boost::format(
248 "At most one component of shape can be -1 %1%")
249 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100250 }
251
telsoa01c577f2c2018-08-31 09:22:23 +0100252 auto targetNumElements =
253 boost::numeric_cast<unsigned int>(
254 std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
surmeh01bceff2f2018-03-29 16:29:27 +0100255 auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
256 outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
257 }
258
259 TensorInfo reshapeInfo = input;
260 reshapeInfo.SetShape(TensorShape{ static_cast<unsigned int>(outDims.size()), outDims.data() });
261
262 return reshapeInfo;
263}
264
telsoa01c577f2c2018-08-31 09:22:23 +0100265// We need the input0Slot to guide the reshape for input1Slot.
saoste01bbd40612018-08-28 15:41:51 +0100266IOutputSlot* AddBroadcastReshapeLayer(IOutputSlot* input0Slot, IOutputSlot* input1Slot, bool isNHWC,
267 INetwork& m_Network, const tensorflow::NodeDef& nodeDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100268{
269 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
270 const TensorInfo inputTensorInfo = input0Slot->GetTensorInfo();
271 const unsigned int matchDim = inputTensorInfo.GetNumDimensions() - (isNHWC ? 1 : 3);
272 std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
273 std::fill_n(reshapedDimensions.begin(), inputTensorInfo.GetNumDimensions(), 1);
274 reshapedDimensions[matchDim] = input1Info.GetShape()[0];
275
276 armnn::TensorInfo reshapedInfo = input1Info;
277 reshapedInfo.SetShape(TensorShape{ inputTensorInfo.GetNumDimensions(), reshapedDimensions.data() });
278
279 const std::string reshapeLayerName = "reshape_for-" + nodeDef.name();
280 ReshapeDescriptor reshapeDesc;
281 reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
282 IConnectableLayer* const reshapeLayer = m_Network.AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());
283
284 input1Slot->Connect(reshapeLayer->GetInputSlot(0));
285 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
286
287 input1Slot = &reshapeLayer->GetOutputSlot(0);
288
289 return input1Slot;
290}
291
292OutputId ParseOutputId(const std::string & name)
293{
294 unsigned int outputNum = 0;
295 size_t colonPos = name.find_last_of(":");
296 if (colonPos != std::string::npos)
297 {
298 int n = std::stoi(name.substr(colonPos+1));
299 if (n<0 || n>100)
300 {
telsoa01c577f2c2018-08-31 09:22:23 +0100301 throw ParseException(
302 boost::str(
303 boost::format(
304 "Output tensor id is out of range for %1% %2%")
305 % name
306 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100307 }
308 outputNum = static_cast<unsigned int>(n);
309 }
310 return OutputId(name.substr(0,colonPos),outputNum);
311}
312
// Validates that FORMAT names one of the two data layouts the parser
// understands, throwing a ParseException otherwise.
#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \
    if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Unsupported data format %1% passed for %2% node %3%. " \
                    "Only NHWC and NCHW supported %4%") \
                % FORMAT \
                % NODE_TYPE \
                % NODE_DEF.name() \
                % CHECK_LOCATION().AsString())); \
    }

// Validates that PADDING is one of TensorFlow's two padding schemes,
// throwing a ParseException otherwise.
#define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
    if(PADDING != "SAME" && PADDING != "VALID" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Only 'SAME' and 'VALID' padding supported. Got %1% for %2% %3%") \
                % PADDING \
                % NODE_DEF.name() \
                % CHECK_LOCATION().AsString())); \
    }
surmeh01bceff2f2018-03-29 16:29:27 +0100339} // namespace
340
341const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
342 { "Const", &TfParser::ParseConst },
343 { "Add", &TfParser::ParseAdd },
Ferran Balaguerfbdad032018-12-28 18:15:24 +0000344 { "AddN", &TfParser::ParseAddN },
surmeh01bceff2f2018-03-29 16:29:27 +0100345 { "BiasAdd", &TfParser::ParseBiasAdd },
346 { "Identity", &TfParser::ParseIdentity },
347 { "Conv2D", &TfParser::ParseConv2D },
348 { "DepthwiseConv2dNative", &TfParser::ParseDepthwiseConv2D },
Conor Kennedyc2130a02018-12-05 11:05:54 +0000349 { "ExpandDims", &TfParser::ParseExpandDims },
surmeh01bceff2f2018-03-29 16:29:27 +0100350 { "FusedBatchNorm", &TfParser::ParseFusedBatchNorm },
FrancisMurtagh94412af2019-01-24 10:53:39 +0000351 { "Gather", &TfParser::ParseGather},
jimfly01a06bf312018-12-18 16:24:51 +0000352 { "Greater", &TfParser::ParseGreater},
surmeh01bceff2f2018-03-29 16:29:27 +0100353 { "ConcatV2", &TfParser::ParseConcat },
354 { "LRN", &TfParser::ParseLrn },
355 { "MatMul", &TfParser::ParseMatMul },
Ferran Balaguer51dd62f2019-01-11 19:29:18 +0000356 { "Mean", &TfParser::ParseMean },
surmeh01bceff2f2018-03-29 16:29:27 +0100357 { "Mul", &TfParser::ParseMul },
358 { "Placeholder", &TfParser::ParsePlaceholder },
saoste01bbd40612018-08-28 15:41:51 +0100359 { "RealDiv", &TfParser::ParseRealDiv },
surmeh01bceff2f2018-03-29 16:29:27 +0100360 { "Relu", &TfParser::ParseRelu },
361 { "Relu6", &TfParser::ParseRelu6 },
362 { "Reshape", &TfParser::ParseReshape },
363 { "ResizeBilinear", &TfParser::ParseResizeBilinear },
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +0000364 { "Rsqrt", &TfParser::ParseRsqrt },
surmeh01bceff2f2018-03-29 16:29:27 +0100365 { "Shape", &TfParser::ParseShape },
366 { "Squeeze", &TfParser::ParseSqueeze },
367 { "Sigmoid", &TfParser::ParseSigmoid },
368 { "Softmax", &TfParser::ParseSoftmax },
369 { "Softplus", &TfParser::ParseSoftplus },
Sadik Armagan2ad6cb42018-12-27 11:23:44 +0000370 { "Split", &TfParser::ParseSplit },
surmeh01bceff2f2018-03-29 16:29:27 +0100371 { "Tanh", &TfParser::ParseTanh },
372 { "MaxPool", &TfParser::ParseMaxPool },
373 { "AvgPool", &TfParser::ParseAvgPool },
telsoa01c577f2c2018-08-31 09:22:23 +0100374 { "Maximum", &TfParser::ParseMaximum },
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +0000375 { "Minimum", &TfParser::ParseMinimum },
jimfly0184c70e62018-12-19 13:14:46 +0000376 { "Equal", &TfParser::ParseEqual },
jimfly01f6ba7472018-12-04 10:09:52 +0000377 { "Pad", &TfParser::ParsePad },
narpra016f37f832018-12-21 18:30:00 +0000378 { "Sub", &TfParser::ParseSub }
379};
380
381const std::list<std::string> TfParser::m_ControlInputs = {
382 "Assert"
surmeh01bceff2f2018-03-29 16:29:27 +0100383};
384
385ITfParser* ITfParser::CreateRaw()
386{
387 return new TfParser();
388}
389
390ITfParserPtr ITfParser::Create()
391{
392 return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
393}
394
395void ITfParser::Destroy(ITfParser* parser)
396{
397 delete parser;
398}
399
/// Computes TensorFlow-style padding for one spatial dimension.
/// With samePadding == false (VALID) both paddings are zero. With SAME, the
/// total padding needed so that outputSize == ceil(inputSize / stride) is
/// split as evenly as possible, the odd remainder going to the back.
inline void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
                                 uint32_t filterSize, bool samePadding,
                                 uint32_t* paddingFront, uint32_t* paddingBack) {
    *paddingFront = 0;
    *paddingBack = 0;

    if (!samePadding) {
        return;
    }

    const uint32_t outputSize = (inputSize + stride - 1) / stride;
    const uint32_t paddedExtent = (outputSize - 1) * stride + filterSize;
    if (paddedExtent > inputSize) {
        const uint32_t totalPadding = paddedExtent - inputSize;
        *paddingFront = totalPadding / 2;
        *paddingBack = totalPadding - *paddingFront;
    }
}
415
416void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
417 bool samePadding)
418{
419 CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
420}
421
422/// An Abstract base class which represents a single tensorflow operation (node)
423/// that has been (potentially partially) converted to Armnn.
424/// It may not yet have been fully converted into actual Armnn layers.
425class ParsedTfOperation
426{
427public:
428 ParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
429 : m_Parser(parser)
430 , m_Node(node)
431 {
432 }
433
434 virtual ~ParsedTfOperation() {};
435
436 const tensorflow::NodeDef& GetNode() const { return m_Node; }
437
438 /// Gets the ArmNN IOutputSlot corresponding to the given output index of the Tensorflow operation.
439 /// This may result in the creation of Armnn layers if this was deferred (e.g. see ParsedConstTfOperation).
440 virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) = 0;
441
442 /// If this operation is an Identity then this will follow return the 'parent' operation (recursively).
443 virtual ParsedTfOperation* ResolveIdentityOperations()
444 {
445 return this;
446 }
447
448protected:
449 TfParser* m_Parser;
450 const tensorflow::NodeDef& m_Node;
451};
452
453/// An ParsedTfOperation where the Armnn equivalent is a single layer,
454/// with output slots that correspond directly to the Tf node outputs.
455class SingleLayerParsedTfOperation : public ParsedTfOperation
456{
457public:
458 SingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node, IConnectableLayer* layer)
459 : ParsedTfOperation(parser, node)
460 , m_Layer(layer)
461 {
462 }
463
464 IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
465 {
466 BOOST_ASSERT(m_Layer);
telsoa01c577f2c2018-08-31 09:22:23 +0100467 // Assumes one-to-one mapping between Tf and armnn output slots.
surmeh01bceff2f2018-03-29 16:29:27 +0100468 unsigned int armnnOutputSlotIdx = tfOutputIndex;
469 if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
470 {
471 throw ParseException(
telsoa01c577f2c2018-08-31 09:22:23 +0100472 boost::str(
473 boost::format(
474 "The requested output slot #%1% "
475 "for %2% does not exist %3%")
476 % armnnOutputSlotIdx
477 % m_Layer->GetName()
478 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100479 }
480 return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
481 }
482
483protected:
484 IConnectableLayer* m_Layer;
485};
486
telsoa01c577f2c2018-08-31 09:22:23 +0100487/// A SingleLayerParsedTfOperation for deferred layer creation.
surmeh01bceff2f2018-03-29 16:29:27 +0100488class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
489{
490public:
491 DeferredSingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
492 : SingleLayerParsedTfOperation(parser, node, nullptr)
493 {
494 }
495
496 IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
497 {
498 if (!m_Layer)
499 {
500 CreateLayerDeferred();
501 }
502 return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
503 }
504
505private:
506 virtual void CreateLayerDeferred() = 0;
507};
508
509
510TfParser::TfParser()
511 : m_Network(nullptr, nullptr)
512{
513}
514
515
516const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
517{
518 if (nodeDef->op() != "Identity")
519 {
520 return nodeDef;
521 }
522
523 if (nodeDef->input_size() != 1)
524 {
telsoa01c577f2c2018-08-31 09:22:23 +0100525 throw ParseException(
526 boost::str(
527 boost::format(
528 "Identity node should have a single input! %1% has %2% inputs %3%")
529 % nodeDef->name()
530 % nodeDef->input_size()
531 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100532 }
533
534 auto it = m_NodesByName.find(nodeDef->input(0));
535 if (it != m_NodesByName.end())
536 {
537 const tensorflow::NodeDef* inputNode = it->second;
538 return ResolveIdentityNode(inputNode);
539 }
540 else
541 {
telsoa01c577f2c2018-08-31 09:22:23 +0100542 throw ParseException(
543 boost::str(
544 boost::format(
545 "Cannot find what the Identity node %1% is linked to! %2%")
546 % nodeDef->name()
547 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100548 }
549}
550
551std::vector<OutputOfConstNodeDef>
552TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
553{
554 std::vector<OutputOfConstNodeDef> ret;
555
surmeh013537c2c2018-05-18 16:31:43 +0100556 if (nodeDef.op() == "Const")
557 {
558 // For some reason const node can have "Control Inputs". We ignore them for now.
559 return ret;
560 }
561
surmeh01bceff2f2018-03-29 16:29:27 +0100562 ret.reserve(boost::numeric_cast<size_t>(nodeDef.input_size()));
563 for (int j = 0; j < nodeDef.input_size(); ++j)
564 {
565 OutputId outputId = ParseOutputId(nodeDef.input(j));
surmeh013537c2c2018-05-18 16:31:43 +0100566
567 if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
568 {
narpra016f37f832018-12-21 18:30:00 +0000569 // We currently allow Control Input from TensorFlow graph but we ignore them from ArmNN graph.
570 continue;
surmeh013537c2c2018-05-18 16:31:43 +0100571 }
572
surmeh01bceff2f2018-03-29 16:29:27 +0100573 auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
574 if (inputIt == m_NodesByName.end())
575 {
576 throw ParseException(
telsoa01c577f2c2018-08-31 09:22:23 +0100577 boost::str(
578 boost::format(
579 "Can't find node '%1%', which is listed as an input of '%2%' %3%")
580 % nodeDef.input(j)
581 % nodeDef.name()
582 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100583 }
584 ret.push_back(OutputOfConstNodeDef(inputIt->second,outputId.m_Index));
585 }
586
587 return ret;
588}
589
590std::vector<OutputOfParsedTfOperation>
591TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
592 std::size_t expectedNumInputs)
593{
telsoa01c577f2c2018-08-31 09:22:23 +0100594 // Fetches the tensorflow nodes connected as inputs and validate the size.
surmeh01bceff2f2018-03-29 16:29:27 +0100595 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
596 const std::size_t numInputs = nodes.size();
597 if (numInputs != expectedNumInputs)
598 {
telsoa01c577f2c2018-08-31 09:22:23 +0100599 throw ParseException(
600 boost::str(
601 boost::format(
602 "Unexpected number of inputs for node %1%. Expected %2%, found %3% %4%")
603 % nodeDef.name()
604 % expectedNumInputs
605 % numInputs
606 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100607 }
telsoa01c577f2c2018-08-31 09:22:23 +0100608 // Fetches the corresponding ParsedTfOperation operations
surmeh01bceff2f2018-03-29 16:29:27 +0100609 std::vector<OutputOfParsedTfOperation> result;
610 for (auto&& node : nodes)
611 {
612 auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
613 if (it == m_ParsedTfOperations.end())
614 {
telsoa01c577f2c2018-08-31 09:22:23 +0100615 throw ParseException(
616 boost::str(
617 boost::format(
618 "Node with name '%1%' has not been parsed %2%")
619 % node.m_IndexedValue->name()
620 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100621 }
622 ParsedTfOperation* parsedOp = it->second.get();
623 // Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
624 parsedOp = parsedOp->ResolveIdentityOperations();
625 result.push_back(OutputOfParsedTfOperation(parsedOp,node.m_Index));
626 }
627 return result;
628}
629
Ferran Balaguerfbdad032018-12-28 18:15:24 +0000630IConnectableLayer* TfParser::CreateAdditionLayer(
631 const tensorflow::NodeDef& nodeDef,
632 IOutputSlot* input0Slot,
633 IOutputSlot* input1Slot,
634 const std::string& layerName)
635{
636 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
637 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
638
639 const unsigned int input0Dim = input0Info.GetNumDimensions();
640 const unsigned int input1Dim = input1Info.GetNumDimensions();
641 if (input0Dim != input1Dim)
642 {
643 // broadcasting where input0 and input1 have different number of dimensions
644 // is only supported for 1D and 4D tensors pair
645 if (input0Dim == 1 && input1Dim == 4)
646 {
647 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
648 }
649 else if (input0Dim == 4 && input1Dim == 1)
650 {
651 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
652 }
653 else
654 {
655 throw ParseException(
656 boost::str(
657 boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
658 % layerName
659 % nodeDef.name()
660 % CHECK_LOCATION().AsString()));
661 }
662 }
663 IConnectableLayer* const layer = m_Network->AddAdditionLayer(layerName.c_str());
664
665 input0Slot->Connect(layer->GetInputSlot(0));
666 input1Slot->Connect(layer->GetInputSlot(1));
667
668 // Ensure the output tensor has the correct dimensions even if a broadcast has been done
669 TensorInfo outputInfo = input0Slot->GetTensorInfo();
670 std::vector<unsigned int> outputShape;
671
672 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
673 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
674
675 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
676 {
677 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
678 }
679
680 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
681 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
682
683 return layer;
684}
685
686IConnectableLayer* TfParser::CreateAdditionLayer(
687 const tensorflow::NodeDef& nodeDef,
688 IConnectableLayer* layerOne,
689 IConnectableLayer* layerTwo,
690 unsigned int numberOfAddition,
691 unsigned long numberOfLayersToConnect,
692 bool isOdd)
693{
694 IOutputSlot* input0Slot = &layerOne->GetOutputSlot(0);
695 IOutputSlot* input1Slot = &layerTwo->GetOutputSlot(0);
696 std::string layerName(nodeDef.name());
697 if (isOdd || numberOfLayersToConnect != 2)
698 {
699 // we are not connecting the final layer
700 layerName.append("_addN_").append(std::to_string(numberOfAddition));
701 }
702 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
703}
704
705IConnectableLayer* TfParser::CreateAdditionLayer(
706 const tensorflow::NodeDef& nodeDef,
707 const OutputOfParsedTfOperation& opOne,
708 const OutputOfParsedTfOperation& opTwo,
709 unsigned int numberOfAddition)
710{
711 IOutputSlot* input0Slot = &opOne.m_IndexedValue->ResolveArmnnOutputSlot(opOne.m_Index);
712 IOutputSlot* input1Slot = &opTwo.m_IndexedValue->ResolveArmnnOutputSlot(opTwo.m_Index);
713 std::string layerName(nodeDef.name());
714 layerName.append("_addN_").append(std::to_string(numberOfAddition));
715 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
716}
717
718IConnectableLayer* TfParser::CreateAdditionLayer(
719 const tensorflow::NodeDef& nodeDef,
720 const OutputOfParsedTfOperation& op,
721 IConnectableLayer* layer)
722{
723 IOutputSlot* input0Slot = &op.m_IndexedValue->ResolveArmnnOutputSlot(op.m_Index);
724 IOutputSlot* input1Slot = &layer->GetOutputSlot(0);
725 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, nodeDef.name());
726}
727
728ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
729{
Derek Lambertibaa177f2019-12-10 22:00:43 +0000730 boost::ignore_unused(graphDef);
Ferran Balaguerfbdad032018-12-28 18:15:24 +0000731 uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
732 if (numberOfInputs < 2)
733 {
734 // should never happen
735 throw ParseException(
736 boost::str(
737 boost::format(
738 "AddN Node with name '%1%' has less than two (%2) inputs %3%")
739 % nodeDef.name()
740 % std::to_string(numberOfInputs)
741 % CHECK_LOCATION().AsString()));
742 }
743 else if (numberOfInputs == 2)
744 {
745 //this is the same as a simple Add operation
746 return AddAdditionLayer(nodeDef, false);
747 }
748 else
749 {
750 // build a binary tree of Add layers and return the final Add as the return from the function
751 // if we have an odd number of inputs then the final Add will consist of a layer connecting to an
752 // OutputOfParsedTfOperation, otherwise it will be two layers being added together
753 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numberOfInputs);
754 unsigned int numberOfAdditions = 0;
755 std::vector<IConnectableLayer*> layers;
756 // NOTE: at this point we will have a minimum of three inputs
757 for (unsigned int i = 0; i < numberOfInputs; ++i)
758 {
759 // every time i is odd we have two inputs to process.
760 bool onSecondItem = i % 2;
761 if (onSecondItem)
762 {
763 ++numberOfAdditions;
764 IConnectableLayer* newLayer = CreateAdditionLayer(
765 nodeDef, inputs[ i - 1], inputs[i], numberOfAdditions);
766 layers.push_back(newLayer);
767 }
768 }
769
770 std::vector<IConnectableLayer*> layersToConnect(layers);
771 unsigned long numberOfLayersToConnect = layersToConnect.size();
772 bool isOdd = numberOfInputs % 2;
773
774 while (numberOfLayersToConnect > 1)
775 {
776 layers.clear();
777 for (unsigned long i = 0; i < numberOfLayersToConnect; ++i) {
778 bool onSecondItem = i % 2;
779 if (onSecondItem) {
780 ++numberOfAdditions;
781 IConnectableLayer* newLayer = CreateAdditionLayer(
782 nodeDef,
783 layersToConnect[i - 1],
784 layersToConnect[i],
785 numberOfAdditions,
786 numberOfLayersToConnect,
787 isOdd);
788 layers.push_back(newLayer);
789 }
790 }
791 //OK... need to go again... maybe
792 layersToConnect = layers;
793 numberOfLayersToConnect = layersToConnect.size();
794 }
795 IConnectableLayer* finalLayer = layersToConnect[0];
796 // if we had an odd number of inputs we need to connect the final layer to the
797 // last OutputOfParsedTfOperation in order to create the last Add layer we will
798 // be handing back.
799 if (isOdd)
800 {
801 // connect the final layer to the last op
802 finalLayer = CreateAdditionLayer(nodeDef, inputs[numberOfInputs - 1], finalLayer);
803 }
804 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, finalLayer);
805 }
806}
807
surmeh01bceff2f2018-03-29 16:29:27 +0100808ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
809{
Derek Lambertibaa177f2019-12-10 22:00:43 +0000810 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +0100811 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
812
telsoa01c577f2c2018-08-31 09:22:23 +0100813 // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
814 // together as FullyConnected.
surmeh01bceff2f2018-03-29 16:29:27 +0100815 if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
816 HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
817 {
818 IConnectableLayer* layer =
819 AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
820 &nodeDef,nodeDef.name().c_str());
821 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
822 }
823 else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
824 inputs[1].m_IndexedValue->GetNode().op() == "MatMul")
825 {
826 IConnectableLayer* layer =
827 AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
828 &nodeDef,nodeDef.name().c_str());
829 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
830 }
831 else
832 {
telsoa01c577f2c2018-08-31 09:22:23 +0100833 // Otherwise it's just a regular addition.
surmeh01bceff2f2018-03-29 16:29:27 +0100834 return AddAdditionLayer(nodeDef);
835 }
836}
837
838ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
839{
Derek Lambertibaa177f2019-12-10 22:00:43 +0000840 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +0100841 return AddAdditionLayer(nodeDef, true);
842}
843
/// An ParsedTfOperation which forwards to another (used for Identity nodes).
/// Identity nodes produce no ArmNN layer; every query on this operation is delegated
/// to the operation that feeds the Identity node.
class ParsedIdentityTfOperation : public ParsedTfOperation
{
public:
    // 'representative' is the parsed operation this identity stands in for; non-owning.
    ParsedIdentityTfOperation(TfParser* parser, const tensorflow::NodeDef& node, ParsedTfOperation* representative)
        : ParsedTfOperation(parser, node)
        , m_Representative(representative)
    {
    }

    // Delegates output-slot resolution to the represented operation.
    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        BOOST_ASSERT(m_Representative);
        return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
    }

    // Recurses through chained Identity nodes until a non-identity operation is found.
    virtual ParsedTfOperation* ResolveIdentityOperations() override
    {
        return m_Representative->ResolveIdentityOperations();
    }

private:
    // The operation this identity node forwards to. Non-owning; owned by the parser's
    // operation map.
    ParsedTfOperation* m_Representative;
};
868
869ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
870{
Derek Lambertibaa177f2019-12-10 22:00:43 +0000871 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +0100872 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
873 // Any requests for the output slots of this node should be forwarded to the node connected as input.
874 return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
875}
876
/// An ParsedTfOperation for a Const node.
/// Creation of the armnn ConstLayer is deferred until it is actually needed, because Const nodes are mostly used
/// for weight inputs to MatMul/Conv2D nodes and in these cases armnn doesn't need a ConstLayer.
template <typename T>
class ParsedConstTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    // Copies tensorInfo.GetNumElements() values of T out of 'tensorData' into owned
    // storage, so the source buffer need not outlive this object.
    ParsedConstTfOperation(TfParser* parser, const tensorflow::NodeDef& node,
        const T* tensorData, const TensorInfo& tensorInfo)
        : DeferredSingleLayerParsedTfOperation(parser, node),
        m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
        m_TensorInfo(tensorInfo)
    {
        // T must be exactly the element size of the tensor's data type; otherwise the
        // element-count copy above and the byte-count memcpy below would disagree.
        BOOST_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
    }

    // Materializes the ArmNN ConstantLayer on first use (see class comment).
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
        m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
    }

    // Copies the tensor data into the caller-provided vector and returns a ConstTensor
    // that views that vector, so the result stays valid as long as outputTensorData does.
    ConstTensor GetConstTensor(std::vector<T>& outputTensorData) const
    {
        outputTensorData.resize(m_TensorInfo.GetNumElements());

        memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.GetNumBytes());

        // Updates the result to point to the user provided storage.
        ConstTensor constTensor(m_TensorInfo, outputTensorData);
        return constTensor;
    }

    // Direct, non-owning access to the raw tensor data held in m_Storage.
    const T* GetStorage() const
    {
        return m_Storage.data();
    }

    const TensorInfo& GetTensorInfo() const
    {
        return m_TensorInfo;
    }

private:
    ///< Manages the lifetime of the tensor data.
    std::vector<T> m_Storage;
    ///< Describes the layout of the tensor and points to the data in m_Storage.
    TensorInfo m_TensorInfo;
};
927
telsoa01c577f2c2018-08-31 09:22:23 +0100928DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
929 const tensorflow::NodeDef& nodeDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100930{
931 switch (tfDataType)
932 {
933 case tensorflow::DT_FLOAT:
934 return DataType::Float32;
935 break;
936 case tensorflow::DT_INT32:
937 return DataType::Signed32;
938 break;
939 default:
telsoa01c577f2c2018-08-31 09:22:23 +0100940 throw ParseException(
941 boost::str(
942 boost::format(
943 "Unknown DataType %1% for node %2% %3%")
944 % tensorflow::DataType_Name(tfDataType)
945 % nodeDef.name()
946 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100947 }
948}
949
950struct ParseTfTensorValueList
951{
952 template<typename DataType>
953 static void Parse(
954 const tensorflow::TensorProto& tfTensor,
955 unsigned int dstElements,
956 std::vector<int8_t>& outputData);
957
958 template <typename DataType>
959 static void ReadData(const void* srcData, unsigned int numSrcElements,
960 std::vector<int8_t>& dstData, unsigned int numDstElements)
961 {
telsoa01c577f2c2018-08-31 09:22:23 +0100962 // If there are no entries in the list, perform no action.
surmeh01bceff2f2018-03-29 16:29:27 +0100963 if (numSrcElements == 0)
964 {
965 return;
966 }
967
telsoa01c577f2c2018-08-31 09:22:23 +0100968 // If no size was provided, use the length of the value list.
surmeh01bceff2f2018-03-29 16:29:27 +0100969 if (numDstElements == 0)
970 {
971 numDstElements = numSrcElements;
972 }
973
telsoa01c577f2c2018-08-31 09:22:23 +0100974 // Allocates memory.
surmeh01bceff2f2018-03-29 16:29:27 +0100975 dstData.resize(std::max(numSrcElements, numDstElements) * sizeof(DataType));
976
977 const DataType* srcTensor = reinterpret_cast<const DataType*>(srcData);
978 DataType* dstTensor = reinterpret_cast<DataType*>(dstData.data());
979
telsoa01c577f2c2018-08-31 09:22:23 +0100980 // Copies the value list entries into the destination.
surmeh01bceff2f2018-03-29 16:29:27 +0100981 std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);
982
983 if (numDstElements > numSrcElements)
984 {
telsoa01c577f2c2018-08-31 09:22:23 +0100985 // Uses the last element in the list to fill the remaining entries.
surmeh01bceff2f2018-03-29 16:29:27 +0100986 std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
987 }
988 }
989
990};
991
992template <>
993void ParseTfTensorValueList::Parse<float>(const tensorflow::TensorProto& tfTensor,
994 unsigned int dstElements, std::vector<int8_t>& outputData)
995{
996 ReadData<float>(tfTensor.float_val().data(), static_cast<unsigned int>(tfTensor.float_val_size()),
997 outputData, dstElements);
998}
999
1000template <>
1001void ParseTfTensorValueList::Parse<int32_t>(const tensorflow::TensorProto& tfTensor,
1002 unsigned int dstElements, std::vector<int8_t>& outputData)
1003{
1004 ReadData<int32_t>(tfTensor.int_val().data(), static_cast<unsigned int>(tfTensor.int_val_size()),
1005 outputData, dstElements);
1006}
1007
// Factory metafunction: builds an OperatorType<DataType> for a node, forwarding any
// extra constructor arguments. The element type is chosen at runtime through
// InvokeParseFunction. The unused T parameter exists only so the primary template can
// be specialized (see MakeTfOperation<ParsedConstTfOperation> below).
template <template<typename> class OperatorType, typename T = int8_t>
struct MakeTfOperation
{
    template<typename DataType, class... Args>
    inline static std::unique_ptr<OperatorType<DataType>> Parse(TfParser* parser, const tensorflow::NodeDef& node,
        Args&&... args)
    {
        return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
    }
};
1018
// Specialization for ParsedConstTfOperation: the tensor payload arrives as raw bytes,
// so the buffer is reinterpreted as the requested element type before construction.
// NOTE(review): relies on tensorData being correctly aligned/encoded for DataType, as
// produced by ParseTfTensorValueList / tensor_content — confirm if reused elsewhere.
template <>
struct MakeTfOperation<ParsedConstTfOperation>
{
    template<typename DataType, class... Args>
    inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(TfParser* parser,
        const tensorflow::NodeDef& node, const std::vector<int8_t>& tensorData, const TensorInfo& tensorInfo)
    {
        return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
            reinterpret_cast<const DataType*>(tensorData.data()), tensorInfo);
    }
};
1030
1031template <class FuncType>
1032struct InvokeParseFunction
1033{
1034 template<class ResType, class... Args>
1035 inline static ResType Result(DataType dataType, Args&&... args)
1036 {
1037 if (dataType == DataType::Float32)
1038 {
1039 return FuncType::template Parse<float>(std::forward<Args>(args)...);
1040 }
1041 else if (dataType == DataType::Signed32)
1042 {
1043 return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1044 }
1045
1046 return ResType();
1047 }
1048
1049 template<class... Args>
1050 inline static void Result(DataType dataType, Args&&... args)
1051 {
1052 if (dataType == DataType::Float32)
1053 {
1054 FuncType::template Parse<float>(std::forward<Args>(args)...);
1055 }
1056 else if (dataType == DataType::Signed32)
1057 {
1058 FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1059 }
1060 }
1061};
1062
1063ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1064{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001065 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01001066 BOOST_ASSERT(nodeDef.op() == "Const");
1067
1068 if (nodeDef.attr().count("value") == 0)
1069 {
telsoa01c577f2c2018-08-31 09:22:23 +01001070 throw ParseException(
1071 boost::str(
1072 boost::format(
1073 "Value not found for Const node - %1% %2%")
1074 % nodeDef.name()
1075 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001076 }
1077
1078 const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
1079 const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
1080 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "dtype");
1081
1082 const auto GetDimensionSize = [](auto& d) { return d.size(); };
1083
1084 std::vector<unsigned int> dimensionSizes;
1085 std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
1086 std::back_inserter(dimensionSizes), GetDimensionSize);
1087
telsoa01c577f2c2018-08-31 09:22:23 +01001088 // Calculates number of elements.
1089 const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01001090 unsigned int numElements = 0U;
1091
1092 if (!dimensionSizes.empty())
1093 {
1094 numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
1095 1U, std::multiplies<unsigned int>());
1096 }
1097
1098 std::vector<int8_t> tensorData;
1099
telsoa01c577f2c2018-08-31 09:22:23 +01001100 // Get tensor data from the list of values attribute.
surmeh01bceff2f2018-03-29 16:29:27 +01001101 if (tfTensor.tensor_content().empty())
1102 {
1103 InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);
1104
1105 // If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
telsoa01c577f2c2018-08-31 09:22:23 +01001106 // tensor of the provided number of elements.
surmeh01bceff2f2018-03-29 16:29:27 +01001107 if (numElements == 0)
1108 {
telsoa01c577f2c2018-08-31 09:22:23 +01001109 const unsigned int tfNumElements =
1110 static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
surmeh01bceff2f2018-03-29 16:29:27 +01001111 dimensionSizes.push_back(tfNumElements);
1112 }
1113 }
telsoa01c577f2c2018-08-31 09:22:23 +01001114 // Gets tensor data from tensor content attribute.
surmeh01bceff2f2018-03-29 16:29:27 +01001115 else
1116 {
1117 tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());
1118
telsoa01c577f2c2018-08-31 09:22:23 +01001119 // Checks if a tensor shape is defined for the tensor content.
surmeh01bceff2f2018-03-29 16:29:27 +01001120 if (numElements == 0)
1121 {
telsoa01c577f2c2018-08-31 09:22:23 +01001122 throw ParseException(
1123 boost::str(
1124 boost::format(
1125 "No tensor shape found for Const node - %1% %2%")
1126 % nodeDef.name()
1127 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001128 }
1129 }
1130
telsoa01c577f2c2018-08-31 09:22:23 +01001131 // Const node requires at least a list of values or a content attribute.
surmeh01bceff2f2018-03-29 16:29:27 +01001132 if (tensorData.empty())
1133 {
telsoa01c577f2c2018-08-31 09:22:23 +01001134 throw ParseException(
1135 boost::str(
1136 boost::format(
1137 "No tensor data found for Const node - %1% %2%")
1138 % nodeDef.name()
1139 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001140 }
1141
telsoa01c577f2c2018-08-31 09:22:23 +01001142 const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
1143 dimensionSizes.data(),
1144 dataType);
surmeh01bceff2f2018-03-29 16:29:27 +01001145
1146 // If we have a list of values, then the length of the list must be
telsoa01c577f2c2018-08-31 09:22:23 +01001147 // less than or equal to the number of elements implied by the shape argument.
surmeh01bceff2f2018-03-29 16:29:27 +01001148 if (tensorData.size() > tensorInfo.GetNumBytes())
1149 {
telsoa01c577f2c2018-08-31 09:22:23 +01001150 throw ParseException(
1151 boost::str(
1152 boost::format(
1153 "Number of elements (%1%) should be less than or equal "
1154 "to the number of elements implied by the shape argument (%2%) for Const node - %3% %4%")
1155 % (tensorData.size() / GetDataTypeSize(dataType))
1156 % tensorInfo.GetNumElements()
1157 % nodeDef.name()
1158 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001159 }
1160
1161 return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
1162 dataType, this, nodeDef, tensorData, tensorInfo);
1163}
1164
1165template<typename Type>
1166bool TfParser::HasParsedConstTensor(const std::string & nodeName) const
1167{
1168 auto it = m_ParsedTfOperations.find(nodeName);
jimfly01f6ba7472018-12-04 10:09:52 +00001169 if (it == m_ParsedTfOperations.end())
surmeh01bceff2f2018-03-29 16:29:27 +01001170 {
1171 return false;
1172 }
jimfly01f6ba7472018-12-04 10:09:52 +00001173 return dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) != nullptr;
1174}
1175
1176template<typename Type>
1177bool TfParser::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const
1178{
1179 return dynamic_cast<ParsedConstTfOperation<Type>*>(parsedTfOpPtr) != nullptr;
surmeh01bceff2f2018-03-29 16:29:27 +01001180}
1181
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00001182unsigned int TfParser::GetConstInputIndex(const std::vector<OutputOfParsedTfOperation>& inputs)
1183{
1184 for (unsigned int i = 0; i < inputs.size(); i++)
1185 {
1186 if (HasParsedConstTensor<int32_t>(inputs[i].m_IndexedValue->GetNode().name()))
1187 {
1188 return i;
1189 }
1190 }
1191 throw ParseException(
1192 boost::str(
1193 boost::format(
1194 "ArmNN only supports operators with constant axis. %1%")
1195 % CHECK_LOCATION().AsString()));
1196
1197}
1198
// Parses a TF Conv2D node into an ArmNN Convolution2d layer.
// Requirements enforced below: constant float weights, dilations of [1,1,1,1] (or
// absent), and a data_format of NHWC or NCHW.
ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    // Weights (input 1) must be a parsed float constant.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Convolution layers with constant weights for %1%, input %2% %3%")
                    % nodeDef.name()
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    // Read the dilations, if present - only [1,1,1,1] (the default) is supported.
    std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
    if (!dilations.empty())
    {
        for (auto dilation : dilations)
        {
            if (dilation != 1u)
            {
                throw ParseException(
                    boost::str(
                        boost::format(
                            "ArmNN only supports Convolution layers with dilations [1,1,1,1] for %1% %2%")
                            % nodeDef.name()
                            % CHECK_LOCATION().AsString()));
            }
        }
    }

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    // DataLayoutIndexed maps the H/W axes to the correct positions for the layout.
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // Tensorflow weights are [H, W, In, Out].
    // ArmNN weights have to be [Out, H, W, In] when the data layout is NHWC,
    // and [Out, In, H, W] when the data layout is NCHW.
    PermutationVector permutationVector =
        dataLayout == DataLayout::NHWC ?
            std::initializer_list<unsigned int>{ 1, 2, 3, 0 } : // NHWC: [H, W, In, Out] -> [Out, H, W, In]
            std::initializer_list<unsigned int>{ 2, 3, 1, 0 };  // NCHW: [H, W, In, Out] -> [Out, In, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    uint32_t weightHeight = weightTensor.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t weightWidth = weightTensor.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    // SAME: output spatial size is ceil(input / stride); VALID: no implicit padding.
    if (paddingString == "SAME")
    {
        padding = true;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth  = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                  static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth  = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                  static_cast<float>(desc.m_StrideX)));
    }

    // Output channel count is the filter's Out dimension (index 0 after the swizzle).
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      weightTensor.GetShape()[0] },
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
        default:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      weightTensor.GetShape()[0],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc,
                                                                weightTensor,
                                                                EmptyOptional(),
                                                                nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1340
// Parses a TF DepthwiseConv2dNative node into an ArmNN DepthwiseConvolution2d layer.
// Requires constant float weights and a data_format of NHWC or NCHW.
ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
                                                    const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    // Weights (input 1) must be a parsed float constant.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Depthwise Convolution layer with constant weights. "
                    "Non const input found %1% for node %2% %3%")
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    // DataLayoutIndexed maps the H/W axes to the correct positions for the layout.
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // Tensorflow weights come in the format [H, W, I, M].
    // ArmNN weights have to be [M, I, H, W].
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    // After the swizzle the filter is [M, I, H, W], so H and W are at fixed indices 2 and 3
    // regardless of the activation data layout.
    uint32_t weightHeight = weightTensor.GetShape()[2];
    uint32_t weightWidth = weightTensor.GetShape()[3];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    // SAME: output spatial size is ceil(input / stride); VALID: no implicit padding.
    if (paddingString == "SAME")
    {
        padding = true;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth  = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                  static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth  = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                  static_cast<float>(desc.m_StrideX)));
    }

    // Depthwise output channel count is M * I (channel multiplier times input channels).
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      weightTensor.GetShape()[0] * weightTensor.GetShape()[1]},
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
        default:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                                         weightTensor,
                                                                         EmptyOptional(),
                                                                         nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1462
Conor Kennedyc2130a02018-12-05 11:05:54 +00001463TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
1464{
1465 BOOST_ASSERT(nodeDef.op() == "ExpandDims");
1466
1467 if (inputTensorInfo.GetNumDimensions() > 4) {
1468 throw ParseException(
1469 boost::str(
1470 boost::format(
1471 "Unsupported number of dimensions: %1% for input shape for ExpandDims %2% %3%")
1472 % inputTensorInfo.GetNumDimensions()
1473 % nodeDef.name()
1474 % CHECK_LOCATION().AsString()));
1475 }
1476
1477 std::int32_t expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");
1478
1479 std::int32_t inputDimSize = boost::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
1480 std::vector<uint32_t> outputDims;
1481
1482 // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
1483 if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1484 {
1485 // add current input shape to outputDims
1486 for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
1487 auto currentDimension = inputTensorInfo.GetShape()[i];
1488 outputDims.push_back(currentDimension);
1489 }
1490
1491 // insert a dimension of 1 at index 'expandDim' of inputs shape
1492 if (expandDim >= 0)
1493 {
1494 auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1495 outputDims.insert(getPosition, 1);
1496 }
1497
1498 // if negative number for 'expandDim' then count backwards from the last element
1499 // and insert 1 dimension at index 'expandDim'
1500 if (expandDim < 0)
1501 {
Matteo Martincighd7cceeb2018-12-06 09:06:29 +00001502 int outputDimSize = boost::numeric_cast<int>(outputDims.size() + 1);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001503 auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1504 outputDims.insert(getPosition, 1);
1505 }
1506 }
1507 else
1508 {
1509 throw InvalidArgumentException(
1510 boost::str(
1511 boost::format(
1512 "Cannot expand dimension %1% in input tensor with %2% dimension %3%")
1513 % expandDim
1514 % inputDimSize
1515 % CHECK_LOCATION().AsString()));
1516 }
1517
1518 if (outputDims.size() > 4)
1519 {
1520 throw ParseException(
1521 boost::str(
1522 boost::format(
1523 "Unsupported number of dimensions: %1% for output shape for ExpandDims %2% %3%")
1524 % outputDims.size()
1525 % nodeDef.name()
1526 % CHECK_LOCATION().AsString()));
1527 }
1528
1529 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1530 outputDims.data());
1531
1532 TensorInfo outTensorInfo = inputTensorInfo;
1533 outTensorInfo.SetShape(outShape);
1534
1535 return outTensorInfo;
1536}
1537
1538ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1539{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001540 boost::ignore_unused(graphDef);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001541 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1542
1543 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1544 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1545
1546 TensorInfo outputInfo;
1547 outputInfo = OutputShapeOfExpandDims(nodeDef, inputTensorInfo);
1548
1549 ReshapeDescriptor reshapeDesc;
1550 reshapeDesc.m_TargetShape = outputInfo.GetShape();
1551 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1552 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1553 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1554
1555 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1556}
1557
surmeh01bceff2f2018-03-29 16:29:27 +01001558ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
1559 const tensorflow::GraphDef& graphDef)
1560{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001561 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01001562 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
1563
1564 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1565 {
telsoa01c577f2c2018-08-31 09:22:23 +01001566 throw ParseException(
1567 boost::str(
1568 boost::format(
1569 "ArmNN only supports FusedBatchNormalization layers with constant scale. "
1570 "Input %1%. Node %2% %3%")
1571 % inputs[1].m_IndexedValue->GetNode().name()
1572 % nodeDef.name()
1573 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001574 }
1575 ParsedConstTfOperation<float>* scaleNode =
1576 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1577
1578 if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
1579 {
telsoa01c577f2c2018-08-31 09:22:23 +01001580 throw ParseException(
1581 boost::str(
1582 boost::format(
1583 "ArmNN only supports FusedBatchNormalization layers with constant offset. "
1584 "Input %1%. Node %2% %3%")
1585 % inputs[2].m_IndexedValue->GetNode().name()
1586 % nodeDef.name()
1587 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001588 }
1589 ParsedConstTfOperation<float>* offsetNode =
1590 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);
1591
1592 if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
1593 {
telsoa01c577f2c2018-08-31 09:22:23 +01001594 throw ParseException(
1595 boost::str(
1596 boost::format(
1597 "ArmNN only supports FusedBatchNormalization layers with constant mean. "
1598 "Input %1%. Node %2% %3%")
1599 % inputs[3].m_IndexedValue->GetNode().name()
1600 % nodeDef.name()
1601 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001602 }
1603 ParsedConstTfOperation<float>* meanNode =
1604 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);
1605
1606 if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
1607 {
telsoa01c577f2c2018-08-31 09:22:23 +01001608 throw ParseException(
1609 boost::str(
1610 boost::format(
1611 "ArmNN only supports FusedBatchNormalization layers with constant variance. "
1612 "Input %1%. Node %2% %3%")
1613 % inputs[4].m_IndexedValue->GetNode().name()
1614 % nodeDef.name()
1615 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001616 }
1617 ParsedConstTfOperation<float>* varianceNode =
1618 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);
1619
Aron Virginas-Tar2e259272019-11-27 13:29:51 +00001620 const std::string dataFormat = ReadOptionalNodeStringAttribute(nodeDef, "data_format", "NHWC");
Matteo Martincigh075c7502018-12-05 13:10:45 +00001621 CHECK_DATA_FORMAT(nodeDef, dataFormat, "FusedBatchNorm");
1622
telsoa01c577f2c2018-08-31 09:22:23 +01001623 // The descriptor only has the epsilon attribute.
surmeh01bceff2f2018-03-29 16:29:27 +01001624 BatchNormalizationDescriptor desc;
1625 desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
Matteo Martincigh075c7502018-12-05 13:10:45 +00001626 desc.m_DataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
surmeh01bceff2f2018-03-29 16:29:27 +01001627
telsoa01c577f2c2018-08-31 09:22:23 +01001628 // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
1629 // locally until the layer is added.
surmeh01bceff2f2018-03-29 16:29:27 +01001630 std::vector<float> scaleTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001631 ConstTensor scaleTensor = scaleNode->GetConstTensor(scaleTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001632
1633 std::vector<float> offsetTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001634 ConstTensor offsetTensor = offsetNode->GetConstTensor(offsetTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001635
1636 std::vector<float> meanTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001637 ConstTensor meanTensor = meanNode->GetConstTensor(meanTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001638
1639 std::vector<float> varianceTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001640 ConstTensor varianceTensor = varianceNode->GetConstTensor(varianceTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001641
1642 IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
1643 meanTensor,
1644 varianceTensor,
1645 offsetTensor,
1646 scaleTensor,
1647 nodeDef.name().c_str());
1648
1649 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1650
Matteo Martincigh075c7502018-12-05 13:10:45 +00001651 layer->GetOutputSlot(0).SetTensorInfo(inputSlot.GetTensorInfo());
1652 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01001653
1654 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1655}
1656
telsoa01c577f2c2018-08-31 09:22:23 +01001657bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
1658 size_t alphaLayerIndex,
1659 const OutputOfParsedTfOperation& otherOp,
1660 armnn::IOutputSlot** outputOfLeakyRelu,
1661 armnn::ActivationDescriptor & desc)
1662{
1663 const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();
1664
1665 // Verifying all these assumptions hold:
1666 //
1667 // 1, the mulNodeDef is an elementwise multiplication node "Mul"
1668 // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
1669 // 3, the inputLayerIndex selects a layer which has the same name as otherNodeDef
1670 //
1671
1672 if (mulNodeDef.op() == "Mul")
1673 {
1674 size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
1675 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
1676
1677 BOOST_ASSERT(inputs.size() == 2);
1678 BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
1679 BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
1680 BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
1681
1682 if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
1683 {
1684 if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
1685 {
1686 ParsedConstTfOperation<float>* alpha =
1687 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(
1688 inputs[alphaLayerIndex].m_IndexedValue);
1689
1690 std::vector<float> const_data;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001691 ConstTensor const_tensor = alpha->GetConstTensor(const_data);
telsoa01c577f2c2018-08-31 09:22:23 +01001692
1693 if (const_data.size() == 1)
1694 {
1695 desc.m_Function = ActivationFunction::LeakyReLu;
1696 desc.m_A = const_data[0];
1697
1698 *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
1699 return true;
1700 }
1701 }
1702 }
1703 }
1704 return false;
1705}
1706
telsoa01c577f2c2018-08-31 09:22:23 +01001707ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
1708 const tensorflow::GraphDef& graphDef)
1709{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001710 boost::ignore_unused(graphDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001711 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
Sadik Armagan975c09a2018-12-04 10:02:08 +00001712 if (inputs.size() != 2)
1713 {
1714 throw ParseException(
1715 boost::str(
1716 boost::format(
1717 "Maximum expects two inputs!. Got %1% for Node %2% %3%")
1718 % inputs.size()
1719 % nodeDef.name()
1720 % CHECK_LOCATION().AsString()));
1721 }
1722
telsoa01c577f2c2018-08-31 09:22:23 +01001723 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1724 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1725 IOutputSlot* outputOfLeakyRelu = nullptr;
1726
1727 ActivationDescriptor desc;
1728
Sadik Armagan975c09a2018-12-04 10:02:08 +00001729 // A max node may be part of a LeakyRelu, with one input as a multiplication with a scalar constant,
1730 // i.e. one of the four possible scenarios:
1731 // 1, max(mul(a, x), x)
1732 // 2, max(mul(x, a), x)
1733 // 3, max(x, mul(a, x))
1734 // 4, max(x, mul(x, a))
1735 // These are handled by an activation layer.
telsoa01c577f2c2018-08-31 09:22:23 +01001736
1737 if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1738 IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1739 IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1740 IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1741 {
1742 BOOST_ASSERT(outputOfLeakyRelu != nullptr);
1743
1744 IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
1745 outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
1746 layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
1747 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1748 }
1749 else
1750 {
Sadik Armagan975c09a2018-12-04 10:02:08 +00001751 // Anything else is just a maximum layer.
1752
1753 return AddMaximumLayer(nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001754 }
1755}
1756
jimfly0184c70e62018-12-19 13:14:46 +00001757std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> TfParser::ProcessElementwiseInputSlots(
1758 const tensorflow::NodeDef& nodeDef, const std::string& layerName)
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001759{
1760 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1761
1762 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1763 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1764 const unsigned int input0Dim = input0Slot->GetTensorInfo().GetNumDimensions();
1765 const unsigned int input1Dim = input1Slot->GetTensorInfo().GetNumDimensions();
1766
1767 if (input0Dim != input1Dim)
1768 {
1769 // broadcasting where input0 and input1 have different number of dimensions
1770 // is only supported for 1D and 4D tensors pair
1771 if (input0Dim == 1 && input1Dim == 4)
1772 {
1773 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
1774 }
1775 else if (input0Dim == 4 && input1Dim == 1)
1776 {
1777 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
1778 }
1779 else
1780 {
1781 throw ParseException(
jimfly0184c70e62018-12-19 13:14:46 +00001782 boost::str(
1783 boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
1784 % layerName
1785 % nodeDef.name()
1786 % CHECK_LOCATION().AsString()));
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001787 }
1788 }
jimfly0184c70e62018-12-19 13:14:46 +00001789 return {input0Slot, input1Slot};
1790}
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001791
kevmay012b4d88e2019-01-24 14:05:09 +00001792ParsedTfOperationPtr TfParser::ProcessComparisonLayer(
1793 IOutputSlot* input0Slot,
1794 IOutputSlot* input1Slot,
1795 IConnectableLayer* const layer,
1796 const tensorflow::NodeDef& nodeDef)
1797{
1798 input0Slot->Connect(layer->GetInputSlot(0));
1799 input1Slot->Connect(layer->GetInputSlot(1));
1800
1801 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1802 outputInfo.SetDataType(DataType::Boolean);
1803 std::vector<unsigned int> outputShape;
1804
1805 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1806 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1807
1808 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1809 {
1810 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1811 }
1812
1813 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1814 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1815
1816 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1817}
1818
jimfly0184c70e62018-12-19 13:14:46 +00001819ParsedTfOperationPtr TfParser::ProcessElementwiseLayer(
1820 IOutputSlot* input0Slot,
1821 IOutputSlot* input1Slot,
1822 IConnectableLayer* const layer,
1823 const tensorflow::NodeDef& nodeDef)
1824{
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001825 input0Slot->Connect(layer->GetInputSlot(0));
1826 input1Slot->Connect(layer->GetInputSlot(1));
1827
1828 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1829 std::vector<unsigned int> outputShape;
1830
1831 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1832 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1833
1834 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1835 {
1836 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1837 }
1838
1839 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1840 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1841
1842 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1843}
1844
FrancisMurtagh94412af2019-01-24 10:53:39 +00001845ParsedTfOperationPtr TfParser::ParseGather(const tensorflow::NodeDef& nodeDef,
1846 const tensorflow::GraphDef& graphDef)
1847{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001848 boost::ignore_unused(graphDef);
FrancisMurtagh94412af2019-01-24 10:53:39 +00001849 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1850 IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1851 IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1852
1853 // Infer shape of output tensor
1854 unsigned int paramsDim = params.GetTensorInfo().GetNumDimensions();
1855 unsigned int indicesDim = indices.GetTensorInfo().GetNumDimensions();
1856 unsigned int outputDim = paramsDim - 1 + indicesDim;
1857
1858 std::vector<unsigned int> dimSizes;
1859
1860 for (unsigned int i = 0; i < indicesDim; ++i)
1861 {
1862 dimSizes.push_back(indices.GetTensorInfo().GetShape()[i]);
1863 }
1864 for (unsigned int i = 1; i < paramsDim; ++i)
1865 {
1866 dimSizes.push_back(params.GetTensorInfo().GetShape()[i]);
1867 }
1868
1869 const TensorShape& inferredShape = TensorShape(outputDim, dimSizes.data());
1870
1871 const TensorInfo inferredOutputInfo(inferredShape, params.GetTensorInfo().GetDataType());
1872
1873 IConnectableLayer* const layer = m_Network->AddGatherLayer(nodeDef.name().c_str());
1874 layer->GetOutputSlot(0).SetTensorInfo(inferredOutputInfo);
1875
1876 params.Connect(layer->GetInputSlot(0));
1877 indices.Connect(layer->GetInputSlot(1));
1878
1879 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1880}
1881
jimfly01a06bf312018-12-18 16:24:51 +00001882ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
1883 const tensorflow::GraphDef& graphDef)
1884{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001885 boost::ignore_unused(graphDef);
jimfly01a06bf312018-12-18 16:24:51 +00001886 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
1887 IOutputSlot* input0Slot = inputLayers.first;
1888 IOutputSlot* input1Slot = inputLayers.second;
1889
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001890 ComparisonDescriptor descriptor(ComparisonOperation::Greater);
1891 IConnectableLayer* const layer = m_Network->AddComparisonLayer(descriptor, nodeDef.name().c_str());
jimfly01a06bf312018-12-18 16:24:51 +00001892
kevmay012b4d88e2019-01-24 14:05:09 +00001893 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
jimfly01a06bf312018-12-18 16:24:51 +00001894}
1895
jimfly0184c70e62018-12-19 13:14:46 +00001896ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
1897 const tensorflow::GraphDef& graphDef)
1898{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001899 boost::ignore_unused(graphDef);
jimfly0184c70e62018-12-19 13:14:46 +00001900 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
1901 IOutputSlot* input0Slot = inputLayers.first;
1902 IOutputSlot* input1Slot = inputLayers.second;
1903
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001904 ComparisonDescriptor descriptor(ComparisonOperation::Equal);
1905 IConnectableLayer* const layer = m_Network->AddComparisonLayer(descriptor, nodeDef.name().c_str());
jimfly0184c70e62018-12-19 13:14:46 +00001906
kevmay012b4d88e2019-01-24 14:05:09 +00001907 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
jimfly0184c70e62018-12-19 13:14:46 +00001908}
1909
1910ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
1911 const tensorflow::GraphDef& graphDef)
1912{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001913 boost::ignore_unused(graphDef);
jimfly0184c70e62018-12-19 13:14:46 +00001914 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
1915 IOutputSlot* input0Slot = inputLayers.first;
1916 IOutputSlot* input1Slot = inputLayers.second;
1917
1918 IConnectableLayer* const layer = m_Network->AddMinimumLayer(nodeDef.name().c_str());
1919
1920 return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
1921}
1922
jimfly0123be07e2018-12-04 17:47:22 +00001923ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1924{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001925 boost::ignore_unused(graphDef);
jimfly0123be07e2018-12-04 17:47:22 +00001926 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1927
1928 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1929 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1930
1931 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
1932 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
1933
1934 if (input0Info.GetNumDimensions() == 1)
1935 {
1936 const bool isNHWC = true;
1937 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
1938 }
1939
1940 if (input1Info.GetNumDimensions() == 1)
1941 {
1942 const bool isNHWC = true;
1943 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
1944 }
1945
1946 IConnectableLayer* const layer = m_Network->AddSubtractionLayer(nodeDef.name().c_str());
1947
1948 input0Slot->Connect(layer->GetInputSlot(0));
1949 input1Slot->Connect(layer->GetInputSlot(1));
1950
1951 if (input0Info.GetNumDimensions() == 1)
1952 {
1953 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
1954 }
1955 else
1956 {
1957 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
1958 }
1959
1960 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1961}
1962
jimfly01f6ba7472018-12-04 10:09:52 +00001963unsigned int CheckPaddingTensor(const ConstTensor& paddingTensor,
1964 const TensorInfo& inputTensorInfo,
1965 const std::string& nodeName)
1966{
1967 unsigned int rank = paddingTensor.GetShape()[0];
1968 unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
1969 if (rank != expectedRank)
1970 {
1971 throw ParseException(
1972 boost::str(
1973 boost::format(
1974 "Expected the padding tensor to be of rank %1 not %2 on Node %3 %4.")
1975 % expectedRank
1976 % rank
1977 % nodeName
1978 % CHECK_LOCATION().AsString()));
1979 }
1980 unsigned int second = paddingTensor.GetShape()[1];
1981 if (second != 2)
1982 {
1983 throw ParseException(
1984 boost::str(
1985 boost::format(
1986 "Expected the padding tensor to be of dimensions [%1, 2] not [%1, %2] on Node %3 %4.")
1987 % rank
1988 % second
1989 % nodeName
1990 % CHECK_LOCATION().AsString()));
1991 }
1992 return rank;
1993}
1994
1995TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
1996 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
1997{
1998 unsigned int numDims = inputTensorInfo.GetNumDimensions();
1999 std::vector<unsigned int> outDims;
2000 for (unsigned int i = 0; i < numDims; ++i)
2001 {
2002 unsigned int dimSize = inputTensorInfo.GetShape()[i];
2003 const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
2004 dimSize += dimPadding.first;
2005 dimSize += dimPadding.second;
2006 outDims.push_back(dimSize);
2007 }
2008 TensorInfo paddedTensorInfo = inputTensorInfo;
2009 unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
2010 paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
2011 return paddedTensorInfo;
2012}
2013
2014ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
2015 const tensorflow::GraphDef& graphDef)
2016{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002017 boost::ignore_unused(graphDef);
jimfly01f6ba7472018-12-04 10:09:52 +00002018 // input consists of:
2019 // input[0] the tensor which will be padded
2020 // input[1] the tensor holding the padding values
2021 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2022 IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2023 TensorInfo inputTensorInfo = previousLayerOutputSlot.GetTensorInfo();
2024 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
2025 {
2026 throw ParseException(
2027 boost::str(
2028 boost::format(
2029 "ArmNN only supports Pad with constant padding. "
2030 "Input %1%. Node %2% %3%")
2031 % inputs[1].m_IndexedValue->GetNode().name()
2032 % nodeDef.name()
2033 % CHECK_LOCATION().AsString()));
2034
2035 }
2036 ParsedConstTfOperation<int32_t>* paddingTensorOp =
2037 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2038
2039 std::vector<int32_t> paddingTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002040 ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(paddingTensorData);
jimfly01f6ba7472018-12-04 10:09:52 +00002041 // paddings is an integer tensor with shape [n, 2], where n is the rank of tensor
2042 // and should match the rank of the input tensor that is being padded.
2043 // For each dimension D of input, paddings[D, 0] indicates how many values to add
2044 // before the contents of tensor in that dimension, and paddings[D, 1] indicates how
2045 // many values to add after the contents of tensor in that dimension
2046 // This needs to be translated into a padList for ACL
2047 std::vector<std::pair<unsigned int, unsigned int>> padList;
2048 unsigned int rank = CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
2049 for (unsigned int i = 0; i < rank; ++i)
2050 {
2051 std::pair<unsigned int, unsigned int> paddingForDim;
2052 for (unsigned int j = 0; j < 2; j++)
2053 {
2054 unsigned int index = (i * 2) + j;
2055 int paddingAmount = paddingTensorData[index];
2056 // make sure we can cast to an unsigned value
2057 if (paddingAmount < 0)
2058 {
2059 throw ParseException(
2060 boost::str(
2061 boost::format(
2062 "Negative amount %1 specified at [%2, %3] of padding tensor on Node %4 %5.")
2063 % paddingAmount
2064 % i
2065 % j
2066 % nodeDef.name()
2067 % CHECK_LOCATION().AsString()));
2068 }
2069 if (j == 0)
2070 {
2071 paddingForDim.first = static_cast<unsigned int>(paddingAmount);
2072 }
2073 else
2074 {
2075 paddingForDim.second = static_cast<unsigned int>(paddingAmount);
2076 }
2077 }
2078 padList.push_back(paddingForDim);
2079 }
2080 PadDescriptor padDescriptor(padList);
2081 IConnectableLayer* layer = m_Network->AddPadLayer(padDescriptor, nodeDef.name().c_str());
2082 previousLayerOutputSlot.Connect(layer->GetInputSlot(0));
2083 // Use the padding to calculate the new output tensor shape
2084 TensorInfo outputTensorInfo = CalculatePaddedOutputTensorInfo(inputTensorInfo, padList);
2085 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2086 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2087}
2088
ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);

    // In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
    unsigned int numInputs = static_cast<unsigned int>(nodes.size());

    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);

    // Locate the constant input holding the concatenation axis.
    unsigned int index = GetConstInputIndex(inputs);
    // Get the axis tensor data
    ParsedConstTfOperation<int32_t>* shapeNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);

    std::vector<int32_t> axisTensorData;
    shapeNode->GetConstTensor(axisTensorData);

    // This concatDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
    const unsigned int concatDim = static_cast<unsigned int>(axisTensorData[0]);

    // Armnn supports concatenation along the channel dimension for data formats NHWC and NCHW.
    if (concatDim == 0 || concatDim == 2)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Dimension %1% for concatenation is not supported by Armnn. "
                    "Node %2% %3%")
                    % concatDim
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    // Only 4D input tensors are supported; the last input is the axis, so there
    // is one concat view per remaining input.
    const unsigned int supportedNumDims = 4;
    unsigned int numConcatViews = numInputs - 1;
    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatViews), supportedNumDims);
    concatDescriptor.SetConcatAxis(concatDim);
    TensorShape mergeDims(supportedNumDims);
    unsigned int mergeDim = 0;
    for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
    {
        // NOTE(review): assumes the axis tensor is the last input, so views are at
        // indices [0, numConcatViews) — confirm this matches GetConstInputIndex.
        IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
        TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

        // Double check dimensions of the tensors
        if (inputTensorInfo.GetNumDimensions() != supportedNumDims)
        {
            throw armnn::ParseException(
                boost::str(
                    boost::format(
                        "The number of dimensions: %1% for input tensors of the "
                        "concatenation op should be %2% %3%")
                        % inputTensorInfo.GetNumDimensions()
                        % supportedNumDims
                        % CHECK_LOCATION().AsString()));
        }

        // Copy the input tensor shape to mergeDimSizes and initialize the view origin coordinates for the current input
        mergeDims = inputTensorInfo.GetShape();
        unsigned int* viewOrigin = const_cast<unsigned int*>(concatDescriptor.GetViewOrigin(viewIndex));
        std::fill(viewOrigin, viewOrigin + supportedNumDims, 0);

        // Update the view origin coordinates and the merge dimension value:
        // each view starts where the previous one ended along the concat axis.
        concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
        mergeDim += mergeDims[concatDim];
    }

    // Update the output shape: the concat axis is the sum of all view extents.
    mergeDims[concatDim] = mergeDim;
    armnn::IConnectableLayer *layer = m_Network->AddConcatLayer(concatDescriptor, nodeDef.name().c_str());

    // NOTE(review): output data type is hard-coded to Float32 here — confirm all
    // concat inputs are float in practice.
    layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(mergeDims, DataType::Float32));

    for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
    {
        IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
        inputSlot.Connect(layer->GetInputSlot(viewIndex));
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2174
2175ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
2176 const tensorflow::GraphDef& graphDef)
2177{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002178 boost::ignore_unused(graphDef);
telsoa01c577f2c2018-08-31 09:22:23 +01002179 // Note: the Shape layer is handled in a special way, because:
2180 // 1. ARMNN doesn't support int32 tensors which it outputs.
2181 // 2. ARMNN works with statically shaped tensors which are known at parse time.
surmeh01bceff2f2018-03-29 16:29:27 +01002182 // 3. because of 1. and 2. we treat the output of Shape as a temporary const int32
telsoa01c577f2c2018-08-31 09:22:23 +01002183 // tensor which may be used as an input to other ops, most likely a Reshape.
surmeh01bceff2f2018-03-29 16:29:27 +01002184
2185 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
2186 if (tfDataType != tensorflow::DT_INT32)
2187 {
telsoa01c577f2c2018-08-31 09:22:23 +01002188 throw ParseException(
2189 boost::str(
2190 boost::format(
2191 "Armnn only supports DT_INT32 as out_type. Got %1% for Node %2% %3%")
2192 % tensorflow::DataType_Name(tfDataType)
2193 % nodeDef.name()
2194 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002195 }
2196
2197 const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2198 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2199 const TensorInfo& prevLayerTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2200 unsigned int prevLayerDimensions = prevLayerTensorInfo.GetNumDimensions();
2201
2202 std::vector<int32_t> shapeTensorData;
2203 shapeTensorData.reserve(prevLayerDimensions);
2204
2205 for (unsigned int i=0; i<prevLayerDimensions; ++i)
2206 {
2207 shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.GetShape()[i]));
2208 }
2209
2210 TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
2211
2212 return std::make_unique<ParsedConstTfOperation<int32_t>>(this,
2213 nodeDef,
2214 &shapeTensorData[0],
2215 shapeTensorInfo);
2216}
2217
2218ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
2219 const tensorflow::GraphDef& graphDef)
2220{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002221 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002222 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2223 ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
2224
2225 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2226 {
telsoa01c577f2c2018-08-31 09:22:23 +01002227 throw ParseException(
2228 boost::str(
2229 boost::format(
2230 "ArmNN only supports Reshape layers with constant shapes. "
2231 "Input %1% Node %2% %3%")
2232 % inputs[1].m_IndexedValue->GetNode().name()
2233 % nodeDef.name()
2234 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002235 }
2236 ParsedConstTfOperation<int32_t>* shapeNode =
2237 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2238
2239 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
2240 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2241
2242 std::vector<int32_t> shapeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002243 ConstTensor shapeTensor = shapeNode->GetConstTensor(shapeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002244 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
2245
2246 TensorShape targetShape = outputTensorInfo.GetShape();
2247 ReshapeDescriptor reshapeDesc;
2248 reshapeDesc.m_TargetShape = targetShape;
2249
2250 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2251 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2252 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2253
2254 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2255}
2256
2257ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
2258 const tensorflow::GraphDef& graphDef)
2259{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002260 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002261 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2262
2263 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2264 {
telsoa01c577f2c2018-08-31 09:22:23 +01002265 throw ParseException(
2266 boost::str(
2267 boost::format(
2268 "ArmNN only supports ResizeBilinear layers with constant sizes. "
2269 "Input %1%. Node %2% %3%")
2270 % inputs[1].m_IndexedValue->GetNode().name()
2271 % nodeDef.name()
2272 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002273 }
2274 ParsedConstTfOperation<int32_t>* sizeNode =
2275 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2276
telsoa01c577f2c2018-08-31 09:22:23 +01002277 // Checks the align_corners attribute is not set.
surmeh01bceff2f2018-03-29 16:29:27 +01002278 if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
2279 {
telsoa01c577f2c2018-08-31 09:22:23 +01002280 throw ParseException(
2281 boost::str(
2282 boost::format(
2283 "ArmNN only supports ResizeBilinear layers with align_corners set to false. "
2284 "Node %1% %2%")
2285 % nodeDef.name()
2286 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002287 }
2288
telsoa01c577f2c2018-08-31 09:22:23 +01002289 // Data for the parsed tensor args (size) must be stored locally.
surmeh01bceff2f2018-03-29 16:29:27 +01002290 std::vector<int32_t> sizeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002291 ConstTensor sizeTensor = sizeNode->GetConstTensor(sizeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002292
telsoa01c577f2c2018-08-31 09:22:23 +01002293 // The descriptor only has target height and width attributes, which we get from the size tensor.
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002294 ResizeDescriptor desc;
2295 desc.m_Method = armnn::ResizeMethod::Bilinear;
surmeh01bceff2f2018-03-29 16:29:27 +01002296 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002297 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2298 desc.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002299
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002300 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, nodeDef.name().c_str());
surmeh01bceff2f2018-03-29 16:29:27 +01002301
2302 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2303 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
telsoa01c577f2c2018-08-31 09:22:23 +01002304 // The input shape is always in BHWC format, this will be swizzled below; for now,
2305 // get the batch and channels to make up the ArmNN output shape with the target size.
surmeh01bceff2f2018-03-29 16:29:27 +01002306 unsigned int outBatch = inputTensorInfo.GetShape()[0];
2307 unsigned int outChannels = inputTensorInfo.GetShape()[3];
2308 unsigned int outHeight = desc.m_TargetHeight;
2309 unsigned int outWidth = desc.m_TargetWidth;
jimfly018a121502018-12-06 16:19:52 +00002310 TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
telsoa01c577f2c2018-08-31 09:22:23 +01002311 // The output DataType is always Float32, regardless of the input DataType.
surmeh01bceff2f2018-03-29 16:29:27 +01002312 const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
2313 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2314
jimfly018a121502018-12-06 16:19:52 +00002315 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01002316
2317 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2318}
2319
2320TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
2321{
2322 BOOST_ASSERT(nodeDef.op() == "Squeeze");
2323 tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
2324
2325 DataType type;
2326 if (tfDataType == tensorflow::DT_FLOAT)
2327 {
2328 type = DataType::Float32;
2329 }
2330 else if (tfDataType == tensorflow::DT_INT32)
2331 {
2332 type = DataType::Signed32;
2333 }
2334 else
2335 {
telsoa01c577f2c2018-08-31 09:22:23 +01002336 throw ParseException(
2337 boost::str(
2338 boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
2339 % tensorflow::DataType_Name(tfDataType)
2340 % nodeDef.name()
2341 % CHECK_LOCATION().AsString()));
2342 }
2343
2344
2345 if (inputTensorInfo.GetNumDimensions() > 4)
2346 {
2347 throw ParseException(
2348 boost::str(
2349 boost::format(
2350 "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
2351 % inputTensorInfo.GetNumDimensions()
2352 % nodeDef.name()
2353 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002354 }
2355
2356 std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
telsoa01c577f2c2018-08-31 09:22:23 +01002357 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2358
surmeh01bceff2f2018-03-29 16:29:27 +01002359 if (squeezeDims.empty())
2360 {
telsoa01c577f2c2018-08-31 09:22:23 +01002361 squeezeDims.assign(dimensionSequence,
2362 dimensionSequence+inputTensorInfo.GetNumDimensions());
surmeh01bceff2f2018-03-29 16:29:27 +01002363 }
2364
2365 std::vector<uint32_t> outputDims;
2366 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2367 {
telsoa01c577f2c2018-08-31 09:22:23 +01002368 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2369 auto currentDimension = inputTensorInfo.GetShape()[i];
2370 if (skipSqueeze || currentDimension != 1)
surmeh01bceff2f2018-03-29 16:29:27 +01002371 {
telsoa01c577f2c2018-08-31 09:22:23 +01002372 outputDims.push_back(currentDimension);
surmeh01bceff2f2018-03-29 16:29:27 +01002373 }
2374 }
2375
2376 if (outputDims.size() > 4)
2377 {
telsoa01c577f2c2018-08-31 09:22:23 +01002378 throw ParseException(
2379 boost::str(
2380 boost::format(
2381 "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
2382 % outputDims.size()
2383 % nodeDef.name()
2384 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002385 }
2386
telsoa01c577f2c2018-08-31 09:22:23 +01002387 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2388 outputDims.data());
2389
2390 TensorInfo outTensorInfo = inputTensorInfo;
2391 outTensorInfo.SetShape(outShape);
2392 outTensorInfo.SetDataType(type);
surmeh01bceff2f2018-03-29 16:29:27 +01002393
2394 return outTensorInfo;
2395}
2396
2397ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2398{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002399 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002400 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2401
2402 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2403 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2404
2405 TensorInfo outputInfo;
2406 outputInfo = OutputShapeOfSqueeze(nodeDef, inputTensorInfo);
2407
2408 ReshapeDescriptor reshapeDesc;
2409 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2410 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2411 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2412 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2413
2414 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2415}
2416
2417ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2418{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002419 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002420 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2421
2422 NormalizationDescriptor normalizationDescriptor;
2423 normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2424 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
2425 normalizationDescriptor.m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef, "alpha");
2426 normalizationDescriptor.m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef, "beta");
2427 normalizationDescriptor.m_K = ReadMandatoryNodeFloatAttribute(nodeDef, "bias");
2428 normalizationDescriptor.m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef, "depth_radius");
ruoyan018174f362018-12-04 18:24:08 +00002429 normalizationDescriptor.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002430
2431 // The window size must be an odd value. For a window size of (2 * n + 1), TensorFlow defines depth_radius = n.
2432 normalizationDescriptor.m_NormSize = normalizationDescriptor.m_NormSize * 2 + 1;
2433
2434 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002435 IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor,
2436 nodeDef.name().c_str());
ruoyan018174f362018-12-04 18:24:08 +00002437 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2438 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
surmeh01bceff2f2018-03-29 16:29:27 +01002439
2440 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2441}
2442
/// An ParsedTfOperation for a MatMul node.
/// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because
/// MatMul nodes are often used for the first part of a biased FullyConnected (MatMul followed
/// by Add) and in these cases armnn doesn't need a separate layer for the MatMul.
///
class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    ParsedMatMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
        : DeferredSingleLayerParsedTfOperation(parser, node)
    {
    }

    // Invoked when the deferred layer is actually required, i.e. the MatMul was
    // not fused with a following op; adds a FullyConnected layer with no bias
    // (the nullptr argument).
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
    }
};
2462
2463ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2464{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002465 boost::ignore_unused(graphDef);
2466
telsoa01c577f2c2018-08-31 09:22:23 +01002467 // Defers the creation of the layer (see ParsedMatMulTfOperation).
surmeh01bceff2f2018-03-29 16:29:27 +01002468 return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
2469}
2470
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002471ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2472{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002473 boost::ignore_unused(graphDef);
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002474 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2475 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2476 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2477
2478 if (inputs.size() != 2)
2479 {
2480 throw ParseException(
2481 boost::str(boost::format("Mean expects two inputs!. Got %1% for Node %2% %3%")
2482 % inputs.size()
2483 % nodeDef.name()
2484 % CHECK_LOCATION().AsString()));
2485 }
2486
2487 bool keepDims = ReadMandatoryNodeBoolAttribute(nodeDef, "keep_dims");
2488
2489 ParsedConstTfOperation<int32_t>* axisNode =
2490 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2491
2492 const TensorInfo& axisTensorInfo = axisNode->GetTensorInfo();
2493
2494 ConstTensor axisTensor(axisTensorInfo, axisNode->GetStorage());
2495 const int* axisData = static_cast<const int*>(axisTensor.GetMemoryArea());
2496
2497 TensorInfo outputTensorInfo;
2498 MeanDescriptor meanDescriptor;
2499 meanDescriptor.m_KeepDims = keepDims;
2500
2501 // Negative axis values are supported so that the process requires
2502 // to convert them into the corresponding positive ones.
2503 // Duplicate values are also removed.
2504 std::vector<int> rawAxisVector(axisData, axisData + axisTensorInfo.GetNumElements());
2505 std::set<unsigned int> positiveAxisSet;
2506 int rank = static_cast<int>(inputTensorInfo.GetNumDimensions());
2507
2508 std::transform(rawAxisVector.begin(), rawAxisVector.end(),
2509 std::inserter(positiveAxisSet, positiveAxisSet.begin()),
2510 [rank](int i) -> unsigned int { return static_cast<unsigned int>((i + rank) % rank); });
2511
Derek Lambertibaa177f2019-12-10 22:00:43 +00002512 CalculateReducedOutputTensoInfo(inputTensorInfo, positiveAxisSet, keepDims, outputTensorInfo);
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002513
2514 if (inputTensorInfo.GetNumDimensions() > positiveAxisSet.size())
2515 {
2516 meanDescriptor.m_Axis.assign(positiveAxisSet.begin(), positiveAxisSet.end());
2517 }
2518
2519 IConnectableLayer* layer = m_Network->AddMeanLayer(meanDescriptor, nodeDef.name().c_str());
2520 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2521 inputSlot.Connect(layer->GetInputSlot(0));
2522
2523 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2524}
2525
/// An ParsedTfOperation for a Mul node.
/// Creation of the armnn Mul layer is deferred until it is actually needed, because Mul nodes
/// are also used for the first part of a leaky relu activation function (Mul followed by Maximum)
/// and in these cases armnn doesn't need a separate layer for the Mul.
///
class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    ParsedMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
        : DeferredSingleLayerParsedTfOperation(parser, node)
    {
    }

    // Invoked when the deferred layer is actually required, i.e. the Mul was
    // not fused into a following op; adds a standalone Multiplication layer.
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
    }
};
2545
surmeh01bceff2f2018-03-29 16:29:27 +01002546ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2547{
2548 boost::ignore_unused(graphDef);
2549
telsoa01c577f2c2018-08-31 09:22:23 +01002550 return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002551}
2552
2553ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
2554 const tensorflow::GraphDef& graphDef)
2555{
2556 boost::ignore_unused(graphDef);
2557
2558 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
2559
2560 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
2561
2562 auto it = m_InputShapes.find(nodeDef.name());
2563 if (it == m_InputShapes.end())
2564 {
telsoa01c577f2c2018-08-31 09:22:23 +01002565 throw ParseException(
2566 boost::str(
2567 boost::format(
2568 "Missing input shape for Placeholder '%1%' %2%")
2569 % nodeDef.name()
2570 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002571 }
2572 TensorInfo tensorInfo(it->second, DataType::Float32);
2573
2574 IConnectableLayer* const layer = m_Network->AddInputLayer(layerId, nodeDef.name().c_str());
2575
2576 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2577
2578 TrackInputBinding(layer, layerId, tensorInfo);
2579
2580 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2581}
2582
saoste01bbd40612018-08-28 15:41:51 +01002583ParsedTfOperationPtr TfParser::ParseRealDiv(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2584{
2585 boost::ignore_unused(graphDef);
2586 return AddRealDivLayer(nodeDef);
2587}
2588
surmeh01bceff2f2018-03-29 16:29:27 +01002589ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
2590 const tensorflow::GraphDef& graphDef)
2591{
2592 boost::ignore_unused(graphDef);
2593
2594 ActivationDescriptor activationDesc;
2595 activationDesc.m_Function = ActivationFunction::ReLu;
2596 return AddActivationLayer(nodeDef, activationDesc);
2597}
2598
2599ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
2600 const tensorflow::GraphDef& graphDef)
2601{
2602 boost::ignore_unused(graphDef);
2603
2604 ActivationDescriptor activationDesc;
2605 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2606 activationDesc.m_A = 6.0f;
2607 activationDesc.m_B = 0.0f;
2608
2609 return AddActivationLayer(nodeDef, activationDesc);
2610}
2611
2612ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
2613 const tensorflow::GraphDef& graphDef)
2614{
2615 boost::ignore_unused(graphDef);
2616
2617 ActivationDescriptor activationDesc;
2618 activationDesc.m_Function = ActivationFunction::Sigmoid;
2619
2620 return AddActivationLayer(nodeDef, activationDesc);
2621}
2622
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +00002623ParsedTfOperationPtr TfParser::ParseRsqrt(const tensorflow::NodeDef &nodeDef,
2624 const tensorflow::GraphDef &graphDef)
2625{
2626 boost::ignore_unused(graphDef);
2627
2628 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2629
josh minor4a3c6102020-01-06 16:40:46 -06002630 ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
2631 IConnectableLayer* const layer = m_Network->AddElementwiseUnaryLayer(descriptor, nodeDef.name().c_str());
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +00002632
2633 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2634 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2635 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2636
2637 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2638}
2639
surmeh01bceff2f2018-03-29 16:29:27 +01002640ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
2641 const tensorflow::GraphDef& graphDef)
2642{
2643 boost::ignore_unused(graphDef);
2644
2645 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2646
2647 SoftmaxDescriptor softmaxDescriptor;
2648 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(softmaxDescriptor, nodeDef.name().c_str());
2649
2650 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2651 prevLayerSlot.Connect(layer->GetInputSlot(0));
2652 layer->GetOutputSlot(0).SetTensorInfo(prevLayerSlot.GetTensorInfo());
2653
2654 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2655}
2656
// Parses a TF Split node into an ArmNN Splitter layer.
// Split carries two inputs - a constant int32 axis and the tensor to split -
// whose order is discovered via GetConstInputIndex. The "num_split" attribute
// gives the number of equally sized outputs.
ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef,
    const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);

    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
    unsigned int numInputs = static_cast<unsigned int>(nodes.size());
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);

    // Constant tensor index
    unsigned int index = GetConstInputIndex(inputs);
    // Get the axis tensor data
    ParsedConstTfOperation<int32_t>* shapeNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);

    std::vector<int32_t> axisTensorData;
    shapeNode->GetConstTensor(axisTensorData);

    // This splitDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
    // NOTE(review): a negative TF axis is not normalised here and would wrap to
    // a huge unsigned value - verify against callers / supported models.
    const unsigned int splitDim = static_cast<unsigned int>(axisTensorData[0]);

    // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
    if (splitDim == 0 || splitDim == 2)
    {
        throw armnn::ParseException(
            boost::str(
                boost::format(
                    "Dimension %1% for split is not supported by Armnn. "
                    "Node %2% %3%")
                % splitDim
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    // As Armnn only supports splitter outputs of the same shape, therefore num_split will be limited to an integer.
    uint32_t num_split = ReadMandatoryNodeUint32Attribute(nodeDef, "num_split");

    // The non-constant input (index or 1-index) is the tensor being split.
    IOutputSlot& inputSlot = inputs[1 - index].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1 - index].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    // Only rank-4 inputs are accepted.
    const unsigned int supportedNumDims = 4;
    auto inputDimSize = inputTensorInfo.GetNumDimensions();

    if (inputDimSize != supportedNumDims)
    {
        throw armnn::ParseException(
            boost::str(
                boost::format(
                    "The number of dimensions: %1% for input tensors of the "
                    "split op should be %2% %3%")
                % inputTensorInfo.GetNumDimensions()
                % supportedNumDims
                % CHECK_LOCATION().AsString()));
    }

    std::vector<unsigned int> splitterDimSizes(inputDimSize);

    // Add current input shape to splitterDimSizes
    for (unsigned int i = 0; i < inputDimSize; ++i)
    {
        splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
    }

    if (splitterDimSizes[splitDim] % num_split != 0)
    {
        throw ParseException("Number of splits must evenly divide the dimension");
    }
    // Each view keeps the input shape except along splitDim, which is divided.
    splitterDimSizes[splitDim] /= num_split;

    SplitterDescriptor splitDesc(num_split);
    for (unsigned int g = 0; g < num_split; ++g)
    {
        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
        {
            splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
        }
        // Consecutive views are offset along splitDim by the view size.
        splitDesc.SetViewOriginCoord(g, splitDim, splitterDimSizes[splitDim] * g);
    }

    IConnectableLayer *layer = m_Network->AddSplitterLayer(splitDesc, nodeDef.name().c_str());

    inputSlot.Connect(layer->GetInputSlot(0));

    // All outputs share the same (reduced) shape and the input's data type.
    TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
                                       splitterDimSizes.data());

    for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
    {
        layer->GetOutputSlot(i).SetTensorInfo(armnn::TensorInfo(outShape, inputTensorInfo.GetDataType()));
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2751
surmeh01bceff2f2018-03-29 16:29:27 +01002752ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
2753 const tensorflow::GraphDef& graphDef)
2754{
2755 boost::ignore_unused(graphDef);
2756
2757 ActivationDescriptor activationDesc;
2758 activationDesc.m_Function = ActivationFunction::SoftReLu;
2759
2760 return AddActivationLayer(nodeDef, activationDesc);
2761}
2762
2763ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2764{
2765 boost::ignore_unused(graphDef);
2766
2767 ActivationDescriptor activationDesc;
2768 activationDesc.m_Function = ActivationFunction::TanH;
2769 activationDesc.m_A = 1.0f;
2770 activationDesc.m_B = 1.0f;
2771
2772 return AddActivationLayer(nodeDef, activationDesc);
2773}
2774
2775ParsedTfOperationPtr TfParser::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
2776 ActivationDescriptor& activationDesc)
2777{
2778 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2779
2780 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, nodeDef.name().c_str());
2781
2782 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2783 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2784 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2785 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2786}
2787
2788ParsedTfOperationPtr TfParser::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
2789 const tensorflow::GraphDef& graphDef)
2790{
2791 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
2792}
2793
2794ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
2795 const tensorflow::GraphDef& graphDef)
2796{
2797 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
2798}
2799
2800ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
2801 const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
2802{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002803 boost::ignore_unused(graphDef);
2804
surmeh01bceff2f2018-03-29 16:29:27 +01002805 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2806 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2807 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2808
2809 if (inputs.size() != 1)
2810 {
telsoa01c577f2c2018-08-31 09:22:23 +01002811 throw ParseException(
2812 boost::str(
2813 boost::format(
2814 "2D Pooling expects one input!. Got %1% for Node %2% %3%")
2815 % inputs.size()
2816 % nodeDef.name()
2817 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002818 }
2819
2820 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
2821 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
2822 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
2823 std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef, "ksize"); // size of pool windows
2824
2825 Pooling2dDescriptor pooling2dDescriptor;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002826 pooling2dDescriptor.m_PoolType = pooltype;
2827 pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
surmeh01bceff2f2018-03-29 16:29:27 +01002828 pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;
2829
telsoa01c577f2c2018-08-31 09:22:23 +01002830 CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
FrancisMurtaghf005e312018-12-06 15:26:04 +00002831 DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
2832 pooling2dDescriptor.m_DataLayout = dataLayout;
2833 DataLayoutIndexed dataLayoutIndexed(dataLayout);
telsoa01c577f2c2018-08-31 09:22:23 +01002834
FrancisMurtaghf005e312018-12-06 15:26:04 +00002835 pooling2dDescriptor.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
2836 pooling2dDescriptor.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
2837 pooling2dDescriptor.m_PoolWidth = ksize[dataLayoutIndexed.GetWidthIndex()];
2838 pooling2dDescriptor.m_PoolHeight = ksize[dataLayoutIndexed.GetHeightIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01002839
FrancisMurtaghf005e312018-12-06 15:26:04 +00002840 uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
2841 uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01002842
2843 bool padding = false;
2844 TensorInfo outputInfo;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002845 unsigned int outputHeight = 0;
2846 unsigned int outputWidth = 0;
telsoa01c577f2c2018-08-31 09:22:23 +01002847
2848 CHECK_PADDING_TYPE(nodeDef, paddingString);
2849
surmeh01bceff2f2018-03-29 16:29:27 +01002850 if (paddingString == "SAME")
2851 {
2852 padding = true;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002853
2854 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
2855 static_cast<float>(pooling2dDescriptor.m_StrideY)));
2856 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
2857 static_cast<float>(pooling2dDescriptor.m_StrideX)));
surmeh01bceff2f2018-03-29 16:29:27 +01002858 }
2859 else if (paddingString == "VALID")
2860 {
2861 padding = false;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002862
2863 outputHeight = static_cast<uint32_t>(ceil(
2864 static_cast<float>(inputHeight - pooling2dDescriptor.m_PoolHeight + 1) /
2865 static_cast<float>(pooling2dDescriptor.m_StrideY)));
2866 outputWidth = static_cast<uint32_t>(ceil(
2867 static_cast<float>(inputWidth - pooling2dDescriptor.m_PoolWidth + 1) /
2868 static_cast<float>(pooling2dDescriptor.m_StrideX)));
2869 }
2870
2871 switch (dataLayout)
2872 {
2873 case DataLayout::NHWC:
2874 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
2875 outputHeight,
2876 outputWidth,
2877 inputTensorInfo.GetShape()[3] },
2878 DataType::Float32);
2879 break;
2880 case DataLayout::NCHW:
2881 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
2882 inputTensorInfo.GetShape()[1],
2883 outputHeight,
2884 outputWidth },
2885 DataType::Float32);
2886 break;
surmeh01bceff2f2018-03-29 16:29:27 +01002887 }
surmeh01bceff2f2018-03-29 16:29:27 +01002888
2889 CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX,
FrancisMurtaghf005e312018-12-06 15:26:04 +00002890 pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01002891 CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY,
FrancisMurtaghf005e312018-12-06 15:26:04 +00002892 pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01002893
2894
2895 IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
2896 if (layer == nullptr)
2897 {
telsoa01c577f2c2018-08-31 09:22:23 +01002898 throw ParseException(
2899 boost::str(
2900 boost::format(
2901 "Failed to add pooling2d layer for %1% %2%")
2902 % nodeDef.name()
2903 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002904 }
2905
2906 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2907
FrancisMurtaghf005e312018-12-06 15:26:04 +00002908 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01002909
2910 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2911}
2912
2913ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
2914{
2915 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2916
2917 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2918 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2919
2920 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
2921 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
2922
2923 if (isBiasAdd)
2924 {
2925 // BiasAdd takes bias as a 1D tensor. We need to add a reshape layer to create a 4D tensor
2926 // with the same data in the correct dimension for broadcast in addition.
2927 if(input1Info.GetNumDimensions() != 1)
2928 {
telsoa01c577f2c2018-08-31 09:22:23 +01002929 throw ParseException(
2930 boost::str(
2931 boost::format(
2932 "Unsupported bias for BiasAdd. It should be a 1D vector. "
2933 "Got %1% dimensions for input %2%. Node %3% %4%")
2934 % input1Info.GetNumDimensions()
2935 % inputs[1].m_IndexedValue->GetNode().name()
2936 % nodeDef.name()
2937 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002938 }
2939
2940 const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
surmeh01bceff2f2018-03-29 16:29:27 +01002941
telsoa01c577f2c2018-08-31 09:22:23 +01002942 CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
saoste01bbd40612018-08-28 15:41:51 +01002943 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002944 }
2945 else
2946 {
2947 if (input0Info.GetNumDimensions() == 1)
2948 {
2949 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002950 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002951 }
2952
2953 if (input1Info.GetNumDimensions() == 1)
2954 {
2955 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002956 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002957 }
2958 }
2959
2960 IConnectableLayer* const layer = m_Network->AddAdditionLayer(nodeDef.name().c_str());
2961
2962 input0Slot->Connect(layer->GetInputSlot(0));
2963 input1Slot->Connect(layer->GetInputSlot(1));
2964
Nattapat Chaimanowongfab64f02019-02-15 16:46:24 +00002965 if (input0Info.GetNumDimensions() == input1Info.GetNumDimensions())
2966 {
2967 const TensorShape& input0Shape = input0Info.GetShape();
2968 const TensorShape& input1Shape = input1Info.GetShape();
2969
2970 std::vector<unsigned int> outputShape;
2971 outputShape.reserve(input0Shape.GetNumDimensions());
2972 TensorInfo outputInfo(input0Info);
2973
2974 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
2975 {
2976 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
2977 }
2978
2979 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
2980
2981 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2982 }
2983 else if (input0Info.GetNumDimensions() == 1 && isBiasAdd == false)
surmeh01bceff2f2018-03-29 16:29:27 +01002984 {
2985 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2986 }
2987 else
2988 {
2989 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2990 }
2991
2992 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2993}
2994
saoste01bbd40612018-08-28 15:41:51 +01002995ParsedTfOperationPtr TfParser::AddRealDivLayer(const tensorflow::NodeDef& nodeDef)
2996{
2997 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2998
2999 IConnectableLayer* const layer = m_Network->AddDivisionLayer(nodeDef.name().c_str());
3000 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3001 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3002
3003 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3004 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3005
3006
3007 if (input0NumDims < input1NumDims)
3008 {
3009 const bool isNHWC = true;
3010 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3011 }
3012 if (input1NumDims < input0NumDims)
3013 {
3014 const bool isNHWC = true;
3015 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3016 }
3017
3018 input0Slot->Connect(layer->GetInputSlot(0));
3019 input1Slot->Connect(layer->GetInputSlot(1));
3020
3021 if (input0NumDims < input1NumDims)
3022 {
3023 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3024 }
3025 else
3026 {
3027 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3028
3029 }
3030 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3031}
3032
Sadik Armagan975c09a2018-12-04 10:02:08 +00003033ParsedTfOperationPtr TfParser::AddMaximumLayer(const tensorflow::NodeDef& nodeDef)
3034{
3035 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3036
3037 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3038 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3039
3040 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3041 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3042
3043 if (input0NumDims < input1NumDims)
3044 {
3045 const bool isNHWC = true;
3046 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3047 }
3048 if (input1NumDims < input0NumDims)
3049 {
3050 const bool isNHWC = true;
3051 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3052 }
3053
3054 IConnectableLayer* const layer = m_Network->AddMaximumLayer(nodeDef.name().c_str());
3055
3056 input0Slot->Connect(layer->GetInputSlot(0));
3057 input1Slot->Connect(layer->GetInputSlot(1));
3058
3059 TensorInfo outputInfo = input0Slot->GetTensorInfo();
3060 std::vector<unsigned int> outputShape;
3061
3062 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
3063 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
3064
3065 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
3066 {
3067 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3068 }
3069
3070 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
3071 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3072
3073 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3074}
3075
telsoa01c577f2c2018-08-31 09:22:23 +01003076IConnectableLayer* TfParser::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
3077{
3078 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3079
3080 IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
3081 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3082 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3083
3084 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3085 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3086
3087 if (input0NumDims < input1NumDims)
3088 {
3089 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003090 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01003091 }
3092 if (input1NumDims < input0NumDims)
3093 {
3094 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003095 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01003096 }
3097
3098 input0Slot->Connect(layer->GetInputSlot(0));
3099 input1Slot->Connect(layer->GetInputSlot(1));
3100
3101 if (input0NumDims < input1NumDims)
3102 {
3103 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3104 }
3105 else
3106 {
3107 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3108 }
3109 return layer;
3110}
3111
// Fuses a TensorFlow MatMul node (and, optionally, a following Add node that
// supplies a constant bias) into a single ArmNN FullyConnected layer.
//   matMulNodeDef  - the MatMul node; exactly one of its inputs must be a
//                    constant float weight tensor, the other is the data input.
//   addNodeDef     - optional Add node holding a constant bias; nullptr if the
//                    MatMul has no bias to fuse.
//   armnnLayerName - name given to the created ArmNN layer.
// Throws ParseException if weights/bias are not constant, or if the weight and
// bias shapes are incompatible.
IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
    const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
{
    // Finds bias const (if applicable).
    ParsedConstTfOperation<float>* biasNode = nullptr;
    if (addNodeDef != nullptr)
    {
        std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
        // Finds our inputs. The bias may be either operand of the Add node.
        if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
        {
            biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
        }
        else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
        {
            biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
        }
        else
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "ArmNN only supports fully connected layers with constant bias. "
                        "Inputs %1% and %2%. AddNode %3%. MatMulNode %4% %5%")
                        % addInputs[0].m_IndexedValue->GetNode().name()
                        % addInputs[1].m_IndexedValue->GetNode().name()
                        % addNodeDef->name()
                        % matMulNodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
    }

    // Finds matmul inputs. The constant operand becomes the weights; the other
    // operand (and its output index) is the layer's data input.
    ParsedConstTfOperation<float>* weightNode = nullptr;
    ParsedTfOperation* inputNode = nullptr;
    unsigned int inputIdx = 0;
    std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
    if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
    {
        weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
        inputNode = mulInputs[1].m_IndexedValue;
        inputIdx = mulInputs[1].m_Index;
    }
    else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
    {
        weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
        inputNode = mulInputs[0].m_IndexedValue;
        inputIdx = mulInputs[0].m_Index;
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports fully connected layers with constant weights. "
                    "Inputs %1% and %2%. MatMulNode %3% %4%")
                    % mulInputs[0].m_IndexedValue->GetNode().name()
                    % mulInputs[1].m_IndexedValue->GetNode().name()
                    % matMulNodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    std::vector<float> weightTensorData;
    // Handles weight. weightTensorData must outlive 'weights', which views it.
    ConstTensor weights = weightNode->GetConstTensor(weightTensorData);

    FullyConnectedDescriptor desc;
    desc.m_BiasEnabled = addNodeDef != nullptr;

    IConnectableLayer* layer = nullptr;
    Optional<ConstTensor> optionalBiases;
    std::vector<float> biasTensorData;
    // Makes the layer.
    if (addNodeDef != nullptr)
    {
        ConstTensor biases = biasNode->GetConstTensor(biasTensorData);

        // Bias length must match the weights' output-channel dimension.
        if (weights.GetShape()[1] != biases.GetShape()[0])
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Shape of matmul weights and bias do not match. "
                        "AddNode %1%. MatMulNode %2% %3%")
                        % addNodeDef->name()
                        % matMulNodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }

        optionalBiases = Optional<ConstTensor>(biases);
    }
    layer = m_Network->AddFullyConnectedLayer(desc, weights, optionalBiases, armnnLayerName);

    BOOST_ASSERT(layer != nullptr);

    inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
    unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];

    // Handles output: shape is [batch, output-channels].
    TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    return layer;
}
3215
3216void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
3217{
telsoa01c577f2c2018-08-31 09:22:23 +01003218 // Gets the type of the node (assume float).
surmeh01bceff2f2018-03-29 16:29:27 +01003219 tensorflow::DataType type = tensorflow::DT_FLOAT;
3220 if (nodeDef.attr().count("T") != 0)
3221 {
3222 auto attr = nodeDef.attr().at("T");
3223 type = attr.type();
3224 }
3225 else if (nodeDef.attr().count("dtype") != 0)
3226 {
3227 auto attr = nodeDef.attr().at("dtype");
3228 type = attr.type();
3229 }
3230
Ferran Balaguerc602f292019-02-08 17:09:55 +00003231 if ((type != tensorflow::DT_FLOAT && type != tensorflow::DT_INT32) && nodeDef.op() != "Const")
surmeh01bceff2f2018-03-29 16:29:27 +01003232 {
telsoa01c577f2c2018-08-31 09:22:23 +01003233 throw ParseException(
3234 boost::str(
3235 boost::format(
Ferran Balaguerc602f292019-02-08 17:09:55 +00003236 "Currently only FLOAT and INT32 are supported for tensorflow nodes (apart from Const). "
telsoa01c577f2c2018-08-31 09:22:23 +01003237 "Got %1% for Node %2% %3%")
3238 % tensorflow::DataType_Name(type)
3239 % nodeDef.name()
3240 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003241 }
3242
3243 const std::string& operation = nodeDef.op();
narpra016f37f832018-12-21 18:30:00 +00003244 auto itControlInput = std::find(m_ControlInputs.begin(), m_ControlInputs.end(), operation);
3245 if (itControlInput != m_ControlInputs.end())
3246 {
3247 // We currently allow Control Input from TensorFlow graph but we ignore them from ArmNN graph.
3248 return;
3249 }
surmeh01bceff2f2018-03-29 16:29:27 +01003250 auto it = ms_OperationNameToParsingFunctions.find(operation);
3251 if (it != ms_OperationNameToParsingFunctions.end())
3252 {
3253 auto func = it->second;
3254 ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
3255 ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
3256
telsoa01c577f2c2018-08-31 09:22:23 +01003257 // Stores the parsed operation so that dependent layers can connect to it.
surmeh01bceff2f2018-03-29 16:29:27 +01003258 auto it = m_ParsedTfOperations.find(nodeDef.name());
3259 if (it != m_ParsedTfOperations.end())
3260 {
3261 throw ParseException(boost::str(boost::format("Name %1% used by more than one node") % nodeDef.name()));
3262 }
3263 m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
3264
telsoa01c577f2c2018-08-31 09:22:23 +01003265 // If this node was requested as an output from the network, then adds an ArmNN output layer.
surmeh01bceff2f2018-03-29 16:29:27 +01003266 if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
3267 m_RequestedOutputs.end())
3268 {
3269 auto outId = ParseOutputId(nodeDef.name());
3270 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
3271 IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
3272
3273 TensorInfo tensorInfo = prevSlot.GetTensorInfo();
3274
3275 IConnectableLayer* outputLayer = m_Network->AddOutputLayer(layerId, nodeDef.name().c_str());
3276
3277 prevSlot.Connect(outputLayer->GetInputSlot(0));
3278
3279 TrackOutputBinding(outputLayer, layerId, tensorInfo);
3280 }
3281 }
3282 else
3283 {
telsoa01c577f2c2018-08-31 09:22:23 +01003284 throw ParseException(
3285 boost::str(
3286 boost::format(
3287 "Unsupported operation %1% in tensorflow::GraphDef %2%")
3288 % operation
3289 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003290 }
3291}
3292
// Builds the ArmNN network from a TensorFlow GraphDef:
//  1. indexes every node by name,
//  2. validates that the user-requested inputs and outputs exist,
//  3. topologically sorts the subgraph feeding the requested outputs,
//  4. parses each node in dependency order via LoadNodeDef.
// Throws ParseException if a requested node is missing or the graph is cyclic.
void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
{
    // Adds all nodes to our map.
    m_NodesByName.clear();
    m_NetworkInputsBindingInfo.clear();
    m_NetworkOutputsBindingInfo.clear();

    for (int i = 0; i < graphDef.node_size(); ++i)
    {
        const tensorflow::NodeDef& node = graphDef.node(i);
        m_NodesByName[node.name()] = &node;
    }

    // Checks that the input nodes the user has requested exist.
    for (const auto& pair : m_InputShapes)
    {
        const std::string& requestedInputName = pair.first;
        auto nodeIt = m_NodesByName.find(requestedInputName);
        if (nodeIt == m_NodesByName.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Couldn't find requested input node '%1%' in graph %2%")
                        % requestedInputName
                        % CHECK_LOCATION().AsString()));
        }
    }

    // Finds the output nodes the user requested.
    std::vector<const tensorflow::NodeDef*> targetNodes;
    for (const std::string& requestedOutputName : m_RequestedOutputs)
    {
        auto nodeIt = m_NodesByName.find(requestedOutputName);
        if (nodeIt == m_NodesByName.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Couldn't find requested output node '%1%' in graph %2%")
                        % requestedOutputName
                        % CHECK_LOCATION().AsString()));
        }
        targetNodes.push_back(nodeIt->second);
    }

    // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
    // The lambda supplies each node's input nodes to the sort; nodes that do
    // not feed a requested output are never visited and are simply skipped.
    std::vector<const tensorflow::NodeDef*> sortedNodes;
    if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
        targetNodes,
        [this](const tensorflow::NodeDef* node)
        {
            auto outputs = GetTfInputNodes(*node);
            std::vector<const tensorflow::NodeDef*> nodesOnly;
            for (const auto & o : outputs) {
                nodesOnly.push_back(o.m_IndexedValue);
            }
            return nodesOnly;
        },
        sortedNodes))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Cycle detected in graph %1%")
                    % CHECK_LOCATION().AsString()));
    }

    // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
    for (const auto& it : sortedNodes)
    {
        const tensorflow::NodeDef& currentNode = *it;
        LoadNodeDef(currentNode, graphDef);
    }
}
3368
3369INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
3370 const std::map<std::string, TensorShape>& inputShapes,
3371 const std::vector<std::string>& requestedOutputs)
3372{
3373 FILE* fd = fopen(graphFile, "r");
3374
3375 if (fd == nullptr)
3376 {
telsoa01c577f2c2018-08-31 09:22:23 +01003377 throw FileNotFoundException(
3378 boost::str(
3379 boost::format(
3380 "Graph file %1% failed to open %2%")
3381 % graphFile
3382 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003383 }
3384
telsoa01c577f2c2018-08-31 09:22:23 +01003385 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003386 tensorflow::GraphDef graphDef;
3387 auto input = new google::protobuf::io::FileInputStream(fileno(fd));
3388 bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
3389 delete input;
3390 fclose(fd);
3391
3392 if (!success)
3393 {
telsoa01c577f2c2018-08-31 09:22:23 +01003394 throw ParseException(
3395 boost::str(
3396 boost::format(
3397 "Failed to parse graph file %1%")
3398 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003399 }
3400
3401 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3402}
3403
3404INetworkPtr TfParser::CreateNetworkFromString(const char* protoText,
3405 const std::map<std::string, TensorShape>& inputShapes,
3406 const std::vector<std::string>& requestedOutputs)
3407{
telsoa01c577f2c2018-08-31 09:22:23 +01003408 // Parses the string into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003409 tensorflow::GraphDef graphDef;
3410 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
3411
3412 if (!success)
3413 {
telsoa01c577f2c2018-08-31 09:22:23 +01003414 throw ParseException(
3415 boost::str(
3416 boost::format(
3417 "Failed to parse graph file %1%")
3418 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003419 }
3420
3421 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3422}
3423
3424INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
3425 const std::map<std::string, TensorShape>& inputShapes,
3426 const std::vector<std::string>& requestedOutputs)
3427{
3428 FILE* fd = fopen(graphFile, "rb");
3429
3430 if (fd == nullptr)
3431 {
telsoa01c577f2c2018-08-31 09:22:23 +01003432 throw FileNotFoundException(
3433 boost::str(
3434 boost::format(
3435 "Graph file %1% failed to open %2%")
3436 % graphFile
3437 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003438 }
3439
telsoa01c577f2c2018-08-31 09:22:23 +01003440 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003441 tensorflow::GraphDef graphDef;
3442
3443 google::protobuf::io::FileInputStream inStream(fileno(fd));
3444 google::protobuf::io::CodedInputStream codedStream(&inStream);
3445 codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
3446 bool success = graphDef.ParseFromCodedStream(&codedStream);
3447 fclose(fd);
3448
3449 if (!success)
3450 {
telsoa01c577f2c2018-08-31 09:22:23 +01003451 throw ParseException(
3452 boost::str(
3453 boost::format(
3454 "Failed to parse protobuf file %1% %2%")
3455 % graphFile
3456 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003457 }
3458
3459 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3460}
3461
3462INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
3463 const std::map<std::string, TensorShape>& inputShapes,
3464 const std::vector<std::string>& requestedOutputs)
3465{
3466 m_Network = INetwork::Create();
3467
3468 m_InputShapes = inputShapes;
3469 if (requestedOutputs.size() == 0)
3470 {
telsoa01c577f2c2018-08-31 09:22:23 +01003471 throw ParseException(
3472 boost::str(
3473 boost::format(
3474 "requestedOutputs must have at least one entry %1%")
3475 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003476 }
3477 m_RequestedOutputs = requestedOutputs;
3478
3479 try
3480 {
3481 LoadGraphDef(graphDef);
3482 }
3483 catch (const ParseException& e)
3484 {
3485 Cleanup();
3486 throw e;
3487 }
3488
3489 Cleanup();
3490
3491 return std::move(m_Network);
3492}
3493
3494void TfParser::Cleanup()
3495{
telsoa01c577f2c2018-08-31 09:22:23 +01003496 // Cleanup, in case we reuse this parser.
surmeh01bceff2f2018-03-29 16:29:27 +01003497 m_InputShapes.clear();
3498 m_RequestedOutputs.clear();
3499 m_NodesByName.clear();
3500 m_ParsedTfOperations.clear();
3501}
3502
3503BindingPointInfo TfParser::GetNetworkInputBindingInfo(const std::string& name) const
3504{
3505 return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
3506}
3507
3508BindingPointInfo TfParser::GetNetworkOutputBindingInfo(const std::string& name) const
3509{
3510 return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
3511}
3512
3513std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(const std::string& layerName,
3514 const char* bindingPointDesc,
3515 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3516{
3517 auto it = nameToBindingInfo.find(layerName);
3518 if (it == nameToBindingInfo.end())
3519 {
telsoa01c577f2c2018-08-31 09:22:23 +01003520 throw InvalidArgumentException(
3521 boost::str(
3522 boost::format(
3523 "Unknown %1% '%2%' %3%")
3524 % bindingPointDesc
3525 % layerName
3526 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003527 }
3528 return it->second;
3529}
3530
3531void TfParser::TrackInputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
3532{
3533 return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
3534}
3535
3536void TfParser::TrackOutputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
3537{
3538 return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
3539}
3540
3541void TfParser::TrackBindingPoint(IConnectableLayer* layer,
3542 LayerBindingId id,
3543 const TensorInfo& tensorInfo,
3544 const char* bindingPointDesc,
3545 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3546{
3547 const std::string layerName = layer->GetName();
3548 auto it = nameToBindingInfo.find(layerName);
3549 if (it == nameToBindingInfo.end())
3550 {
3551 nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
3552 }
3553 else
3554 {
telsoa01c577f2c2018-08-31 09:22:23 +01003555 throw ParseException(
3556 boost::str(
3557 boost::format(
3558 "Id %1% used by more than one %2% layer %3%")
3559 % id
3560 % bindingPointDesc
3561 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003562 }
3563}
3564
3565} // namespace armnnTfParser