blob: d65af2365b294a9c46622de19c6971ca09cc0f93 [file] [log] [blame]
surmeh01bceff2f2018-03-29 16:29:27 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
surmeh01bceff2f2018-03-29 16:29:27 +01004//
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00005
surmeh01bceff2f2018-03-29 16:29:27 +01006#include "TfParser.hpp"
7
surmeh01bceff2f2018-03-29 16:29:27 +01008#include <armnn/TypesUtils.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +01009#include <armnn/Descriptors.hpp>
10
Matteo Martincighe011d202019-11-28 11:35:47 +000011#include <armnnUtils/Permute.hpp>
12#include <armnnUtils/DataLayoutIndexed.hpp>
13
surmeh01bceff2f2018-03-29 16:29:27 +010014#include <GraphTopologicalSort.hpp>
Sadik Armagan479045b2018-10-01 11:51:37 +010015#include <ParserHelper.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010016
17#include <google/protobuf/io/zero_copy_stream_impl.h>
18#include <google/protobuf/text_format.h>
19
Derek Lambertibaa177f2019-12-10 22:00:43 +000020#include <tensorflow/core/framework/graph.pb.h>
surmeh01bceff2f2018-03-29 16:29:27 +010021
surmeh01bceff2f2018-03-29 16:29:27 +010022#include <boost/format.hpp>
23#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010024#include <boost/format.hpp>
25#include <boost/numeric/conversion/cast.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010026#include <boost/polymorphic_cast.hpp>
27
surmeh01bceff2f2018-03-29 16:29:27 +010028#include <numeric>
surmeh01bceff2f2018-03-29 16:29:27 +010029
Matteo Martincigh46315822018-11-28 16:22:36 +000030using namespace armnnUtils;
surmeh01bceff2f2018-03-29 16:29:27 +010031using namespace armnn;
32
33namespace armnnTfParser
34{
35namespace
36{
37
38const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
39const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
40
surmeh01bceff2f2018-03-29 16:29:27 +010041
42template <typename Callable>
43void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
44 const std::string& attribName,
45 tensorflow::AttrValue::ValueCase expectedValueCase,
46 Callable callable)
47{
48 auto iter = nodeDef.attr().find(attribName);
49 if (iter != nodeDef.attr().end())
50 {
51 const auto& attrValue = iter->second;
52 if (attrValue.value_case() == expectedValueCase)
53 {
54 callable(attrValue);
55 }
56 else
57 {
telsoa01c577f2c2018-08-31 09:22:23 +010058 throw ParseException(
59 boost::str(
60 boost::format(
61 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
62 "but found %4% instead %5%")
63 % attribName
64 % nodeDef.name()
65 % static_cast<int>(expectedValueCase)
66 % static_cast<int>(attrValue.value_case())
67 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +010068 }
69 }
70 else
71 {
telsoa01c577f2c2018-08-31 09:22:23 +010072 throw ParseException(
73 boost::str(
74 boost::format(
75 "Could not find required attribute %1% in node %2% %3%")
76 % attribName
77 % nodeDef.name()
78 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +010079 }
80}
81
82template <typename Callable>
83void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
84 const std::string& attribName,
85 tensorflow::AttrValue::ValueCase expectedValueCase,
86 Callable callable)
87{
88 auto iter = nodeDef.attr().find(attribName);
89 if (iter != nodeDef.attr().end())
90 {
91 const auto& attrValue = iter->second;
92 if (attrValue.value_case() == expectedValueCase)
93 {
94 callable(attrValue);
95 }
96 else
97 {
telsoa01c577f2c2018-08-31 09:22:23 +010098 throw ParseException(
99 boost::str(
100 boost::format(
101 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
102 "but found %4% instead %5%")
103 % attribName
104 % nodeDef.name()
105 % static_cast<int>(expectedValueCase)
106 % static_cast<int>(attrValue.value_case())
107 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100108 }
109 }
110}
111
112float ReadMandatoryNodeFloatAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
113{
114 float attribValue = 0.0f;
115 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
116 [&attribValue](const tensorflow::AttrValue& attrValue)
117 {
118 attribValue = attrValue.f();
119 });
120 return attribValue;
121}
122
Conor Kennedyc2130a02018-12-05 11:05:54 +0000123int32_t ReadMandatoryNodeInt32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
124{
125 int32_t attribValue = 0u;
126 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
127 [&attribValue](const tensorflow::AttrValue& attrValue)
128 {
129 attribValue = static_cast<int32_t>(attrValue.i());
130 });
131 return attribValue;
132}
133
Ferran Balaguer51dd62f2019-01-11 19:29:18 +0000134bool ReadMandatoryNodeBoolAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
135{
136 bool attribValue = false;
137 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
138 [&attribValue](const tensorflow::AttrValue& attrValue)
139 {
140 attribValue = static_cast<bool>(attrValue.b());
141 });
142 return attribValue;
143}
144
surmeh01bceff2f2018-03-29 16:29:27 +0100145uint32_t ReadMandatoryNodeUint32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
146{
147 uint32_t attribValue = 0u;
148 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
149 [&attribValue](const tensorflow::AttrValue& attrValue)
150 {
151 attribValue = static_cast<uint32_t>(attrValue.i());
152 });
153 return attribValue;
154}
155
156std::string ReadMandatoryNodeStringAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
157{
158 std::string attribValue = "";
159 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
160 [&attribValue](const tensorflow::AttrValue& attrValue)
161 {
162 attribValue = attrValue.s();
163 });
164 return attribValue;
165}
166
167std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
168 const std::string& name)
169{
170 std::vector<uint32_t> attriList;
171 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
172 [&attriList](const tensorflow::AttrValue& attrValue)
173 {
174 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
175 {
176 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
177 }
178 });
179
180 return attriList;
181}
182
183std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
184 const std::string& name)
185{
186 std::vector<uint32_t> attriList;
187 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
188 [&attriList](const tensorflow::AttrValue& attrValue)
189 {
190 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
191 {
192 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
193 }
194 });
195
196 return attriList;
197}
198
Aron Virginas-Tar2e259272019-11-27 13:29:51 +0000199std::string ReadOptionalNodeStringAttribute(const tensorflow::NodeDef& nodeDef,
200 const std::string& name,
201 const std::string& defaultValue = "")
202{
203 std::string attribValue = defaultValue;
204 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
205 [&attribValue](const tensorflow::AttrValue& attrValue)
206 {
207 attribValue = attrValue.s();
208 });
209 return attribValue;
210}
211
surmeh01bceff2f2018-03-29 16:29:27 +0100212bool ReadOptionalNodeBoolAttribute(const tensorflow::NodeDef& nodeDef,
213 const std::string& name,
214 bool defaultValue = false)
215{
216 bool attribValue = defaultValue;
217 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
218 [&attribValue](const tensorflow::AttrValue& attrValue)
219 {
220 attribValue = attrValue.b();
221 });
222 return attribValue;
223}
224
225tensorflow::DataType ReadMandatoryNodeTypeAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
226{
227 tensorflow::DataType attribValue = tensorflow::DT_INVALID;
228 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
229 [&attribValue](const tensorflow::AttrValue& attrValue)
230 {
231 attribValue = attrValue.type();
232 });
233 return attribValue;
234}
235
/// Computes the TensorInfo produced by reshaping 'input' to 'targetDims'.
/// Following TensorFlow's convention, at most one target dimension may be -1
/// ("stretch"); it is inferred from the input's total element count.
/// Throws ParseException if more than one -1 appears.
TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& targetDims)
{
    std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
    const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);

    if (stretchDim != targetDims.end())
    {
        // A second -1 would make the inferred dimension ambiguous.
        if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "At most one component of shape can be -1 %1%")
                    % CHECK_LOCATION().AsString()));
        }

        // Multiplying all target dims (which include exactly one -1) by the -1
        // initial value yields the positive product of the known dimensions;
        // the stretch dimension is then the remainder of the element count.
        auto targetNumElements =
            boost::numeric_cast<unsigned int>(
                std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
        auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
        outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
    }

    // Keep quantization parameters etc. from the input; only the shape changes.
    TensorInfo reshapeInfo = input;
    reshapeInfo.SetShape(TensorShape{ static_cast<unsigned int>(outDims.size()), outDims.data() });

    return reshapeInfo;
}
264
// We need the input0Slot to guide the reshape for input1Slot.
/// Inserts a Reshape layer so the 1-D tensor on 'input1Slot' can broadcast
/// against the tensor on 'input0Slot': its single dimension is moved to the
/// channel position (last dim for NHWC, dim NumDims-3 otherwise) and all other
/// dimensions are set to 1. Returns the reshape layer's output slot.
IOutputSlot* AddBroadcastReshapeLayer(IOutputSlot* input0Slot, IOutputSlot* input1Slot, bool isNHWC,
                                      INetwork& m_Network, const tensorflow::NodeDef& nodeDef)
{
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();
    const TensorInfo inputTensorInfo = input0Slot->GetTensorInfo();
    // Index of the channel dimension in input0's layout.
    const unsigned int matchDim = inputTensorInfo.GetNumDimensions() - (isNHWC ? 1 : 3);
    std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
    std::fill_n(reshapedDimensions.begin(), inputTensorInfo.GetNumDimensions(), 1);
    // input1 is expected to be rank-1; its length lands on the channel dim.
    reshapedDimensions[matchDim] = input1Info.GetShape()[0];

    armnn::TensorInfo reshapedInfo = input1Info;
    reshapedInfo.SetShape(TensorShape{ inputTensorInfo.GetNumDimensions(), reshapedDimensions.data() });

    const std::string reshapeLayerName = "reshape_for-" + nodeDef.name();
    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
    IConnectableLayer* const reshapeLayer = m_Network.AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());

    input1Slot->Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    // Callers should use the reshape's output in place of the original slot.
    input1Slot = &reshapeLayer->GetOutputSlot(0);

    return input1Slot;
}
291
292OutputId ParseOutputId(const std::string & name)
293{
294 unsigned int outputNum = 0;
295 size_t colonPos = name.find_last_of(":");
296 if (colonPos != std::string::npos)
297 {
298 int n = std::stoi(name.substr(colonPos+1));
299 if (n<0 || n>100)
300 {
telsoa01c577f2c2018-08-31 09:22:23 +0100301 throw ParseException(
302 boost::str(
303 boost::format(
304 "Output tensor id is out of range for %1% %2%")
305 % name
306 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100307 }
308 outputNum = static_cast<unsigned int>(n);
309 }
310 return OutputId(name.substr(0,colonPos),outputNum);
311}
312
// Validates that FORMAT is one of the two tensor layouts this parser supports
// ("NHWC" or "NCHW"); otherwise throws a ParseException naming the node and
// its type. Implemented as a macro so the call site's CHECK_LOCATION() and
// node are reported.
#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \
    if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Unsupported data format %1% passed for %2% node %3%. " \
                    "Only NHWC and NCHW supported %4%") \
                % FORMAT \
                % NODE_TYPE \
                % NODE_DEF.name() \
                % CHECK_LOCATION().AsString())); \
    }
326
// Validates that PADDING is one of the two TensorFlow padding schemes this
// parser supports ("SAME" or "VALID"); otherwise throws a ParseException
// naming the node. Macro for the same reason as CHECK_DATA_FORMAT.
#define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
    if(PADDING != "SAME" && PADDING != "VALID" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Only 'SAME' and 'VALID' padding supported. Got %1% for %2% %3%") \
                % PADDING \
                % NODE_DEF.name() \
                % CHECK_LOCATION().AsString())); \
    } \
338
surmeh01bceff2f2018-03-29 16:29:27 +0100339} // namespace
340
/// Dispatch table mapping a TensorFlow op name to the TfParser member function
/// that converts that node into ArmNN layers. Ops not listed here are
/// unsupported by this parser.
const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
    { "Const", &TfParser::ParseConst },
    { "Add", &TfParser::ParseAdd },
    { "AddN", &TfParser::ParseAddN },
    { "BiasAdd", &TfParser::ParseBiasAdd },
    { "Identity", &TfParser::ParseIdentity },
    { "Conv2D", &TfParser::ParseConv2D },
    { "DepthwiseConv2dNative", &TfParser::ParseDepthwiseConv2D },
    { "ExpandDims", &TfParser::ParseExpandDims },
    { "FusedBatchNorm", &TfParser::ParseFusedBatchNorm },
    { "Gather", &TfParser::ParseGather},
    { "Greater", &TfParser::ParseGreater},
    { "ConcatV2", &TfParser::ParseConcat },
    { "LRN", &TfParser::ParseLrn },
    { "MatMul", &TfParser::ParseMatMul },
    { "Mean", &TfParser::ParseMean },
    { "Mul", &TfParser::ParseMul },
    { "Placeholder", &TfParser::ParsePlaceholder },
    { "RealDiv", &TfParser::ParseRealDiv },
    { "Relu", &TfParser::ParseRelu },
    { "Relu6", &TfParser::ParseRelu6 },
    { "Reshape", &TfParser::ParseReshape },
    { "ResizeBilinear", &TfParser::ParseResizeBilinear },
    { "Rsqrt", &TfParser::ParseRsqrt },
    { "Shape", &TfParser::ParseShape },
    { "Squeeze", &TfParser::ParseSqueeze },
    { "Sigmoid", &TfParser::ParseSigmoid },
    { "Softmax", &TfParser::ParseSoftmax },
    { "Softplus", &TfParser::ParseSoftplus },
    { "Split", &TfParser::ParseSplit },
    { "StridedSlice", &TfParser::ParseStridedSlice },
    { "Tanh", &TfParser::ParseTanh },
    { "MaxPool", &TfParser::ParseMaxPool },
    { "AvgPool", &TfParser::ParseAvgPool },
    { "Maximum", &TfParser::ParseMaximum },
    { "Minimum", &TfParser::ParseMinimum },
    { "Equal", &TfParser::ParseEqual },
    { "Pad", &TfParser::ParsePad },
    { "Sub", &TfParser::ParseSub }
};
381
/// Op types treated as pure control inputs; presumably nodes of these types
/// are tolerated in the graph without being converted to ArmNN layers
/// (usage is elsewhere in this file — verify against the loader code).
const std::list<std::string> TfParser::m_ControlInputs = {
    "Assert"
};
385
386ITfParser* ITfParser::CreateRaw()
387{
388 return new TfParser();
389}
390
391ITfParserPtr ITfParser::Create()
392{
393 return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
394}
395
/// Deletes a parser previously obtained from CreateRaw() or Create().
/// Safe to call with nullptr (delete on null is a no-op).
void ITfParser::Destroy(ITfParser* parser)
{
    delete parser;
}
400
/// Computes TensorFlow "SAME"-scheme padding for one spatial dimension.
/// With samePadding == false both outputs are zero ("VALID" padding).
/// Otherwise the output size is ceil(inputSize / stride) and the total
/// padding needed to reach it is split with the front getting the floor
/// of the half and the back getting the remainder.
inline void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
                                 uint32_t filterSize, bool samePadding,
                                 uint32_t* paddingFront, uint32_t* paddingBack) {
    *paddingFront = 0;
    *paddingBack = 0;

    if (!samePadding) {
        return;
    }

    const uint32_t outputSize = (inputSize + stride - 1) / stride;   // ceil division
    const uint32_t paddedSize = (outputSize - 1) * stride + filterSize;
    if (paddedSize > inputSize) {
        const uint32_t totalPadding = paddedSize - inputSize;
        *paddingFront = totalPadding / 2;
        *paddingBack = totalPadding - *paddingFront;
    }
}
416
/// Reference-based convenience wrapper around CalculateSamePadding.
/// Note the argument-order difference: this wrapper takes (input, kernel,
/// stride) while CalculateSamePadding takes (input, stride, kernel).
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 bool samePadding)
{
    CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
}
422
/// An Abstract base class which represents a single tensorflow operation (node)
/// that has been (potentially partially) converted to Armnn.
/// It may not yet have been fully converted into actual Armnn layers.
class ParsedTfOperation
{
public:
    ParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
    : m_Parser(parser)
    , m_Node(node)
    {
    }

    virtual ~ParsedTfOperation() {};

    /// Returns the TensorFlow node this operation was created from.
    const tensorflow::NodeDef& GetNode() const { return m_Node; }

    /// Gets the ArmNN IOutputSlot corresponding to the given output index of the Tensorflow operation.
    /// This may result in the creation of Armnn layers if this was deferred (e.g. see ParsedConstTfOperation).
    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) = 0;

    /// If this operation is an Identity then this will follow return the 'parent' operation (recursively).
    /// The default implementation returns itself (i.e. not an Identity).
    virtual ParsedTfOperation* ResolveIdentityOperations()
    {
        return this;
    }

protected:
    TfParser* m_Parser;                 // Non-owning back-pointer to the parser that created this operation.
    const tensorflow::NodeDef& m_Node;  // Reference member: the NodeDef must outlive this object.
};
453
/// An ParsedTfOperation where the Armnn equivalent is a single layer,
/// with output slots that correspond directly to the Tf node outputs.
class SingleLayerParsedTfOperation : public ParsedTfOperation
{
public:
    SingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node, IConnectableLayer* layer)
    : ParsedTfOperation(parser, node)
    , m_Layer(layer)
    {
    }

    /// Returns the layer output slot with the same index as the requested TF
    /// output. Throws ParseException if the index exceeds the layer's slots.
    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        BOOST_ASSERT(m_Layer);
        // Assumes one-to-one mapping between Tf and armnn output slots.
        unsigned int armnnOutputSlotIdx = tfOutputIndex;
        if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "The requested output slot #%1% "
                        "for %2% does not exist %3%")
                    % armnnOutputSlotIdx
                    % m_Layer->GetName()
                    % CHECK_LOCATION().AsString()));
        }
        return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
    }

protected:
    IConnectableLayer* m_Layer;  // The single backing layer; null until created for deferred subclasses.
};
487
/// A SingleLayerParsedTfOperation for deferred layer creation.
/// The ArmNN layer is only built (via CreateLayerDeferred) the first time one
/// of its output slots is actually requested.
class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
{
public:
    DeferredSingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
    : SingleLayerParsedTfOperation(parser, node, nullptr)
    {
    }

    /// Lazily creates the layer on first use, then delegates to the base class.
    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        if (!m_Layer)
        {
            CreateLayerDeferred();
        }
        return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
    }

private:
    /// Subclasses build the actual ArmNN layer here and store it in m_Layer.
    virtual void CreateLayerDeferred() = 0;
};
509
510
/// Constructs a parser with an empty network: m_Network is initialized to a
/// null pointer with a null deleter.
TfParser::TfParser()
    : m_Network(nullptr, nullptr)
{
}
515
516
517const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
518{
519 if (nodeDef->op() != "Identity")
520 {
521 return nodeDef;
522 }
523
524 if (nodeDef->input_size() != 1)
525 {
telsoa01c577f2c2018-08-31 09:22:23 +0100526 throw ParseException(
527 boost::str(
528 boost::format(
529 "Identity node should have a single input! %1% has %2% inputs %3%")
530 % nodeDef->name()
531 % nodeDef->input_size()
532 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100533 }
534
535 auto it = m_NodesByName.find(nodeDef->input(0));
536 if (it != m_NodesByName.end())
537 {
538 const tensorflow::NodeDef* inputNode = it->second;
539 return ResolveIdentityNode(inputNode);
540 }
541 else
542 {
telsoa01c577f2c2018-08-31 09:22:23 +0100543 throw ParseException(
544 boost::str(
545 boost::format(
546 "Cannot find what the Identity node %1% is linked to! %2%")
547 % nodeDef->name()
548 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100549 }
550}
551
/// Collects the TensorFlow nodes feeding 'nodeDef', paired with the output
/// index used on each connection. Control inputs (names beginning with '^')
/// are skipped, and Const nodes are treated as having no inputs at all.
/// Throws ParseException when a named input cannot be found.
std::vector<OutputOfConstNodeDef>
TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
{
    std::vector<OutputOfConstNodeDef> ret;

    if (nodeDef.op() == "Const")
    {
        // For some reason const node can have "Control Inputs". We ignore them for now.
        return ret;
    }

    ret.reserve(boost::numeric_cast<size_t>(nodeDef.input_size()));
    for (int j = 0; j < nodeDef.input_size(); ++j)
    {
        OutputId outputId = ParseOutputId(nodeDef.input(j));

        if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
        {
            // We currently allow Control Input from TensorFlow graph but we ignore them from ArmNN graph.
            continue;
        }

        auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
        if (inputIt == m_NodesByName.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Can't find node '%1%', which is listed as an input of '%2%' %3%")
                    % nodeDef.input(j)
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }
        ret.push_back(OutputOfConstNodeDef(inputIt->second,outputId.m_Index));
    }

    return ret;
}
590
/// Returns the already-parsed operations feeding 'nodeDef', after validating
/// that the node has exactly 'expectedNumInputs' (non-control) inputs.
/// Identity operations are resolved away so callers see the real producers.
/// Throws ParseException on an input-count mismatch or an unparsed input.
std::vector<OutputOfParsedTfOperation>
TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
                                            std::size_t expectedNumInputs)
{
    // Fetches the tensorflow nodes connected as inputs and validate the size.
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
    const std::size_t numInputs = nodes.size();
    if (numInputs != expectedNumInputs)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Unexpected number of inputs for node %1%. Expected %2%, found %3% %4%")
                % nodeDef.name()
                % expectedNumInputs
                % numInputs
                % CHECK_LOCATION().AsString()));
    }
    // Fetches the corresponding ParsedTfOperation operations
    std::vector<OutputOfParsedTfOperation> result;
    for (auto&& node : nodes)
    {
        auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
        if (it == m_ParsedTfOperations.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Node with name '%1%' has not been parsed %2%")
                    % node.m_IndexedValue->name()
                    % CHECK_LOCATION().AsString()));
        }
        ParsedTfOperation* parsedOp = it->second.get();
        // Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
        parsedOp = parsedOp->ResolveIdentityOperations();
        result.push_back(OutputOfParsedTfOperation(parsedOp,node.m_Index));
    }
    return result;
}
630
/// Creates an ArmNN Addition layer connecting the two given output slots.
/// When the inputs' ranks differ, only the 1D/4D pairing is supported and the
/// 1-D input is reshaped (NHWC layout assumed) so it broadcasts against the
/// 4-D one. The output shape is the element-wise max of the (possibly
/// reshaped) input shapes.
IConnectableLayer* TfParser::CreateAdditionLayer(
            const tensorflow::NodeDef& nodeDef,
            IOutputSlot* input0Slot,
            IOutputSlot* input1Slot,
            const std::string& layerName)
{
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    const unsigned int input0Dim = input0Info.GetNumDimensions();
    const unsigned int input1Dim = input1Info.GetNumDimensions();
    if (input0Dim != input1Dim)
    {
        // broadcasting where input0 and input1 have different number of dimensions
        // is only supported for 1D and 4D tensors pair
        if (input0Dim == 1 && input1Dim == 4)
        {
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
        }
        else if (input0Dim == 4 && input1Dim == 1)
        {
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
        }
        else
        {
            throw ParseException(
                    boost::str(
                            boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
                            % layerName
                            % nodeDef.name()
                            % CHECK_LOCATION().AsString()));
        }
    }
    IConnectableLayer* const layer = m_Network->AddAdditionLayer(layerName.c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // Ensure the output tensor has the correct dimensions even if a broadcast has been done
    TensorInfo outputInfo = input0Slot->GetTensorInfo();
    std::vector<unsigned int> outputShape;

    // Re-read the slots here: they may now point at reshape-layer outputs.
    const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
    const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();

    for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
    {
        outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
    }

    outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return layer;
}
686
687IConnectableLayer* TfParser::CreateAdditionLayer(
688 const tensorflow::NodeDef& nodeDef,
689 IConnectableLayer* layerOne,
690 IConnectableLayer* layerTwo,
691 unsigned int numberOfAddition,
692 unsigned long numberOfLayersToConnect,
693 bool isOdd)
694{
695 IOutputSlot* input0Slot = &layerOne->GetOutputSlot(0);
696 IOutputSlot* input1Slot = &layerTwo->GetOutputSlot(0);
697 std::string layerName(nodeDef.name());
698 if (isOdd || numberOfLayersToConnect != 2)
699 {
700 // we are not connecting the final layer
701 layerName.append("_addN_").append(std::to_string(numberOfAddition));
702 }
703 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
704}
705
706IConnectableLayer* TfParser::CreateAdditionLayer(
707 const tensorflow::NodeDef& nodeDef,
708 const OutputOfParsedTfOperation& opOne,
709 const OutputOfParsedTfOperation& opTwo,
710 unsigned int numberOfAddition)
711{
712 IOutputSlot* input0Slot = &opOne.m_IndexedValue->ResolveArmnnOutputSlot(opOne.m_Index);
713 IOutputSlot* input1Slot = &opTwo.m_IndexedValue->ResolveArmnnOutputSlot(opTwo.m_Index);
714 std::string layerName(nodeDef.name());
715 layerName.append("_addN_").append(std::to_string(numberOfAddition));
716 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
717}
718
719IConnectableLayer* TfParser::CreateAdditionLayer(
720 const tensorflow::NodeDef& nodeDef,
721 const OutputOfParsedTfOperation& op,
722 IConnectableLayer* layer)
723{
724 IOutputSlot* input0Slot = &op.m_IndexedValue->ResolveArmnnOutputSlot(op.m_Index);
725 IOutputSlot* input1Slot = &layer->GetOutputSlot(0);
726 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, nodeDef.name());
727}
728
729ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
730{
Derek Lambertibaa177f2019-12-10 22:00:43 +0000731 boost::ignore_unused(graphDef);
Ferran Balaguerfbdad032018-12-28 18:15:24 +0000732 uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
733 if (numberOfInputs < 2)
734 {
735 // should never happen
736 throw ParseException(
737 boost::str(
738 boost::format(
739 "AddN Node with name '%1%' has less than two (%2) inputs %3%")
740 % nodeDef.name()
741 % std::to_string(numberOfInputs)
742 % CHECK_LOCATION().AsString()));
743 }
744 else if (numberOfInputs == 2)
745 {
746 //this is the same as a simple Add operation
747 return AddAdditionLayer(nodeDef, false);
748 }
749 else
750 {
751 // build a binary tree of Add layers and return the final Add as the return from the function
752 // if we have an odd number of inputs then the final Add will consist of a layer connecting to an
753 // OutputOfParsedTfOperation, otherwise it will be two layers being added together
754 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numberOfInputs);
755 unsigned int numberOfAdditions = 0;
756 std::vector<IConnectableLayer*> layers;
757 // NOTE: at this point we will have a minimum of three inputs
758 for (unsigned int i = 0; i < numberOfInputs; ++i)
759 {
760 // every time i is odd we have two inputs to process.
761 bool onSecondItem = i % 2;
762 if (onSecondItem)
763 {
764 ++numberOfAdditions;
765 IConnectableLayer* newLayer = CreateAdditionLayer(
766 nodeDef, inputs[ i - 1], inputs[i], numberOfAdditions);
767 layers.push_back(newLayer);
768 }
769 }
770
771 std::vector<IConnectableLayer*> layersToConnect(layers);
772 unsigned long numberOfLayersToConnect = layersToConnect.size();
773 bool isOdd = numberOfInputs % 2;
774
775 while (numberOfLayersToConnect > 1)
776 {
777 layers.clear();
778 for (unsigned long i = 0; i < numberOfLayersToConnect; ++i) {
779 bool onSecondItem = i % 2;
780 if (onSecondItem) {
781 ++numberOfAdditions;
782 IConnectableLayer* newLayer = CreateAdditionLayer(
783 nodeDef,
784 layersToConnect[i - 1],
785 layersToConnect[i],
786 numberOfAdditions,
787 numberOfLayersToConnect,
788 isOdd);
789 layers.push_back(newLayer);
790 }
791 }
792 //OK... need to go again... maybe
793 layersToConnect = layers;
794 numberOfLayersToConnect = layersToConnect.size();
795 }
796 IConnectableLayer* finalLayer = layersToConnect[0];
797 // if we had an odd number of inputs we need to connect the final layer to the
798 // last OutputOfParsedTfOperation in order to create the last Add layer we will
799 // be handing back.
800 if (isOdd)
801 {
802 // connect the final layer to the last op
803 finalLayer = CreateAdditionLayer(nodeDef, inputs[numberOfInputs - 1], finalLayer);
804 }
805 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, finalLayer);
806 }
807}
808
/// Parses a TensorFlow Add node. When one input is a MatMul and the other a
/// parsed constant float tensor, the pair is fused into a single ArmNN
/// FullyConnected layer (MatMul weights plus Add bias); otherwise a plain
/// Addition layer is created.
ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
    // together as FullyConnected.
    if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
        HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        IConnectableLayer* layer =
            AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
                                   &nodeDef,nodeDef.name().c_str());
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
    }
    else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
                                         inputs[1].m_IndexedValue->GetNode().op() == "MatMul")
    {
        // Same fusion with the operands swapped.
        IConnectableLayer* layer =
            AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
                                   &nodeDef,nodeDef.name().c_str());
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
    }
    else
    {
        // Otherwise it's just a regular addition.
        return AddAdditionLayer(nodeDef);
    }
}
838
/// Parses a TensorFlow BiasAdd node as an ArmNN addition; the 'true' flag
/// tells AddAdditionLayer this is a BiasAdd (presumably enabling the bias
/// broadcast path — see AddAdditionLayer for the exact semantics).
ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);
    return AddAdditionLayer(nodeDef, true);
}
844
/// A ParsedTfOperation which forwards to another (used for Identity nodes).
class ParsedIdentityTfOperation : public ParsedTfOperation
{
public:
    /// @param parser          Owning parser, stored by the base class.
    /// @param node            The TensorFlow Identity node this object represents.
    /// @param representative  Parsed operation whose outputs this node exposes. Non-owning.
    ParsedIdentityTfOperation(TfParser* parser, const tensorflow::NodeDef& node, ParsedTfOperation* representative)
        : ParsedTfOperation(parser, node)
        , m_Representative(representative)
    {
    }

    /// Forwards output-slot resolution straight to the represented operation.
    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        BOOST_ASSERT(m_Representative);
        return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
    }

    /// Recurses into the represented operation, so a chain of Identity nodes
    /// collapses to the first non-identity operation.
    virtual ParsedTfOperation* ResolveIdentityOperations() override
    {
        return m_Representative->ResolveIdentityOperations();
    }

private:
    /// The operation this Identity node forwards to. Non-owning pointer.
    ParsedTfOperation* m_Representative;
};
869
870ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
871{
Derek Lambertibaa177f2019-12-10 22:00:43 +0000872 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +0100873 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
874 // Any requests for the output slots of this node should be forwarded to the node connected as input.
875 return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
876}
877
/// An ParsedTfOperation for a Const node.
/// Creation of the armnn ConstLayer is deferred until it is actually needed, because Const nodes are mostly used
/// for weight inputs to MatMul/Conv2D nodes and in these cases armnn doesn't need a ConstLayer.
template <typename T>
class ParsedConstTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    /// Copies the tensor data into m_Storage so its lifetime is tied to this object.
    /// @param tensorData  Must point at tensorInfo.GetNumElements() elements of type T.
    ParsedConstTfOperation(TfParser* parser, const tensorflow::NodeDef& node,
        const T* tensorData, const TensorInfo& tensorInfo)
        : DeferredSingleLayerParsedTfOperation(parser, node),
        m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
        m_TensorInfo(tensorInfo)
    {
        // The template parameter must match the element type described by the tensor info.
        BOOST_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
    }

    /// Creates the armnn ConstantLayer on first demand (base class calls this lazily).
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
        m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
    }

    /// Copies the stored data into the caller-provided vector and returns a
    /// ConstTensor viewing that vector; the result is only valid while
    /// outputTensorData stays alive and unmodified.
    ConstTensor GetConstTensor(std::vector<T>& outputTensorData) const
    {
        outputTensorData.resize(m_TensorInfo.GetNumElements());

        memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.GetNumBytes());

        // Updates the result to point to the user provided storage.
        ConstTensor constTensor(m_TensorInfo, outputTensorData);
        return constTensor;
    }

    /// Direct, non-owning read access to the internally stored elements.
    const T* GetStorage() const
    {
        return m_Storage.data();
    }

    const TensorInfo& GetTensorInfo() const
    {
        return m_TensorInfo;
    }

private:
    ///< Manages the lifetime of the tensor data.
    std::vector<T> m_Storage;
    ///< Describes the layout of the tensor and points to the data in m_Storage.
    TensorInfo m_TensorInfo;
};
928
telsoa01c577f2c2018-08-31 09:22:23 +0100929DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
930 const tensorflow::NodeDef& nodeDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100931{
932 switch (tfDataType)
933 {
934 case tensorflow::DT_FLOAT:
935 return DataType::Float32;
936 break;
937 case tensorflow::DT_INT32:
938 return DataType::Signed32;
939 break;
940 default:
telsoa01c577f2c2018-08-31 09:22:23 +0100941 throw ParseException(
942 boost::str(
943 boost::format(
944 "Unknown DataType %1% for node %2% %3%")
945 % tensorflow::DataType_Name(tfDataType)
946 % nodeDef.name()
947 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100948 }
949}
950
951struct ParseTfTensorValueList
952{
953 template<typename DataType>
954 static void Parse(
955 const tensorflow::TensorProto& tfTensor,
956 unsigned int dstElements,
957 std::vector<int8_t>& outputData);
958
959 template <typename DataType>
960 static void ReadData(const void* srcData, unsigned int numSrcElements,
961 std::vector<int8_t>& dstData, unsigned int numDstElements)
962 {
telsoa01c577f2c2018-08-31 09:22:23 +0100963 // If there are no entries in the list, perform no action.
surmeh01bceff2f2018-03-29 16:29:27 +0100964 if (numSrcElements == 0)
965 {
966 return;
967 }
968
telsoa01c577f2c2018-08-31 09:22:23 +0100969 // If no size was provided, use the length of the value list.
surmeh01bceff2f2018-03-29 16:29:27 +0100970 if (numDstElements == 0)
971 {
972 numDstElements = numSrcElements;
973 }
974
telsoa01c577f2c2018-08-31 09:22:23 +0100975 // Allocates memory.
surmeh01bceff2f2018-03-29 16:29:27 +0100976 dstData.resize(std::max(numSrcElements, numDstElements) * sizeof(DataType));
977
978 const DataType* srcTensor = reinterpret_cast<const DataType*>(srcData);
979 DataType* dstTensor = reinterpret_cast<DataType*>(dstData.data());
980
telsoa01c577f2c2018-08-31 09:22:23 +0100981 // Copies the value list entries into the destination.
surmeh01bceff2f2018-03-29 16:29:27 +0100982 std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);
983
984 if (numDstElements > numSrcElements)
985 {
telsoa01c577f2c2018-08-31 09:22:23 +0100986 // Uses the last element in the list to fill the remaining entries.
surmeh01bceff2f2018-03-29 16:29:27 +0100987 std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
988 }
989 }
990
991};
992
993template <>
994void ParseTfTensorValueList::Parse<float>(const tensorflow::TensorProto& tfTensor,
995 unsigned int dstElements, std::vector<int8_t>& outputData)
996{
997 ReadData<float>(tfTensor.float_val().data(), static_cast<unsigned int>(tfTensor.float_val_size()),
998 outputData, dstElements);
999}
1000
1001template <>
1002void ParseTfTensorValueList::Parse<int32_t>(const tensorflow::TensorProto& tfTensor,
1003 unsigned int dstElements, std::vector<int8_t>& outputData)
1004{
1005 ReadData<int32_t>(tfTensor.int_val().data(), static_cast<unsigned int>(tfTensor.int_val_size()),
1006 outputData, dstElements);
1007}
1008
/// Factory that constructs an OperatorType<DataType> parsed operation, forwarding
/// any extra arguments; used with InvokeParseFunction to select DataType at runtime.
template <template<typename> class OperatorType, typename T = int8_t>
struct MakeTfOperation
{
    template<typename DataType, class... Args>
    inline static std::unique_ptr<OperatorType<DataType>> Parse(TfParser* parser, const tensorflow::NodeDef& node,
        Args&&... args)
    {
        return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
    }
};
1019
/// Specialization for ParsedConstTfOperation: reinterprets the raw int8_t buffer
/// as DataType elements before handing it to the operation's constructor.
template <>
struct MakeTfOperation<ParsedConstTfOperation>
{
    template<typename DataType, class... Args>
    inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(TfParser* parser,
        const tensorflow::NodeDef& node, const std::vector<int8_t>& tensorData, const TensorInfo& tensorInfo)
    {
        return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
            reinterpret_cast<const DataType*>(tensorData.data()), tensorInfo);
    }
};
1031
1032template <class FuncType>
1033struct InvokeParseFunction
1034{
1035 template<class ResType, class... Args>
1036 inline static ResType Result(DataType dataType, Args&&... args)
1037 {
1038 if (dataType == DataType::Float32)
1039 {
1040 return FuncType::template Parse<float>(std::forward<Args>(args)...);
1041 }
1042 else if (dataType == DataType::Signed32)
1043 {
1044 return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1045 }
1046
1047 return ResType();
1048 }
1049
1050 template<class... Args>
1051 inline static void Result(DataType dataType, Args&&... args)
1052 {
1053 if (dataType == DataType::Float32)
1054 {
1055 FuncType::template Parse<float>(std::forward<Args>(args)...);
1056 }
1057 else if (dataType == DataType::Signed32)
1058 {
1059 FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1060 }
1061 }
1062};
1063
/// Parses a TF Const node into a deferred ParsedConstTfOperation.
/// Tensor data may come either from the "value" attribute's explicit value list
/// (possibly shorter than the shape, in which case the last value is repeated)
/// or from its packed tensor_content bytes (which require an explicit shape).
/// @throws ParseException if the node has no value, no usable data, or the data
///         exceeds what the declared shape allows.
ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);
    BOOST_ASSERT(nodeDef.op() == "Const");

    if (nodeDef.attr().count("value") == 0)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Value not found for Const node - %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
    const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
    const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "dtype");

    const auto GetDimensionSize = [](auto& d) { return d.size(); };

    // Collect the declared shape (may be empty when only a value list is given).
    std::vector<unsigned int> dimensionSizes;
    std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
        std::back_inserter(dimensionSizes), GetDimensionSize);

    // Calculates number of elements.
    const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
    unsigned int numElements = 0U;

    if (!dimensionSizes.empty())
    {
        numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
                                      1U, std::multiplies<unsigned int>());
    }

    std::vector<int8_t> tensorData;

    // Get tensor data from the list of values attribute.
    if (tfTensor.tensor_content().empty())
    {
        InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);

        // If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
        // tensor of the provided number of elements.
        if (numElements == 0)
        {
            const unsigned int tfNumElements =
                static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
            dimensionSizes.push_back(tfNumElements);
        }
    }
    // Gets tensor data from tensor content attribute.
    else
    {
        tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());

        // Checks if a tensor shape is defined for the tensor content.
        // Packed bytes carry no element count of their own, so a shape is mandatory here.
        if (numElements == 0)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "No tensor shape found for Const node - %1% %2%")
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
    }

    // Const node requires at least a list of values or a content attribute.
    if (tensorData.empty())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "No tensor data found for Const node - %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
                                dimensionSizes.data(),
                                dataType);

    // If we have a list of values, then the length of the list must be
    // less than or equal to the number of elements implied by the shape argument.
    if (tensorData.size() > tensorInfo.GetNumBytes())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Number of elements (%1%) should be less than or equal "
                    "to the number of elements implied by the shape argument (%2%) for Const node - %3% %4%")
                    % (tensorData.size() / GetDataTypeSize(dataType))
                    % tensorInfo.GetNumElements()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    // Instantiate ParsedConstTfOperation<float> or <int32_t> depending on dataType.
    return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
        dataType, this, nodeDef, tensorData, tensorInfo);
}
1165
1166template<typename Type>
1167bool TfParser::HasParsedConstTensor(const std::string & nodeName) const
1168{
1169 auto it = m_ParsedTfOperations.find(nodeName);
jimfly01f6ba7472018-12-04 10:09:52 +00001170 if (it == m_ParsedTfOperations.end())
surmeh01bceff2f2018-03-29 16:29:27 +01001171 {
1172 return false;
1173 }
jimfly01f6ba7472018-12-04 10:09:52 +00001174 return dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) != nullptr;
1175}
1176
1177template<typename Type>
1178bool TfParser::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const
1179{
1180 return dynamic_cast<ParsedConstTfOperation<Type>*>(parsedTfOpPtr) != nullptr;
surmeh01bceff2f2018-03-29 16:29:27 +01001181}
1182
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00001183unsigned int TfParser::GetConstInputIndex(const std::vector<OutputOfParsedTfOperation>& inputs)
1184{
1185 for (unsigned int i = 0; i < inputs.size(); i++)
1186 {
1187 if (HasParsedConstTensor<int32_t>(inputs[i].m_IndexedValue->GetNode().name()))
1188 {
1189 return i;
1190 }
1191 }
1192 throw ParseException(
1193 boost::str(
1194 boost::format(
1195 "ArmNN only supports operators with constant axis. %1%")
1196 % CHECK_LOCATION().AsString()));
1197
1198}
1199
/// Parses a TF Conv2D node into an ArmNN Convolution2d layer.
/// Requirements enforced here: the weight input must be a parsed float constant,
/// dilations (if given) must all be 1, data_format must be NHWC or NCHW, and
/// padding must be SAME or VALID. Bias is never enabled (a following BiasAdd
/// handles it). Output shape is computed to mirror TensorFlow's SAME/VALID rules.
ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    // Input 1 is the filter; it must already have been parsed as a float constant.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Convolution layers with constant weights for %1%, input %2% %3%")
                    % nodeDef.name()
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    // Read the dilations, if present - only [1,1,1,1] (the default) is supported.
    std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
    if (!dilations.empty())
    {
        for (auto dilation : dilations)
        {
            if (dilation != 1u)
            {
                throw ParseException(
                    boost::str(
                        boost::format(
                            "ArmNN only supports Convolution layers with dilations [1,1,1,1] for %1% %2%")
                            % nodeDef.name()
                            % CHECK_LOCATION().AsString()));
            }
        }
    }

    Convolution2dDescriptor desc;
    // Bias is not fused here; TF expresses it as a separate BiasAdd node.
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    // DataLayoutIndexed maps "height"/"width" to the right axis for the layout.
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth  = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // Tensorflow weights are [H, W, In, Out].
    // ArmNN weights have to be [Out, H, W, In] when the data layout is NHWC,
    // and [Out, In, H, W] when the data layout is NCHW.
    PermutationVector permutationVector =
        dataLayout == DataLayout::NHWC ?
            std::initializer_list<unsigned int>{ 1, 2, 3, 0 } : // NHWC: [H, W, In, Out] -> [Out, H, W, In]
            std::initializer_list<unsigned int>{ 2, 3, 1, 0 };  // NCHW: [H, W, In, Out] -> [Out, In, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    uint32_t weightHeight = weightTensor.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t weightWidth  = weightTensor.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    // Output extents follow TF's formulas: SAME = ceil(in / stride),
    // VALID = ceil((in - filter + 1) / stride).
    if (paddingString == "SAME")
    {
        padding = true;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth  = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                  static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth  = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                  static_cast<float>(desc.m_StrideX)));
    }

    // Swizzled weights have the output-channel count in dimension 0 for both layouts.
    switch (dataLayout)
    {
    case DataLayout::NHWC:
        outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                  outputHeight,
                                  outputWidth,
                                  weightTensor.GetShape()[0] },
                                DataType::Float32);
        break;
    case DataLayout::NCHW:
    default:
        outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                  weightTensor.GetShape()[0],
                                  outputHeight,
                                  outputWidth },
                                DataType::Float32);
        break;
    }

    // Derive explicit pad values from the SAME/VALID choice.
    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc,
                                                                weightTensor,
                                                                EmptyOptional(),
                                                                nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1341
/// Parses a TF DepthwiseConv2dNative node into an ArmNN DepthwiseConvolution2d layer.
/// The weight input must be a parsed float constant; data_format must be NHWC or
/// NCHW and padding SAME or VALID. Bias is never enabled here. TF depthwise
/// weights are [H, W, I, M] (M = channel multiplier); the output channel count is
/// I * M.
ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
                                                    const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    // Input 1 is the filter; it must already have been parsed as a float constant.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Depthwise Convolution layer with constant weights. "
                    "Non const input found %1% for node %2% %3%")
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    DepthwiseConvolution2dDescriptor desc;
    // Bias is not fused here; TF expresses it as a separate BiasAdd node.
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    // DataLayoutIndexed maps "height"/"width" to the right axis for the layout.
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth  = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // Tensorflow weights come in the format [H, W, I, M].
    // ArmNN weights have to be [M, I, H, W].
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    // After swizzling to [M, I, H, W], height and width are dimensions 2 and 3.
    uint32_t weightHeight = weightTensor.GetShape()[2];
    uint32_t weightWidth = weightTensor.GetShape()[3];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    // Output extents follow TF's formulas: SAME = ceil(in / stride),
    // VALID = ceil((in - filter + 1) / stride).
    if (paddingString == "SAME")
    {
        padding = true;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth  = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                  static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth  = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                  static_cast<float>(desc.m_StrideX)));
    }

    // Output channel count is M * I (shape dims 0 and 1 of the swizzled weights).
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      weightTensor.GetShape()[0] * weightTensor.GetShape()[1]},
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
        default:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

    // Derive explicit pad values from the SAME/VALID choice.
    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                                        weightTensor,
                                                                        EmptyOptional(),
                                                                        nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1463
Conor Kennedyc2130a02018-12-05 11:05:54 +00001464TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
1465{
1466 BOOST_ASSERT(nodeDef.op() == "ExpandDims");
1467
1468 if (inputTensorInfo.GetNumDimensions() > 4) {
1469 throw ParseException(
1470 boost::str(
1471 boost::format(
1472 "Unsupported number of dimensions: %1% for input shape for ExpandDims %2% %3%")
1473 % inputTensorInfo.GetNumDimensions()
1474 % nodeDef.name()
1475 % CHECK_LOCATION().AsString()));
1476 }
1477
1478 std::int32_t expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");
1479
1480 std::int32_t inputDimSize = boost::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
1481 std::vector<uint32_t> outputDims;
1482
1483 // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
1484 if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1485 {
1486 // add current input shape to outputDims
1487 for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
1488 auto currentDimension = inputTensorInfo.GetShape()[i];
1489 outputDims.push_back(currentDimension);
1490 }
1491
1492 // insert a dimension of 1 at index 'expandDim' of inputs shape
1493 if (expandDim >= 0)
1494 {
1495 auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1496 outputDims.insert(getPosition, 1);
1497 }
1498
1499 // if negative number for 'expandDim' then count backwards from the last element
1500 // and insert 1 dimension at index 'expandDim'
1501 if (expandDim < 0)
1502 {
Matteo Martincighd7cceeb2018-12-06 09:06:29 +00001503 int outputDimSize = boost::numeric_cast<int>(outputDims.size() + 1);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001504 auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1505 outputDims.insert(getPosition, 1);
1506 }
1507 }
1508 else
1509 {
1510 throw InvalidArgumentException(
1511 boost::str(
1512 boost::format(
1513 "Cannot expand dimension %1% in input tensor with %2% dimension %3%")
1514 % expandDim
1515 % inputDimSize
1516 % CHECK_LOCATION().AsString()));
1517 }
1518
1519 if (outputDims.size() > 4)
1520 {
1521 throw ParseException(
1522 boost::str(
1523 boost::format(
1524 "Unsupported number of dimensions: %1% for output shape for ExpandDims %2% %3%")
1525 % outputDims.size()
1526 % nodeDef.name()
1527 % CHECK_LOCATION().AsString()));
1528 }
1529
1530 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1531 outputDims.data());
1532
1533 TensorInfo outTensorInfo = inputTensorInfo;
1534 outTensorInfo.SetShape(outShape);
1535
1536 return outTensorInfo;
1537}
1538
1539ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1540{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001541 boost::ignore_unused(graphDef);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001542 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1543
1544 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1545 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1546
1547 TensorInfo outputInfo;
1548 outputInfo = OutputShapeOfExpandDims(nodeDef, inputTensorInfo);
1549
1550 ReshapeDescriptor reshapeDesc;
1551 reshapeDesc.m_TargetShape = outputInfo.GetShape();
1552 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1553 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1554 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1555
1556 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1557}
1558
ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
                                                   const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);
    // FusedBatchNorm has five inputs: x, scale (gamma), offset (beta), mean and variance.
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);

    // Inputs 1-4 (scale, offset, mean, variance) must be constants, because ArmNN's batch
    // normalization layer takes them as ConstTensor arguments at network construction time.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant scale. "
                    "Input %1%. Node %2% %3%")
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* scaleNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant offset. "
                    "Input %1%. Node %2% %3%")
                    % inputs[2].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* offsetNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);

    if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant mean. "
                    "Input %1%. Node %2% %3%")
                    % inputs[3].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* meanNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);

    if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant variance. "
                    "Input %1%. Node %2% %3%")
                    % inputs[4].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* varianceNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);

    // "data_format" defaults to NHWC when the attribute is absent; only NHWC/NCHW are accepted.
    const std::string dataFormat = ReadOptionalNodeStringAttribute(nodeDef, "data_format", "NHWC");
    CHECK_DATA_FORMAT(nodeDef, dataFormat, "FusedBatchNorm");

    // The descriptor only has the epsilon attribute.
    BatchNormalizationDescriptor desc;
    desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
    desc.m_DataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
    // locally until the layer is added: the ConstTensor objects below reference the
    // backing vectors, so the vectors have to outlive the AddBatchNormalizationLayer call.
    std::vector<float> scaleTensorData;
    ConstTensor scaleTensor = scaleNode->GetConstTensor(scaleTensorData);

    std::vector<float> offsetTensorData;
    ConstTensor offsetTensor = offsetNode->GetConstTensor(offsetTensorData);

    std::vector<float> meanTensorData;
    ConstTensor meanTensor = meanNode->GetConstTensor(meanTensorData);

    std::vector<float> varianceTensorData;
    ConstTensor varianceTensor = varianceNode->GetConstTensor(varianceTensorData);

    IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
                                                                    meanTensor,
                                                                    varianceTensor,
                                                                    offsetTensor,
                                                                    scaleTensor,
                                                                    nodeDef.name().c_str());

    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);

    // Batch normalization is shape-preserving: reuse the input tensor info for the output.
    layer->GetOutputSlot(0).SetTensorInfo(inputSlot.GetTensorInfo());
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1657
telsoa01c577f2c2018-08-31 09:22:23 +01001658bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
1659 size_t alphaLayerIndex,
1660 const OutputOfParsedTfOperation& otherOp,
1661 armnn::IOutputSlot** outputOfLeakyRelu,
1662 armnn::ActivationDescriptor & desc)
1663{
1664 const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();
1665
1666 // Verifying all these assumptions hold:
1667 //
1668 // 1, the mulNodeDef is an elementwise multiplication node "Mul"
1669 // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
1670 // 3, the inputLayerIndex selects a layer which has the same name as otherNodeDef
1671 //
1672
1673 if (mulNodeDef.op() == "Mul")
1674 {
1675 size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
1676 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
1677
1678 BOOST_ASSERT(inputs.size() == 2);
1679 BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
1680 BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
1681 BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
1682
1683 if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
1684 {
1685 if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
1686 {
1687 ParsedConstTfOperation<float>* alpha =
1688 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(
1689 inputs[alphaLayerIndex].m_IndexedValue);
1690
1691 std::vector<float> const_data;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001692 ConstTensor const_tensor = alpha->GetConstTensor(const_data);
telsoa01c577f2c2018-08-31 09:22:23 +01001693
1694 if (const_data.size() == 1)
1695 {
1696 desc.m_Function = ActivationFunction::LeakyReLu;
1697 desc.m_A = const_data[0];
1698
1699 *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
1700 return true;
1701 }
1702 }
1703 }
1704 }
1705 return false;
1706}
1707
telsoa01c577f2c2018-08-31 09:22:23 +01001708ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
1709 const tensorflow::GraphDef& graphDef)
1710{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001711 boost::ignore_unused(graphDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001712 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
Sadik Armagan975c09a2018-12-04 10:02:08 +00001713 if (inputs.size() != 2)
1714 {
1715 throw ParseException(
1716 boost::str(
1717 boost::format(
1718 "Maximum expects two inputs!. Got %1% for Node %2% %3%")
1719 % inputs.size()
1720 % nodeDef.name()
1721 % CHECK_LOCATION().AsString()));
1722 }
1723
telsoa01c577f2c2018-08-31 09:22:23 +01001724 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1725 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1726 IOutputSlot* outputOfLeakyRelu = nullptr;
1727
1728 ActivationDescriptor desc;
1729
Sadik Armagan975c09a2018-12-04 10:02:08 +00001730 // A max node may be part of a LeakyRelu, with one input as a multiplication with a scalar constant,
1731 // i.e. one of the four possible scenarios:
1732 // 1, max(mul(a, x), x)
1733 // 2, max(mul(x, a), x)
1734 // 3, max(x, mul(a, x))
1735 // 4, max(x, mul(x, a))
1736 // These are handled by an activation layer.
telsoa01c577f2c2018-08-31 09:22:23 +01001737
1738 if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1739 IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1740 IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1741 IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1742 {
1743 BOOST_ASSERT(outputOfLeakyRelu != nullptr);
1744
1745 IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
1746 outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
1747 layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
1748 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1749 }
1750 else
1751 {
Sadik Armagan975c09a2018-12-04 10:02:08 +00001752 // Anything else is just a maximum layer.
1753
1754 return AddMaximumLayer(nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001755 }
1756}
1757
jimfly0184c70e62018-12-19 13:14:46 +00001758std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> TfParser::ProcessElementwiseInputSlots(
1759 const tensorflow::NodeDef& nodeDef, const std::string& layerName)
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001760{
1761 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1762
1763 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1764 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1765 const unsigned int input0Dim = input0Slot->GetTensorInfo().GetNumDimensions();
1766 const unsigned int input1Dim = input1Slot->GetTensorInfo().GetNumDimensions();
1767
1768 if (input0Dim != input1Dim)
1769 {
1770 // broadcasting where input0 and input1 have different number of dimensions
1771 // is only supported for 1D and 4D tensors pair
1772 if (input0Dim == 1 && input1Dim == 4)
1773 {
1774 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
1775 }
1776 else if (input0Dim == 4 && input1Dim == 1)
1777 {
1778 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
1779 }
1780 else
1781 {
1782 throw ParseException(
jimfly0184c70e62018-12-19 13:14:46 +00001783 boost::str(
1784 boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
1785 % layerName
1786 % nodeDef.name()
1787 % CHECK_LOCATION().AsString()));
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001788 }
1789 }
jimfly0184c70e62018-12-19 13:14:46 +00001790 return {input0Slot, input1Slot};
1791}
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001792
kevmay012b4d88e2019-01-24 14:05:09 +00001793ParsedTfOperationPtr TfParser::ProcessComparisonLayer(
1794 IOutputSlot* input0Slot,
1795 IOutputSlot* input1Slot,
1796 IConnectableLayer* const layer,
1797 const tensorflow::NodeDef& nodeDef)
1798{
1799 input0Slot->Connect(layer->GetInputSlot(0));
1800 input1Slot->Connect(layer->GetInputSlot(1));
1801
1802 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1803 outputInfo.SetDataType(DataType::Boolean);
1804 std::vector<unsigned int> outputShape;
1805
1806 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1807 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1808
1809 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1810 {
1811 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1812 }
1813
1814 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1815 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1816
1817 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1818}
1819
jimfly0184c70e62018-12-19 13:14:46 +00001820ParsedTfOperationPtr TfParser::ProcessElementwiseLayer(
1821 IOutputSlot* input0Slot,
1822 IOutputSlot* input1Slot,
1823 IConnectableLayer* const layer,
1824 const tensorflow::NodeDef& nodeDef)
1825{
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001826 input0Slot->Connect(layer->GetInputSlot(0));
1827 input1Slot->Connect(layer->GetInputSlot(1));
1828
1829 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1830 std::vector<unsigned int> outputShape;
1831
1832 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1833 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1834
1835 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1836 {
1837 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1838 }
1839
1840 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1841 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1842
1843 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1844}
1845
FrancisMurtagh94412af2019-01-24 10:53:39 +00001846ParsedTfOperationPtr TfParser::ParseGather(const tensorflow::NodeDef& nodeDef,
1847 const tensorflow::GraphDef& graphDef)
1848{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001849 boost::ignore_unused(graphDef);
FrancisMurtagh94412af2019-01-24 10:53:39 +00001850 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1851 IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1852 IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1853
1854 // Infer shape of output tensor
1855 unsigned int paramsDim = params.GetTensorInfo().GetNumDimensions();
1856 unsigned int indicesDim = indices.GetTensorInfo().GetNumDimensions();
1857 unsigned int outputDim = paramsDim - 1 + indicesDim;
1858
1859 std::vector<unsigned int> dimSizes;
1860
1861 for (unsigned int i = 0; i < indicesDim; ++i)
1862 {
1863 dimSizes.push_back(indices.GetTensorInfo().GetShape()[i]);
1864 }
1865 for (unsigned int i = 1; i < paramsDim; ++i)
1866 {
1867 dimSizes.push_back(params.GetTensorInfo().GetShape()[i]);
1868 }
1869
1870 const TensorShape& inferredShape = TensorShape(outputDim, dimSizes.data());
1871
1872 const TensorInfo inferredOutputInfo(inferredShape, params.GetTensorInfo().GetDataType());
1873
1874 IConnectableLayer* const layer = m_Network->AddGatherLayer(nodeDef.name().c_str());
1875 layer->GetOutputSlot(0).SetTensorInfo(inferredOutputInfo);
1876
1877 params.Connect(layer->GetInputSlot(0));
1878 indices.Connect(layer->GetInputSlot(1));
1879
1880 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1881}
1882
jimfly01a06bf312018-12-18 16:24:51 +00001883ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
1884 const tensorflow::GraphDef& graphDef)
1885{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001886 boost::ignore_unused(graphDef);
jimfly01a06bf312018-12-18 16:24:51 +00001887 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
1888 IOutputSlot* input0Slot = inputLayers.first;
1889 IOutputSlot* input1Slot = inputLayers.second;
1890
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001891 ComparisonDescriptor descriptor(ComparisonOperation::Greater);
1892 IConnectableLayer* const layer = m_Network->AddComparisonLayer(descriptor, nodeDef.name().c_str());
jimfly01a06bf312018-12-18 16:24:51 +00001893
kevmay012b4d88e2019-01-24 14:05:09 +00001894 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
jimfly01a06bf312018-12-18 16:24:51 +00001895}
1896
jimfly0184c70e62018-12-19 13:14:46 +00001897ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
1898 const tensorflow::GraphDef& graphDef)
1899{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001900 boost::ignore_unused(graphDef);
jimfly0184c70e62018-12-19 13:14:46 +00001901 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
1902 IOutputSlot* input0Slot = inputLayers.first;
1903 IOutputSlot* input1Slot = inputLayers.second;
1904
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001905 ComparisonDescriptor descriptor(ComparisonOperation::Equal);
1906 IConnectableLayer* const layer = m_Network->AddComparisonLayer(descriptor, nodeDef.name().c_str());
jimfly0184c70e62018-12-19 13:14:46 +00001907
kevmay012b4d88e2019-01-24 14:05:09 +00001908 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
jimfly0184c70e62018-12-19 13:14:46 +00001909}
1910
1911ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
1912 const tensorflow::GraphDef& graphDef)
1913{
Derek Lambertibaa177f2019-12-10 22:00:43 +00001914 boost::ignore_unused(graphDef);
jimfly0184c70e62018-12-19 13:14:46 +00001915 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
1916 IOutputSlot* input0Slot = inputLayers.first;
1917 IOutputSlot* input1Slot = inputLayers.second;
1918
1919 IConnectableLayer* const layer = m_Network->AddMinimumLayer(nodeDef.name().c_str());
1920
1921 return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
1922}
1923
ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);

    // Note: these references are deliberately bound to the ORIGINAL slots' tensor infos,
    // before the slot pointers may be redirected to broadcast-reshape layers below. They
    // are reused at the end to pick the output shape of the subtraction.
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    // A 1D input is reshaped so it broadcasts against the other (assumed NHWC) input.
    if (input0Info.GetNumDimensions() == 1)
    {
        const bool isNHWC = true;
        input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
    }

    if (input1Info.GetNumDimensions() == 1)
    {
        const bool isNHWC = true;
        input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
    }

    IConnectableLayer* const layer = m_Network->AddSubtractionLayer(nodeDef.name().c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // The output shape is that of the non-broadcast (higher-rank) input: if input0 was 1D
    // the result takes input1's shape, otherwise it takes input0's shape.
    if (input0Info.GetNumDimensions() == 1)
    {
        layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
    }
    else
    {
        layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1963
jimfly01f6ba7472018-12-04 10:09:52 +00001964unsigned int CheckPaddingTensor(const ConstTensor& paddingTensor,
1965 const TensorInfo& inputTensorInfo,
1966 const std::string& nodeName)
1967{
1968 unsigned int rank = paddingTensor.GetShape()[0];
1969 unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
1970 if (rank != expectedRank)
1971 {
1972 throw ParseException(
1973 boost::str(
1974 boost::format(
1975 "Expected the padding tensor to be of rank %1 not %2 on Node %3 %4.")
1976 % expectedRank
1977 % rank
1978 % nodeName
1979 % CHECK_LOCATION().AsString()));
1980 }
1981 unsigned int second = paddingTensor.GetShape()[1];
1982 if (second != 2)
1983 {
1984 throw ParseException(
1985 boost::str(
1986 boost::format(
1987 "Expected the padding tensor to be of dimensions [%1, 2] not [%1, %2] on Node %3 %4.")
1988 % rank
1989 % second
1990 % nodeName
1991 % CHECK_LOCATION().AsString()));
1992 }
1993 return rank;
1994}
1995
1996TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
1997 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
1998{
1999 unsigned int numDims = inputTensorInfo.GetNumDimensions();
2000 std::vector<unsigned int> outDims;
2001 for (unsigned int i = 0; i < numDims; ++i)
2002 {
2003 unsigned int dimSize = inputTensorInfo.GetShape()[i];
2004 const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
2005 dimSize += dimPadding.first;
2006 dimSize += dimPadding.second;
2007 outDims.push_back(dimSize);
2008 }
2009 TensorInfo paddedTensorInfo = inputTensorInfo;
2010 unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
2011 paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
2012 return paddedTensorInfo;
2013}
2014
2015ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
2016 const tensorflow::GraphDef& graphDef)
2017{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002018 boost::ignore_unused(graphDef);
jimfly01f6ba7472018-12-04 10:09:52 +00002019 // input consists of:
2020 // input[0] the tensor which will be padded
2021 // input[1] the tensor holding the padding values
2022 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2023 IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2024 TensorInfo inputTensorInfo = previousLayerOutputSlot.GetTensorInfo();
2025 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
2026 {
2027 throw ParseException(
2028 boost::str(
2029 boost::format(
2030 "ArmNN only supports Pad with constant padding. "
2031 "Input %1%. Node %2% %3%")
2032 % inputs[1].m_IndexedValue->GetNode().name()
2033 % nodeDef.name()
2034 % CHECK_LOCATION().AsString()));
2035
2036 }
2037 ParsedConstTfOperation<int32_t>* paddingTensorOp =
2038 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2039
2040 std::vector<int32_t> paddingTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002041 ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(paddingTensorData);
jimfly01f6ba7472018-12-04 10:09:52 +00002042 // paddings is an integer tensor with shape [n, 2], where n is the rank of tensor
2043 // and should match the rank of the input tensor that is being padded.
2044 // For each dimension D of input, paddings[D, 0] indicates how many values to add
2045 // before the contents of tensor in that dimension, and paddings[D, 1] indicates how
2046 // many values to add after the contents of tensor in that dimension
2047 // This needs to be translated into a padList for ACL
2048 std::vector<std::pair<unsigned int, unsigned int>> padList;
2049 unsigned int rank = CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
2050 for (unsigned int i = 0; i < rank; ++i)
2051 {
2052 std::pair<unsigned int, unsigned int> paddingForDim;
2053 for (unsigned int j = 0; j < 2; j++)
2054 {
2055 unsigned int index = (i * 2) + j;
2056 int paddingAmount = paddingTensorData[index];
2057 // make sure we can cast to an unsigned value
2058 if (paddingAmount < 0)
2059 {
2060 throw ParseException(
2061 boost::str(
2062 boost::format(
2063 "Negative amount %1 specified at [%2, %3] of padding tensor on Node %4 %5.")
2064 % paddingAmount
2065 % i
2066 % j
2067 % nodeDef.name()
2068 % CHECK_LOCATION().AsString()));
2069 }
2070 if (j == 0)
2071 {
2072 paddingForDim.first = static_cast<unsigned int>(paddingAmount);
2073 }
2074 else
2075 {
2076 paddingForDim.second = static_cast<unsigned int>(paddingAmount);
2077 }
2078 }
2079 padList.push_back(paddingForDim);
2080 }
2081 PadDescriptor padDescriptor(padList);
2082 IConnectableLayer* layer = m_Network->AddPadLayer(padDescriptor, nodeDef.name().c_str());
2083 previousLayerOutputSlot.Connect(layer->GetInputSlot(0));
2084 // Use the padding to calculate the new output tensor shape
2085 TensorInfo outputTensorInfo = CalculatePaddedOutputTensorInfo(inputTensorInfo, padList);
2086 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2087 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2088}
2089
ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);

    // In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
    unsigned int numInputs = static_cast<unsigned int>(nodes.size());

    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);

    // Constant tensor index: locate which input carries the (constant) concatenation axis.
    unsigned int index = GetConstInputIndex(inputs);
    // Get the axis tensor data
    ParsedConstTfOperation<int32_t>* shapeNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);

    std::vector<int32_t> axisTensorData;
    shapeNode->GetConstTensor(axisTensorData);

    // This concatDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
    const unsigned int concatDim = static_cast<unsigned int>(axisTensorData[0]);

    // Armnn supports concatenation along the channel dimension for data formats NHWC and NCHW,
    // i.e. axis 3 or axis 1; axes 0 and 2 are rejected.
    if (concatDim == 0 || concatDim == 2)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Dimension %1% for concatenation is not supported by Armnn. "
                    "Node %2% %3%")
                    % concatDim
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    // Only 4D input tensors are supported; the last input is the axis, so there are
    // numInputs - 1 actual views to concatenate.
    const unsigned int supportedNumDims = 4;
    unsigned int numConcatViews = numInputs - 1;
    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatViews), supportedNumDims);
    concatDescriptor.SetConcatAxis(concatDim);
    TensorShape mergeDims(supportedNumDims);
    // Running offset along the concatenation axis; each view starts where the previous ended.
    unsigned int mergeDim = 0;
    for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
    {
        // Need to double check whether it should be
        IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
        TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

        // Double check dimensions of the tensors
        if (inputTensorInfo.GetNumDimensions() != supportedNumDims)
        {
            throw armnn::ParseException(
                boost::str(
                    boost::format(
                        "The number of dimensions: %1% for input tensors of the "
                        "concatenation op should be %2% %3%")
                        % inputTensorInfo.GetNumDimensions()
                        % supportedNumDims
                        % CHECK_LOCATION().AsString()));
        }

        // Copy the input tensor shape to mergeDimSizes and initialize the view origin coordinates for the current input
        mergeDims = inputTensorInfo.GetShape();
        unsigned int* viewOrigin = const_cast<unsigned int*>(concatDescriptor.GetViewOrigin(viewIndex));
        std::fill(viewOrigin, viewOrigin + supportedNumDims, 0);

        // Update the view origin coordinates and the merge dimension value
        concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
        mergeDim += mergeDims[concatDim];
    }

    // Update the output shape: the concat axis is the sum of the views' sizes on that axis,
    // all other dimensions are taken from the (last) input shape.
    mergeDims[concatDim] = mergeDim;
    armnn::IConnectableLayer *layer = m_Network->AddConcatLayer(concatDescriptor, nodeDef.name().c_str());

    layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(mergeDims, DataType::Float32));

    // Connect each data input to its corresponding view slot on the concat layer.
    for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
    {
        IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
        inputSlot.Connect(layer->GetInputSlot(viewIndex));
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2175
2176ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
2177 const tensorflow::GraphDef& graphDef)
2178{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002179 boost::ignore_unused(graphDef);
telsoa01c577f2c2018-08-31 09:22:23 +01002180 // Note: the Shape layer is handled in a special way, because:
2181 // 1. ARMNN doesn't support int32 tensors which it outputs.
2182 // 2. ARMNN works with statically shaped tensors which are known at parse time.
surmeh01bceff2f2018-03-29 16:29:27 +01002183 // 3. because of 1. and 2. we treat the output of Shape as a temporary const int32
telsoa01c577f2c2018-08-31 09:22:23 +01002184 // tensor which may be used as an input to other ops, most likely a Reshape.
surmeh01bceff2f2018-03-29 16:29:27 +01002185
2186 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
2187 if (tfDataType != tensorflow::DT_INT32)
2188 {
telsoa01c577f2c2018-08-31 09:22:23 +01002189 throw ParseException(
2190 boost::str(
2191 boost::format(
2192 "Armnn only supports DT_INT32 as out_type. Got %1% for Node %2% %3%")
2193 % tensorflow::DataType_Name(tfDataType)
2194 % nodeDef.name()
2195 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002196 }
2197
2198 const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2199 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2200 const TensorInfo& prevLayerTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2201 unsigned int prevLayerDimensions = prevLayerTensorInfo.GetNumDimensions();
2202
2203 std::vector<int32_t> shapeTensorData;
2204 shapeTensorData.reserve(prevLayerDimensions);
2205
2206 for (unsigned int i=0; i<prevLayerDimensions; ++i)
2207 {
2208 shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.GetShape()[i]));
2209 }
2210
2211 TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
2212
2213 return std::make_unique<ParsedConstTfOperation<int32_t>>(this,
2214 nodeDef,
2215 &shapeTensorData[0],
2216 shapeTensorInfo);
2217}
2218
2219ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
2220 const tensorflow::GraphDef& graphDef)
2221{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002222 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002223 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2224 ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
2225
2226 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2227 {
telsoa01c577f2c2018-08-31 09:22:23 +01002228 throw ParseException(
2229 boost::str(
2230 boost::format(
2231 "ArmNN only supports Reshape layers with constant shapes. "
2232 "Input %1% Node %2% %3%")
2233 % inputs[1].m_IndexedValue->GetNode().name()
2234 % nodeDef.name()
2235 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002236 }
2237 ParsedConstTfOperation<int32_t>* shapeNode =
2238 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2239
2240 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
2241 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2242
2243 std::vector<int32_t> shapeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002244 ConstTensor shapeTensor = shapeNode->GetConstTensor(shapeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002245 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
2246
2247 TensorShape targetShape = outputTensorInfo.GetShape();
2248 ReshapeDescriptor reshapeDesc;
2249 reshapeDesc.m_TargetShape = targetShape;
2250
2251 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2252 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2253 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2254
2255 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2256}
2257
/// Parses a TensorFlow ResizeBilinear node into an armnn Resize layer (Bilinear method, NHWC).
/// The size input must be constant and align_corners must be false.
/// @throws ParseException if the size input is not constant or align_corners is set.
ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
                                                   const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);
    // Two inputs: the image tensor and the constant target size.
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports ResizeBilinear layers with constant sizes. "
                    "Input %1%. Node %2% %3%")
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<int32_t>* sizeNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);

    // Checks the align_corners attribute is not set.
    if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports ResizeBilinear layers with align_corners set to false. "
                    "Node %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    // Data for the parsed tensor args (size) must be stored locally.
    std::vector<int32_t> sizeTensorData;
    ConstTensor sizeTensor = sizeNode->GetConstTensor(sizeTensorData);

    // The descriptor only has target height and width attributes, which we get from the size tensor.
    // The size tensor is read as [height, width] (elements 0 and 1).
    ResizeDescriptor desc;
    desc.m_Method       = armnn::ResizeMethod::Bilinear;
    desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
    desc.m_TargetWidth  = static_cast<uint32_t> (sizeTensorData[1]);
    desc.m_DataLayout   = armnn::DataLayout::NHWC;

    IConnectableLayer* layer = m_Network->AddResizeLayer(desc, nodeDef.name().c_str());

    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
    // The input shape is always in BHWC format, this will be swizzled below; for now,
    // get the batch and channels to make up the ArmNN output shape with the target size.
    unsigned int outBatch    = inputTensorInfo.GetShape()[0];
    unsigned int outChannels = inputTensorInfo.GetShape()[3];
    unsigned int outHeight   = desc.m_TargetHeight;
    unsigned int outWidth    = desc.m_TargetWidth;
    // Output keeps the input's batch/channels, with spatial dims replaced by the target size.
    TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
    // The output DataType is always Float32, regardless of the input DataType.
    const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2320
2321TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
2322{
2323 BOOST_ASSERT(nodeDef.op() == "Squeeze");
2324 tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
2325
2326 DataType type;
2327 if (tfDataType == tensorflow::DT_FLOAT)
2328 {
2329 type = DataType::Float32;
2330 }
2331 else if (tfDataType == tensorflow::DT_INT32)
2332 {
2333 type = DataType::Signed32;
2334 }
2335 else
2336 {
telsoa01c577f2c2018-08-31 09:22:23 +01002337 throw ParseException(
2338 boost::str(
2339 boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
2340 % tensorflow::DataType_Name(tfDataType)
2341 % nodeDef.name()
2342 % CHECK_LOCATION().AsString()));
2343 }
2344
2345
2346 if (inputTensorInfo.GetNumDimensions() > 4)
2347 {
2348 throw ParseException(
2349 boost::str(
2350 boost::format(
2351 "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
2352 % inputTensorInfo.GetNumDimensions()
2353 % nodeDef.name()
2354 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002355 }
2356
2357 std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
telsoa01c577f2c2018-08-31 09:22:23 +01002358 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2359
surmeh01bceff2f2018-03-29 16:29:27 +01002360 if (squeezeDims.empty())
2361 {
telsoa01c577f2c2018-08-31 09:22:23 +01002362 squeezeDims.assign(dimensionSequence,
2363 dimensionSequence+inputTensorInfo.GetNumDimensions());
surmeh01bceff2f2018-03-29 16:29:27 +01002364 }
2365
2366 std::vector<uint32_t> outputDims;
2367 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2368 {
telsoa01c577f2c2018-08-31 09:22:23 +01002369 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2370 auto currentDimension = inputTensorInfo.GetShape()[i];
2371 if (skipSqueeze || currentDimension != 1)
surmeh01bceff2f2018-03-29 16:29:27 +01002372 {
telsoa01c577f2c2018-08-31 09:22:23 +01002373 outputDims.push_back(currentDimension);
surmeh01bceff2f2018-03-29 16:29:27 +01002374 }
2375 }
2376
2377 if (outputDims.size() > 4)
2378 {
telsoa01c577f2c2018-08-31 09:22:23 +01002379 throw ParseException(
2380 boost::str(
2381 boost::format(
2382 "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
2383 % outputDims.size()
2384 % nodeDef.name()
2385 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002386 }
2387
telsoa01c577f2c2018-08-31 09:22:23 +01002388 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2389 outputDims.data());
2390
2391 TensorInfo outTensorInfo = inputTensorInfo;
2392 outTensorInfo.SetShape(outShape);
2393 outTensorInfo.SetDataType(type);
surmeh01bceff2f2018-03-29 16:29:27 +01002394
2395 return outTensorInfo;
2396}
2397
2398ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2399{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002400 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002401 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2402
2403 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2404 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2405
2406 TensorInfo outputInfo;
2407 outputInfo = OutputShapeOfSqueeze(nodeDef, inputTensorInfo);
2408
2409 ReshapeDescriptor reshapeDesc;
2410 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2411 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2412 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2413 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2414
2415 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2416}
2417
2418ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2419{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002420 boost::ignore_unused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002421 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2422
2423 NormalizationDescriptor normalizationDescriptor;
2424 normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2425 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
2426 normalizationDescriptor.m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef, "alpha");
2427 normalizationDescriptor.m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef, "beta");
2428 normalizationDescriptor.m_K = ReadMandatoryNodeFloatAttribute(nodeDef, "bias");
2429 normalizationDescriptor.m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef, "depth_radius");
ruoyan018174f362018-12-04 18:24:08 +00002430 normalizationDescriptor.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002431
2432 // The window size must be an odd value. For a window size of (2 * n + 1), TensorFlow defines depth_radius = n.
2433 normalizationDescriptor.m_NormSize = normalizationDescriptor.m_NormSize * 2 + 1;
2434
2435 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002436 IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor,
2437 nodeDef.name().c_str());
ruoyan018174f362018-12-04 18:24:08 +00002438 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2439 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
surmeh01bceff2f2018-03-29 16:29:27 +01002440
2441 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2442}
2443
/// An ParsedTfOperation for a MatMul node.
/// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because
/// MatMul nodes are often used for the first part of a biased FullyConnected (MatMul followed
/// by Add) and in these cases armnn doesn't need a separate layer for the MatMul.
///
class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    ParsedMatMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
        : DeferredSingleLayerParsedTfOperation(parser, node)
    {
    }

    /// Invoked only when no later node fused this MatMul away; builds a standalone
    /// FullyConnected layer for the node. The nullptr second argument presumably
    /// means "no accompanying Add/bias node" — confirm against AddFullyConnectedLayer.
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
    }
};
2463
/// Parses a TensorFlow MatMul node. No layer is created here: layer creation is
/// deferred via ParsedMatMulTfOperation so that a following Add can fuse the MatMul
/// into a single biased FullyConnected layer.
ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);

    // Defers the creation of the layer (see ParsedMatMulTfOperation).
    return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
}
2471
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002472ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2473{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002474 boost::ignore_unused(graphDef);
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002475 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2476 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2477 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2478
2479 if (inputs.size() != 2)
2480 {
2481 throw ParseException(
2482 boost::str(boost::format("Mean expects two inputs!. Got %1% for Node %2% %3%")
2483 % inputs.size()
2484 % nodeDef.name()
2485 % CHECK_LOCATION().AsString()));
2486 }
2487
2488 bool keepDims = ReadMandatoryNodeBoolAttribute(nodeDef, "keep_dims");
2489
2490 ParsedConstTfOperation<int32_t>* axisNode =
2491 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2492
2493 const TensorInfo& axisTensorInfo = axisNode->GetTensorInfo();
2494
2495 ConstTensor axisTensor(axisTensorInfo, axisNode->GetStorage());
2496 const int* axisData = static_cast<const int*>(axisTensor.GetMemoryArea());
2497
2498 TensorInfo outputTensorInfo;
2499 MeanDescriptor meanDescriptor;
2500 meanDescriptor.m_KeepDims = keepDims;
2501
2502 // Negative axis values are supported so that the process requires
2503 // to convert them into the corresponding positive ones.
2504 // Duplicate values are also removed.
2505 std::vector<int> rawAxisVector(axisData, axisData + axisTensorInfo.GetNumElements());
2506 std::set<unsigned int> positiveAxisSet;
2507 int rank = static_cast<int>(inputTensorInfo.GetNumDimensions());
2508
2509 std::transform(rawAxisVector.begin(), rawAxisVector.end(),
2510 std::inserter(positiveAxisSet, positiveAxisSet.begin()),
2511 [rank](int i) -> unsigned int { return static_cast<unsigned int>((i + rank) % rank); });
2512
Derek Lambertibaa177f2019-12-10 22:00:43 +00002513 CalculateReducedOutputTensoInfo(inputTensorInfo, positiveAxisSet, keepDims, outputTensorInfo);
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002514
2515 if (inputTensorInfo.GetNumDimensions() > positiveAxisSet.size())
2516 {
2517 meanDescriptor.m_Axis.assign(positiveAxisSet.begin(), positiveAxisSet.end());
2518 }
2519
2520 IConnectableLayer* layer = m_Network->AddMeanLayer(meanDescriptor, nodeDef.name().c_str());
2521 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2522 inputSlot.Connect(layer->GetInputSlot(0));
2523
2524 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2525}
2526
telsoa01c577f2c2018-08-31 09:22:23 +01002527/// An ParsedTfOperation for a Mul node.
2528/// Creation of the armnn Mul layer is deferred until it is actually needed, because Mul nodes
2529/// are also used for the first part of a leaky relu activation function (Mul followed by Maximum)
2530/// and in these cases armnn doesn't need a separate layer for the Mul.
2531///
2532class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
2533{
2534public:
2535 ParsedMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
2536 : DeferredSingleLayerParsedTfOperation(parser, node)
2537 {
2538 }
2539
2540 void CreateLayerDeferred() override
2541 {
2542 BOOST_ASSERT(m_Layer == nullptr);
2543 m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
2544 }
2545};
2546
surmeh01bceff2f2018-03-29 16:29:27 +01002547ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2548{
2549 boost::ignore_unused(graphDef);
2550
telsoa01c577f2c2018-08-31 09:22:23 +01002551 return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002552}
2553
2554ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
2555 const tensorflow::GraphDef& graphDef)
2556{
2557 boost::ignore_unused(graphDef);
2558
2559 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
2560
2561 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
2562
2563 auto it = m_InputShapes.find(nodeDef.name());
2564 if (it == m_InputShapes.end())
2565 {
telsoa01c577f2c2018-08-31 09:22:23 +01002566 throw ParseException(
2567 boost::str(
2568 boost::format(
2569 "Missing input shape for Placeholder '%1%' %2%")
2570 % nodeDef.name()
2571 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002572 }
2573 TensorInfo tensorInfo(it->second, DataType::Float32);
2574
2575 IConnectableLayer* const layer = m_Network->AddInputLayer(layerId, nodeDef.name().c_str());
2576
2577 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2578
2579 TrackInputBinding(layer, layerId, tensorInfo);
2580
2581 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2582}
2583
saoste01bbd40612018-08-28 15:41:51 +01002584ParsedTfOperationPtr TfParser::ParseRealDiv(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2585{
2586 boost::ignore_unused(graphDef);
2587 return AddRealDivLayer(nodeDef);
2588}
2589
surmeh01bceff2f2018-03-29 16:29:27 +01002590ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
2591 const tensorflow::GraphDef& graphDef)
2592{
2593 boost::ignore_unused(graphDef);
2594
2595 ActivationDescriptor activationDesc;
2596 activationDesc.m_Function = ActivationFunction::ReLu;
2597 return AddActivationLayer(nodeDef, activationDesc);
2598}
2599
2600ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
2601 const tensorflow::GraphDef& graphDef)
2602{
2603 boost::ignore_unused(graphDef);
2604
2605 ActivationDescriptor activationDesc;
2606 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2607 activationDesc.m_A = 6.0f;
2608 activationDesc.m_B = 0.0f;
2609
2610 return AddActivationLayer(nodeDef, activationDesc);
2611}
2612
2613ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
2614 const tensorflow::GraphDef& graphDef)
2615{
2616 boost::ignore_unused(graphDef);
2617
2618 ActivationDescriptor activationDesc;
2619 activationDesc.m_Function = ActivationFunction::Sigmoid;
2620
2621 return AddActivationLayer(nodeDef, activationDesc);
2622}
2623
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +00002624ParsedTfOperationPtr TfParser::ParseRsqrt(const tensorflow::NodeDef &nodeDef,
2625 const tensorflow::GraphDef &graphDef)
2626{
2627 boost::ignore_unused(graphDef);
2628
2629 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2630
josh minor4a3c6102020-01-06 16:40:46 -06002631 ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
2632 IConnectableLayer* const layer = m_Network->AddElementwiseUnaryLayer(descriptor, nodeDef.name().c_str());
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +00002633
2634 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2635 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2636 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2637
2638 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2639}
2640
surmeh01bceff2f2018-03-29 16:29:27 +01002641ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
2642 const tensorflow::GraphDef& graphDef)
2643{
2644 boost::ignore_unused(graphDef);
2645
2646 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2647
2648 SoftmaxDescriptor softmaxDescriptor;
2649 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(softmaxDescriptor, nodeDef.name().c_str());
2650
2651 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2652 prevLayerSlot.Connect(layer->GetInputSlot(0));
2653 layer->GetOutputSlot(0).SetTensorInfo(prevLayerSlot.GetTensorInfo());
2654
2655 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2656}
2657
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002658ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef,
2659 const tensorflow::GraphDef& graphDef)
2660{
2661 boost::ignore_unused(graphDef);
2662
2663 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2664 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2665 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2666
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002667 // Constant tensor index
2668 unsigned int index = GetConstInputIndex(inputs);
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002669 // Get the axis tensor data
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002670 ParsedConstTfOperation<int32_t>* shapeNode =
2671 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
2672
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002673 std::vector<int32_t> axisTensorData;
2674 shapeNode->GetConstTensor(axisTensorData);
2675
2676 // This splitDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
2677 const unsigned int splitDim = static_cast<unsigned int>(axisTensorData[0]);
2678
2679 // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
2680 if (splitDim == 0 || splitDim == 2)
2681 {
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002682 throw armnn::ParseException(
2683 boost::str(
2684 boost::format(
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002685 "Dimension %1% for split is not supported by Armnn. "
2686 "Node %2% %3%")
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002687 % splitDim
2688 % nodeDef.name()
2689 % CHECK_LOCATION().AsString()));
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002690 }
2691
Saoirse Stewart315258e2019-02-28 11:32:41 +00002692 // As Armnn only supports splitter outputs of the same shape, therefore num_split will be limited to an integer.
2693 uint32_t num_split = ReadMandatoryNodeUint32Attribute(nodeDef, "num_split");
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002694
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002695 IOutputSlot& inputSlot = inputs[1 - index].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1 - index].m_Index);
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002696 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2697
Matthew Jacksondba634f2019-08-15 15:14:18 +01002698 const unsigned int supportedNumDims = 4;
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002699 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2700
Matthew Jacksondba634f2019-08-15 15:14:18 +01002701 if (inputDimSize != supportedNumDims)
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002702 {
2703 throw armnn::ParseException(
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002704 boost::str(
2705 boost::format(
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002706 "The number of dimensions: %1% for input tensors of the "
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002707 "split op should be %2% %3%")
2708 % inputTensorInfo.GetNumDimensions()
Matthew Jacksondba634f2019-08-15 15:14:18 +01002709 % supportedNumDims
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002710 % CHECK_LOCATION().AsString()));
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002711 }
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002712
2713 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2714
2715 // Add current input shape to splitterDimSizes
2716 for (unsigned int i = 0; i < inputDimSize; ++i)
2717 {
2718 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2719 }
2720
2721 if (splitterDimSizes[splitDim] % num_split != 0)
2722 {
2723 throw ParseException("Number of splits must evenly divide the dimension");
2724 }
2725 splitterDimSizes[splitDim] /= num_split;
2726
2727 SplitterDescriptor splitDesc(num_split);
2728 for (unsigned int g = 0; g < num_split; ++g)
2729 {
2730 // Set the size of the views.
2731 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2732 {
2733 splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
2734 }
2735 splitDesc.SetViewOriginCoord(g, splitDim, splitterDimSizes[splitDim] * g);
2736 }
2737
2738 IConnectableLayer *layer = m_Network->AddSplitterLayer(splitDesc, nodeDef.name().c_str());
2739
2740 inputSlot.Connect(layer->GetInputSlot(0));
2741
2742 TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
2743 splitterDimSizes.data());
2744
2745 for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
2746 {
2747 layer->GetOutputSlot(i).SetTensorInfo(armnn::TensorInfo(outShape, inputTensorInfo.GetDataType()));
2748 }
2749
2750 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2751}
2752
surmeh01bceff2f2018-03-29 16:29:27 +01002753ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
2754 const tensorflow::GraphDef& graphDef)
2755{
2756 boost::ignore_unused(graphDef);
2757
2758 ActivationDescriptor activationDesc;
2759 activationDesc.m_Function = ActivationFunction::SoftReLu;
2760
2761 return AddActivationLayer(nodeDef, activationDesc);
2762}
2763
Georgios Pinitas5e90aab2020-02-14 14:46:51 +00002764ParsedTfOperationPtr TfParser::ParseStridedSlice(const tensorflow::NodeDef& nodeDef,
2765 const tensorflow::GraphDef& graphDef)
2766{
2767 boost::ignore_unused(graphDef);
2768
2769 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2770 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2771 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2772
2773 ParsedConstTfOperation<int32_t>* beginNode =
2774 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t> *>(inputs[1].m_IndexedValue);
2775 std::vector<int32_t> beginTensorData;
2776 beginNode->GetConstTensor(beginTensorData);
2777
2778 ParsedConstTfOperation<int32_t>* endNode =
2779 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t> *>(inputs[2].m_IndexedValue);
2780 std::vector<int32_t> endTensorData;
2781 endNode->GetConstTensor(endTensorData);
2782
2783 ParsedConstTfOperation<int32_t>* stridesNode =
2784 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t> *>(inputs[3].m_IndexedValue);
2785 std::vector<int32_t> stridesTensorData;
2786 stridesNode->GetConstTensor(stridesTensorData);
2787
2788 StridedSliceDescriptor desc;
2789 desc.m_Begin = beginTensorData;
2790 desc.m_End = endTensorData;
2791 desc.m_Stride = stridesTensorData;
2792 desc.m_BeginMask = ReadMandatoryNodeInt32Attribute(nodeDef, "begin_mask");
2793 desc.m_EndMask = ReadMandatoryNodeInt32Attribute(nodeDef, "end_mask");
2794 desc.m_EllipsisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "ellipsis_mask");
2795 desc.m_NewAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "new_axis_mask");
2796 desc.m_ShrinkAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "shrink_axis_mask");
2797 desc.m_DataLayout = armnn::DataLayout::NHWC;
2798 IConnectableLayer* const layer = m_Network->AddStridedSliceLayer(desc, nodeDef.name().c_str());
2799
2800 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2801 TensorInfo inputTensorInfo = prevLayerSlot.GetTensorInfo();
2802
2803 TensorInfo outputTensorInfo;
2804 CalculateStridedSliceOutputTensorInfo(inputTensorInfo, desc, outputTensorInfo);
2805
2806 prevLayerSlot.Connect(layer->GetInputSlot(0));
2807 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2808
2809 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2810}
2811
surmeh01bceff2f2018-03-29 16:29:27 +01002812ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2813{
2814 boost::ignore_unused(graphDef);
2815
2816 ActivationDescriptor activationDesc;
2817 activationDesc.m_Function = ActivationFunction::TanH;
2818 activationDesc.m_A = 1.0f;
2819 activationDesc.m_B = 1.0f;
2820
2821 return AddActivationLayer(nodeDef, activationDesc);
2822}
2823
2824ParsedTfOperationPtr TfParser::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
2825 ActivationDescriptor& activationDesc)
2826{
2827 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2828
2829 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, nodeDef.name().c_str());
2830
2831 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2832 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2833 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2834 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2835}
2836
/// Parses a TensorFlow MaxPool node by delegating to the shared 2D pooling
/// handler with the Max pooling algorithm.
ParsedTfOperationPtr TfParser::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
                                            const tensorflow::GraphDef& graphDef)
{
    return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
}
2842
/// Parses a TensorFlow AvgPool node by delegating to the shared 2D pooling
/// handler with the Average pooling algorithm.
ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
                                            const tensorflow::GraphDef& graphDef)
{
    return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
}
2848
2849ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
2850 const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
2851{
Derek Lambertibaa177f2019-12-10 22:00:43 +00002852 boost::ignore_unused(graphDef);
2853
surmeh01bceff2f2018-03-29 16:29:27 +01002854 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2855 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2856 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2857
2858 if (inputs.size() != 1)
2859 {
telsoa01c577f2c2018-08-31 09:22:23 +01002860 throw ParseException(
2861 boost::str(
2862 boost::format(
2863 "2D Pooling expects one input!. Got %1% for Node %2% %3%")
2864 % inputs.size()
2865 % nodeDef.name()
2866 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002867 }
2868
2869 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
2870 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
2871 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
2872 std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef, "ksize"); // size of pool windows
2873
2874 Pooling2dDescriptor pooling2dDescriptor;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002875 pooling2dDescriptor.m_PoolType = pooltype;
2876 pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
surmeh01bceff2f2018-03-29 16:29:27 +01002877 pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;
2878
telsoa01c577f2c2018-08-31 09:22:23 +01002879 CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
FrancisMurtaghf005e312018-12-06 15:26:04 +00002880 DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
2881 pooling2dDescriptor.m_DataLayout = dataLayout;
2882 DataLayoutIndexed dataLayoutIndexed(dataLayout);
telsoa01c577f2c2018-08-31 09:22:23 +01002883
FrancisMurtaghf005e312018-12-06 15:26:04 +00002884 pooling2dDescriptor.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
2885 pooling2dDescriptor.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
2886 pooling2dDescriptor.m_PoolWidth = ksize[dataLayoutIndexed.GetWidthIndex()];
2887 pooling2dDescriptor.m_PoolHeight = ksize[dataLayoutIndexed.GetHeightIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01002888
FrancisMurtaghf005e312018-12-06 15:26:04 +00002889 uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
2890 uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01002891
2892 bool padding = false;
2893 TensorInfo outputInfo;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002894 unsigned int outputHeight = 0;
2895 unsigned int outputWidth = 0;
telsoa01c577f2c2018-08-31 09:22:23 +01002896
2897 CHECK_PADDING_TYPE(nodeDef, paddingString);
2898
surmeh01bceff2f2018-03-29 16:29:27 +01002899 if (paddingString == "SAME")
2900 {
2901 padding = true;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002902
2903 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
2904 static_cast<float>(pooling2dDescriptor.m_StrideY)));
2905 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
2906 static_cast<float>(pooling2dDescriptor.m_StrideX)));
surmeh01bceff2f2018-03-29 16:29:27 +01002907 }
2908 else if (paddingString == "VALID")
2909 {
2910 padding = false;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002911
2912 outputHeight = static_cast<uint32_t>(ceil(
2913 static_cast<float>(inputHeight - pooling2dDescriptor.m_PoolHeight + 1) /
2914 static_cast<float>(pooling2dDescriptor.m_StrideY)));
2915 outputWidth = static_cast<uint32_t>(ceil(
2916 static_cast<float>(inputWidth - pooling2dDescriptor.m_PoolWidth + 1) /
2917 static_cast<float>(pooling2dDescriptor.m_StrideX)));
2918 }
2919
2920 switch (dataLayout)
2921 {
2922 case DataLayout::NHWC:
2923 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
2924 outputHeight,
2925 outputWidth,
2926 inputTensorInfo.GetShape()[3] },
2927 DataType::Float32);
2928 break;
2929 case DataLayout::NCHW:
2930 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
2931 inputTensorInfo.GetShape()[1],
2932 outputHeight,
2933 outputWidth },
2934 DataType::Float32);
2935 break;
surmeh01bceff2f2018-03-29 16:29:27 +01002936 }
surmeh01bceff2f2018-03-29 16:29:27 +01002937
2938 CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX,
FrancisMurtaghf005e312018-12-06 15:26:04 +00002939 pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01002940 CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY,
FrancisMurtaghf005e312018-12-06 15:26:04 +00002941 pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01002942
2943
2944 IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
2945 if (layer == nullptr)
2946 {
telsoa01c577f2c2018-08-31 09:22:23 +01002947 throw ParseException(
2948 boost::str(
2949 boost::format(
2950 "Failed to add pooling2d layer for %1% %2%")
2951 % nodeDef.name()
2952 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002953 }
2954
2955 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2956
FrancisMurtaghf005e312018-12-06 15:26:04 +00002957 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01002958
2959 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2960}
2961
/// Adds an ArmNN addition layer for a TensorFlow Add or BiasAdd node.
/// @param nodeDef   the TF node being parsed; must have exactly two inputs.
/// @param isBiasAdd true when parsing BiasAdd, whose second input must be a 1D
///                  bias vector (reshaped here so it broadcasts correctly).
/// @throws ParseException if a BiasAdd bias is not 1D.
ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);

    // NOTE: these references are bound to the ORIGINAL slots' infos, before any
    // broadcast reshape below; the shape logic after the connections relies on
    // them still describing the pre-reshape operand ranks.
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    if (isBiasAdd)
    {
        // BiasAdd takes bias as a 1D tensor. We need to add a reshape layer to create a 4D tensor
        // with the same data in the correct dimension for broadcast in addition.
        if(input1Info.GetNumDimensions() != 1)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Unsupported bias for BiasAdd. It should be a 1D vector. "
                        "Got %1% dimensions for input %2%. Node %3% %4%")
                        % input1Info.GetNumDimensions()
                        % inputs[1].m_IndexedValue->GetNode().name()
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }

        const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");

        CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
        // Reshape the 1D bias so it lines up with the channel dimension of input0.
        input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
    }
    else
    {
        // Plain Add: reshape whichever operand is 1D so it broadcasts against
        // the other (both may be reshaped in principle).
        if (input0Info.GetNumDimensions() == 1)
        {
            const bool isNHWC = true;
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
        }

        if (input1Info.GetNumDimensions() == 1)
        {
            const bool isNHWC = true;
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
        }
    }

    IConnectableLayer* const layer = m_Network->AddAdditionLayer(nodeDef.name().c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // Output shape: when the original ranks matched, take the element-wise max
    // of the two shapes (broadcast result); otherwise adopt the info of the
    // higher-rank operand's (possibly reshaped) slot.
    if (input0Info.GetNumDimensions() == input1Info.GetNumDimensions())
    {
        const TensorShape& input0Shape = input0Info.GetShape();
        const TensorShape& input1Shape = input1Info.GetShape();

        std::vector<unsigned int> outputShape;
        outputShape.reserve(input0Shape.GetNumDimensions());
        TensorInfo outputInfo(input0Info);

        for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
        {
            outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
        }

        outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));

        layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    }
    else if (input0Info.GetNumDimensions() == 1 && isBiasAdd == false)
    {
        layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
    }
    else
    {
        layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
3043
saoste01bbd40612018-08-28 15:41:51 +01003044ParsedTfOperationPtr TfParser::AddRealDivLayer(const tensorflow::NodeDef& nodeDef)
3045{
3046 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3047
3048 IConnectableLayer* const layer = m_Network->AddDivisionLayer(nodeDef.name().c_str());
3049 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3050 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3051
3052 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3053 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3054
3055
3056 if (input0NumDims < input1NumDims)
3057 {
3058 const bool isNHWC = true;
3059 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3060 }
3061 if (input1NumDims < input0NumDims)
3062 {
3063 const bool isNHWC = true;
3064 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3065 }
3066
3067 input0Slot->Connect(layer->GetInputSlot(0));
3068 input1Slot->Connect(layer->GetInputSlot(1));
3069
3070 if (input0NumDims < input1NumDims)
3071 {
3072 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3073 }
3074 else
3075 {
3076 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3077
3078 }
3079 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3080}
3081
Sadik Armagan975c09a2018-12-04 10:02:08 +00003082ParsedTfOperationPtr TfParser::AddMaximumLayer(const tensorflow::NodeDef& nodeDef)
3083{
3084 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3085
3086 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3087 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3088
3089 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3090 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3091
3092 if (input0NumDims < input1NumDims)
3093 {
3094 const bool isNHWC = true;
3095 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3096 }
3097 if (input1NumDims < input0NumDims)
3098 {
3099 const bool isNHWC = true;
3100 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3101 }
3102
3103 IConnectableLayer* const layer = m_Network->AddMaximumLayer(nodeDef.name().c_str());
3104
3105 input0Slot->Connect(layer->GetInputSlot(0));
3106 input1Slot->Connect(layer->GetInputSlot(1));
3107
3108 TensorInfo outputInfo = input0Slot->GetTensorInfo();
3109 std::vector<unsigned int> outputShape;
3110
3111 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
3112 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
3113
3114 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
3115 {
3116 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3117 }
3118
3119 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
3120 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3121
3122 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3123}
3124
telsoa01c577f2c2018-08-31 09:22:23 +01003125IConnectableLayer* TfParser::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
3126{
3127 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3128
3129 IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
3130 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3131 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3132
3133 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3134 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3135
3136 if (input0NumDims < input1NumDims)
3137 {
3138 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003139 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01003140 }
3141 if (input1NumDims < input0NumDims)
3142 {
3143 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003144 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01003145 }
3146
3147 input0Slot->Connect(layer->GetInputSlot(0));
3148 input1Slot->Connect(layer->GetInputSlot(1));
3149
3150 if (input0NumDims < input1NumDims)
3151 {
3152 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3153 }
3154 else
3155 {
3156 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3157 }
3158 return layer;
3159}
3160
surmeh01bceff2f2018-03-29 16:29:27 +01003161IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
3162 const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
3163{
telsoa01c577f2c2018-08-31 09:22:23 +01003164 // Finds bias const (if applicable).
surmeh01bceff2f2018-03-29 16:29:27 +01003165 ParsedConstTfOperation<float>* biasNode = nullptr;
3166 if (addNodeDef != nullptr)
3167 {
3168 std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
telsoa01c577f2c2018-08-31 09:22:23 +01003169 // Finds our inputs.
surmeh01bceff2f2018-03-29 16:29:27 +01003170 if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
3171 {
3172 biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
3173 }
3174 else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
3175 {
3176 biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
3177 }
3178 else
3179 {
telsoa01c577f2c2018-08-31 09:22:23 +01003180 throw ParseException(
3181 boost::str(
3182 boost::format(
3183 "ArmNN only supports fully connected layers with constant bias. "
3184 "Inputs %1% and %2%. AddNode %3%. MatMulNode %4% %5%")
3185 % addInputs[0].m_IndexedValue->GetNode().name()
3186 % addInputs[1].m_IndexedValue->GetNode().name()
3187 % addNodeDef->name()
3188 % matMulNodeDef.name()
3189 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003190 }
3191 }
3192
telsoa01c577f2c2018-08-31 09:22:23 +01003193 // Finds matmul inputs.
surmeh01bceff2f2018-03-29 16:29:27 +01003194 ParsedConstTfOperation<float>* weightNode = nullptr;
3195 ParsedTfOperation* inputNode = nullptr;
3196 unsigned int inputIdx = 0;
3197 std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
3198 if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
3199 {
3200 weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
3201 inputNode = mulInputs[1].m_IndexedValue;
3202 inputIdx = mulInputs[1].m_Index;
3203 }
3204 else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
3205 {
3206 weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
3207 inputNode = mulInputs[0].m_IndexedValue;
3208 inputIdx = mulInputs[0].m_Index;
3209 }
3210 else
3211 {
telsoa01c577f2c2018-08-31 09:22:23 +01003212 throw ParseException(
3213 boost::str(
3214 boost::format(
3215 "ArmNN only supports fully connected layers with constant weights. "
3216 "Inputs %1% and %2%. MatMulNode %3% %4%")
3217 % mulInputs[0].m_IndexedValue->GetNode().name()
3218 % mulInputs[1].m_IndexedValue->GetNode().name()
3219 % matMulNodeDef.name()
3220 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003221 }
3222
3223 std::vector<float> weightTensorData;
telsoa01c577f2c2018-08-31 09:22:23 +01003224 // Handles weight.
Matteo Martincigh482ca852018-12-12 09:20:55 +00003225 ConstTensor weights = weightNode->GetConstTensor(weightTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01003226
3227 FullyConnectedDescriptor desc;
3228 desc.m_BiasEnabled = addNodeDef != nullptr;
3229
3230 IConnectableLayer* layer = nullptr;
Matteo Martincighfc598e12019-05-14 10:36:13 +01003231 Optional<ConstTensor> optionalBiases;
3232 std::vector<float> biasTensorData;
telsoa01c577f2c2018-08-31 09:22:23 +01003233 // Makes the layer.
surmeh01bceff2f2018-03-29 16:29:27 +01003234 if (addNodeDef != nullptr)
3235 {
Matteo Martincigh482ca852018-12-12 09:20:55 +00003236 ConstTensor biases = biasNode->GetConstTensor(biasTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01003237
3238 if (weights.GetShape()[1] != biases.GetShape()[0])
3239 {
telsoa01c577f2c2018-08-31 09:22:23 +01003240 throw ParseException(
3241 boost::str(
3242 boost::format(
3243 "Shape of matmul weights and bias do not match. "
3244 "AddNode %1%. MatMulNode %2% %3%")
3245 % addNodeDef->name()
3246 % matMulNodeDef.name()
3247 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003248 }
3249
Matteo Martincighfc598e12019-05-14 10:36:13 +01003250 optionalBiases = Optional<ConstTensor>(biases);
surmeh01bceff2f2018-03-29 16:29:27 +01003251 }
Matteo Martincighfc598e12019-05-14 10:36:13 +01003252 layer = m_Network->AddFullyConnectedLayer(desc, weights, optionalBiases, armnnLayerName);
surmeh01bceff2f2018-03-29 16:29:27 +01003253
3254 BOOST_ASSERT(layer != nullptr);
3255
3256 inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
3257 unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];
3258
telsoa01c577f2c2018-08-31 09:22:23 +01003259 // Handles output.
surmeh01bceff2f2018-03-29 16:29:27 +01003260 TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
3261 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3262 return layer;
3263}
3264
3265void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
3266{
telsoa01c577f2c2018-08-31 09:22:23 +01003267 // Gets the type of the node (assume float).
surmeh01bceff2f2018-03-29 16:29:27 +01003268 tensorflow::DataType type = tensorflow::DT_FLOAT;
3269 if (nodeDef.attr().count("T") != 0)
3270 {
3271 auto attr = nodeDef.attr().at("T");
3272 type = attr.type();
3273 }
3274 else if (nodeDef.attr().count("dtype") != 0)
3275 {
3276 auto attr = nodeDef.attr().at("dtype");
3277 type = attr.type();
3278 }
3279
Ferran Balaguerc602f292019-02-08 17:09:55 +00003280 if ((type != tensorflow::DT_FLOAT && type != tensorflow::DT_INT32) && nodeDef.op() != "Const")
surmeh01bceff2f2018-03-29 16:29:27 +01003281 {
telsoa01c577f2c2018-08-31 09:22:23 +01003282 throw ParseException(
3283 boost::str(
3284 boost::format(
Ferran Balaguerc602f292019-02-08 17:09:55 +00003285 "Currently only FLOAT and INT32 are supported for tensorflow nodes (apart from Const). "
telsoa01c577f2c2018-08-31 09:22:23 +01003286 "Got %1% for Node %2% %3%")
3287 % tensorflow::DataType_Name(type)
3288 % nodeDef.name()
3289 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003290 }
3291
3292 const std::string& operation = nodeDef.op();
narpra016f37f832018-12-21 18:30:00 +00003293 auto itControlInput = std::find(m_ControlInputs.begin(), m_ControlInputs.end(), operation);
3294 if (itControlInput != m_ControlInputs.end())
3295 {
3296 // We currently allow Control Input from TensorFlow graph but we ignore them from ArmNN graph.
3297 return;
3298 }
surmeh01bceff2f2018-03-29 16:29:27 +01003299 auto it = ms_OperationNameToParsingFunctions.find(operation);
3300 if (it != ms_OperationNameToParsingFunctions.end())
3301 {
3302 auto func = it->second;
3303 ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
3304 ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
3305
telsoa01c577f2c2018-08-31 09:22:23 +01003306 // Stores the parsed operation so that dependent layers can connect to it.
surmeh01bceff2f2018-03-29 16:29:27 +01003307 auto it = m_ParsedTfOperations.find(nodeDef.name());
3308 if (it != m_ParsedTfOperations.end())
3309 {
3310 throw ParseException(boost::str(boost::format("Name %1% used by more than one node") % nodeDef.name()));
3311 }
3312 m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
3313
telsoa01c577f2c2018-08-31 09:22:23 +01003314 // If this node was requested as an output from the network, then adds an ArmNN output layer.
surmeh01bceff2f2018-03-29 16:29:27 +01003315 if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
3316 m_RequestedOutputs.end())
3317 {
3318 auto outId = ParseOutputId(nodeDef.name());
3319 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
3320 IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
3321
3322 TensorInfo tensorInfo = prevSlot.GetTensorInfo();
3323
3324 IConnectableLayer* outputLayer = m_Network->AddOutputLayer(layerId, nodeDef.name().c_str());
3325
3326 prevSlot.Connect(outputLayer->GetInputSlot(0));
3327
3328 TrackOutputBinding(outputLayer, layerId, tensorInfo);
3329 }
3330 }
3331 else
3332 {
telsoa01c577f2c2018-08-31 09:22:23 +01003333 throw ParseException(
3334 boost::str(
3335 boost::format(
3336 "Unsupported operation %1% in tensorflow::GraphDef %2%")
3337 % operation
3338 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003339 }
3340}
3341
3342void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
3343{
telsoa01c577f2c2018-08-31 09:22:23 +01003344 // Adds all nodes to our map.
surmeh01bceff2f2018-03-29 16:29:27 +01003345 m_NodesByName.clear();
3346 m_NetworkInputsBindingInfo.clear();
3347 m_NetworkOutputsBindingInfo.clear();
3348
3349 for (int i = 0; i < graphDef.node_size(); ++i)
3350 {
3351 const tensorflow::NodeDef& node = graphDef.node(i);
3352 m_NodesByName[node.name()] = &node;
3353 }
3354
Francis Murtaghbb190a62019-04-04 11:16:29 +01003355 // Checks that the input nodes the user has requested exist.
3356 for (const auto& pair : m_InputShapes)
3357 {
3358 const std::string& requestedInputName = pair.first;
3359 auto nodeIt = m_NodesByName.find(requestedInputName);
3360 if (nodeIt == m_NodesByName.end())
3361 {
3362 throw ParseException(
3363 boost::str(
3364 boost::format(
3365 "Couldn't find requested input node '%1%' in graph %2%")
3366 % requestedInputName
3367 % CHECK_LOCATION().AsString()));
3368 }
3369 }
3370
telsoa01c577f2c2018-08-31 09:22:23 +01003371 // Finds the output nodes the user requested.
surmeh01bceff2f2018-03-29 16:29:27 +01003372 std::vector<const tensorflow::NodeDef*> targetNodes;
3373 for (const std::string& requestedOutputName : m_RequestedOutputs)
3374 {
3375 auto nodeIt = m_NodesByName.find(requestedOutputName);
3376 if (nodeIt == m_NodesByName.end())
3377 {
telsoa01c577f2c2018-08-31 09:22:23 +01003378 throw ParseException(
3379 boost::str(
3380 boost::format(
3381 "Couldn't find requested output node '%1%' in graph %2%")
3382 % requestedOutputName
3383 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003384 }
3385 targetNodes.push_back(nodeIt->second);
3386 }
3387
telsoa01c577f2c2018-08-31 09:22:23 +01003388 // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01003389 std::vector<const tensorflow::NodeDef*> sortedNodes;
3390 if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
3391 targetNodes,
3392 [this](const tensorflow::NodeDef* node)
3393 {
3394 auto outputs = GetTfInputNodes(*node);
3395 std::vector<const tensorflow::NodeDef*> nodesOnly;
3396 for (const auto & o : outputs) {
3397 nodesOnly.push_back(o.m_IndexedValue);
3398 }
3399 return nodesOnly;
3400 },
3401 sortedNodes))
3402 {
telsoa01c577f2c2018-08-31 09:22:23 +01003403 throw ParseException(
3404 boost::str(
3405 boost::format(
3406 "Cycle detected in graph %1%")
3407 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003408 }
3409
telsoa01c577f2c2018-08-31 09:22:23 +01003410 // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01003411 for (const auto& it : sortedNodes)
3412 {
3413 const tensorflow::NodeDef& currentNode = *it;
3414 LoadNodeDef(currentNode, graphDef);
3415 }
3416}
3417
3418INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
3419 const std::map<std::string, TensorShape>& inputShapes,
3420 const std::vector<std::string>& requestedOutputs)
3421{
3422 FILE* fd = fopen(graphFile, "r");
3423
3424 if (fd == nullptr)
3425 {
telsoa01c577f2c2018-08-31 09:22:23 +01003426 throw FileNotFoundException(
3427 boost::str(
3428 boost::format(
3429 "Graph file %1% failed to open %2%")
3430 % graphFile
3431 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003432 }
3433
telsoa01c577f2c2018-08-31 09:22:23 +01003434 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003435 tensorflow::GraphDef graphDef;
3436 auto input = new google::protobuf::io::FileInputStream(fileno(fd));
3437 bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
3438 delete input;
3439 fclose(fd);
3440
3441 if (!success)
3442 {
telsoa01c577f2c2018-08-31 09:22:23 +01003443 throw ParseException(
3444 boost::str(
3445 boost::format(
3446 "Failed to parse graph file %1%")
3447 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003448 }
3449
3450 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3451}
3452
3453INetworkPtr TfParser::CreateNetworkFromString(const char* protoText,
3454 const std::map<std::string, TensorShape>& inputShapes,
3455 const std::vector<std::string>& requestedOutputs)
3456{
telsoa01c577f2c2018-08-31 09:22:23 +01003457 // Parses the string into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003458 tensorflow::GraphDef graphDef;
3459 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
3460
3461 if (!success)
3462 {
telsoa01c577f2c2018-08-31 09:22:23 +01003463 throw ParseException(
3464 boost::str(
3465 boost::format(
3466 "Failed to parse graph file %1%")
3467 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003468 }
3469
3470 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3471}
3472
3473INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
3474 const std::map<std::string, TensorShape>& inputShapes,
3475 const std::vector<std::string>& requestedOutputs)
3476{
3477 FILE* fd = fopen(graphFile, "rb");
3478
3479 if (fd == nullptr)
3480 {
telsoa01c577f2c2018-08-31 09:22:23 +01003481 throw FileNotFoundException(
3482 boost::str(
3483 boost::format(
3484 "Graph file %1% failed to open %2%")
3485 % graphFile
3486 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003487 }
3488
telsoa01c577f2c2018-08-31 09:22:23 +01003489 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003490 tensorflow::GraphDef graphDef;
3491
3492 google::protobuf::io::FileInputStream inStream(fileno(fd));
3493 google::protobuf::io::CodedInputStream codedStream(&inStream);
3494 codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
3495 bool success = graphDef.ParseFromCodedStream(&codedStream);
3496 fclose(fd);
3497
3498 if (!success)
3499 {
telsoa01c577f2c2018-08-31 09:22:23 +01003500 throw ParseException(
3501 boost::str(
3502 boost::format(
3503 "Failed to parse protobuf file %1% %2%")
3504 % graphFile
3505 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003506 }
3507
3508 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3509}
3510
3511INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
3512 const std::map<std::string, TensorShape>& inputShapes,
3513 const std::vector<std::string>& requestedOutputs)
3514{
3515 m_Network = INetwork::Create();
3516
3517 m_InputShapes = inputShapes;
3518 if (requestedOutputs.size() == 0)
3519 {
telsoa01c577f2c2018-08-31 09:22:23 +01003520 throw ParseException(
3521 boost::str(
3522 boost::format(
3523 "requestedOutputs must have at least one entry %1%")
3524 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003525 }
3526 m_RequestedOutputs = requestedOutputs;
3527
3528 try
3529 {
3530 LoadGraphDef(graphDef);
3531 }
3532 catch (const ParseException& e)
3533 {
3534 Cleanup();
3535 throw e;
3536 }
3537
3538 Cleanup();
3539
3540 return std::move(m_Network);
3541}
3542
/// Clears all per-graph parser state so the same TfParser instance can be
/// reused for another graph. The network binding-info maps are reset at the
/// start of LoadGraphDef instead, since callers may query them after parsing.
void TfParser::Cleanup()
{
    // Cleanup, in case we reuse this parser.
    m_InputShapes.clear();
    m_RequestedOutputs.clear();
    m_NodesByName.clear();
    m_ParsedTfOperations.clear();
}
3551
3552BindingPointInfo TfParser::GetNetworkInputBindingInfo(const std::string& name) const
3553{
3554 return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
3555}
3556
3557BindingPointInfo TfParser::GetNetworkOutputBindingInfo(const std::string& name) const
3558{
3559 return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
3560}
3561
3562std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(const std::string& layerName,
3563 const char* bindingPointDesc,
3564 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3565{
3566 auto it = nameToBindingInfo.find(layerName);
3567 if (it == nameToBindingInfo.end())
3568 {
telsoa01c577f2c2018-08-31 09:22:23 +01003569 throw InvalidArgumentException(
3570 boost::str(
3571 boost::format(
3572 "Unknown %1% '%2%' %3%")
3573 % bindingPointDesc
3574 % layerName
3575 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003576 }
3577 return it->second;
3578}
3579
3580void TfParser::TrackInputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
3581{
3582 return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
3583}
3584
3585void TfParser::TrackOutputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
3586{
3587 return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
3588}
3589
3590void TfParser::TrackBindingPoint(IConnectableLayer* layer,
3591 LayerBindingId id,
3592 const TensorInfo& tensorInfo,
3593 const char* bindingPointDesc,
3594 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3595{
3596 const std::string layerName = layer->GetName();
3597 auto it = nameToBindingInfo.find(layerName);
3598 if (it == nameToBindingInfo.end())
3599 {
3600 nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
3601 }
3602 else
3603 {
telsoa01c577f2c2018-08-31 09:22:23 +01003604 throw ParseException(
3605 boost::str(
3606 boost::format(
3607 "Id %1% used by more than one %2% layer %3%")
3608 % id
3609 % bindingPointDesc
3610 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003611 }
3612}
3613
3614} // namespace armnnTfParser