blob: 210b825e43919ee8a9d0a23260a604701efde276 [file] [log] [blame]
surmeh01bceff2f2018-03-29 16:29:27 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
surmeh01bceff2f2018-03-29 16:29:27 +01004//
5#include "TfParser.hpp"
6
7#include <armnn/INetwork.hpp>
8#include <armnn/Utils.hpp>
9#include <armnn/TypesUtils.hpp>
10#include <armnn/Exceptions.hpp>
11#include <armnn/Descriptors.hpp>
12
13#include <GraphTopologicalSort.hpp>
Sadik Armagan479045b2018-10-01 11:51:37 +010014#include <ParserHelper.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010015#include <Permute.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010016#include <VerificationHelpers.hpp>
Matteo Martincigh46315822018-11-28 16:22:36 +000017#include <DataLayoutIndexed.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010018
19#include <google/protobuf/io/zero_copy_stream_impl.h>
20#include <google/protobuf/text_format.h>
21
22#include "tensorflow/core/framework/graph.pb.h"
23#include "tensorflow/core/framework/node_def.pb.h"
24#include "tensorflow/core/framework/types.pb.h"
25#include "tensorflow/core/framework/tensor.pb.h"
26#include "tensorflow/core/framework/tensor_shape.pb.h"
27
28#include <boost/assert.hpp>
29#include <boost/format.hpp>
30#include <boost/core/ignore_unused.hpp>
31#include <boost/log/trivial.hpp>
32#include <boost/numeric/conversion/cast.hpp>
33#include <boost/polymorphic_cast.hpp>
34
35#include <memory>
36#include <sstream>
37#include <numeric>
38#include <functional>
39
Matteo Martincigh46315822018-11-28 16:22:36 +000040using namespace armnnUtils;
surmeh01bceff2f2018-03-29 16:29:27 +010041using namespace armnn;
42
43namespace armnnTfParser
44{
45namespace
46{
47
// Permutation vectors used to move tensors between TensorFlow's NHWC layout
// and ArmNN's native NCHW layout (and back again).
const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
50
51IConnectableLayer* AddSwizzleLayer(INetwork& network, IOutputSlot& input, const PermutationVector& mapping,
52 const std::string& name)
53{
telsoa01c577f2c2018-08-31 09:22:23 +010054 // Adds swizzle layer.
surmeh01bceff2f2018-03-29 16:29:27 +010055 IConnectableLayer* const layer = network.AddPermuteLayer(mapping, name.c_str());
56
telsoa01c577f2c2018-08-31 09:22:23 +010057 // Connects intput to swizzle layer.
surmeh01bceff2f2018-03-29 16:29:27 +010058 input.Connect(layer->GetInputSlot(0));
59
telsoa01c577f2c2018-08-31 09:22:23 +010060 // Sets up swizzled output.
surmeh01bceff2f2018-03-29 16:29:27 +010061 const TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mapping);
62 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
63
64 return layer;
65}
66
67IConnectableLayer* SwizzleInDeswizzleOut(INetwork& network, IOutputSlot& input, IConnectableLayer& layer,
68 const std::string& name)
69{
telsoa01c577f2c2018-08-31 09:22:23 +010070 // Adds swizzle layer.
surmeh01bceff2f2018-03-29 16:29:27 +010071 IConnectableLayer* const swizzleLayer = AddSwizzleLayer(network, input, NHWCToArmNN, "swizzle_for-" + name);
72
telsoa01c577f2c2018-08-31 09:22:23 +010073 // Connects swizzledInput to layer.
surmeh01bceff2f2018-03-29 16:29:27 +010074 swizzleLayer->GetOutputSlot(0).Connect(layer.GetInputSlot(0));
75
telsoa01c577f2c2018-08-31 09:22:23 +010076 // Adds deswizzle layer.
surmeh01bceff2f2018-03-29 16:29:27 +010077 IConnectableLayer* const deswizzleLayer = AddSwizzleLayer(network, layer.GetOutputSlot(0), ArmNNToNHWC,
78 "deswizzle_for-" + name);
79
80 return deswizzleLayer;
81}
82
83template <typename Callable>
84void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
85 const std::string& attribName,
86 tensorflow::AttrValue::ValueCase expectedValueCase,
87 Callable callable)
88{
89 auto iter = nodeDef.attr().find(attribName);
90 if (iter != nodeDef.attr().end())
91 {
92 const auto& attrValue = iter->second;
93 if (attrValue.value_case() == expectedValueCase)
94 {
95 callable(attrValue);
96 }
97 else
98 {
telsoa01c577f2c2018-08-31 09:22:23 +010099 throw ParseException(
100 boost::str(
101 boost::format(
102 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
103 "but found %4% instead %5%")
104 % attribName
105 % nodeDef.name()
106 % static_cast<int>(expectedValueCase)
107 % static_cast<int>(attrValue.value_case())
108 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100109 }
110 }
111 else
112 {
telsoa01c577f2c2018-08-31 09:22:23 +0100113 throw ParseException(
114 boost::str(
115 boost::format(
116 "Could not find required attribute %1% in node %2% %3%")
117 % attribName
118 % nodeDef.name()
119 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100120 }
121}
122
123template <typename Callable>
124void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
125 const std::string& attribName,
126 tensorflow::AttrValue::ValueCase expectedValueCase,
127 Callable callable)
128{
129 auto iter = nodeDef.attr().find(attribName);
130 if (iter != nodeDef.attr().end())
131 {
132 const auto& attrValue = iter->second;
133 if (attrValue.value_case() == expectedValueCase)
134 {
135 callable(attrValue);
136 }
137 else
138 {
telsoa01c577f2c2018-08-31 09:22:23 +0100139 throw ParseException(
140 boost::str(
141 boost::format(
142 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
143 "but found %4% instead %5%")
144 % attribName
145 % nodeDef.name()
146 % static_cast<int>(expectedValueCase)
147 % static_cast<int>(attrValue.value_case())
148 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100149 }
150 }
151}
152
153float ReadMandatoryNodeFloatAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
154{
155 float attribValue = 0.0f;
156 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
157 [&attribValue](const tensorflow::AttrValue& attrValue)
158 {
159 attribValue = attrValue.f();
160 });
161 return attribValue;
162}
163
Conor Kennedyc2130a02018-12-05 11:05:54 +0000164int32_t ReadMandatoryNodeInt32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
165{
166 int32_t attribValue = 0u;
167 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
168 [&attribValue](const tensorflow::AttrValue& attrValue)
169 {
170 attribValue = static_cast<int32_t>(attrValue.i());
171 });
172 return attribValue;
173}
174
surmeh01bceff2f2018-03-29 16:29:27 +0100175uint32_t ReadMandatoryNodeUint32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
176{
177 uint32_t attribValue = 0u;
178 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
179 [&attribValue](const tensorflow::AttrValue& attrValue)
180 {
181 attribValue = static_cast<uint32_t>(attrValue.i());
182 });
183 return attribValue;
184}
185
186std::string ReadMandatoryNodeStringAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
187{
188 std::string attribValue = "";
189 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
190 [&attribValue](const tensorflow::AttrValue& attrValue)
191 {
192 attribValue = attrValue.s();
193 });
194 return attribValue;
195}
196
197std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
198 const std::string& name)
199{
200 std::vector<uint32_t> attriList;
201 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
202 [&attriList](const tensorflow::AttrValue& attrValue)
203 {
204 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
205 {
206 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
207 }
208 });
209
210 return attriList;
211}
212
213std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
214 const std::string& name)
215{
216 std::vector<uint32_t> attriList;
217 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
218 [&attriList](const tensorflow::AttrValue& attrValue)
219 {
220 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
221 {
222 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
223 }
224 });
225
226 return attriList;
227}
228
229bool ReadOptionalNodeBoolAttribute(const tensorflow::NodeDef& nodeDef,
230 const std::string& name,
231 bool defaultValue = false)
232{
233 bool attribValue = defaultValue;
234 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
235 [&attribValue](const tensorflow::AttrValue& attrValue)
236 {
237 attribValue = attrValue.b();
238 });
239 return attribValue;
240}
241
242tensorflow::DataType ReadMandatoryNodeTypeAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
243{
244 tensorflow::DataType attribValue = tensorflow::DT_INVALID;
245 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
246 [&attribValue](const tensorflow::AttrValue& attrValue)
247 {
248 attribValue = attrValue.type();
249 });
250 return attribValue;
251}
252
/// Computes the TensorInfo resulting from reshaping 'input' to 'targetDims',
/// resolving at most one -1 ("stretch") dimension from the input's element count
/// (TensorFlow Reshape semantics). Throws ParseException if more than one -1 appears.
TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& targetDims)
{
    std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
    const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);

    if (stretchDim != targetDims.end())
    {
        // Only a single stretch dimension is allowed.
        if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "At most one component of shape can be -1 %1%")
                        % CHECK_LOCATION().AsString()));
        }

        // Multiplying all dims (including the single -1) negates the product, so
        // accumulating from an initial -1 yields the positive product of the known dims.
        auto targetNumElements =
            boost::numeric_cast<unsigned int>(
                std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
        auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
        // NOTE(review): assumes input.GetNumElements() is an exact multiple of
        // targetNumElements — a remainder would be silently truncated. TODO confirm callers validate this.
        outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
    }

    TensorInfo reshapeInfo = input;
    reshapeInfo.SetShape(TensorShape{ static_cast<unsigned int>(outDims.size()), outDims.data() });

    return reshapeInfo;
}
281
// We need the input0Slot to guide the reshape for input1Slot.
// Inserts a Reshape layer so the tensor on input1Slot can be broadcast against
// input0Slot: the result has input0's rank, with every dimension 1 except the
// "channel" dimension, which takes input1's length. Returns the reshaped slot.
// NOTE(review): reshapedDimensions[matchDim] reads input1Info.GetShape()[0],
// which presumes input1 is effectively 1-D — TODO confirm with callers.
IOutputSlot* AddBroadcastReshapeLayer(IOutputSlot* input0Slot, IOutputSlot* input1Slot, bool isNHWC,
                                      INetwork& m_Network, const tensorflow::NodeDef& nodeDef)
{
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();
    const TensorInfo inputTensorInfo = input0Slot->GetTensorInfo();
    // Channel dimension index: the last dim for NHWC, numDims - 3 (i.e. dim 1 of a 4-D tensor) for NCHW.
    const unsigned int matchDim = inputTensorInfo.GetNumDimensions() - (isNHWC ? 1 : 3);
    std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
    std::fill_n(reshapedDimensions.begin(), inputTensorInfo.GetNumDimensions(), 1);
    reshapedDimensions[matchDim] = input1Info.GetShape()[0];

    armnn::TensorInfo reshapedInfo = input1Info;
    reshapedInfo.SetShape(TensorShape{ inputTensorInfo.GetNumDimensions(), reshapedDimensions.data() });

    const std::string reshapeLayerName = "reshape_for-" + nodeDef.name();
    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
    IConnectableLayer* const reshapeLayer = m_Network.AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());

    input1Slot->Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    // Hand back the reshape layer's output in place of the original slot.
    input1Slot = &reshapeLayer->GetOutputSlot(0);

    return input1Slot;
}
308
309OutputId ParseOutputId(const std::string & name)
310{
311 unsigned int outputNum = 0;
312 size_t colonPos = name.find_last_of(":");
313 if (colonPos != std::string::npos)
314 {
315 int n = std::stoi(name.substr(colonPos+1));
316 if (n<0 || n>100)
317 {
telsoa01c577f2c2018-08-31 09:22:23 +0100318 throw ParseException(
319 boost::str(
320 boost::format(
321 "Output tensor id is out of range for %1% %2%")
322 % name
323 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100324 }
325 outputNum = static_cast<unsigned int>(n);
326 }
327 return OutputId(name.substr(0,colonPos),outputNum);
328}
329
// Validates that FORMAT is one of the two supported TF data layouts ("NHWC"/"NCHW");
// otherwise throws ParseException naming the offending node and op type.
#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \
    if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Unsupported data format %1% passed for %2% node %3%. " \
                    "Only NHWC and NCHW supported %4%") \
                    % FORMAT \
                    % NODE_TYPE \
                    % NODE_DEF.name() \
                    % CHECK_LOCATION().AsString())); \
    }

// Validates that PADDING is one of the two supported TF padding schemes
// ("SAME"/"VALID"); otherwise throws ParseException naming the offending node.
#define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
    if(PADDING != "SAME" && PADDING != "VALID" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Only 'SAME' and 'VALID' padding supported. Got %1% for %2% %3%") \
                    % PADDING \
                    % NODE_DEF.name() \
                    % CHECK_LOCATION().AsString())); \
    } \

355
surmeh01bceff2f2018-03-29 16:29:27 +0100356} // namespace
357
/// Dispatch table mapping each supported TensorFlow op name to the TfParser
/// member function that converts it into ArmNN layers. Ops absent from this
/// table are unsupported by the parser.
const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
    { "Const", &TfParser::ParseConst },
    { "Add", &TfParser::ParseAdd },
    { "BiasAdd", &TfParser::ParseBiasAdd },
    { "Identity", &TfParser::ParseIdentity },
    { "Conv2D", &TfParser::ParseConv2D },
    { "DepthwiseConv2dNative", &TfParser::ParseDepthwiseConv2D },
    { "ExpandDims", &TfParser::ParseExpandDims },
    { "FusedBatchNorm", &TfParser::ParseFusedBatchNorm },
    { "ConcatV2", &TfParser::ParseConcat },
    { "LRN", &TfParser::ParseLrn },
    { "MatMul", &TfParser::ParseMatMul },
    { "Mul", &TfParser::ParseMul },
    { "Placeholder", &TfParser::ParsePlaceholder },
    { "RealDiv", &TfParser::ParseRealDiv },
    { "Relu", &TfParser::ParseRelu },
    { "Relu6", &TfParser::ParseRelu6 },
    { "Reshape", &TfParser::ParseReshape },
    { "ResizeBilinear", &TfParser::ParseResizeBilinear },
    { "Shape", &TfParser::ParseShape },
    { "Squeeze", &TfParser::ParseSqueeze },
    { "Sigmoid", &TfParser::ParseSigmoid },
    { "Softmax", &TfParser::ParseSoftmax },
    { "Softplus", &TfParser::ParseSoftplus },
    { "Tanh", &TfParser::ParseTanh },
    { "MaxPool", &TfParser::ParseMaxPool },
    { "AvgPool", &TfParser::ParseAvgPool },
    { "Maximum", &TfParser::ParseMaximum },
    { "Minimum", &TfParser::ParseMinimum },
    { "Pad", &TfParser::ParsePad },
    { "Sub", &TfParser::ParseSub },
};
390
/// Creates a heap-allocated TfParser; the caller owns the returned pointer
/// and must release it with ITfParser::Destroy.
ITfParser* ITfParser::CreateRaw()
{
    return new TfParser();
}
395
/// Creates a TfParser wrapped in a smart pointer whose deleter calls
/// ITfParser::Destroy, so the parser is released automatically.
ITfParserPtr ITfParser::Create()
{
    return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
}
400
/// Deletes a parser previously created with CreateRaw/Create.
void ITfParser::Destroy(ITfParser* parser)
{
    delete parser;
}
405
/// Computes TensorFlow 'SAME'-padding amounts for one spatial dimension.
/// With samePadding == false ('VALID'), both paddings are zero. Otherwise the
/// output size is ceil(inputSize / stride) and any extra needed input is split
/// between front and back, with the back receiving the odd element.
inline void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
                                 uint32_t filterSize, bool samePadding,
                                 uint32_t* paddingFront, uint32_t* paddingBack) {
    *paddingFront = 0;
    *paddingBack = 0;

    if (!samePadding) {
        return;
    }

    const uint32_t outputSize = (inputSize + stride - 1) / stride; // ceil division
    const uint32_t neededInput = (outputSize - 1) * stride + filterSize;
    if (neededInput > inputSize) {
        const uint32_t totalPadding = neededInput - inputSize;
        *paddingFront = totalPadding / 2;
        *paddingBack = totalPadding - *paddingFront;
    }
}
421
/// Convenience wrapper around CalculateSamePadding producing head/tail padding
/// for one dimension by reference instead of via pointers.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 bool samePadding)
{
    CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
}
427
/// An abstract base class which represents a single tensorflow operation (node)
/// that has been (potentially partially) converted to Armnn.
/// It may not yet have been fully converted into actual Armnn layers.
class ParsedTfOperation
{
public:
    ParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
        : m_Parser(parser)
        , m_Node(node)
    {
    }

    virtual ~ParsedTfOperation() {};

    /// The TensorFlow node this operation was created from.
    const tensorflow::NodeDef& GetNode() const { return m_Node; }

    /// Gets the ArmNN IOutputSlot corresponding to the given output index of the Tensorflow operation.
    /// This may result in the creation of Armnn layers if this was deferred (e.g. see ParsedConstTfOperation).
    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) = 0;

    /// If this operation is an Identity then this will return the 'parent' operation (recursively).
    /// The default implementation returns this operation itself.
    virtual ParsedTfOperation* ResolveIdentityOperations()
    {
        return this;
    }

protected:
    TfParser* m_Parser;                // Non-owning pointer back to the parser that created this operation.
    const tensorflow::NodeDef& m_Node; // The referenced TF node must outlive this object.
};
458
/// An ParsedTfOperation where the Armnn equivalent is a single layer,
/// with output slots that correspond directly to the Tf node outputs.
class SingleLayerParsedTfOperation : public ParsedTfOperation
{
public:
    SingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node, IConnectableLayer* layer)
        : ParsedTfOperation(parser, node)
        , m_Layer(layer)
    {
    }

    /// Returns the layer's output slot matching the TF output index.
    /// Throws ParseException when the index exceeds the layer's slot count.
    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        BOOST_ASSERT(m_Layer);
        // Assumes one-to-one mapping between Tf and armnn output slots.
        unsigned int armnnOutputSlotIdx = tfOutputIndex;
        if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "The requested output slot #%1% "
                        "for %2% does not exist %3%")
                        % armnnOutputSlotIdx
                        % m_Layer->GetName()
                        % CHECK_LOCATION().AsString()));
        }
        return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
    }

protected:
    // The single ArmNN layer backing this operation; null until created for
    // deferred subclasses (see DeferredSingleLayerParsedTfOperation).
    IConnectableLayer* m_Layer;
};
492
/// A SingleLayerParsedTfOperation for deferred layer creation.
/// The ArmNN layer is only built (via CreateLayerDeferred) the first time one
/// of its output slots is actually requested.
class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
{
public:
    DeferredSingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
        : SingleLayerParsedTfOperation(parser, node, nullptr)
    {
    }

    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        if (!m_Layer)
        {
            // First use: materialise the ArmNN layer now.
            CreateLayerDeferred();
        }
        return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
    }

private:
    /// Implemented by subclasses to create m_Layer on demand.
    virtual void CreateLayerDeferred() = 0;
};
514
515
/// Constructs the parser with an empty network handle (null pointer + null deleter).
/// NOTE(review): the INetwork is presumably created later when a graph is parsed
/// (CreateNetworkFrom* methods, not visible in this chunk) — TODO confirm.
TfParser::TfParser()
    : m_Network(nullptr, nullptr)
{
}
520
521
/// Follows chains of TensorFlow "Identity" nodes until a non-Identity node is
/// reached and returns that node; non-Identity nodes are returned unchanged.
/// Throws ParseException if an Identity node is malformed (not exactly one
/// input) or its input cannot be found in m_NodesByName.
/// NOTE(review): recursion assumes no Identity cycles in the graph — TODO confirm.
const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
{
    if (nodeDef->op() != "Identity")
    {
        return nodeDef;
    }

    if (nodeDef->input_size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Identity node should have a single input! %1% has %2% inputs %3%")
                    % nodeDef->name()
                    % nodeDef->input_size()
                    % CHECK_LOCATION().AsString()));
    }

    auto it = m_NodesByName.find(nodeDef->input(0));
    if (it != m_NodesByName.end())
    {
        const tensorflow::NodeDef* inputNode = it->second;
        // The input may itself be an Identity; keep following the chain.
        return ResolveIdentityNode(inputNode);
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Cannot find what the Identity node %1% is linked to! %2%")
                    % nodeDef->name()
                    % CHECK_LOCATION().AsString()));
    }
}
556
/// Returns the TensorFlow nodes (paired with output indices) connected as
/// inputs of nodeDef. Const nodes are treated as having no inputs (their
/// control inputs are ignored). Throws ParseException for '^'-prefixed control
/// inputs on other nodes, or when an input name cannot be resolved.
std::vector<OutputOfConstNodeDef>
TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
{
    std::vector<OutputOfConstNodeDef> ret;

    if (nodeDef.op() == "Const")
    {
        // For some reason const node can have "Control Inputs". We ignore them for now.
        return ret;
    }

    ret.reserve(boost::numeric_cast<size_t>(nodeDef.input_size()));
    for (int j = 0; j < nodeDef.input_size(); ++j)
    {
        OutputId outputId = ParseOutputId(nodeDef.input(j));

        if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Node '%1%' has Control Input '%2%' for input #%3% which is unsupported. %4%")
                        % nodeDef.name()
                        % nodeDef.input(j)
                        % j
                        % CHECK_LOCATION().AsString()));
        }

        auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
        if (inputIt == m_NodesByName.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Can't find node '%1%', which is listed as an input of '%2%' %3%")
                        % nodeDef.input(j)
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
        ret.push_back(OutputOfConstNodeDef(inputIt->second,outputId.m_Index));
    }

    return ret;
}
601
/// Fetches the already-parsed operations connected as inputs of nodeDef,
/// validating that exactly expectedNumInputs are present. Identity operations
/// are transparently resolved to their parents. Throws ParseException on a
/// count mismatch or when an input has not been parsed yet.
std::vector<OutputOfParsedTfOperation>
TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
                                            std::size_t expectedNumInputs)
{
    // Fetches the tensorflow nodes connected as inputs and validate the size.
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
    const std::size_t numInputs = nodes.size();
    if (numInputs != expectedNumInputs)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Unexpected number of inputs for node %1%. Expected %2%, found %3% %4%")
                    % nodeDef.name()
                    % expectedNumInputs
                    % numInputs
                    % CHECK_LOCATION().AsString()));
    }
    // Fetches the corresponding ParsedTfOperation operations
    std::vector<OutputOfParsedTfOperation> result;
    for (auto&& node : nodes)
    {
        auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
        if (it == m_ParsedTfOperations.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Node with name '%1%' has not been parsed %2%")
                        % node.m_IndexedValue->name()
                        % CHECK_LOCATION().AsString()));
        }
        ParsedTfOperation* parsedOp = it->second.get();
        // Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
        parsedOp = parsedOp->ResolveIdentityOperations();
        result.push_back(OutputOfParsedTfOperation(parsedOp,node.m_Index));
    }
    return result;
}
641
/// Parses an "Add" node. When one operand is a MatMul and the other a parsed
/// float Const, the pair is fused into a single ArmNN FullyConnected layer
/// (MatMul supplies the weights multiply, the Const the bias); any other Add
/// becomes a regular addition layer.
ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
    // together as FullyConnected.
    if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
        HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        IConnectableLayer* layer =
            AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
                                   &nodeDef,nodeDef.name().c_str());
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
    }
    else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
             inputs[1].m_IndexedValue->GetNode().op() == "MatMul")
    {
        // Same fusion with the operands the other way round.
        IConnectableLayer* layer =
            AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
                                   &nodeDef,nodeDef.name().c_str());
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
    }
    else
    {
        // Otherwise it's just a regular addition.
        return AddAdditionLayer(nodeDef);
    }
}
670
/// Parses a "BiasAdd" node as an addition layer. The 'true' flag presumably
/// tells AddAdditionLayer to apply bias-add (broadcast) semantics — TODO confirm
/// against AddAdditionLayer's definition (not in this chunk).
ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    return AddAdditionLayer(nodeDef, true);
}
675
/// An ParsedTfOperation which forwards to another (used for Identity nodes).
class ParsedIdentityTfOperation : public ParsedTfOperation
{
public:
    ParsedIdentityTfOperation(TfParser* parser, const tensorflow::NodeDef& node, ParsedTfOperation* representative)
        : ParsedTfOperation(parser, node)
        , m_Representative(representative)
    {
    }

    /// Delegates slot resolution to the operation this Identity forwards to.
    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        BOOST_ASSERT(m_Representative);
        return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
    }

    /// Follows chains of Identity operations to the first non-Identity one.
    virtual ParsedTfOperation* ResolveIdentityOperations() override
    {
        return m_Representative->ResolveIdentityOperations();
    }

private:
    ParsedTfOperation* m_Representative; // Non-owning; the operation this Identity stands for.
};
700
/// Parses an "Identity" node by creating a forwarding operation, so requests
/// for this node's output slots are redirected to the operation feeding it.
ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
    // Any requests for the output slots of this node should be forwarded to the node connected as input.
    return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
}
707
/// An ParsedTfOperation for a Const node.
/// Creation of the armnn ConstLayer is deferred until it is actually needed, because Const nodes are mostly used
/// for weight inputs to MatMul/Conv2D nodes and in these cases armnn doesn't need a ConstLayer.
template <typename T>
class ParsedConstTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    /// Copies tensorInfo.GetNumElements() values from tensorData into internal storage.
    ParsedConstTfOperation(TfParser* parser, const tensorflow::NodeDef& node,
        const T* tensorData, const TensorInfo& tensorInfo)
        : DeferredSingleLayerParsedTfOperation(parser, node),
        m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
        m_TensorInfo(tensorInfo)
    {
        BOOST_ASSERT(tensorInfo.GetDataType() == GetDataType<T>());
    }

    /// Builds the ArmNN ConstantLayer on first use (see DeferredSingleLayerParsedTfOperation).
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
        m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
    }

    /// Returns the constant data as a ConstTensor backed by outputTensorData
    /// (resized and filled by this call). When swizzleForConvolutionWeights is
    /// true the data is permuted from TensorFlow's HWIO filter layout to ArmNN's OIHW.
    ConstTensor GetConstTensor(bool swizzleForConvolutionWeights, std::vector<T>& outputTensorData) const
    {
        // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
        // Tensorflow weights are [H, W, In, Out].
        // ArmNN weights are [Out, In, H, W].
        static const PermutationVector HWIOToOIHW = {2, 3, 1, 0};

        const TensorInfo outInfo = swizzleForConvolutionWeights
                                   ? armnnUtils::Permuted(m_TensorInfo, HWIOToOIHW)
                                   : m_TensorInfo;

        outputTensorData.resize(m_TensorInfo.GetNumElements());

        // Copies or swizzles from the permanent storage into the storage the caller provided.
        if (swizzleForConvolutionWeights)
        {
            armnnUtils::Permute(outInfo.GetShape(), HWIOToOIHW, m_Storage.data(), outputTensorData.data());
        }
        else
        {
            memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.GetNumBytes());
        }
        // Updates the result to point to the user provided storage.
        ConstTensor constTensor(outInfo, outputTensorData);
        return constTensor;
    }

    /// Read-only access to the raw constant data in its original (unswizzled) layout.
    const T* GetStorage() const
    {
        return m_Storage.data();
    }

    /// Describes the constant's shape and data type.
    const TensorInfo& GetTensorInfo() const
    {
        return m_TensorInfo;
    }

private:
    ///< Manages the lifetime of the tensor data.
    std::vector<T> m_Storage;
    ///< Describes the layout of the tensor and points to the data in m_Storage.
    TensorInfo m_TensorInfo;
};
774
telsoa01c577f2c2018-08-31 09:22:23 +0100775DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
776 const tensorflow::NodeDef& nodeDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100777{
778 switch (tfDataType)
779 {
780 case tensorflow::DT_FLOAT:
781 return DataType::Float32;
782 break;
783 case tensorflow::DT_INT32:
784 return DataType::Signed32;
785 break;
786 default:
telsoa01c577f2c2018-08-31 09:22:23 +0100787 throw ParseException(
788 boost::str(
789 boost::format(
790 "Unknown DataType %1% for node %2% %3%")
791 % tensorflow::DataType_Name(tfDataType)
792 % nodeDef.name()
793 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100794 }
795}
796
/// Helper for reading the explicit value list of a TF TensorProto (float_val /
/// int_val) into a raw byte buffer, replicating TensorFlow's rule that a short
/// value list is padded by repeating its last element.
struct ParseTfTensorValueList
{
    // Specialized below for float and int32_t.
    template<typename DataType>
    static void Parse(
        const tensorflow::TensorProto& tfTensor,
        unsigned int dstElements,
        std::vector<int8_t>& outputData);

    /// Copies numSrcElements values of DataType from srcData into dstData
    /// (resized as needed), then fills any remaining destination entries by
    /// repeating the last source value.
    template <typename DataType>
    static void ReadData(const void* srcData, unsigned int numSrcElements,
        std::vector<int8_t>& dstData, unsigned int numDstElements)
    {
        // If there are no entries in the list, perform no action.
        if (numSrcElements == 0)
        {
            return;
        }

        // If no size was provided, use the length of the value list.
        if (numDstElements == 0)
        {
            numDstElements = numSrcElements;
        }

        // Allocates memory.
        dstData.resize(std::max(numSrcElements, numDstElements) * sizeof(DataType));

        const DataType* srcTensor = reinterpret_cast<const DataType*>(srcData);
        DataType* dstTensor = reinterpret_cast<DataType*>(dstData.data());

        // Copies the value list entries into the destination.
        std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);

        if (numDstElements > numSrcElements)
        {
            // Uses the last element in the list to fill the remaining entries.
            std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
        }
    }

};
838
/// Reads the float value list (float_val) of a TensorProto into outputData.
template <>
void ParseTfTensorValueList::Parse<float>(const tensorflow::TensorProto& tfTensor,
    unsigned int dstElements, std::vector<int8_t>& outputData)
{
    ReadData<float>(tfTensor.float_val().data(), static_cast<unsigned int>(tfTensor.float_val_size()),
        outputData, dstElements);
}
846
/// Reads the int32 value list (int_val) of a TensorProto into outputData.
template <>
void ParseTfTensorValueList::Parse<int32_t>(const tensorflow::TensorProto& tfTensor,
    unsigned int dstElements, std::vector<int8_t>& outputData)
{
    ReadData<int32_t>(tfTensor.int_val().data(), static_cast<unsigned int>(tfTensor.int_val_size()),
        outputData, dstElements);
}
854
/// Factory functor: constructs a std::unique_ptr<OperatorType<DataType>> from a
/// parser, node and extra constructor arguments. Used with InvokeParseFunction
/// for runtime data-type dispatch.
template <template<typename> class OperatorType, typename T = int8_t>
struct MakeTfOperation
{
    template<typename DataType, class... Args>
    inline static std::unique_ptr<OperatorType<DataType>> Parse(TfParser* parser, const tensorflow::NodeDef& node,
        Args&&... args)
    {
        return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
    }
};
865
// Specialisation for ParsedConstTfOperation: reinterprets the raw byte buffer as an
// array of the concrete DataType before handing it to the constructor, together with
// the tensor's shape/type info.
template <>
struct MakeTfOperation<ParsedConstTfOperation>
{
    template<typename DataType, class... Args>
    inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(TfParser* parser,
        const tensorflow::NodeDef& node, const std::vector<int8_t>& tensorData, const TensorInfo& tensorInfo)
    {
        return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
            reinterpret_cast<const DataType*>(tensorData.data()), tensorInfo);
    }
};
877
878template <class FuncType>
879struct InvokeParseFunction
880{
881 template<class ResType, class... Args>
882 inline static ResType Result(DataType dataType, Args&&... args)
883 {
884 if (dataType == DataType::Float32)
885 {
886 return FuncType::template Parse<float>(std::forward<Args>(args)...);
887 }
888 else if (dataType == DataType::Signed32)
889 {
890 return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
891 }
892
893 return ResType();
894 }
895
896 template<class... Args>
897 inline static void Result(DataType dataType, Args&&... args)
898 {
899 if (dataType == DataType::Float32)
900 {
901 FuncType::template Parse<float>(std::forward<Args>(args)...);
902 }
903 else if (dataType == DataType::Signed32)
904 {
905 FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
906 }
907 }
908};
909
// Parses a TF "Const" node into a typed ParsedConstTfOperation. The constant's data
// may come either from the "value" attribute's value list or from its packed
// tensor_content bytes; the shape comes from the attribute's tensor_shape, falling
// back to a 1D shape inferred from the value-list length.
ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    BOOST_ASSERT(nodeDef.op() == "Const");

    // A Const node without a "value" attribute is malformed.
    if (nodeDef.attr().count("value") == 0)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Value not found for Const node - %1% %2%")
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
    const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
    const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "dtype");

    // Extracts each dimension's size from the proto shape.
    const auto GetDimensionSize = [](auto& d) { return d.size(); };

    std::vector<unsigned int> dimensionSizes;
    std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
        std::back_inserter(dimensionSizes), GetDimensionSize);

    // Calculates number of elements.
    const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
    unsigned int numElements = 0U;

    // An empty dimension list leaves numElements at 0, which below means "shape unknown".
    if (!dimensionSizes.empty())
    {
        numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
                                      1U, std::multiplies<unsigned int>());
    }

    std::vector<int8_t> tensorData;

    // Get tensor data from the list of values attribute.
    if (tfTensor.tensor_content().empty())
    {
        InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);

        // If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
        // tensor of the provided number of elements.
        if (numElements == 0)
        {
            const unsigned int tfNumElements =
                static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
            dimensionSizes.push_back(tfNumElements);
        }
    }
    // Gets tensor data from tensor content attribute.
    else
    {
        tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());

        // Checks if a tensor shape is defined for the tensor content.
        if (numElements == 0)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "No tensor shape found for Const node - %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }
    }

    // Const node requires at least a list of values or a content attribute.
    if (tensorData.empty())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "No tensor data found for Const node - %1% %2%")
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
                                dimensionSizes.data(),
                                dataType);

    // If we have a list of values, then the length of the list must be
    // less than or equal to the number of elements implied by the shape argument.
    if (tensorData.size() > tensorInfo.GetNumBytes())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Number of elements (%1%) should be less than or equal "
                    "to the number of elements implied by the shape argument (%2%) for Const node - %3% %4%")
                % (tensorData.size() / GetDataTypeSize(dataType))
                % tensorInfo.GetNumElements()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    // Dispatches on the runtime data type to build a typed ParsedConstTfOperation.
    return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
        dataType, this, nodeDef, tensorData, tensorInfo);
}
1010
1011template<typename Type>
1012bool TfParser::HasParsedConstTensor(const std::string & nodeName) const
1013{
1014 auto it = m_ParsedTfOperations.find(nodeName);
jimfly01f6ba7472018-12-04 10:09:52 +00001015 if (it == m_ParsedTfOperations.end())
surmeh01bceff2f2018-03-29 16:29:27 +01001016 {
1017 return false;
1018 }
jimfly01f6ba7472018-12-04 10:09:52 +00001019 return dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) != nullptr;
1020}
1021
1022template<typename Type>
1023bool TfParser::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const
1024{
1025 return dynamic_cast<ParsedConstTfOperation<Type>*>(parsedTfOpPtr) != nullptr;
surmeh01bceff2f2018-03-29 16:29:27 +01001026}
1027
// Parses a TF "Conv2D" node into an ArmNN Convolution2d layer. Requires constant
// weights (input 1); supports NHWC and NCHW data formats and SAME/VALID padding,
// but only dilations of [1,1,1,1].
ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    // Input 1 must be a previously parsed Const node holding the weights.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Convolution layers with constant weights for %1%, input %2% %3%")
                % nodeDef.name()
                % inputs[1].m_IndexedValue->GetNode().name()
                % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    // Read the dilations, if present - only [1,1,1,1] (the default) is supported.
    std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
    if (!dilations.empty())
    {
        for (auto dilation : dilations)
        {
            if (dilation != 1u)
            {
                throw ParseException(
                    boost::str(
                        boost::format(
                            "ArmNN only supports Convolution layers with dilations [1,1,1,1] for %1% %2%")
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
            }
        }
    }

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    // Rejects any data_format other than NHWC or NCHW.
    CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    // DataLayoutIndexed maps the H/W/C axes to their positions for the chosen layout,
    // so strides and shapes can be indexed uniformly for both formats.
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth  = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // Tensorflow weights are [H, W, In, Out].
    // ArmNN weights have to be [Out, H, W, In] when the data layout is NHWC,
    // and [Out, In, H, W] when the data layout is NCHW.
    PermutationVector permutationVector =
        dataLayout == DataLayout::NHWC ?
        std::initializer_list<unsigned int>{ 1, 2, 3, 0 } : // NHWC: [H, W, In, Out] -> [Out, H, W, In]
        std::initializer_list<unsigned int>{ 2, 3, 1, 0 }; // NCHW: [H, W, In, Out] -> [Out, In, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data());

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    uint32_t weightHeight = weightTensor.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t weightWidth  = weightTensor.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    // Rejects any padding type other than SAME or VALID.
    CHECK_PADDING_TYPE(nodeDef, paddingString);

    if (paddingString == "SAME")
    {
        padding = true;

        // SAME padding: output spatial size is ceil(input / stride).
        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth  = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                  static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        // VALID padding: the filter window must fit entirely inside the input.
        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth  = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                  static_cast<float>(desc.m_StrideX)));
    }

    // Builds the output shape in the same layout as the input; the output channel
    // count is the weight tensor's leading (Out) dimension.
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      weightTensor.GetShape()[0] },
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
        default:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      weightTensor.GetShape()[0],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc, weightTensor, nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1165
// Parses a TF "DepthwiseConv2dNative" node into an ArmNN DepthwiseConvolution2d layer.
// Requires constant weights (input 1). The layer itself works in NCHW; NHWC graphs are
// handled by swizzling the input and deswizzling the output around the layer.
ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
                                                    const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    // Input 1 must be a previously parsed Const node holding the weights.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Depthwise Convolution layer with constant weights. "
                    "Non const input found %1% for node %2% %3%")
                % inputs[1].m_IndexedValue->GetNode().name()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);


    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    // Rejects any data_format other than NHWC or NCHW.
    CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");

    if (dataFormat == "NHWC")
    {
        // NHWC strides are ordered [N, H, W, C].
        desc.m_StrideX = strides[2];
        desc.m_StrideY = strides[1];
        // Swizzles input to supported memory layout.
        inputTensorInfo = armnnUtils::Permuted(inputSlot.GetTensorInfo(), NHWCToArmNN);
    }
    else if (dataFormat == "NCHW")
    {
        // NCHW strides are ordered [N, C, H, W].
        desc.m_StrideX = strides[3];
        desc.m_StrideY = strides[2];
    }

    // From here on inputTensorInfo is in NCHW order, so H is index 2 and W is index 3.
    uint32_t inputHeight = inputTensorInfo.GetShape()[2];
    uint32_t inputWidth = inputTensorInfo.GetShape()[3];

    std::vector<float> outputTensorData;

    // NOTE(review): the boolean argument presumably requests the depthwise weight
    // swizzle from GetConstTensor (ParseFusedBatchNorm passes false) - confirm against
    // ParsedConstTfOperation::GetConstTensor.
    ConstTensor weightTensor = weightNode->GetConstTensor(true, outputTensorData);

    uint32_t weightHeight = weightTensor.GetShape()[2];
    uint32_t weightWidth = weightTensor.GetShape()[3];

    bool padding = false;
    TensorInfo outputInfo;

    // Rejects any padding type other than SAME or VALID.
    CHECK_PADDING_TYPE(nodeDef, paddingString);

    if (paddingString == "SAME")
    {
        // SAME padding: spatial output is ceil(input / stride). The output channel count
        // is shape[0] * shape[1] of the weights (depth multiplier times input channels).
        padding = true;
        outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                  weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
                                  static_cast<uint32_t>(ceil(
                                      static_cast<float>(inputHeight) /
                                      static_cast<float>(desc.m_StrideY))),
                                  static_cast<uint32_t>(ceil(
                                      static_cast<float>(inputWidth) /
                                      static_cast<float>(desc.m_StrideX)))
                                }, DataType::Float32);
    }
    else if (paddingString == "VALID")
    {
        // VALID padding: the filter window must fit entirely inside the input.
        padding = false;
        outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                  weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
                                  static_cast<uint32_t>(ceil(
                                      static_cast<float>(inputHeight - weightHeight + 1) /
                                      static_cast<float>(desc.m_StrideY))),
                                  static_cast<uint32_t>(ceil(
                                      static_cast<float>(inputWidth - weightWidth + 1) /
                                      static_cast<float>(desc.m_StrideX)))
                                }, DataType::Float32);
    }

    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc, weightTensor, nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // For NHWC graphs the layer is wrapped in NHWC->NCHW / NCHW->NHWC permute layers;
    // for NCHW the input connects directly.
    if (dataFormat == "NHWC")
    {
        layer = SwizzleInDeswizzleOut(*m_Network, inputSlot, *layer, nodeDef.name());
    }
    else
    {
        inputSlot.Connect(layer->GetInputSlot(0));
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1269
Conor Kennedyc2130a02018-12-05 11:05:54 +00001270TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
1271{
1272 BOOST_ASSERT(nodeDef.op() == "ExpandDims");
1273
1274 if (inputTensorInfo.GetNumDimensions() > 4) {
1275 throw ParseException(
1276 boost::str(
1277 boost::format(
1278 "Unsupported number of dimensions: %1% for input shape for ExpandDims %2% %3%")
1279 % inputTensorInfo.GetNumDimensions()
1280 % nodeDef.name()
1281 % CHECK_LOCATION().AsString()));
1282 }
1283
1284 std::int32_t expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");
1285
1286 std::int32_t inputDimSize = boost::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
1287 std::vector<uint32_t> outputDims;
1288
1289 // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
1290 if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1291 {
1292 // add current input shape to outputDims
1293 for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
1294 auto currentDimension = inputTensorInfo.GetShape()[i];
1295 outputDims.push_back(currentDimension);
1296 }
1297
1298 // insert a dimension of 1 at index 'expandDim' of inputs shape
1299 if (expandDim >= 0)
1300 {
1301 auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1302 outputDims.insert(getPosition, 1);
1303 }
1304
1305 // if negative number for 'expandDim' then count backwards from the last element
1306 // and insert 1 dimension at index 'expandDim'
1307 if (expandDim < 0)
1308 {
Matteo Martincighd7cceeb2018-12-06 09:06:29 +00001309 int outputDimSize = boost::numeric_cast<int>(outputDims.size() + 1);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001310 auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1311 outputDims.insert(getPosition, 1);
1312 }
1313 }
1314 else
1315 {
1316 throw InvalidArgumentException(
1317 boost::str(
1318 boost::format(
1319 "Cannot expand dimension %1% in input tensor with %2% dimension %3%")
1320 % expandDim
1321 % inputDimSize
1322 % CHECK_LOCATION().AsString()));
1323 }
1324
1325 if (outputDims.size() > 4)
1326 {
1327 throw ParseException(
1328 boost::str(
1329 boost::format(
1330 "Unsupported number of dimensions: %1% for output shape for ExpandDims %2% %3%")
1331 % outputDims.size()
1332 % nodeDef.name()
1333 % CHECK_LOCATION().AsString()));
1334 }
1335
1336 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1337 outputDims.data());
1338
1339 TensorInfo outTensorInfo = inputTensorInfo;
1340 outTensorInfo.SetShape(outShape);
1341
1342 return outTensorInfo;
1343}
1344
1345ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1346{
1347 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1348
1349 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1350 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1351
1352 TensorInfo outputInfo;
1353 outputInfo = OutputShapeOfExpandDims(nodeDef, inputTensorInfo);
1354
1355 ReshapeDescriptor reshapeDesc;
1356 reshapeDesc.m_TargetShape = outputInfo.GetShape();
1357 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1358 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1359 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1360
1361 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1362}
1363
surmeh01bceff2f2018-03-29 16:29:27 +01001364ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
1365 const tensorflow::GraphDef& graphDef)
1366{
1367 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
1368
1369 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1370 {
telsoa01c577f2c2018-08-31 09:22:23 +01001371 throw ParseException(
1372 boost::str(
1373 boost::format(
1374 "ArmNN only supports FusedBatchNormalization layers with constant scale. "
1375 "Input %1%. Node %2% %3%")
1376 % inputs[1].m_IndexedValue->GetNode().name()
1377 % nodeDef.name()
1378 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001379 }
1380 ParsedConstTfOperation<float>* scaleNode =
1381 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1382
1383 if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
1384 {
telsoa01c577f2c2018-08-31 09:22:23 +01001385 throw ParseException(
1386 boost::str(
1387 boost::format(
1388 "ArmNN only supports FusedBatchNormalization layers with constant offset. "
1389 "Input %1%. Node %2% %3%")
1390 % inputs[2].m_IndexedValue->GetNode().name()
1391 % nodeDef.name()
1392 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001393 }
1394 ParsedConstTfOperation<float>* offsetNode =
1395 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);
1396
1397 if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
1398 {
telsoa01c577f2c2018-08-31 09:22:23 +01001399 throw ParseException(
1400 boost::str(
1401 boost::format(
1402 "ArmNN only supports FusedBatchNormalization layers with constant mean. "
1403 "Input %1%. Node %2% %3%")
1404 % inputs[3].m_IndexedValue->GetNode().name()
1405 % nodeDef.name()
1406 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001407 }
1408 ParsedConstTfOperation<float>* meanNode =
1409 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);
1410
1411 if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
1412 {
telsoa01c577f2c2018-08-31 09:22:23 +01001413 throw ParseException(
1414 boost::str(
1415 boost::format(
1416 "ArmNN only supports FusedBatchNormalization layers with constant variance. "
1417 "Input %1%. Node %2% %3%")
1418 % inputs[4].m_IndexedValue->GetNode().name()
1419 % nodeDef.name()
1420 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001421 }
1422 ParsedConstTfOperation<float>* varianceNode =
1423 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);
1424
Matteo Martincigh075c7502018-12-05 13:10:45 +00001425 const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
1426
1427 CHECK_DATA_FORMAT(nodeDef, dataFormat, "FusedBatchNorm");
1428
telsoa01c577f2c2018-08-31 09:22:23 +01001429 // The descriptor only has the epsilon attribute.
surmeh01bceff2f2018-03-29 16:29:27 +01001430 BatchNormalizationDescriptor desc;
1431 desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
Matteo Martincigh075c7502018-12-05 13:10:45 +00001432 desc.m_DataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
surmeh01bceff2f2018-03-29 16:29:27 +01001433
telsoa01c577f2c2018-08-31 09:22:23 +01001434 // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
1435 // locally until the layer is added.
surmeh01bceff2f2018-03-29 16:29:27 +01001436 std::vector<float> scaleTensorData;
1437 ConstTensor scaleTensor = scaleNode->GetConstTensor(false, scaleTensorData);
1438
1439 std::vector<float> offsetTensorData;
1440 ConstTensor offsetTensor = offsetNode->GetConstTensor(false, offsetTensorData);
1441
1442 std::vector<float> meanTensorData;
1443 ConstTensor meanTensor = meanNode->GetConstTensor(false, meanTensorData);
1444
1445 std::vector<float> varianceTensorData;
1446 ConstTensor varianceTensor = varianceNode->GetConstTensor(false, varianceTensorData);
1447
1448 IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
1449 meanTensor,
1450 varianceTensor,
1451 offsetTensor,
1452 scaleTensor,
1453 nodeDef.name().c_str());
1454
1455 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1456
Matteo Martincigh075c7502018-12-05 13:10:45 +00001457 layer->GetOutputSlot(0).SetTensorInfo(inputSlot.GetTensorInfo());
1458 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01001459
1460 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1461}
1462
telsoa01c577f2c2018-08-31 09:22:23 +01001463bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
1464 size_t alphaLayerIndex,
1465 const OutputOfParsedTfOperation& otherOp,
1466 armnn::IOutputSlot** outputOfLeakyRelu,
1467 armnn::ActivationDescriptor & desc)
1468{
1469 const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();
1470
1471 // Verifying all these assumptions hold:
1472 //
1473 // 1, the mulNodeDef is an elementwise multiplication node "Mul"
1474 // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
1475 // 3, the inputLayerIndex selects a layer which has the same name as otherNodeDef
1476 //
1477
1478 if (mulNodeDef.op() == "Mul")
1479 {
1480 size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
1481 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
1482
1483 BOOST_ASSERT(inputs.size() == 2);
1484 BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
1485 BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
1486 BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
1487
1488 if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
1489 {
1490 if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
1491 {
1492 ParsedConstTfOperation<float>* alpha =
1493 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(
1494 inputs[alphaLayerIndex].m_IndexedValue);
1495
1496 std::vector<float> const_data;
1497 ConstTensor const_tensor = alpha->GetConstTensor(false, const_data);
1498
1499 if (const_data.size() == 1)
1500 {
1501 desc.m_Function = ActivationFunction::LeakyReLu;
1502 desc.m_A = const_data[0];
1503
1504 *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
1505 return true;
1506 }
1507 }
1508 }
1509 }
1510 return false;
1511}
1512
telsoa01c577f2c2018-08-31 09:22:23 +01001513ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
1514 const tensorflow::GraphDef& graphDef)
1515{
1516 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
Sadik Armagan975c09a2018-12-04 10:02:08 +00001517 if (inputs.size() != 2)
1518 {
1519 throw ParseException(
1520 boost::str(
1521 boost::format(
1522 "Maximum expects two inputs!. Got %1% for Node %2% %3%")
1523 % inputs.size()
1524 % nodeDef.name()
1525 % CHECK_LOCATION().AsString()));
1526 }
1527
telsoa01c577f2c2018-08-31 09:22:23 +01001528 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1529 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1530 IOutputSlot* outputOfLeakyRelu = nullptr;
1531
1532 ActivationDescriptor desc;
1533
Sadik Armagan975c09a2018-12-04 10:02:08 +00001534 // A max node may be part of a LeakyRelu, with one input as a multiplication with a scalar constant,
1535 // i.e. one of the four possible scenarios:
1536 // 1, max(mul(a, x), x)
1537 // 2, max(mul(x, a), x)
1538 // 3, max(x, mul(a, x))
1539 // 4, max(x, mul(x, a))
1540 // These are handled by an activation layer.
telsoa01c577f2c2018-08-31 09:22:23 +01001541
1542 if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1543 IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1544 IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1545 IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1546 {
1547 BOOST_ASSERT(outputOfLeakyRelu != nullptr);
1548
1549 IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
1550 outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
1551 layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
1552 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1553 }
1554 else
1555 {
Sadik Armagan975c09a2018-12-04 10:02:08 +00001556 // Anything else is just a maximum layer.
1557
1558 return AddMaximumLayer(nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001559 }
1560}
1561
ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
                                            const tensorflow::GraphDef& graphDef)
{
    // Parses a TF Minimum node: element-wise minimum of two inputs.
    // Rank-mismatched broadcasting is only supported for a 1D/4D tensor pair,
    // implemented by inserting a reshape layer on the 1D input.
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
    const unsigned int input0Dim = input0Slot->GetTensorInfo().GetNumDimensions();
    const unsigned int input1Dim = input1Slot->GetTensorInfo().GetNumDimensions();

    if (input0Dim != input1Dim)
    {
        // broadcasting where input0 and input1 have different number of dimensions
        // is only supported for 1D and 4D tensors pair
        if (input0Dim == 1 && input1Dim == 4)
        {
            // Replaces the 1D slot with the output slot of the inserted reshape layer (NHWC).
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
        }
        else if (input0Dim == 4 && input1Dim == 1)
        {
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
        }
        else
        {
            throw ParseException(
                boost::str(
                    boost::format("Unsupported broadcast configuration for Minimum operation %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }
    }

    IConnectableLayer* const layer = m_Network->AddMinimumLayer(nodeDef.name().c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // Output shape is the per-dimension maximum of the (possibly reshaped) input
    // shapes; by this point both slots have the same rank.
    TensorInfo outputInfo = input0Slot->GetTensorInfo();
    std::vector<unsigned int> outputShape;

    const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
    const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();

    for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
    {
        outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
    }

    outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1615
ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    // Parses a TF Sub node into an armnn Subtraction layer.
    // A 1D input is broadcast against the other input via an inserted reshape layer.
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);

    // NOTE: these references are bound BEFORE any broadcast reshape below, so the
    // rank checks further down deliberately see the ORIGINAL input ranks.
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    if (input0Info.GetNumDimensions() == 1)
    {
        const bool isNHWC = true;
        input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
    }

    if (input1Info.GetNumDimensions() == 1)
    {
        const bool isNHWC = true;
        input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
    }

    IConnectableLayer* const layer = m_Network->AddSubtractionLayer(nodeDef.name().c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // The output takes the tensor info of whichever input was NOT broadcast.
    if (input0Info.GetNumDimensions() == 1)
    {
        layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
    }
    else
    {
        layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1654
jimfly01f6ba7472018-12-04 10:09:52 +00001655unsigned int CheckPaddingTensor(const ConstTensor& paddingTensor,
1656 const TensorInfo& inputTensorInfo,
1657 const std::string& nodeName)
1658{
1659 unsigned int rank = paddingTensor.GetShape()[0];
1660 unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
1661 if (rank != expectedRank)
1662 {
1663 throw ParseException(
1664 boost::str(
1665 boost::format(
1666 "Expected the padding tensor to be of rank %1 not %2 on Node %3 %4.")
1667 % expectedRank
1668 % rank
1669 % nodeName
1670 % CHECK_LOCATION().AsString()));
1671 }
1672 unsigned int second = paddingTensor.GetShape()[1];
1673 if (second != 2)
1674 {
1675 throw ParseException(
1676 boost::str(
1677 boost::format(
1678 "Expected the padding tensor to be of dimensions [%1, 2] not [%1, %2] on Node %3 %4.")
1679 % rank
1680 % second
1681 % nodeName
1682 % CHECK_LOCATION().AsString()));
1683 }
1684 return rank;
1685}
1686
1687TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
1688 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
1689{
1690 unsigned int numDims = inputTensorInfo.GetNumDimensions();
1691 std::vector<unsigned int> outDims;
1692 for (unsigned int i = 0; i < numDims; ++i)
1693 {
1694 unsigned int dimSize = inputTensorInfo.GetShape()[i];
1695 const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
1696 dimSize += dimPadding.first;
1697 dimSize += dimPadding.second;
1698 outDims.push_back(dimSize);
1699 }
1700 TensorInfo paddedTensorInfo = inputTensorInfo;
1701 unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
1702 paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
1703 return paddedTensorInfo;
1704}
1705
1706ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
1707 const tensorflow::GraphDef& graphDef)
1708{
1709 // input consists of:
1710 // input[0] the tensor which will be padded
1711 // input[1] the tensor holding the padding values
1712 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1713 IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1714 TensorInfo inputTensorInfo = previousLayerOutputSlot.GetTensorInfo();
1715 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
1716 {
1717 throw ParseException(
1718 boost::str(
1719 boost::format(
1720 "ArmNN only supports Pad with constant padding. "
1721 "Input %1%. Node %2% %3%")
1722 % inputs[1].m_IndexedValue->GetNode().name()
1723 % nodeDef.name()
1724 % CHECK_LOCATION().AsString()));
1725
1726 }
1727 ParsedConstTfOperation<int32_t>* paddingTensorOp =
1728 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1729
1730 std::vector<int32_t> paddingTensorData;
1731 ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(false, paddingTensorData);
1732 // paddings is an integer tensor with shape [n, 2], where n is the rank of tensor
1733 // and should match the rank of the input tensor that is being padded.
1734 // For each dimension D of input, paddings[D, 0] indicates how many values to add
1735 // before the contents of tensor in that dimension, and paddings[D, 1] indicates how
1736 // many values to add after the contents of tensor in that dimension
1737 // This needs to be translated into a padList for ACL
1738 std::vector<std::pair<unsigned int, unsigned int>> padList;
1739 unsigned int rank = CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
1740 for (unsigned int i = 0; i < rank; ++i)
1741 {
1742 std::pair<unsigned int, unsigned int> paddingForDim;
1743 for (unsigned int j = 0; j < 2; j++)
1744 {
1745 unsigned int index = (i * 2) + j;
1746 int paddingAmount = paddingTensorData[index];
1747 // make sure we can cast to an unsigned value
1748 if (paddingAmount < 0)
1749 {
1750 throw ParseException(
1751 boost::str(
1752 boost::format(
1753 "Negative amount %1 specified at [%2, %3] of padding tensor on Node %4 %5.")
1754 % paddingAmount
1755 % i
1756 % j
1757 % nodeDef.name()
1758 % CHECK_LOCATION().AsString()));
1759 }
1760 if (j == 0)
1761 {
1762 paddingForDim.first = static_cast<unsigned int>(paddingAmount);
1763 }
1764 else
1765 {
1766 paddingForDim.second = static_cast<unsigned int>(paddingAmount);
1767 }
1768 }
1769 padList.push_back(paddingForDim);
1770 }
1771 PadDescriptor padDescriptor(padList);
1772 IConnectableLayer* layer = m_Network->AddPadLayer(padDescriptor, nodeDef.name().c_str());
1773 previousLayerOutputSlot.Connect(layer->GetInputSlot(0));
1774 // Use the padding to calculate the new output tensor shape
1775 TensorInfo outputTensorInfo = CalculatePaddedOutputTensorInfo(inputTensorInfo, padList);
1776 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1777 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1778}
1779
ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);

    // In tensorflow, the last input of the Concat node is the axis for concatenation.
    unsigned int numInputs = static_cast<unsigned int>(nodes.size());

    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);

    // The axis must come from a constant node so it is known at parse time.
    if (!HasParsedConstTensor<int32_t>(inputs[numInputs - 1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Concat with constant axis. "
                    "Input %1%. Node %2% %3%")
                % inputs[numInputs - 1].m_IndexedValue->GetNode().name()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<int32_t>* shapeNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[numInputs - 1].m_IndexedValue);

    // Get the axis tensor data.
    std::vector<int32_t> axisTensorData;
    shapeNode->GetConstTensor(false, axisTensorData);

    // This concatDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
    const unsigned int concatDim = static_cast<unsigned int>(axisTensorData[0]);

    // Armnn supports concatenation along the channel dimension for data formats NHWC and NCHW,
    // so axes 0 (batch) and 2 are rejected here.
    if (concatDim == 0 || concatDim == 2)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Dimension %1% for concatenation is not supported by Armnn. "
                    "Node %2% %3%")
                % concatDim
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    // Every input except the trailing axis node is a view to be merged.
    unsigned int numConcatViews = numInputs - 1;
    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatViews), MaxNumOfTensorDimensions);
    concatDescriptor.SetConcatAxis(concatDim);
    TensorShape mergeDims(MaxNumOfTensorDimensions);
    // Running offset along the concatenation axis; each view starts where the previous ended.
    unsigned int mergeDim = 0;
    for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
    {
        // Need to double check whether it should be
        IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
        TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

        // All inputs must have exactly MaxNumOfTensorDimensions dimensions.
        if (inputTensorInfo.GetNumDimensions() != MaxNumOfTensorDimensions)
        {
            throw armnn::ParseException(
                boost::str(
                    boost::format(
                        "The number of dimensions: %1% for input tensors of the "
                        "concatenation op should be %2% %3%")
                    % inputTensorInfo.GetNumDimensions()
                    % MaxNumOfTensorDimensions
                    % CHECK_LOCATION().AsString()));
        }

        // Copy the input tensor shape to mergeDims and zero the view origin coordinates
        // for the current input. GetViewOrigin returns const storage inside the
        // descriptor, so a const_cast is used to write the origins in place.
        mergeDims = inputTensorInfo.GetShape();
        unsigned int* viewOrigin = const_cast<unsigned int*>(concatDescriptor.GetViewOrigin(viewIndex));
        std::fill(viewOrigin, viewOrigin + MaxNumOfTensorDimensions, 0);

        // Update the view origin coordinates and the merge dimension value.
        concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
        mergeDim += mergeDims[concatDim];
    }

    // mergeDims still holds the last input's shape; overwrite the concat axis with
    // the accumulated size to obtain the output shape.
    mergeDims[concatDim] = mergeDim;
    armnn::IConnectableLayer *layer = m_Network->AddMergerLayer(concatDescriptor, nodeDef.name().c_str());

    layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(mergeDims, DataType::Float32));

    for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
    {
        IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
        inputSlot.Connect(layer->GetInputSlot(viewIndex));
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1873
ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
                                          const tensorflow::GraphDef& graphDef)
{
    // Note: the Shape layer is handled in a special way, because:
    // 1. ARMNN doesn't support int32 tensors which it outputs.
    // 2. ARMNN works with statically shaped tensors which are known at parse time.
    // 3. because of 1. and 2. we treat the output of Shape as a temporary const int32
    //    tensor which may be used as an input to other ops, most likely a Reshape.

    const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
    if (tfDataType != tensorflow::DT_INT32)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Armnn only supports DT_INT32 as out_type. Got %1% for Node %2% %3%")
                % tensorflow::DataType_Name(tfDataType)
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    // The shape is read from the statically known tensor info of the single input.
    const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
    IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    const TensorInfo& prevLayerTensorInfo = prevLayerOutputSlot.GetTensorInfo();
    unsigned int prevLayerDimensions = prevLayerTensorInfo.GetNumDimensions();

    std::vector<int32_t> shapeTensorData;
    shapeTensorData.reserve(prevLayerDimensions);

    for (unsigned int i=0; i<prevLayerDimensions; ++i)
    {
        shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.GetShape()[i]));
    }

    // 1D tensor of length prevLayerDimensions holding the input's shape.
    TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);

    // NOTE(review): shapeTensorData is local to this function; this assumes
    // ParsedConstTfOperation copies the buffer passed to it — confirm.
    return std::make_unique<ParsedConstTfOperation<int32_t>>(this,
                                                             nodeDef,
                                                             &shapeTensorData[0],
                                                             shapeTensorInfo);
}
1915
1916ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
1917 const tensorflow::GraphDef& graphDef)
1918{
1919 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1920 ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
1921
1922 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
1923 {
telsoa01c577f2c2018-08-31 09:22:23 +01001924 throw ParseException(
1925 boost::str(
1926 boost::format(
1927 "ArmNN only supports Reshape layers with constant shapes. "
1928 "Input %1% Node %2% %3%")
1929 % inputs[1].m_IndexedValue->GetNode().name()
1930 % nodeDef.name()
1931 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001932 }
1933 ParsedConstTfOperation<int32_t>* shapeNode =
1934 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1935
1936 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
1937 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1938
1939 std::vector<int32_t> shapeTensorData;
1940 ConstTensor shapeTensor = shapeNode->GetConstTensor(false, shapeTensorData);
1941 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
1942
1943 TensorShape targetShape = outputTensorInfo.GetShape();
1944 ReshapeDescriptor reshapeDesc;
1945 reshapeDesc.m_TargetShape = targetShape;
1946
1947 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1948 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1949 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1950
1951 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1952}
1953
ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
                                                   const tensorflow::GraphDef& graphDef)
{
    // Parses a TF ResizeBilinear node. Only constant target sizes and
    // align_corners == false are supported.
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports ResizeBilinear layers with constant sizes. "
                    "Input %1%. Node %2% %3%")
                % inputs[1].m_IndexedValue->GetNode().name()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<int32_t>* sizeNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);

    // Checks the align_corners attribute is not set.
    if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports ResizeBilinear layers with align_corners set to false. "
                    "Node %1% %2%")
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    // Data for the parsed tensor args (size) must be stored locally.
    std::vector<int32_t> sizeTensorData;
    ConstTensor sizeTensor = sizeNode->GetConstTensor(false, sizeTensorData);

    // The descriptor only has target height and width attributes, which we get from the size tensor.
    ResizeBilinearDescriptor desc;
    desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
    desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, nodeDef.name().c_str());

    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
    // The layer operates in NHWC: batch and channels come straight from the input
    // shape, while height and width are replaced by the target size.
    unsigned int outBatch = inputTensorInfo.GetShape()[0];
    unsigned int outChannels = inputTensorInfo.GetShape()[3];
    unsigned int outHeight = desc.m_TargetHeight;
    unsigned int outWidth = desc.m_TargetWidth;
    TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
    // The output DataType is always Float32, regardless of the input DataType.
    const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2014
2015TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
2016{
2017 BOOST_ASSERT(nodeDef.op() == "Squeeze");
2018 tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
2019
2020 DataType type;
2021 if (tfDataType == tensorflow::DT_FLOAT)
2022 {
2023 type = DataType::Float32;
2024 }
2025 else if (tfDataType == tensorflow::DT_INT32)
2026 {
2027 type = DataType::Signed32;
2028 }
2029 else
2030 {
telsoa01c577f2c2018-08-31 09:22:23 +01002031 throw ParseException(
2032 boost::str(
2033 boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
2034 % tensorflow::DataType_Name(tfDataType)
2035 % nodeDef.name()
2036 % CHECK_LOCATION().AsString()));
2037 }
2038
2039
2040 if (inputTensorInfo.GetNumDimensions() > 4)
2041 {
2042 throw ParseException(
2043 boost::str(
2044 boost::format(
2045 "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
2046 % inputTensorInfo.GetNumDimensions()
2047 % nodeDef.name()
2048 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002049 }
2050
2051 std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
telsoa01c577f2c2018-08-31 09:22:23 +01002052 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2053
surmeh01bceff2f2018-03-29 16:29:27 +01002054 if (squeezeDims.empty())
2055 {
telsoa01c577f2c2018-08-31 09:22:23 +01002056 squeezeDims.assign(dimensionSequence,
2057 dimensionSequence+inputTensorInfo.GetNumDimensions());
surmeh01bceff2f2018-03-29 16:29:27 +01002058 }
2059
2060 std::vector<uint32_t> outputDims;
2061 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2062 {
telsoa01c577f2c2018-08-31 09:22:23 +01002063 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2064 auto currentDimension = inputTensorInfo.GetShape()[i];
2065 if (skipSqueeze || currentDimension != 1)
surmeh01bceff2f2018-03-29 16:29:27 +01002066 {
telsoa01c577f2c2018-08-31 09:22:23 +01002067 outputDims.push_back(currentDimension);
surmeh01bceff2f2018-03-29 16:29:27 +01002068 }
2069 }
2070
2071 if (outputDims.size() > 4)
2072 {
telsoa01c577f2c2018-08-31 09:22:23 +01002073 throw ParseException(
2074 boost::str(
2075 boost::format(
2076 "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
2077 % outputDims.size()
2078 % nodeDef.name()
2079 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002080 }
2081
telsoa01c577f2c2018-08-31 09:22:23 +01002082 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2083 outputDims.data());
2084
2085 TensorInfo outTensorInfo = inputTensorInfo;
2086 outTensorInfo.SetShape(outShape);
2087 outTensorInfo.SetDataType(type);
surmeh01bceff2f2018-03-29 16:29:27 +01002088
2089 return outTensorInfo;
2090}
2091
2092ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2093{
2094 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2095
2096 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2097 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2098
2099 TensorInfo outputInfo;
2100 outputInfo = OutputShapeOfSqueeze(nodeDef, inputTensorInfo);
2101
2102 ReshapeDescriptor reshapeDesc;
2103 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2104 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2105 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2106 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2107
2108 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2109}
2110
2111ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2112{
2113 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2114
2115 NormalizationDescriptor normalizationDescriptor;
2116 normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2117 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
2118 normalizationDescriptor.m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef, "alpha");
2119 normalizationDescriptor.m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef, "beta");
2120 normalizationDescriptor.m_K = ReadMandatoryNodeFloatAttribute(nodeDef, "bias");
2121 normalizationDescriptor.m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef, "depth_radius");
ruoyan018174f362018-12-04 18:24:08 +00002122 normalizationDescriptor.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002123
2124 // The window size must be an odd value. For a window size of (2 * n + 1), TensorFlow defines depth_radius = n.
2125 normalizationDescriptor.m_NormSize = normalizationDescriptor.m_NormSize * 2 + 1;
2126
2127 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002128 IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor,
2129 nodeDef.name().c_str());
ruoyan018174f362018-12-04 18:24:08 +00002130 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2131 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
surmeh01bceff2f2018-03-29 16:29:27 +01002132
2133 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2134}
2135
2136/// An ParsedTfOperation for a MatMul node.
telsoa01c577f2c2018-08-31 09:22:23 +01002137/// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because
2138/// MatMul nodes are often used for the first part of a biased FullyConnected (MatMul followed
2139/// by Add) and in these cases armnn doesn't need a separate layer for the MatMul.
2140///
surmeh01bceff2f2018-03-29 16:29:27 +01002141class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
2142{
2143public:
2144 ParsedMatMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
2145 : DeferredSingleLayerParsedTfOperation(parser, node)
2146 {
2147 }
2148
2149 void CreateLayerDeferred() override
2150 {
2151 BOOST_ASSERT(m_Layer == nullptr);
2152 m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
2153 }
2154};
2155
2156ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2157{
telsoa01c577f2c2018-08-31 09:22:23 +01002158 // Defers the creation of the layer (see ParsedMatMulTfOperation).
surmeh01bceff2f2018-03-29 16:29:27 +01002159 return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
2160}
2161
telsoa01c577f2c2018-08-31 09:22:23 +01002162/// An ParsedTfOperation for a Mul node.
2163/// Creation of the armnn Mul layer is deferred until it is actually needed, because Mul nodes
2164/// are also used for the first part of a leaky relu activation function (Mul followed by Maximum)
2165/// and in these cases armnn doesn't need a separate layer for the Mul.
2166///
2167class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
2168{
2169public:
2170 ParsedMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
2171 : DeferredSingleLayerParsedTfOperation(parser, node)
2172 {
2173 }
2174
2175 void CreateLayerDeferred() override
2176 {
2177 BOOST_ASSERT(m_Layer == nullptr);
2178 m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
2179 }
2180};
2181
surmeh01bceff2f2018-03-29 16:29:27 +01002182ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2183{
2184 boost::ignore_unused(graphDef);
2185
telsoa01c577f2c2018-08-31 09:22:23 +01002186 return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002187}
2188
2189ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
2190 const tensorflow::GraphDef& graphDef)
2191{
2192 boost::ignore_unused(graphDef);
2193
2194 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
2195
2196 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
2197
2198 auto it = m_InputShapes.find(nodeDef.name());
2199 if (it == m_InputShapes.end())
2200 {
telsoa01c577f2c2018-08-31 09:22:23 +01002201 throw ParseException(
2202 boost::str(
2203 boost::format(
2204 "Missing input shape for Placeholder '%1%' %2%")
2205 % nodeDef.name()
2206 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002207 }
2208 TensorInfo tensorInfo(it->second, DataType::Float32);
2209
2210 IConnectableLayer* const layer = m_Network->AddInputLayer(layerId, nodeDef.name().c_str());
2211
2212 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2213
2214 TrackInputBinding(layer, layerId, tensorInfo);
2215
2216 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2217}
2218
saoste01bbd40612018-08-28 15:41:51 +01002219ParsedTfOperationPtr TfParser::ParseRealDiv(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2220{
2221 boost::ignore_unused(graphDef);
2222 return AddRealDivLayer(nodeDef);
2223}
2224
surmeh01bceff2f2018-03-29 16:29:27 +01002225ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
2226 const tensorflow::GraphDef& graphDef)
2227{
2228 boost::ignore_unused(graphDef);
2229
2230 ActivationDescriptor activationDesc;
2231 activationDesc.m_Function = ActivationFunction::ReLu;
2232 return AddActivationLayer(nodeDef, activationDesc);
2233}
2234
2235ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
2236 const tensorflow::GraphDef& graphDef)
2237{
2238 boost::ignore_unused(graphDef);
2239
2240 ActivationDescriptor activationDesc;
2241 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2242 activationDesc.m_A = 6.0f;
2243 activationDesc.m_B = 0.0f;
2244
2245 return AddActivationLayer(nodeDef, activationDesc);
2246}
2247
2248ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
2249 const tensorflow::GraphDef& graphDef)
2250{
2251 boost::ignore_unused(graphDef);
2252
2253 ActivationDescriptor activationDesc;
2254 activationDesc.m_Function = ActivationFunction::Sigmoid;
2255
2256 return AddActivationLayer(nodeDef, activationDesc);
2257}
2258
2259ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
2260 const tensorflow::GraphDef& graphDef)
2261{
2262 boost::ignore_unused(graphDef);
2263
2264 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2265
2266 SoftmaxDescriptor softmaxDescriptor;
2267 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(softmaxDescriptor, nodeDef.name().c_str());
2268
2269 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2270 prevLayerSlot.Connect(layer->GetInputSlot(0));
2271 layer->GetOutputSlot(0).SetTensorInfo(prevLayerSlot.GetTensorInfo());
2272
2273 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2274}
2275
2276ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
2277 const tensorflow::GraphDef& graphDef)
2278{
2279 boost::ignore_unused(graphDef);
2280
2281 ActivationDescriptor activationDesc;
2282 activationDesc.m_Function = ActivationFunction::SoftReLu;
2283
2284 return AddActivationLayer(nodeDef, activationDesc);
2285}
2286
2287ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2288{
2289 boost::ignore_unused(graphDef);
2290
2291 ActivationDescriptor activationDesc;
2292 activationDesc.m_Function = ActivationFunction::TanH;
2293 activationDesc.m_A = 1.0f;
2294 activationDesc.m_B = 1.0f;
2295
2296 return AddActivationLayer(nodeDef, activationDesc);
2297}
2298
2299ParsedTfOperationPtr TfParser::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
2300 ActivationDescriptor& activationDesc)
2301{
2302 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2303
2304 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, nodeDef.name().c_str());
2305
2306 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2307 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2308 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2309 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2310}
2311
// Parses a TF "MaxPool" node by delegating to the shared 2D pooling handler
// with the Max pooling algorithm.
ParsedTfOperationPtr TfParser::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
    const tensorflow::GraphDef& graphDef)
{
    return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
}
2317
// Parses a TF "AvgPool" node by delegating to the shared 2D pooling handler
// with the Average pooling algorithm.
ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
    const tensorflow::GraphDef& graphDef)
{
    return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
}
2323
2324ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
2325 const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
2326{
2327 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2328 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2329 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2330
2331 if (inputs.size() != 1)
2332 {
telsoa01c577f2c2018-08-31 09:22:23 +01002333 throw ParseException(
2334 boost::str(
2335 boost::format(
2336 "2D Pooling expects one input!. Got %1% for Node %2% %3%")
2337 % inputs.size()
2338 % nodeDef.name()
2339 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002340 }
2341
2342 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
2343 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
2344 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
2345 std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef, "ksize"); // size of pool windows
2346
2347 Pooling2dDescriptor pooling2dDescriptor;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002348 pooling2dDescriptor.m_PoolType = pooltype;
2349 pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
surmeh01bceff2f2018-03-29 16:29:27 +01002350 pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;
2351
telsoa01c577f2c2018-08-31 09:22:23 +01002352 CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
FrancisMurtaghf005e312018-12-06 15:26:04 +00002353 DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
2354 pooling2dDescriptor.m_DataLayout = dataLayout;
2355 DataLayoutIndexed dataLayoutIndexed(dataLayout);
telsoa01c577f2c2018-08-31 09:22:23 +01002356
FrancisMurtaghf005e312018-12-06 15:26:04 +00002357 pooling2dDescriptor.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
2358 pooling2dDescriptor.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
2359 pooling2dDescriptor.m_PoolWidth = ksize[dataLayoutIndexed.GetWidthIndex()];
2360 pooling2dDescriptor.m_PoolHeight = ksize[dataLayoutIndexed.GetHeightIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01002361
FrancisMurtaghf005e312018-12-06 15:26:04 +00002362 uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
2363 uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01002364
2365 bool padding = false;
2366 TensorInfo outputInfo;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002367 unsigned int outputHeight = 0;
2368 unsigned int outputWidth = 0;
telsoa01c577f2c2018-08-31 09:22:23 +01002369
2370 CHECK_PADDING_TYPE(nodeDef, paddingString);
2371
surmeh01bceff2f2018-03-29 16:29:27 +01002372 if (paddingString == "SAME")
2373 {
2374 padding = true;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002375
2376 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
2377 static_cast<float>(pooling2dDescriptor.m_StrideY)));
2378 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
2379 static_cast<float>(pooling2dDescriptor.m_StrideX)));
surmeh01bceff2f2018-03-29 16:29:27 +01002380 }
2381 else if (paddingString == "VALID")
2382 {
2383 padding = false;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002384
2385 outputHeight = static_cast<uint32_t>(ceil(
2386 static_cast<float>(inputHeight - pooling2dDescriptor.m_PoolHeight + 1) /
2387 static_cast<float>(pooling2dDescriptor.m_StrideY)));
2388 outputWidth = static_cast<uint32_t>(ceil(
2389 static_cast<float>(inputWidth - pooling2dDescriptor.m_PoolWidth + 1) /
2390 static_cast<float>(pooling2dDescriptor.m_StrideX)));
2391 }
2392
2393 switch (dataLayout)
2394 {
2395 case DataLayout::NHWC:
2396 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
2397 outputHeight,
2398 outputWidth,
2399 inputTensorInfo.GetShape()[3] },
2400 DataType::Float32);
2401 break;
2402 case DataLayout::NCHW:
2403 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
2404 inputTensorInfo.GetShape()[1],
2405 outputHeight,
2406 outputWidth },
2407 DataType::Float32);
2408 break;
surmeh01bceff2f2018-03-29 16:29:27 +01002409 }
surmeh01bceff2f2018-03-29 16:29:27 +01002410
2411 CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX,
FrancisMurtaghf005e312018-12-06 15:26:04 +00002412 pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01002413 CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY,
FrancisMurtaghf005e312018-12-06 15:26:04 +00002414 pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01002415
2416
2417 IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
2418 if (layer == nullptr)
2419 {
telsoa01c577f2c2018-08-31 09:22:23 +01002420 throw ParseException(
2421 boost::str(
2422 boost::format(
2423 "Failed to add pooling2d layer for %1% %2%")
2424 % nodeDef.name()
2425 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002426 }
2427
2428 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2429
FrancisMurtaghf005e312018-12-06 15:26:04 +00002430 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01002431
2432 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2433}
2434
// Adds an ArmNN Addition layer for a TF "Add" or "BiasAdd" node.
// For BiasAdd the bias must be a 1D vector, which is reshaped to a
// broadcast-compatible tensor; for plain Add any 1D operand is reshaped
// (NHWC layout assumed).
ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);

    // NOTE: these references bind to the *original* slots' infos. The slot
    // pointers may be redirected to a broadcast-reshape layer below, but the
    // rank checks further down deliberately use the pre-reshape infos.
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    if (isBiasAdd)
    {
        // BiasAdd takes bias as a 1D tensor. We need to add a reshape layer to create a 4D tensor
        // with the same data in the correct dimension for broadcast in addition.
        if(input1Info.GetNumDimensions() != 1)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Unsupported bias for BiasAdd. It should be a 1D vector. "
                        "Got %1% dimensions for input %2%. Node %3% %4%")
                        % input1Info.GetNumDimensions()
                        % inputs[1].m_IndexedValue->GetNode().name()
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }

        const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");

        CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
        // The bias (input1) is reshaped against the data input (input0).
        input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
    }
    else
    {
        // Plain Add: reshape whichever operand is 1D so it broadcasts against the other.
        if (input0Info.GetNumDimensions() == 1)
        {
            const bool isNHWC = true;
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
        }

        if (input1Info.GetNumDimensions() == 1)
        {
            const bool isNHWC = true;
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
        }
    }

    IConnectableLayer* const layer = m_Network->AddAdditionLayer(nodeDef.name().c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // The output takes the tensor info of the operand that was NOT reshaped
    // (i.e. the higher-rank one); for BiasAdd that is always input0.
    if (input0Info.GetNumDimensions() == 1 && isBiasAdd == false)
    {
        layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
    }
    else
    {
        layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2498
saoste01bbd40612018-08-28 15:41:51 +01002499ParsedTfOperationPtr TfParser::AddRealDivLayer(const tensorflow::NodeDef& nodeDef)
2500{
2501 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2502
2503 IConnectableLayer* const layer = m_Network->AddDivisionLayer(nodeDef.name().c_str());
2504 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2505 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2506
2507 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2508 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2509
2510
2511 if (input0NumDims < input1NumDims)
2512 {
2513 const bool isNHWC = true;
2514 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
2515 }
2516 if (input1NumDims < input0NumDims)
2517 {
2518 const bool isNHWC = true;
2519 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
2520 }
2521
2522 input0Slot->Connect(layer->GetInputSlot(0));
2523 input1Slot->Connect(layer->GetInputSlot(1));
2524
2525 if (input0NumDims < input1NumDims)
2526 {
2527 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2528 }
2529 else
2530 {
2531 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2532
2533 }
2534 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2535}
2536
Sadik Armagan975c09a2018-12-04 10:02:08 +00002537ParsedTfOperationPtr TfParser::AddMaximumLayer(const tensorflow::NodeDef& nodeDef)
2538{
2539 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2540
2541 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2542 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2543
2544 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2545 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2546
2547 if (input0NumDims < input1NumDims)
2548 {
2549 const bool isNHWC = true;
2550 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
2551 }
2552 if (input1NumDims < input0NumDims)
2553 {
2554 const bool isNHWC = true;
2555 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
2556 }
2557
2558 IConnectableLayer* const layer = m_Network->AddMaximumLayer(nodeDef.name().c_str());
2559
2560 input0Slot->Connect(layer->GetInputSlot(0));
2561 input1Slot->Connect(layer->GetInputSlot(1));
2562
2563 TensorInfo outputInfo = input0Slot->GetTensorInfo();
2564 std::vector<unsigned int> outputShape;
2565
2566 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
2567 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
2568
2569 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
2570 {
2571 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
2572 }
2573
2574 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
2575 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2576
2577 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2578}
2579
telsoa01c577f2c2018-08-31 09:22:23 +01002580IConnectableLayer* TfParser::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
2581{
2582 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2583
2584 IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
2585 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2586 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2587
2588 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2589 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2590
2591 if (input0NumDims < input1NumDims)
2592 {
2593 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002594 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01002595 }
2596 if (input1NumDims < input0NumDims)
2597 {
2598 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002599 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01002600 }
2601
2602 input0Slot->Connect(layer->GetInputSlot(0));
2603 input1Slot->Connect(layer->GetInputSlot(1));
2604
2605 if (input0NumDims < input1NumDims)
2606 {
2607 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2608 }
2609 else
2610 {
2611 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2612 }
2613 return layer;
2614}
2615
// Builds an ArmNN FullyConnected layer from a TF "MatMul" node, optionally
// fused with a following "Add" node supplying the bias.
// Requirements visible here: the weights (one MatMul input) and, if present,
// the bias (one Add input) must be constant float tensors; the other input of
// each node is the data path.
// @throws ParseException if weights or bias are not constants, or if the
//         weight/bias shapes are inconsistent.
IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
    const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
{
    // Finds bias const (if applicable).
    ParsedConstTfOperation<float>* biasNode = nullptr;
    if (addNodeDef != nullptr)
    {
        std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
        // Finds our inputs. The bias may be either operand of the Add node.
        if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
        {
            biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
        }
        else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
        {
            biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
        }
        else
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "ArmNN only supports fully connected layers with constant bias. "
                        "Inputs %1% and %2%. AddNode %3%. MatMulNode %4% %5%")
                        % addInputs[0].m_IndexedValue->GetNode().name()
                        % addInputs[1].m_IndexedValue->GetNode().name()
                        % addNodeDef->name()
                        % matMulNodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
    }

    // Finds matmul inputs: one must be the constant weights, the other is the
    // data input whose output slot index we track for connection later.
    ParsedConstTfOperation<float>* weightNode = nullptr;
    ParsedTfOperation* inputNode  = nullptr;
    unsigned int inputIdx = 0;
    std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
    if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
    {
        weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
        inputNode = mulInputs[1].m_IndexedValue;
        inputIdx = mulInputs[1].m_Index;
    }
    else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
    {
        weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
        inputNode = mulInputs[0].m_IndexedValue;
        inputIdx = mulInputs[0].m_Index;
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports fully connected layers with constant weights. "
                    "Inputs %1% and %2%. MatMulNode %3% %4%")
                    % mulInputs[0].m_IndexedValue->GetNode().name()
                    % mulInputs[1].m_IndexedValue->GetNode().name()
                    % matMulNodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    std::vector<float> weightTensorData;
    // Handles weight.
    ConstTensor weights = weightNode->GetConstTensor(false, weightTensorData);

    FullyConnectedDescriptor desc;
    desc.m_BiasEnabled = addNodeDef != nullptr;

    IConnectableLayer* layer = nullptr;
    // Makes the layer.
    if (addNodeDef != nullptr)
    {
        std::vector<float> biasTensorData;
        ConstTensor biases = biasNode->GetConstTensor(false, biasTensorData);

        // Bias length must equal the weights' output-channel count.
        if (weights.GetShape()[1] != biases.GetShape()[0])
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Shape of matmul weights and bias do not match. "
                        "AddNode %1%. MatMulNode %2% %3%")
                        % addNodeDef->name()
                        % matMulNodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }

        layer = m_Network->AddFullyConnectedLayer(desc, weights, biases, armnnLayerName);
    }
    else
    {
        layer = m_Network->AddFullyConnectedLayer(desc, weights, armnnLayerName);
    }

    BOOST_ASSERT(layer != nullptr);

    inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
    unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];

    // Handles output: [batches, output channels], float32.
    TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    return layer;
}
2721
2722void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2723{
telsoa01c577f2c2018-08-31 09:22:23 +01002724 // Gets the type of the node (assume float).
surmeh01bceff2f2018-03-29 16:29:27 +01002725 tensorflow::DataType type = tensorflow::DT_FLOAT;
2726 if (nodeDef.attr().count("T") != 0)
2727 {
2728 auto attr = nodeDef.attr().at("T");
2729 type = attr.type();
2730 }
2731 else if (nodeDef.attr().count("dtype") != 0)
2732 {
2733 auto attr = nodeDef.attr().at("dtype");
2734 type = attr.type();
2735 }
2736
2737 if (type != tensorflow::DT_FLOAT && nodeDef.op() != "Const")
2738 {
telsoa01c577f2c2018-08-31 09:22:23 +01002739 throw ParseException(
2740 boost::str(
2741 boost::format(
2742 "Currently only FLOAT is supported for tensorflow nodes (apart from Const). "
2743 "Got %1% for Node %2% %3%")
2744 % tensorflow::DataType_Name(type)
2745 % nodeDef.name()
2746 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002747 }
2748
2749 const std::string& operation = nodeDef.op();
2750 auto it = ms_OperationNameToParsingFunctions.find(operation);
2751 if (it != ms_OperationNameToParsingFunctions.end())
2752 {
2753 auto func = it->second;
2754 ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
2755 ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
2756
telsoa01c577f2c2018-08-31 09:22:23 +01002757 // Stores the parsed operation so that dependent layers can connect to it.
surmeh01bceff2f2018-03-29 16:29:27 +01002758 auto it = m_ParsedTfOperations.find(nodeDef.name());
2759 if (it != m_ParsedTfOperations.end())
2760 {
2761 throw ParseException(boost::str(boost::format("Name %1% used by more than one node") % nodeDef.name()));
2762 }
2763 m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
2764
telsoa01c577f2c2018-08-31 09:22:23 +01002765 // If this node was requested as an output from the network, then adds an ArmNN output layer.
surmeh01bceff2f2018-03-29 16:29:27 +01002766 if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
2767 m_RequestedOutputs.end())
2768 {
2769 auto outId = ParseOutputId(nodeDef.name());
2770 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
2771 IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
2772
2773 TensorInfo tensorInfo = prevSlot.GetTensorInfo();
2774
2775 IConnectableLayer* outputLayer = m_Network->AddOutputLayer(layerId, nodeDef.name().c_str());
2776
2777 prevSlot.Connect(outputLayer->GetInputSlot(0));
2778
2779 TrackOutputBinding(outputLayer, layerId, tensorInfo);
2780 }
2781 }
2782 else
2783 {
telsoa01c577f2c2018-08-31 09:22:23 +01002784 throw ParseException(
2785 boost::str(
2786 boost::format(
2787 "Unsupported operation %1% in tensorflow::GraphDef %2%")
2788 % operation
2789 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002790 }
2791}
2792
// Builds the ArmNN network from a tensorflow::GraphDef: registers every node
// by name, resolves the user-requested outputs, topologically sorts the
// reachable subgraph, and parses each node in dependency order.
// @throws ParseException if a requested output is missing or the graph has a cycle.
void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
{
    // Adds all nodes to our map.
    m_NodesByName.clear();
    m_NetworkInputsBindingInfo.clear();
    m_NetworkOutputsBindingInfo.clear();

    for (int i = 0; i < graphDef.node_size(); ++i)
    {
        const tensorflow::NodeDef& node = graphDef.node(i);
        m_NodesByName[node.name()] = &node;
    }

    // Finds the output nodes the user requested.
    std::vector<const tensorflow::NodeDef*> targetNodes;
    for (const std::string& requestedOutputName : m_RequestedOutputs)
    {
        auto nodeIt = m_NodesByName.find(requestedOutputName);
        if (nodeIt == m_NodesByName.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Couldn't find requested output node '%1%' in graph %2%")
                        % requestedOutputName
                        % CHECK_LOCATION().AsString()));
        }
        targetNodes.push_back(nodeIt->second);
    }

    // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
    std::vector<const tensorflow::NodeDef*> sortedNodes;
    if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
        targetNodes,
        [this](const tensorflow::NodeDef* node)
        {
            // Edge function for the sort: a node's predecessors are its TF inputs.
            auto outputs = GetTfInputNodes(*node);
            std::vector<const tensorflow::NodeDef*> nodesOnly;
            for (const auto & o : outputs) {
                nodesOnly.push_back(o.m_IndexedValue);
            }
            return nodesOnly;
        },
        sortedNodes))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Cycle detected in graph %1%")
                    % CHECK_LOCATION().AsString()));
    }

    // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
    for (const auto& it : sortedNodes)
    {
        const tensorflow::NodeDef& currentNode = *it;
        LoadNodeDef(currentNode, graphDef);
    }
}
2852
2853INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
2854 const std::map<std::string, TensorShape>& inputShapes,
2855 const std::vector<std::string>& requestedOutputs)
2856{
2857 FILE* fd = fopen(graphFile, "r");
2858
2859 if (fd == nullptr)
2860 {
telsoa01c577f2c2018-08-31 09:22:23 +01002861 throw FileNotFoundException(
2862 boost::str(
2863 boost::format(
2864 "Graph file %1% failed to open %2%")
2865 % graphFile
2866 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002867 }
2868
telsoa01c577f2c2018-08-31 09:22:23 +01002869 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01002870 tensorflow::GraphDef graphDef;
2871 auto input = new google::protobuf::io::FileInputStream(fileno(fd));
2872 bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
2873 delete input;
2874 fclose(fd);
2875
2876 if (!success)
2877 {
telsoa01c577f2c2018-08-31 09:22:23 +01002878 throw ParseException(
2879 boost::str(
2880 boost::format(
2881 "Failed to parse graph file %1%")
2882 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002883 }
2884
2885 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
2886}
2887
2888INetworkPtr TfParser::CreateNetworkFromString(const char* protoText,
2889 const std::map<std::string, TensorShape>& inputShapes,
2890 const std::vector<std::string>& requestedOutputs)
2891{
telsoa01c577f2c2018-08-31 09:22:23 +01002892 // Parses the string into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01002893 tensorflow::GraphDef graphDef;
2894 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
2895
2896 if (!success)
2897 {
telsoa01c577f2c2018-08-31 09:22:23 +01002898 throw ParseException(
2899 boost::str(
2900 boost::format(
2901 "Failed to parse graph file %1%")
2902 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002903 }
2904
2905 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
2906}
2907
// Loads a binary-serialized tensorflow::GraphDef from 'graphFile' and builds
// an ArmNN network from it.
// @throws FileNotFoundException if the file cannot be opened.
// @throws ParseException if the protobuf cannot be parsed.
INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
    const std::map<std::string, TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    FILE* fd = fopen(graphFile, "rb");

    if (fd == nullptr)
    {
        throw FileNotFoundException(
            boost::str(
                boost::format(
                    "Graph file %1% failed to open %2%")
                    % graphFile
                    % CHECK_LOCATION().AsString()));
    }

    // Parses the file into a message.
    tensorflow::GraphDef graphDef;

    google::protobuf::io::FileInputStream inStream(fileno(fd));
    google::protobuf::io::CodedInputStream codedStream(&inStream);
    // Raises the protobuf message-size limit to INT_MAX so large frozen graphs
    // can be read. NOTE(review): this two-argument overload is deprecated in
    // newer protobuf releases — verify against the protobuf version in use.
    codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
    bool success = graphDef.ParseFromCodedStream(&codedStream);
    fclose(fd);

    if (!success)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Failed to parse protobuf file %1% %2%")
                    % graphFile
                    % CHECK_LOCATION().AsString()));
    }

    return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
}
2945
2946INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
2947 const std::map<std::string, TensorShape>& inputShapes,
2948 const std::vector<std::string>& requestedOutputs)
2949{
2950 m_Network = INetwork::Create();
2951
2952 m_InputShapes = inputShapes;
2953 if (requestedOutputs.size() == 0)
2954 {
telsoa01c577f2c2018-08-31 09:22:23 +01002955 throw ParseException(
2956 boost::str(
2957 boost::format(
2958 "requestedOutputs must have at least one entry %1%")
2959 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002960 }
2961 m_RequestedOutputs = requestedOutputs;
2962
2963 try
2964 {
2965 LoadGraphDef(graphDef);
2966 }
2967 catch (const ParseException& e)
2968 {
2969 Cleanup();
2970 throw e;
2971 }
2972
2973 Cleanup();
2974
2975 return std::move(m_Network);
2976}
2977
// Resets per-parse state so the parser instance can be reused.
// The input/output binding maps are intentionally left intact here: they are
// cleared at the start of LoadGraphDef, and callers query them via
// GetNetworkInputBindingInfo/GetNetworkOutputBindingInfo after creation.
void TfParser::Cleanup()
{
    // Cleanup, in case we reuse this parser.
    m_InputShapes.clear();
    m_RequestedOutputs.clear();
    m_NodesByName.clear();
    m_ParsedTfOperations.clear();
}
2986
// Returns the (binding id, tensor info) pair registered for a named network input.
// @throws InvalidArgumentException if no input with that name exists.
BindingPointInfo TfParser::GetNetworkInputBindingInfo(const std::string& name) const
{
    return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
}
2991
// Returns the (binding id, tensor info) pair registered for a named network output.
// @throws InvalidArgumentException if no output with that name exists.
BindingPointInfo TfParser::GetNetworkOutputBindingInfo(const std::string& name) const
{
    return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
}
2996
2997std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(const std::string& layerName,
2998 const char* bindingPointDesc,
2999 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3000{
3001 auto it = nameToBindingInfo.find(layerName);
3002 if (it == nameToBindingInfo.end())
3003 {
telsoa01c577f2c2018-08-31 09:22:23 +01003004 throw InvalidArgumentException(
3005 boost::str(
3006 boost::format(
3007 "Unknown %1% '%2%' %3%")
3008 % bindingPointDesc
3009 % layerName
3010 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003011 }
3012 return it->second;
3013}
3014
// Records the binding information (id + tensor info) for a network input layer.
void TfParser::TrackInputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
{
    return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
}
3019
// Records the binding information (id + tensor info) for a network output layer.
void TfParser::TrackOutputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
{
    return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
}
3024
3025void TfParser::TrackBindingPoint(IConnectableLayer* layer,
3026 LayerBindingId id,
3027 const TensorInfo& tensorInfo,
3028 const char* bindingPointDesc,
3029 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3030{
3031 const std::string layerName = layer->GetName();
3032 auto it = nameToBindingInfo.find(layerName);
3033 if (it == nameToBindingInfo.end())
3034 {
3035 nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
3036 }
3037 else
3038 {
telsoa01c577f2c2018-08-31 09:22:23 +01003039 throw ParseException(
3040 boost::str(
3041 boost::format(
3042 "Id %1% used by more than one %2% layer %3%")
3043 % id
3044 % bindingPointDesc
3045 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003046 }
3047}
3048
3049} // namespace armnnTfParser