blob: 8f6352c6e7de0dcffdc50fc8cf6d519942897449 [file] [log] [blame]
surmeh01bceff2f2018-03-29 16:29:27 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
surmeh01bceff2f2018-03-29 16:29:27 +01004//
5#include "TfParser.hpp"
6
7#include <armnn/INetwork.hpp>
8#include <armnn/Utils.hpp>
9#include <armnn/TypesUtils.hpp>
10#include <armnn/Exceptions.hpp>
11#include <armnn/Descriptors.hpp>
12
13#include <GraphTopologicalSort.hpp>
Sadik Armagan479045b2018-10-01 11:51:37 +010014#include <ParserHelper.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010015#include <Permute.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010016#include <VerificationHelpers.hpp>
Matteo Martincigh46315822018-11-28 16:22:36 +000017#include <DataLayoutIndexed.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010018
19#include <google/protobuf/io/zero_copy_stream_impl.h>
20#include <google/protobuf/text_format.h>
21
22#include "tensorflow/core/framework/graph.pb.h"
23#include "tensorflow/core/framework/node_def.pb.h"
24#include "tensorflow/core/framework/types.pb.h"
25#include "tensorflow/core/framework/tensor.pb.h"
26#include "tensorflow/core/framework/tensor_shape.pb.h"
27
28#include <boost/assert.hpp>
29#include <boost/format.hpp>
30#include <boost/core/ignore_unused.hpp>
31#include <boost/log/trivial.hpp>
32#include <boost/numeric/conversion/cast.hpp>
33#include <boost/polymorphic_cast.hpp>
34
35#include <memory>
36#include <sstream>
37#include <numeric>
38#include <functional>
39
Matteo Martincigh46315822018-11-28 16:22:36 +000040using namespace armnnUtils;
surmeh01bceff2f2018-03-29 16:29:27 +010041using namespace armnn;
42
43namespace armnnTfParser
44{
45namespace
46{
47
// Permutation vectors mapping between TensorFlow's NHWC tensor layout and the
// NCHW layout used internally by ArmNN (and back again).
const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
50
51IConnectableLayer* AddSwizzleLayer(INetwork& network, IOutputSlot& input, const PermutationVector& mapping,
52 const std::string& name)
53{
telsoa01c577f2c2018-08-31 09:22:23 +010054 // Adds swizzle layer.
surmeh01bceff2f2018-03-29 16:29:27 +010055 IConnectableLayer* const layer = network.AddPermuteLayer(mapping, name.c_str());
56
telsoa01c577f2c2018-08-31 09:22:23 +010057 // Connects intput to swizzle layer.
surmeh01bceff2f2018-03-29 16:29:27 +010058 input.Connect(layer->GetInputSlot(0));
59
telsoa01c577f2c2018-08-31 09:22:23 +010060 // Sets up swizzled output.
surmeh01bceff2f2018-03-29 16:29:27 +010061 const TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mapping);
62 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
63
64 return layer;
65}
66
67IConnectableLayer* SwizzleInDeswizzleOut(INetwork& network, IOutputSlot& input, IConnectableLayer& layer,
68 const std::string& name)
69{
telsoa01c577f2c2018-08-31 09:22:23 +010070 // Adds swizzle layer.
surmeh01bceff2f2018-03-29 16:29:27 +010071 IConnectableLayer* const swizzleLayer = AddSwizzleLayer(network, input, NHWCToArmNN, "swizzle_for-" + name);
72
telsoa01c577f2c2018-08-31 09:22:23 +010073 // Connects swizzledInput to layer.
surmeh01bceff2f2018-03-29 16:29:27 +010074 swizzleLayer->GetOutputSlot(0).Connect(layer.GetInputSlot(0));
75
telsoa01c577f2c2018-08-31 09:22:23 +010076 // Adds deswizzle layer.
surmeh01bceff2f2018-03-29 16:29:27 +010077 IConnectableLayer* const deswizzleLayer = AddSwizzleLayer(network, layer.GetOutputSlot(0), ArmNNToNHWC,
78 "deswizzle_for-" + name);
79
80 return deswizzleLayer;
81}
82
83template <typename Callable>
84void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
85 const std::string& attribName,
86 tensorflow::AttrValue::ValueCase expectedValueCase,
87 Callable callable)
88{
89 auto iter = nodeDef.attr().find(attribName);
90 if (iter != nodeDef.attr().end())
91 {
92 const auto& attrValue = iter->second;
93 if (attrValue.value_case() == expectedValueCase)
94 {
95 callable(attrValue);
96 }
97 else
98 {
telsoa01c577f2c2018-08-31 09:22:23 +010099 throw ParseException(
100 boost::str(
101 boost::format(
102 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
103 "but found %4% instead %5%")
104 % attribName
105 % nodeDef.name()
106 % static_cast<int>(expectedValueCase)
107 % static_cast<int>(attrValue.value_case())
108 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100109 }
110 }
111 else
112 {
telsoa01c577f2c2018-08-31 09:22:23 +0100113 throw ParseException(
114 boost::str(
115 boost::format(
116 "Could not find required attribute %1% in node %2% %3%")
117 % attribName
118 % nodeDef.name()
119 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100120 }
121}
122
123template <typename Callable>
124void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
125 const std::string& attribName,
126 tensorflow::AttrValue::ValueCase expectedValueCase,
127 Callable callable)
128{
129 auto iter = nodeDef.attr().find(attribName);
130 if (iter != nodeDef.attr().end())
131 {
132 const auto& attrValue = iter->second;
133 if (attrValue.value_case() == expectedValueCase)
134 {
135 callable(attrValue);
136 }
137 else
138 {
telsoa01c577f2c2018-08-31 09:22:23 +0100139 throw ParseException(
140 boost::str(
141 boost::format(
142 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
143 "but found %4% instead %5%")
144 % attribName
145 % nodeDef.name()
146 % static_cast<int>(expectedValueCase)
147 % static_cast<int>(attrValue.value_case())
148 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100149 }
150 }
151}
152
153float ReadMandatoryNodeFloatAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
154{
155 float attribValue = 0.0f;
156 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
157 [&attribValue](const tensorflow::AttrValue& attrValue)
158 {
159 attribValue = attrValue.f();
160 });
161 return attribValue;
162}
163
Conor Kennedyc2130a02018-12-05 11:05:54 +0000164int32_t ReadMandatoryNodeInt32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
165{
166 int32_t attribValue = 0u;
167 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
168 [&attribValue](const tensorflow::AttrValue& attrValue)
169 {
170 attribValue = static_cast<int32_t>(attrValue.i());
171 });
172 return attribValue;
173}
174
surmeh01bceff2f2018-03-29 16:29:27 +0100175uint32_t ReadMandatoryNodeUint32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
176{
177 uint32_t attribValue = 0u;
178 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
179 [&attribValue](const tensorflow::AttrValue& attrValue)
180 {
181 attribValue = static_cast<uint32_t>(attrValue.i());
182 });
183 return attribValue;
184}
185
186std::string ReadMandatoryNodeStringAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
187{
188 std::string attribValue = "";
189 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
190 [&attribValue](const tensorflow::AttrValue& attrValue)
191 {
192 attribValue = attrValue.s();
193 });
194 return attribValue;
195}
196
197std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
198 const std::string& name)
199{
200 std::vector<uint32_t> attriList;
201 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
202 [&attriList](const tensorflow::AttrValue& attrValue)
203 {
204 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
205 {
206 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
207 }
208 });
209
210 return attriList;
211}
212
213std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
214 const std::string& name)
215{
216 std::vector<uint32_t> attriList;
217 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
218 [&attriList](const tensorflow::AttrValue& attrValue)
219 {
220 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
221 {
222 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
223 }
224 });
225
226 return attriList;
227}
228
229bool ReadOptionalNodeBoolAttribute(const tensorflow::NodeDef& nodeDef,
230 const std::string& name,
231 bool defaultValue = false)
232{
233 bool attribValue = defaultValue;
234 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
235 [&attribValue](const tensorflow::AttrValue& attrValue)
236 {
237 attribValue = attrValue.b();
238 });
239 return attribValue;
240}
241
242tensorflow::DataType ReadMandatoryNodeTypeAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
243{
244 tensorflow::DataType attribValue = tensorflow::DT_INVALID;
245 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
246 [&attribValue](const tensorflow::AttrValue& attrValue)
247 {
248 attribValue = attrValue.type();
249 });
250 return attribValue;
251}
252
253TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& targetDims)
254{
255 std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
256 const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);
257
258 if (stretchDim != targetDims.end())
259 {
260 if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
261 {
telsoa01c577f2c2018-08-31 09:22:23 +0100262 throw ParseException(
263 boost::str(
264 boost::format(
265 "At most one component of shape can be -1 %1%")
266 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100267 }
268
telsoa01c577f2c2018-08-31 09:22:23 +0100269 auto targetNumElements =
270 boost::numeric_cast<unsigned int>(
271 std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
surmeh01bceff2f2018-03-29 16:29:27 +0100272 auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
273 outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
274 }
275
276 TensorInfo reshapeInfo = input;
277 reshapeInfo.SetShape(TensorShape{ static_cast<unsigned int>(outDims.size()), outDims.data() });
278
279 return reshapeInfo;
280}
281
// We need the input0Slot to guide the reshape for input1Slot.
// Reshapes input1 so it can be broadcast against input0: the result has input0's rank,
// with input1's dimension 0 placed at the channel axis (last axis for NHWC, axis 1 for
// NCHW) and every other dimension set to 1. Returns the reshaped output slot.
IOutputSlot* AddBroadcastReshapeLayer(IOutputSlot* input0Slot, IOutputSlot* input1Slot, bool isNHWC,
                                      INetwork& m_Network, const tensorflow::NodeDef& nodeDef)
{
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();
    const TensorInfo inputTensorInfo = input0Slot->GetTensorInfo();
    // Channel axis: last dimension for NHWC, third-from-last for NCHW.
    const unsigned int matchDim = inputTensorInfo.GetNumDimensions() - (isNHWC ? 1 : 3);
    std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
    std::fill_n(reshapedDimensions.begin(), inputTensorInfo.GetNumDimensions(), 1);
    // NOTE(review): uses input1's shape[0] only - assumes input1 is effectively 1-D; confirm at call sites.
    reshapedDimensions[matchDim] = input1Info.GetShape()[0];

    armnn::TensorInfo reshapedInfo = input1Info;
    reshapedInfo.SetShape(TensorShape{ inputTensorInfo.GetNumDimensions(), reshapedDimensions.data() });

    // Insert a Reshape layer between input1 and its consumer.
    const std::string reshapeLayerName = "reshape_for-" + nodeDef.name();
    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
    IConnectableLayer* const reshapeLayer = m_Network.AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());

    input1Slot->Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    input1Slot = &reshapeLayer->GetOutputSlot(0);

    return input1Slot;
}
308
309OutputId ParseOutputId(const std::string & name)
310{
311 unsigned int outputNum = 0;
312 size_t colonPos = name.find_last_of(":");
313 if (colonPos != std::string::npos)
314 {
315 int n = std::stoi(name.substr(colonPos+1));
316 if (n<0 || n>100)
317 {
telsoa01c577f2c2018-08-31 09:22:23 +0100318 throw ParseException(
319 boost::str(
320 boost::format(
321 "Output tensor id is out of range for %1% %2%")
322 % name
323 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100324 }
325 outputNum = static_cast<unsigned int>(n);
326 }
327 return OutputId(name.substr(0,colonPos),outputNum);
328}
329
// Validates that a node's data-format attribute is one of the two layouts this
// parser understands ("NHWC" or "NCHW"); throws ParseException otherwise.
#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \
    if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Unsupported data format %1% passed for %2% node %3%. " \
                    "Only NHWC and NCHW supported %4%") \
                    % FORMAT \
                    % NODE_TYPE \
                    % NODE_DEF.name() \
                    % CHECK_LOCATION().AsString())); \
    }
343
// Validates that a node's padding attribute is one of TensorFlow's two padding
// schemes ("SAME" or "VALID"); throws ParseException otherwise.
#define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
    if(PADDING != "SAME" && PADDING != "VALID" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Only 'SAME' and 'VALID' padding supported. Got %1% for %2% %3%") \
                    % PADDING \
                    % NODE_DEF.name() \
                    % CHECK_LOCATION().AsString())); \
    } \

surmeh01bceff2f2018-03-29 16:29:27 +0100356} // namespace
357
// Dispatch table mapping each supported TensorFlow op name to the TfParser member
// function that converts it. Ops absent from this table are unsupported.
const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
    { "Const",                 &TfParser::ParseConst },
    { "Add",                   &TfParser::ParseAdd },
    { "BiasAdd",               &TfParser::ParseBiasAdd },
    { "Identity",              &TfParser::ParseIdentity },
    { "Conv2D",                &TfParser::ParseConv2D },
    { "DepthwiseConv2dNative", &TfParser::ParseDepthwiseConv2D },
    { "ExpandDims",            &TfParser::ParseExpandDims },
    { "FusedBatchNorm",        &TfParser::ParseFusedBatchNorm },
    { "ConcatV2",              &TfParser::ParseConcat },
    { "LRN",                   &TfParser::ParseLrn },
    { "MatMul",                &TfParser::ParseMatMul },
    { "Mul",                   &TfParser::ParseMul },
    { "Placeholder",           &TfParser::ParsePlaceholder },
    { "RealDiv",               &TfParser::ParseRealDiv },
    { "Relu",                  &TfParser::ParseRelu },
    { "Relu6",                 &TfParser::ParseRelu6 },
    { "Reshape",               &TfParser::ParseReshape },
    { "ResizeBilinear",        &TfParser::ParseResizeBilinear },
    { "Shape",                 &TfParser::ParseShape },
    { "Squeeze",               &TfParser::ParseSqueeze },
    { "Sigmoid",               &TfParser::ParseSigmoid },
    { "Softmax",               &TfParser::ParseSoftmax },
    { "Softplus",              &TfParser::ParseSoftplus },
    { "Tanh",                  &TfParser::ParseTanh },
    { "MaxPool",               &TfParser::ParseMaxPool },
    { "AvgPool",               &TfParser::ParseAvgPool },
    { "Maximum",               &TfParser::ParseMaximum },
    { "Minimum",               &TfParser::ParseMinimum },
    { "Pad",                   &TfParser::ParsePad },
    { "Sub",                   &TfParser::ParseSub },
};
390
/// Creates a heap-allocated parser. The caller owns the pointer and must release it
/// via ITfParser::Destroy (prefer ITfParser::Create, which automates this).
ITfParser* ITfParser::CreateRaw()
{
    return new TfParser();
}
395
/// Creates a parser wrapped in a smart pointer whose deleter calls ITfParser::Destroy,
/// so destruction happens on the library side of the ABI boundary.
ITfParserPtr ITfParser::Create()
{
    return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
}
400
/// Destroys a parser previously obtained from CreateRaw/Create.
void ITfParser::Destroy(ITfParser* parser)
{
    delete parser;
}
405
// Computes TensorFlow-style padding for one spatial dimension. With samePadding
// ('SAME' scheme) the output covers ceil(inputSize / stride) positions and any
// required padding is split between front and back, the back taking the odd unit.
// With 'VALID' (samePadding == false) both paddings are zero.
inline void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
                                 uint32_t filterSize, bool samePadding,
                                 uint32_t* paddingFront, uint32_t* paddingBack) {
    *paddingFront = 0;
    *paddingBack = 0;

    if (!samePadding) {
        return;
    }

    const uint32_t outputSize = (inputSize + stride - 1) / stride; // ceil division
    const uint32_t spanNeeded = (outputSize - 1) * stride + filterSize;
    if (spanNeeded > inputSize) {
        const uint32_t totalPadding = spanNeeded - inputSize;
        *paddingFront = totalPadding / 2;
        *paddingBack  = totalPadding - *paddingFront; // back gets the extra unit when odd
    }
}
421
/// Convenience wrapper around CalculateSamePadding using reference out-parameters
/// for the head (front) and tail (back) padding of a single dimension.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 bool samePadding)
{
    CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
}
427
/// An Abstract base class which represents a single tensorflow operation (node)
/// that has been (potentially partially) converted to Armnn.
/// It may not yet have been fully converted into actual Armnn layers.
class ParsedTfOperation
{
public:
    ParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
    : m_Parser(parser)
    , m_Node(node)
    {
    }

    virtual ~ParsedTfOperation() {};

    const tensorflow::NodeDef& GetNode() const { return m_Node; }

    /// Gets the ArmNN IOutputSlot corresponding to the given output index of the Tensorflow operation.
    /// This may result in the creation of Armnn layers if this was deferred (e.g. see ParsedConstTfOperation).
    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) = 0;

    /// If this operation is an Identity then this will recursively follow it and return
    /// the first non-Identity 'parent' operation; otherwise returns this object itself.
    virtual ParsedTfOperation* ResolveIdentityOperations()
    {
        return this;
    }

protected:
    // Non-owning back-pointer to the parser that created this operation.
    TfParser* m_Parser;
    // The TensorFlow node this operation was parsed from (owned by the GraphDef).
    const tensorflow::NodeDef& m_Node;
};
458
/// An ParsedTfOperation where the Armnn equivalent is a single layer,
/// with output slots that correspond directly to the Tf node outputs.
class SingleLayerParsedTfOperation : public ParsedTfOperation
{
public:
    SingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node, IConnectableLayer* layer)
    : ParsedTfOperation(parser, node)
    , m_Layer(layer)
    {
    }

    /// Returns the layer's output slot with the same index as the requested Tf output.
    /// Throws ParseException if the index exceeds the layer's number of output slots.
    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        BOOST_ASSERT(m_Layer);
        // Assumes one-to-one mapping between Tf and armnn output slots.
        unsigned int armnnOutputSlotIdx = tfOutputIndex;
        if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "The requested output slot #%1% "
                        "for %2% does not exist %3%")
                        % armnnOutputSlotIdx
                        % m_Layer->GetName()
                        % CHECK_LOCATION().AsString()));
        }
        return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
    }

protected:
    // The ArmNN layer backing this operation; may be null for deferred subclasses
    // until the layer is actually created (see DeferredSingleLayerParsedTfOperation).
    IConnectableLayer* m_Layer;
};
492
/// A SingleLayerParsedTfOperation for deferred layer creation: the ArmNN layer is
/// only built (via CreateLayerDeferred) the first time an output slot is requested.
class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
{
public:
    DeferredSingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
    : SingleLayerParsedTfOperation(parser, node, nullptr)
    {
    }

    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        // Lazily create the layer on first use, then defer to the base implementation.
        if (!m_Layer)
        {
            CreateLayerDeferred();
        }
        return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
    }

private:
    /// Subclasses create m_Layer here; called at most once.
    virtual void CreateLayerDeferred() = 0;
};
514
515
TfParser::TfParser()
    // Starts with an empty network smart pointer (object and deleter both null);
    // presumably populated when a graph is actually loaded - see the Create*Network methods.
    : m_Network(nullptr, nullptr)
{
}
520
521
/// Follows chains of Identity nodes recursively and returns the first non-Identity
/// node. Non-Identity nodes are returned unchanged. Throws ParseException if an
/// Identity node is malformed (not exactly one input) or its input cannot be found.
/// NOTE(review): no cycle guard - a cyclic Identity chain would recurse forever; the
/// TF graph is presumably acyclic, so this should not occur in practice.
const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
{
    if (nodeDef->op() != "Identity")
    {
        return nodeDef;
    }

    if (nodeDef->input_size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Identity node should have a single input! %1% has %2% inputs %3%")
                    % nodeDef->name()
                    % nodeDef->input_size()
                    % CHECK_LOCATION().AsString()));
    }

    auto it = m_NodesByName.find(nodeDef->input(0));
    if (it != m_NodesByName.end())
    {
        const tensorflow::NodeDef* inputNode = it->second;
        return ResolveIdentityNode(inputNode);
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Cannot find what the Identity node %1% is linked to! %2%")
                    % nodeDef->name()
                    % CHECK_LOCATION().AsString()));
    }
}
556
/// Returns the TensorFlow nodes (with their output indices) connected as inputs of
/// the given node. Const nodes are treated as having no inputs; control inputs
/// (names starting with '^') are rejected with a ParseException, as is any input
/// that names a node not present in m_NodesByName.
std::vector<OutputOfConstNodeDef>
TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
{
    std::vector<OutputOfConstNodeDef> ret;

    if (nodeDef.op() == "Const")
    {
        // For some reason const node can have "Control Inputs". We ignore them for now.
        return ret;
    }

    ret.reserve(boost::numeric_cast<size_t>(nodeDef.input_size()));
    for (int j = 0; j < nodeDef.input_size(); ++j)
    {
        OutputId outputId = ParseOutputId(nodeDef.input(j));

        if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Node '%1%' has Control Input '%2%' for input #%3% which is unsupported. %4%")
                        % nodeDef.name()
                        % nodeDef.input(j)
                        % j
                        % CHECK_LOCATION().AsString()));
        }

        auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
        if (inputIt == m_NodesByName.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Can't find node '%1%', which is listed as an input of '%2%' %3%")
                        % nodeDef.input(j)
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
        ret.push_back(OutputOfConstNodeDef(inputIt->second,outputId.m_Index));
    }

    return ret;
}
601
/// Fetches the already-parsed operations feeding the given node, validating that the
/// node has exactly 'expectedNumInputs' inputs. Identity operations are transparently
/// resolved to their non-Identity parents. Throws ParseException on an input-count
/// mismatch or if any input node has not been parsed yet.
std::vector<OutputOfParsedTfOperation>
TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
                                            std::size_t expectedNumInputs)
{
    // Fetches the tensorflow nodes connected as inputs and validate the size.
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
    const std::size_t numInputs = nodes.size();
    if (numInputs != expectedNumInputs)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Unexpected number of inputs for node %1%. Expected %2%, found %3% %4%")
                    % nodeDef.name()
                    % expectedNumInputs
                    % numInputs
                    % CHECK_LOCATION().AsString()));
    }
    // Fetches the corresponding ParsedTfOperation operations
    std::vector<OutputOfParsedTfOperation> result;
    for (auto&& node : nodes)
    {
        auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
        if (it == m_ParsedTfOperations.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Node with name '%1%' has not been parsed %2%")
                        % node.m_IndexedValue->name()
                        % CHECK_LOCATION().AsString()));
        }
        ParsedTfOperation* parsedOp = it->second.get();
        // Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
        parsedOp = parsedOp->ResolveIdentityOperations();
        result.push_back(OutputOfParsedTfOperation(parsedOp,node.m_Index));
    }
    return result;
}
641
/// Parses an Add node. As a special case, MatMul + Const-bias additions are fused
/// into a single FullyConnected layer; any other Add becomes a plain addition layer.
ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
    // together as FullyConnected.
    if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
        HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        IConnectableLayer* layer =
            AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
                                   &nodeDef,nodeDef.name().c_str());
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
    }
    else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
             inputs[1].m_IndexedValue->GetNode().op() == "MatMul")
    {
        // Same fusion with the operands the other way round.
        IConnectableLayer* layer =
            AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
                                   &nodeDef,nodeDef.name().c_str());
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
    }
    else
    {
        // Otherwise it's just a regular addition.
        return AddAdditionLayer(nodeDef);
    }
}
670
/// Parses a BiasAdd node as an addition; the 'true' flag presumably tells
/// AddAdditionLayer to apply bias-add (broadcast) semantics - see its definition.
ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    return AddAdditionLayer(nodeDef, true);
}
675
/// An ParsedTfOperation which forwards to another (used for Identity nodes).
class ParsedIdentityTfOperation : public ParsedTfOperation
{
public:
    ParsedIdentityTfOperation(TfParser* parser, const tensorflow::NodeDef& node, ParsedTfOperation* representative)
        : ParsedTfOperation(parser, node)
        , m_Representative(representative)
    {
    }

    // Delegates slot resolution to the represented (upstream) operation.
    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        BOOST_ASSERT(m_Representative);
        return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
    }

    // Recursively resolves through chains of Identity operations.
    virtual ParsedTfOperation* ResolveIdentityOperations() override
    {
        return m_Representative->ResolveIdentityOperations();
    }

private:
    // Non-owning pointer to the operation this Identity forwards to.
    ParsedTfOperation* m_Representative;
};
700
/// Parses an Identity node by wrapping its single input in a forwarding operation;
/// no ArmNN layer is created.
ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
    // Any requests for the output slots of this node should be forwarded to the node connected as input.
    return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
}
707
/// An ParsedTfOperation for a Const node.
/// Creation of the armnn ConstLayer is deferred until it is actually needed, because Const nodes are mostly used
/// for weight inputs to MatMul/Conv2D nodes and in these cases armnn doesn't need a ConstLayer.
template <typename T>
class ParsedConstTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    ParsedConstTfOperation(TfParser* parser, const tensorflow::NodeDef& node,
                           const T* tensorData, const TensorInfo& tensorInfo)
        : DeferredSingleLayerParsedTfOperation(parser, node),
        // Copies the tensor data into owned storage so the proto can be discarded.
        m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
        m_TensorInfo(tensorInfo)
    {
        BOOST_ASSERT(tensorInfo.GetDataType() == GetDataType<T>());
    }

    /// Creates the ConstantLayer on demand (only when a consumer actually needs a layer).
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
        m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
    }

    /// Copies the stored data into 'outputTensorData' and returns a ConstTensor viewing it.
    /// When swizzleForConvolutionWeights is set, the data is permuted from TensorFlow's
    /// filter layout to ArmNN's on the way out.
    ConstTensor GetConstTensor(bool swizzleForConvolutionWeights, std::vector<T>& outputTensorData) const
    {
        // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
        // Tensorflow weights are [H, W, In, Out].
        // ArmNN weights are [Out, In, H, W].
        static const PermutationVector HWIOToOIHW = {2, 3, 1, 0};

        const TensorInfo outInfo = swizzleForConvolutionWeights
                                   ? armnnUtils::Permuted(m_TensorInfo, HWIOToOIHW)
                                   : m_TensorInfo;

        outputTensorData.resize(m_TensorInfo.GetNumElements());

        // Copies or swizzles from the permanent storage into the storage the caller provided.
        if (swizzleForConvolutionWeights)
        {
            armnnUtils::Permute(outInfo.GetShape(), HWIOToOIHW, m_Storage.data(), outputTensorData.data());
        }
        else
        {
            memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.GetNumBytes());
        }
        // Updates the result to point to the user provided storage.
        ConstTensor constTensor(outInfo, outputTensorData);
        return constTensor;
    }

    /// Read-only access to the raw (unswizzled) tensor data.
    const T* GetStorage() const
    {
        return m_Storage.data();
    }

    const TensorInfo& GetTensorInfo() const
    {
        return m_TensorInfo;
    }

private:
    ///< Manages the lifetime of the tensor data.
    std::vector<T> m_Storage;
    ///< Describes the layout of the tensor and points to the data in m_Storage.
    TensorInfo m_TensorInfo;
};
774
telsoa01c577f2c2018-08-31 09:22:23 +0100775DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
776 const tensorflow::NodeDef& nodeDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100777{
778 switch (tfDataType)
779 {
780 case tensorflow::DT_FLOAT:
781 return DataType::Float32;
782 break;
783 case tensorflow::DT_INT32:
784 return DataType::Signed32;
785 break;
786 default:
telsoa01c577f2c2018-08-31 09:22:23 +0100787 throw ParseException(
788 boost::str(
789 boost::format(
790 "Unknown DataType %1% for node %2% %3%")
791 % tensorflow::DataType_Name(tfDataType)
792 % nodeDef.name()
793 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100794 }
795}
796
/// Helper for reading a TensorProto's explicit value list (e.g. float_val / int_val)
/// into a raw byte buffer, padding by repeating the last element when the destination
/// is larger than the list.
struct ParseTfTensorValueList
{
    // Specialized per element type below (float, int32_t).
    template<typename DataType>
    static void Parse(
        const tensorflow::TensorProto& tfTensor,
        unsigned int dstElements,
        std::vector<int8_t>& outputData);

    /// Copies numSrcElements of DataType from srcData into dstData (resized as needed,
    /// in bytes). If numDstElements is 0 the source length is used; if it exceeds the
    /// source length, the final source element is replicated to fill the remainder.
    template <typename DataType>
    static void ReadData(const void* srcData, unsigned int numSrcElements,
        std::vector<int8_t>& dstData, unsigned int numDstElements)
    {
        // If there are no entries in the list, perform no action.
        if (numSrcElements == 0)
        {
            return;
        }

        // If no size was provided, use the length of the value list.
        if (numDstElements == 0)
        {
            numDstElements = numSrcElements;
        }

        // Allocates memory.
        dstData.resize(std::max(numSrcElements, numDstElements) * sizeof(DataType));

        const DataType* srcTensor = reinterpret_cast<const DataType*>(srcData);
        DataType* dstTensor = reinterpret_cast<DataType*>(dstData.data());

        // Copies the value list entries into the destination.
        std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);

        if (numDstElements > numSrcElements)
        {
            // Uses the last element in the list to fill the remaining entries.
            std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
        }
    }

};
838
/// float specialization: reads the proto's float_val list.
template <>
void ParseTfTensorValueList::Parse<float>(const tensorflow::TensorProto& tfTensor,
    unsigned int dstElements, std::vector<int8_t>& outputData)
{
    ReadData<float>(tfTensor.float_val().data(), static_cast<unsigned int>(tfTensor.float_val_size()),
        outputData, dstElements);
}
846
/// int32_t specialization: reads the proto's int_val list.
template <>
void ParseTfTensorValueList::Parse<int32_t>(const tensorflow::TensorProto& tfTensor,
    unsigned int dstElements, std::vector<int8_t>& outputData)
{
    ReadData<int32_t>(tfTensor.int_val().data(), static_cast<unsigned int>(tfTensor.int_val_size()),
        outputData, dstElements);
}
854
/// Factory template used with InvokeParseFunction: constructs an OperatorType<DataType>
/// (a ParsedTfOperation subclass) for a data type selected at runtime.
template <template<typename> class OperatorType, typename T = int8_t>
struct MakeTfOperation
{
    template<typename DataType, class... Args>
    inline static std::unique_ptr<OperatorType<DataType>> Parse(TfParser* parser, const tensorflow::NodeDef& node,
        Args&&... args)
    {
        return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
    }
};
865
866template <>
867struct MakeTfOperation<ParsedConstTfOperation>
868{
869 template<typename DataType, class... Args>
870 inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(TfParser* parser,
871 const tensorflow::NodeDef& node, const std::vector<int8_t>& tensorData, const TensorInfo& tensorInfo)
872 {
873 return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
874 reinterpret_cast<const DataType*>(tensorData.data()), tensorInfo);
875 }
876};
877
878template <class FuncType>
879struct InvokeParseFunction
880{
881 template<class ResType, class... Args>
882 inline static ResType Result(DataType dataType, Args&&... args)
883 {
884 if (dataType == DataType::Float32)
885 {
886 return FuncType::template Parse<float>(std::forward<Args>(args)...);
887 }
888 else if (dataType == DataType::Signed32)
889 {
890 return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
891 }
892
893 return ResType();
894 }
895
896 template<class... Args>
897 inline static void Result(DataType dataType, Args&&... args)
898 {
899 if (dataType == DataType::Float32)
900 {
901 FuncType::template Parse<float>(std::forward<Args>(args)...);
902 }
903 else if (dataType == DataType::Signed32)
904 {
905 FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
906 }
907 }
908};
909
// Parses a TensorFlow "Const" node into a ParsedConstTfOperation holding the
// tensor's raw data and TensorInfo. The constant's data may come either from
// the proto's typed value list (float_val/int_val) or from its packed
// tensor_content bytes; the shape comes from the "value" attribute's
// tensor_shape, or is inferred as 1D from the value list when absent.
// Throws ParseException when the node has no value, no data, no shape for
// packed content, or more data than the shape allows.
ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    BOOST_ASSERT(nodeDef.op() == "Const");

    if (nodeDef.attr().count("value") == 0)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Value not found for Const node - %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
    const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
    const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "dtype");

    // Extracts the size of each proto dimension into a flat vector.
    const auto GetDimensionSize = [](auto& d) { return d.size(); };

    std::vector<unsigned int> dimensionSizes;
    std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
        std::back_inserter(dimensionSizes), GetDimensionSize);

    // Calculates number of elements.
    const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
    // numElements stays 0 when the proto carries no shape - this is used below
    // to distinguish "shape given" from "shape must be inferred".
    unsigned int numElements = 0U;

    if (!dimensionSizes.empty())
    {
        numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
                                      1U, std::multiplies<unsigned int>());
    }

    std::vector<int8_t> tensorData;

    // Get tensor data from the list of values attribute.
    if (tfTensor.tensor_content().empty())
    {
        // Dispatch on dataType to copy the typed value list into the byte buffer.
        InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);

        // If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
        // tensor of the provided number of elements.
        if (numElements == 0)
        {
            const unsigned int tfNumElements =
                static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
            dimensionSizes.push_back(tfNumElements);
        }
    }
    // Gets tensor data from tensor content attribute.
    else
    {
        tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());

        // Checks if a tensor shape is defined for the tensor content.
        // Packed bytes carry no element count of their own, so an explicit
        // shape is mandatory here.
        if (numElements == 0)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "No tensor shape found for Const node - %1% %2%")
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
    }

    // Const node requires at least a list of values or a content attribute.
    if (tensorData.empty())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "No tensor data found for Const node - %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
                                dimensionSizes.data(),
                                dataType);

    // If we have a list of values, then the length of the list must be
    // less than or equal to the number of elements implied by the shape argument.
    // (ParseTfTensorValueList pads short lists up to numElements, so only
    // over-long data is an error.)
    if (tensorData.size() > tensorInfo.GetNumBytes())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Number of elements (%1%) should be less than or equal "
                    "to the number of elements implied by the shape argument (%2%) for Const node - %3% %4%")
                    % (tensorData.size() / GetDataTypeSize(dataType))
                    % tensorInfo.GetNumElements()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    // Builds a ParsedConstTfOperation<float> or <int32_t> depending on dataType.
    return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
        dataType, this, nodeDef, tensorData, tensorInfo);
}
1010
1011template<typename Type>
1012bool TfParser::HasParsedConstTensor(const std::string & nodeName) const
1013{
1014 auto it = m_ParsedTfOperations.find(nodeName);
jimfly01f6ba7472018-12-04 10:09:52 +00001015 if (it == m_ParsedTfOperations.end())
surmeh01bceff2f2018-03-29 16:29:27 +01001016 {
1017 return false;
1018 }
jimfly01f6ba7472018-12-04 10:09:52 +00001019 return dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) != nullptr;
1020}
1021
1022template<typename Type>
1023bool TfParser::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const
1024{
1025 return dynamic_cast<ParsedConstTfOperation<Type>*>(parsedTfOpPtr) != nullptr;
surmeh01bceff2f2018-03-29 16:29:27 +01001026}
1027
// Parses a TensorFlow "Conv2D" node into an ArmNN Convolution2d layer.
// Requirements enforced here: the weight input must be a parsed float
// constant, dilations (if present) must all be 1, data_format must be
// NHWC or NCHW, and padding must be SAME or VALID.
ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    // Input 1 carries the filter weights; ArmNN needs them at parse time.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Convolution layers with constant weights for %1%, input %2% %3%")
                    % nodeDef.name()
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    // Read the dilations, if present - only [1,1,1,1] (the default) is supported.
    std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
    if (!dilations.empty())
    {
        for (auto dilation : dilations)
        {
            if (dilation != 1u)
            {
                throw ParseException(
                    boost::str(
                        boost::format(
                            "ArmNN only supports Convolution layers with dilations [1,1,1,1] for %1% %2%")
                            % nodeDef.name()
                            % CHECK_LOCATION().AsString()));
            }
        }
    }

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    // DataLayoutIndexed maps H/W to the right positions for the chosen layout,
    // so strides and shapes below are layout-agnostic.
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // Tensorflow weights are [H, W, In, Out].
    // ArmNN weights have to be [Out, H, W, In] when the data layout is NHWC,
    // and [Out, In, H, W] when the data layout is NCHW.
    PermutationVector permutationVector =
        dataLayout == DataLayout::NHWC ?
            std::initializer_list<unsigned int>{ 1, 2, 3, 0 } : // NHWC: [H, W, In, Out] -> [Out, H, W, In]
            std::initializer_list<unsigned int>{ 2, 3, 1, 0 };  // NCHW: [H, W, In, Out] -> [Out, In, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data());

    // Create a weight tensor with the newly swizzled data.
    // NOTE(review): weightTensorSwizzledData is a local buffer - this relies on
    // AddConvolution2dLayer below copying the tensor data; confirm against the
    // INetwork contract if this is ever reordered.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    uint32_t weightHeight = weightTensor.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t weightWidth = weightTensor.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    // Output spatial size follows TensorFlow's SAME/VALID conventions:
    // SAME: ceil(in / stride); VALID: ceil((in - weight + 1) / stride).
    if (paddingString == "SAME")
    {
        padding = true;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                 static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                 static_cast<float>(desc.m_StrideX)));
    }

    // Assemble the output shape in the same layout as the input. The output
    // channel count is the weights' "Out" dimension, which after swizzling is
    // always at index 0.
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      weightTensor.GetShape()[0] },
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
        default:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      weightTensor.GetShape()[0],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc, weightTensor, nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1165
// Parses a TensorFlow "DepthwiseConv2dNative" node into an ArmNN
// DepthwiseConvolution2d layer. Unlike ParseConv2D above, this path does not
// use DataLayoutIndexed: NHWC inputs are handled by permuting the input info
// to ArmNN's NCHW ordering and wrapping the layer in swizzle/deswizzle layers.
ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
                                                    const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    // Input 1 carries the filter weights; they must be a parsed float constant.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Depthwise Convolution layer with constant weights. "
                    "Non const input found %1% for node %2% %3%")
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);


    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    // Rejects anything other than NHWC / NCHW, so exactly one branch below runs.
    CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");

    if (dataFormat == "NHWC")
    {
        desc.m_StrideX = strides[2];
        desc.m_StrideY = strides[1];
        // Swizzles input to supported memory layout.
        inputTensorInfo = armnnUtils::Permuted(inputSlot.GetTensorInfo(), NHWCToArmNN);
    }
    else if (dataFormat == "NCHW")
    {
        desc.m_StrideX = strides[3];
        desc.m_StrideY = strides[2];
    }

    // From here on inputTensorInfo is in NCHW order, so H=2, W=3.
    uint32_t inputHeight = inputTensorInfo.GetShape()[2];
    uint32_t inputWidth = inputTensorInfo.GetShape()[3];

    std::vector<float> outputTensorData;

    // NOTE(review): the 'true' flag presumably requests weight swizzling into
    // ArmNN's depthwise ordering - confirm against GetConstTensor's definition.
    ConstTensor weightTensor = weightNode->GetConstTensor(true, outputTensorData);

    uint32_t weightHeight = weightTensor.GetShape()[2];
    uint32_t weightWidth = weightTensor.GetShape()[3];

    bool padding = false;
    TensorInfo outputInfo;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    // Output channels are GetShape()[0] * GetShape()[1] - depthwise output
    // depth is the product of the weights' first two dimensions here.
    // Output spatial size follows TensorFlow's SAME/VALID conventions.
    if (paddingString == "SAME")
    {
        padding = true;
        outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
                                static_cast<uint32_t>(ceil(
                                    static_cast<float>(inputHeight) /
                                    static_cast<float>(desc.m_StrideY))),
                                static_cast<uint32_t>(ceil(
                                    static_cast<float>(inputWidth) /
                                    static_cast<float>(desc.m_StrideX)))
                                }, DataType::Float32);
    }
    else if (paddingString == "VALID")
    {
        padding = false;
        outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
                                static_cast<uint32_t>(ceil(
                                    static_cast<float>(inputHeight - weightHeight + 1) /
                                    static_cast<float>(desc.m_StrideY))),
                                static_cast<uint32_t>(ceil(
                                    static_cast<float>(inputWidth - weightWidth + 1) /
                                    static_cast<float>(desc.m_StrideX)))
                                }, DataType::Float32);
    }

    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc, weightTensor, nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // NHWC networks get swizzle (NHWC->NCHW) in front and deswizzle behind,
    // so the layer itself always runs in NCHW.
    if (dataFormat == "NHWC")
    {
        layer = SwizzleInDeswizzleOut(*m_Network, inputSlot, *layer, nodeDef.name());
    }
    else
    {
        inputSlot.Connect(layer->GetInputSlot(0));
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1269
// Computes the output TensorInfo of an "ExpandDims" node: the input shape with
// a dimension of size 1 inserted at axis "Tdim". Tdim may be negative
// (counting from the end, TensorFlow-style, so -1 appends after the last
// dimension). Throws if the input has more than 4 dimensions, if Tdim is out
// of the valid range [-1-rank, rank], or if the result would exceed 4
// dimensions.
TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
{
    BOOST_ASSERT(nodeDef.op() == "ExpandDims");

    if (inputTensorInfo.GetNumDimensions() > 4) {
        throw ParseException(
            boost::str(
                boost::format(
                    "Unsupported number of dimensions: %1% for input shape for ExpandDims %2% %3%")
                    % inputTensorInfo.GetNumDimensions()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    std::int32_t expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");

    std::int32_t inputDimSize = boost::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    std::vector<uint32_t> outputDims;

    // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
    if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
    {
        // add current input shape to outputDims
        for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
            auto currentDimension = inputTensorInfo.GetShape()[i];
            outputDims.push_back(currentDimension);
        }

        // insert a dimension of 1 at index 'expandDim' of inputs shape
        if (expandDim >= 0)
        {
            auto getPosition = std::next(outputDims.begin() + 0, expandDim);
            outputDims.insert(getPosition, 1);
        }

        // if negative number for 'expandDim' then count backwards from the last element
        // and insert 1 dimension at index 'expandDim'
        if (expandDim < 0)
        {
            // size() + 1 makes expandDim == -1 land one past the current last
            // element, i.e. the new dimension is appended at the end.
            int outputDimSize = boost::numeric_cast<int>(outputDims.size() + 1);
            auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
            outputDims.insert(getPosition, 1);
        }
    }
    else
    {
        throw InvalidArgumentException(
            boost::str(
                boost::format(
                    "Cannot expand dimension %1% in input tensor with %2% dimension %3%")
                    % expandDim
                    % inputDimSize
                    % CHECK_LOCATION().AsString()));
    }

    // A 4D input expanded by one axis would produce 5 dimensions, which ArmNN
    // does not support - caught here rather than at the earlier input check.
    if (outputDims.size() > 4)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Unsupported number of dimensions: %1% for output shape for ExpandDims %2% %3%")
                    % outputDims.size()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
                                       outputDims.data());

    // Keep the input's data type / quantization info; only the shape changes.
    TensorInfo outTensorInfo = inputTensorInfo;
    outTensorInfo.SetShape(outShape);

    return outTensorInfo;
}
1344
1345ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1346{
1347 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1348
1349 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1350 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1351
1352 TensorInfo outputInfo;
1353 outputInfo = OutputShapeOfExpandDims(nodeDef, inputTensorInfo);
1354
1355 ReshapeDescriptor reshapeDesc;
1356 reshapeDesc.m_TargetShape = outputInfo.GetShape();
1357 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1358 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1359 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1360
1361 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1362}
1363
// Parses a TensorFlow "FusedBatchNorm" node into an ArmNN BatchNormalization
// layer. Inputs are: [0] data, [1] scale (gamma), [2] offset (beta),
// [3] mean, [4] variance. Inputs 1-4 must all be parsed float constants;
// otherwise a ParseException is thrown. The output shape/type is taken
// unchanged from the data input.
ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
                                                   const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);

    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant scale. "
                    "Input %1%. Node %2% %3%")
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* scaleNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant offset. "
                    "Input %1%. Node %2% %3%")
                    % inputs[2].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* offsetNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);

    if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant mean. "
                    "Input %1%. Node %2% %3%")
                    % inputs[3].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* meanNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);

    if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant variance. "
                    "Input %1%. Node %2% %3%")
                    % inputs[4].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* varianceNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);

    const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "FusedBatchNorm");

    // The descriptor only has the epsilon attribute.
    BatchNormalizationDescriptor desc;
    desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
    desc.m_DataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
    // locally until the layer is added.
    std::vector<float> scaleTensorData;
    ConstTensor scaleTensor = scaleNode->GetConstTensor(false, scaleTensorData);

    std::vector<float> offsetTensorData;
    ConstTensor offsetTensor = offsetNode->GetConstTensor(false, offsetTensorData);

    std::vector<float> meanTensorData;
    ConstTensor meanTensor = meanNode->GetConstTensor(false, meanTensorData);

    std::vector<float> varianceTensorData;
    ConstTensor varianceTensor = varianceNode->GetConstTensor(false, varianceTensorData);

    IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
                                                                     meanTensor,
                                                                     varianceTensor,
                                                                     offsetTensor,
                                                                     scaleTensor,
                                                                     nodeDef.name().c_str());

    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);

    // Batch normalization is shape-preserving: propagate the input's info.
    layer->GetOutputSlot(0).SetTensorInfo(inputSlot.GetTensorInfo());
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1462
// Detects the mul-half of a LeakyReLU expressed as max(mul(alpha, x), x).
// mulNodeDef is the candidate "Mul" node, alphaLayerIndex says which of its
// two inputs should be the scalar alpha constant, and otherOp is the tensor
// that must appear both as the Mul's other input and as the max's other
// operand. On a match, fills desc with a LeakyReLu activation (m_A = alpha),
// points *outputOfLeakyRelu at the shared input's output slot, and returns
// true; otherwise returns false and leaves the outputs untouched.
bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
                                           size_t alphaLayerIndex,
                                           const OutputOfParsedTfOperation& otherOp,
                                           armnn::IOutputSlot** outputOfLeakyRelu,
                                           armnn::ActivationDescriptor & desc)
{
    const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();

    // Verifying all these assumptions hold:
    //
    // 1, the mulNodeDef is an elementwise multiplication node "Mul"
    // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
    // 3, the inputLayerIndex selects a layer which has the same name as otherNodeDef
    //

    if (mulNodeDef.op() == "Mul")
    {
        // The Mul has exactly two inputs; alpha is at alphaLayerIndex (0 or 1)
        // and the data tensor must be at the other index.
        size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
        std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);

        BOOST_ASSERT(inputs.size() == 2);
        BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
        BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
        BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));

        // The Mul's non-alpha input must be the same node as the max's other operand.
        if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
        {
            if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
            {
                ParsedConstTfOperation<float>* alpha =
                    boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(
                        inputs[alphaLayerIndex].m_IndexedValue);

                std::vector<float> const_data;
                ConstTensor const_tensor = alpha->GetConstTensor(false, const_data);

                // Alpha must be a single scalar for a LeakyReLU activation.
                if (const_data.size() == 1)
                {
                    desc.m_Function = ActivationFunction::LeakyReLu;
                    desc.m_A = const_data[0];

                    *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
                    return true;
                }
            }
        }
    }
    return false;
}
1512
telsoa01c577f2c2018-08-31 09:22:23 +01001513ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
1514 const tensorflow::GraphDef& graphDef)
1515{
1516 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
Sadik Armagan975c09a2018-12-04 10:02:08 +00001517 if (inputs.size() != 2)
1518 {
1519 throw ParseException(
1520 boost::str(
1521 boost::format(
1522 "Maximum expects two inputs!. Got %1% for Node %2% %3%")
1523 % inputs.size()
1524 % nodeDef.name()
1525 % CHECK_LOCATION().AsString()));
1526 }
1527
telsoa01c577f2c2018-08-31 09:22:23 +01001528 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1529 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1530 IOutputSlot* outputOfLeakyRelu = nullptr;
1531
1532 ActivationDescriptor desc;
1533
Sadik Armagan975c09a2018-12-04 10:02:08 +00001534 // A max node may be part of a LeakyRelu, with one input as a multiplication with a scalar constant,
1535 // i.e. one of the four possible scenarios:
1536 // 1, max(mul(a, x), x)
1537 // 2, max(mul(x, a), x)
1538 // 3, max(x, mul(a, x))
1539 // 4, max(x, mul(x, a))
1540 // These are handled by an activation layer.
telsoa01c577f2c2018-08-31 09:22:23 +01001541
1542 if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1543 IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1544 IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1545 IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1546 {
1547 BOOST_ASSERT(outputOfLeakyRelu != nullptr);
1548
1549 IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
1550 outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
1551 layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
1552 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1553 }
1554 else
1555 {
Sadik Armagan975c09a2018-12-04 10:02:08 +00001556 // Anything else is just a maximum layer.
1557
1558 return AddMaximumLayer(nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001559 }
1560}
1561
// Parses a TensorFlow "Minimum" node into an ArmNN Minimum layer.
// When the two inputs differ in rank, only the (1D, 4D) pairing is supported:
// the 1D input is reshaped (via AddBroadcastReshapeLayer) so that both slots
// end up with the same rank before being connected. The output shape is the
// per-dimension maximum of the two (possibly reshaped) input shapes, i.e. the
// numpy-style broadcast shape.
ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
                                            const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
    const unsigned int input0Dim = input0Slot->GetTensorInfo().GetNumDimensions();
    const unsigned int input1Dim = input1Slot->GetTensorInfo().GetNumDimensions();

    if (input0Dim != input1Dim)
    {
        // broadcasting where input0 and input1 have different number of dimensions
        // is only supported for 1D and 4D tensors pair
        if (input0Dim == 1 && input1Dim == 4)
        {
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
        }
        else if (input0Dim == 4 && input1Dim == 1)
        {
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
        }
        else
        {
            throw ParseException(
                boost::str(
                    boost::format("Unsupported broadcast configuration for Minimum operation %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }
    }

    IConnectableLayer* const layer = m_Network->AddMinimumLayer(nodeDef.name().c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // Start from input0's info so data type / quantization carry over; only
    // the shape is recomputed below.
    TensorInfo outputInfo = input0Slot->GetTensorInfo();
    std::vector<unsigned int> outputShape;

    // At this point both slots have the same rank (either originally, or after
    // the broadcast reshape above), so indexing input1Shape[i] is safe.
    const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
    const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();

    for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
    {
        outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
    }

    outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1615
jimfly0123be07e2018-12-04 17:47:22 +00001616ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1617{
1618 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1619
1620 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1621 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1622
1623 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
1624 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
1625
1626 if (input0Info.GetNumDimensions() == 1)
1627 {
1628 const bool isNHWC = true;
1629 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
1630 }
1631
1632 if (input1Info.GetNumDimensions() == 1)
1633 {
1634 const bool isNHWC = true;
1635 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
1636 }
1637
1638 IConnectableLayer* const layer = m_Network->AddSubtractionLayer(nodeDef.name().c_str());
1639
1640 input0Slot->Connect(layer->GetInputSlot(0));
1641 input1Slot->Connect(layer->GetInputSlot(1));
1642
1643 if (input0Info.GetNumDimensions() == 1)
1644 {
1645 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
1646 }
1647 else
1648 {
1649 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
1650 }
1651
1652 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1653}
1654
jimfly01f6ba7472018-12-04 10:09:52 +00001655unsigned int CheckPaddingTensor(const ConstTensor& paddingTensor,
1656 const TensorInfo& inputTensorInfo,
1657 const std::string& nodeName)
1658{
1659 unsigned int rank = paddingTensor.GetShape()[0];
1660 unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
1661 if (rank != expectedRank)
1662 {
1663 throw ParseException(
1664 boost::str(
1665 boost::format(
1666 "Expected the padding tensor to be of rank %1 not %2 on Node %3 %4.")
1667 % expectedRank
1668 % rank
1669 % nodeName
1670 % CHECK_LOCATION().AsString()));
1671 }
1672 unsigned int second = paddingTensor.GetShape()[1];
1673 if (second != 2)
1674 {
1675 throw ParseException(
1676 boost::str(
1677 boost::format(
1678 "Expected the padding tensor to be of dimensions [%1, 2] not [%1, %2] on Node %3 %4.")
1679 % rank
1680 % second
1681 % nodeName
1682 % CHECK_LOCATION().AsString()));
1683 }
1684 return rank;
1685}
1686
1687TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
1688 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
1689{
1690 unsigned int numDims = inputTensorInfo.GetNumDimensions();
1691 std::vector<unsigned int> outDims;
1692 for (unsigned int i = 0; i < numDims; ++i)
1693 {
1694 unsigned int dimSize = inputTensorInfo.GetShape()[i];
1695 const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
1696 dimSize += dimPadding.first;
1697 dimSize += dimPadding.second;
1698 outDims.push_back(dimSize);
1699 }
1700 TensorInfo paddedTensorInfo = inputTensorInfo;
1701 unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
1702 paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
1703 return paddedTensorInfo;
1704}
1705
1706ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
1707 const tensorflow::GraphDef& graphDef)
1708{
1709 // input consists of:
1710 // input[0] the tensor which will be padded
1711 // input[1] the tensor holding the padding values
1712 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1713 IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1714 TensorInfo inputTensorInfo = previousLayerOutputSlot.GetTensorInfo();
1715 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
1716 {
1717 throw ParseException(
1718 boost::str(
1719 boost::format(
1720 "ArmNN only supports Pad with constant padding. "
1721 "Input %1%. Node %2% %3%")
1722 % inputs[1].m_IndexedValue->GetNode().name()
1723 % nodeDef.name()
1724 % CHECK_LOCATION().AsString()));
1725
1726 }
1727 ParsedConstTfOperation<int32_t>* paddingTensorOp =
1728 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1729
1730 std::vector<int32_t> paddingTensorData;
1731 ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(false, paddingTensorData);
1732 // paddings is an integer tensor with shape [n, 2], where n is the rank of tensor
1733 // and should match the rank of the input tensor that is being padded.
1734 // For each dimension D of input, paddings[D, 0] indicates how many values to add
1735 // before the contents of tensor in that dimension, and paddings[D, 1] indicates how
1736 // many values to add after the contents of tensor in that dimension
1737 // This needs to be translated into a padList for ACL
1738 std::vector<std::pair<unsigned int, unsigned int>> padList;
1739 unsigned int rank = CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
1740 for (unsigned int i = 0; i < rank; ++i)
1741 {
1742 std::pair<unsigned int, unsigned int> paddingForDim;
1743 for (unsigned int j = 0; j < 2; j++)
1744 {
1745 unsigned int index = (i * 2) + j;
1746 int paddingAmount = paddingTensorData[index];
1747 // make sure we can cast to an unsigned value
1748 if (paddingAmount < 0)
1749 {
1750 throw ParseException(
1751 boost::str(
1752 boost::format(
1753 "Negative amount %1 specified at [%2, %3] of padding tensor on Node %4 %5.")
1754 % paddingAmount
1755 % i
1756 % j
1757 % nodeDef.name()
1758 % CHECK_LOCATION().AsString()));
1759 }
1760 if (j == 0)
1761 {
1762 paddingForDim.first = static_cast<unsigned int>(paddingAmount);
1763 }
1764 else
1765 {
1766 paddingForDim.second = static_cast<unsigned int>(paddingAmount);
1767 }
1768 }
1769 padList.push_back(paddingForDim);
1770 }
1771 PadDescriptor padDescriptor(padList);
1772 IConnectableLayer* layer = m_Network->AddPadLayer(padDescriptor, nodeDef.name().c_str());
1773 previousLayerOutputSlot.Connect(layer->GetInputSlot(0));
1774 // Use the padding to calculate the new output tensor shape
1775 TensorInfo outputTensorInfo = CalculatePaddedOutputTensorInfo(inputTensorInfo, padList);
1776 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1777 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1778}
1779
ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
    // In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
    unsigned int numInputs = static_cast<unsigned int>(nodes.size());
    unsigned int numConcatView = numInputs - 1;

    // One origin per concatenated view; views are described in up to
    // MaxNumOfTensorDimensions dimensions.
    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), MaxNumOfTensorDimensions);
    std::vector<unsigned int>mergeDimSizes(MaxNumOfTensorDimensions, 0u);

    unsigned int mergeDim = 0;
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);

    // The last input is the axis for concatenation; it must be a parse-time constant.
    if (!HasParsedConstTensor<int32_t>(inputs[numInputs - 1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Concat with constant axis. "
                    "Input %1%. Node %2% %3%")
                % inputs[numInputs - 1].m_IndexedValue->GetNode().name()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<int32_t>* shapeNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[numInputs - 1].m_IndexedValue);

    std::vector<int32_t> axisTensorData;
    ConstTensor axisTensor = shapeNode->GetConstTensor(false, axisTensorData);

    // This concatDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
    const unsigned int concatDimInput = static_cast<unsigned int>(axisTensorData[0]);

    // Armnn supports concatenation along the channel dimension for data formats NHWC and NCHW.
    if (concatDimInput == 0 || concatDimInput == 2)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Dimension %1% for concatenation is not supported by Armnn. "
                    "Node %2% %3%")
                % concatDimInput
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    // This is the only concatDim we support in armnn: NHWC inputs are swizzled
    // to NCHW below, so the merge is always performed over dimension 1 (channels).
    const unsigned int concatDim = 1;
    for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
    {
        // Accumulates per-view origins and the running channel size into the
        // descriptor (mergeDim grows by each view's concat-axis extent).
        IOutputSlot& inputSlot =
            inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
        TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

        // process the input tensor info
        armnnUtils::ProcessConcatInputTensorInfo(inputTensorInfo, concatDescriptor,
                                                 concatDimInput, viewIndex, mergeDimSizes, mergeDim);
    }

    mergeDimSizes[concatDim] = mergeDim;
    armnn::IConnectableLayer *layer = m_Network->AddMergerLayer(concatDescriptor, nodeDef.name().c_str());

    layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(MaxNumOfTensorDimensions, mergeDimSizes.data(),
                                                            DataType::Float32));

    // NHWC (axis 3) inputs are swizzled into the armnn NCHW order before the merger.
    for (unsigned int v = 0; v < numConcatView; ++v)
    {
        IOutputSlot& inputSlot = inputs[v].m_IndexedValue->ResolveArmnnOutputSlot(inputs[v].m_Index);
        if (concatDimInput == 3)
        {
            IConnectableLayer* const swizzleLayer = AddSwizzleLayer(*m_Network, inputSlot, NHWCToArmNN,
                                                                    "swizzle_for-" + nodeDef.name());
            swizzleLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(v));
        }
        else
        {
            inputSlot.Connect(layer->GetInputSlot(v));
        }
    }

    // For NHWC, swizzle the merged result back so the output is NHWC again.
    if (concatDimInput == 3)
    {
        IConnectableLayer* const deswizzleLayer = AddSwizzleLayer(*m_Network, layer->GetOutputSlot(0), ArmNNToNHWC,
                                                                  "deswizzle_for-" + nodeDef.name());
        layer = deswizzleLayer;
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1872
1873ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
1874 const tensorflow::GraphDef& graphDef)
1875{
telsoa01c577f2c2018-08-31 09:22:23 +01001876 // Note: the Shape layer is handled in a special way, because:
1877 // 1. ARMNN doesn't support int32 tensors which it outputs.
1878 // 2. ARMNN works with statically shaped tensors which are known at parse time.
surmeh01bceff2f2018-03-29 16:29:27 +01001879 // 3. because of 1. and 2. we treat the output of Shape as a temporary const int32
telsoa01c577f2c2018-08-31 09:22:23 +01001880 // tensor which may be used as an input to other ops, most likely a Reshape.
surmeh01bceff2f2018-03-29 16:29:27 +01001881
1882 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
1883 if (tfDataType != tensorflow::DT_INT32)
1884 {
telsoa01c577f2c2018-08-31 09:22:23 +01001885 throw ParseException(
1886 boost::str(
1887 boost::format(
1888 "Armnn only supports DT_INT32 as out_type. Got %1% for Node %2% %3%")
1889 % tensorflow::DataType_Name(tfDataType)
1890 % nodeDef.name()
1891 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001892 }
1893
1894 const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1895 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1896 const TensorInfo& prevLayerTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1897 unsigned int prevLayerDimensions = prevLayerTensorInfo.GetNumDimensions();
1898
1899 std::vector<int32_t> shapeTensorData;
1900 shapeTensorData.reserve(prevLayerDimensions);
1901
1902 for (unsigned int i=0; i<prevLayerDimensions; ++i)
1903 {
1904 shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.GetShape()[i]));
1905 }
1906
1907 TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
1908
1909 return std::make_unique<ParsedConstTfOperation<int32_t>>(this,
1910 nodeDef,
1911 &shapeTensorData[0],
1912 shapeTensorInfo);
1913}
1914
1915ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
1916 const tensorflow::GraphDef& graphDef)
1917{
1918 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1919 ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
1920
1921 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
1922 {
telsoa01c577f2c2018-08-31 09:22:23 +01001923 throw ParseException(
1924 boost::str(
1925 boost::format(
1926 "ArmNN only supports Reshape layers with constant shapes. "
1927 "Input %1% Node %2% %3%")
1928 % inputs[1].m_IndexedValue->GetNode().name()
1929 % nodeDef.name()
1930 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001931 }
1932 ParsedConstTfOperation<int32_t>* shapeNode =
1933 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1934
1935 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
1936 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1937
1938 std::vector<int32_t> shapeTensorData;
1939 ConstTensor shapeTensor = shapeNode->GetConstTensor(false, shapeTensorData);
1940 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
1941
1942 TensorShape targetShape = outputTensorInfo.GetShape();
1943 ReshapeDescriptor reshapeDesc;
1944 reshapeDesc.m_TargetShape = targetShape;
1945
1946 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1947 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1948 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1949
1950 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1951}
1952
ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
                                                   const tensorflow::GraphDef& graphDef)
{
    // input[0] is the tensor to resize; input[1] must be a constant int32 [height, width] size tensor.
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports ResizeBilinear layers with constant sizes. "
                    "Input %1%. Node %2% %3%")
                % inputs[1].m_IndexedValue->GetNode().name()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<int32_t>* sizeNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);

    // Checks the align_corners attribute is not set.
    if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports ResizeBilinear layers with align_corners set to false. "
                    "Node %1% %2%")
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    // Data for the parsed tensor args (size) must be stored locally.
    std::vector<int32_t> sizeTensorData;
    ConstTensor sizeTensor = sizeNode->GetConstTensor(false, sizeTensorData);

    // The descriptor only has target height and width attributes, which we get from the size tensor.
    ResizeBilinearDescriptor desc;
    desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
    desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, nodeDef.name().c_str());

    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
    // The layer runs in NHWC directly (no swizzle): batch and channels come from
    // the input shape, while height and width come from the target size.
    unsigned int outBatch = inputTensorInfo.GetShape()[0];
    unsigned int outChannels = inputTensorInfo.GetShape()[3];
    unsigned int outHeight = desc.m_TargetHeight;
    unsigned int outWidth = desc.m_TargetWidth;
    TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
    // The output DataType is always Float32, regardless of the input DataType.
    const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2013
2014TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
2015{
2016 BOOST_ASSERT(nodeDef.op() == "Squeeze");
2017 tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
2018
2019 DataType type;
2020 if (tfDataType == tensorflow::DT_FLOAT)
2021 {
2022 type = DataType::Float32;
2023 }
2024 else if (tfDataType == tensorflow::DT_INT32)
2025 {
2026 type = DataType::Signed32;
2027 }
2028 else
2029 {
telsoa01c577f2c2018-08-31 09:22:23 +01002030 throw ParseException(
2031 boost::str(
2032 boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
2033 % tensorflow::DataType_Name(tfDataType)
2034 % nodeDef.name()
2035 % CHECK_LOCATION().AsString()));
2036 }
2037
2038
2039 if (inputTensorInfo.GetNumDimensions() > 4)
2040 {
2041 throw ParseException(
2042 boost::str(
2043 boost::format(
2044 "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
2045 % inputTensorInfo.GetNumDimensions()
2046 % nodeDef.name()
2047 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002048 }
2049
2050 std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
telsoa01c577f2c2018-08-31 09:22:23 +01002051 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2052
surmeh01bceff2f2018-03-29 16:29:27 +01002053 if (squeezeDims.empty())
2054 {
telsoa01c577f2c2018-08-31 09:22:23 +01002055 squeezeDims.assign(dimensionSequence,
2056 dimensionSequence+inputTensorInfo.GetNumDimensions());
surmeh01bceff2f2018-03-29 16:29:27 +01002057 }
2058
2059 std::vector<uint32_t> outputDims;
2060 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2061 {
telsoa01c577f2c2018-08-31 09:22:23 +01002062 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2063 auto currentDimension = inputTensorInfo.GetShape()[i];
2064 if (skipSqueeze || currentDimension != 1)
surmeh01bceff2f2018-03-29 16:29:27 +01002065 {
telsoa01c577f2c2018-08-31 09:22:23 +01002066 outputDims.push_back(currentDimension);
surmeh01bceff2f2018-03-29 16:29:27 +01002067 }
2068 }
2069
2070 if (outputDims.size() > 4)
2071 {
telsoa01c577f2c2018-08-31 09:22:23 +01002072 throw ParseException(
2073 boost::str(
2074 boost::format(
2075 "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
2076 % outputDims.size()
2077 % nodeDef.name()
2078 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002079 }
2080
telsoa01c577f2c2018-08-31 09:22:23 +01002081 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2082 outputDims.data());
2083
2084 TensorInfo outTensorInfo = inputTensorInfo;
2085 outTensorInfo.SetShape(outShape);
2086 outTensorInfo.SetDataType(type);
surmeh01bceff2f2018-03-29 16:29:27 +01002087
2088 return outTensorInfo;
2089}
2090
2091ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2092{
2093 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2094
2095 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2096 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2097
2098 TensorInfo outputInfo;
2099 outputInfo = OutputShapeOfSqueeze(nodeDef, inputTensorInfo);
2100
2101 ReshapeDescriptor reshapeDesc;
2102 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2103 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2104 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2105 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2106
2107 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2108}
2109
2110ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2111{
2112 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2113
2114 NormalizationDescriptor normalizationDescriptor;
2115 normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2116 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
2117 normalizationDescriptor.m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef, "alpha");
2118 normalizationDescriptor.m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef, "beta");
2119 normalizationDescriptor.m_K = ReadMandatoryNodeFloatAttribute(nodeDef, "bias");
2120 normalizationDescriptor.m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef, "depth_radius");
ruoyan018174f362018-12-04 18:24:08 +00002121 normalizationDescriptor.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002122
2123 // The window size must be an odd value. For a window size of (2 * n + 1), TensorFlow defines depth_radius = n.
2124 normalizationDescriptor.m_NormSize = normalizationDescriptor.m_NormSize * 2 + 1;
2125
2126 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002127 IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor,
2128 nodeDef.name().c_str());
ruoyan018174f362018-12-04 18:24:08 +00002129 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2130 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
surmeh01bceff2f2018-03-29 16:29:27 +01002131
2132 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2133}
2134
/// A ParsedTfOperation for a MatMul node.
/// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because
/// MatMul nodes are often used for the first part of a biased FullyConnected (MatMul followed
/// by Add) and in these cases armnn doesn't need a separate layer for the MatMul.
///
class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    ParsedMatMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
        : DeferredSingleLayerParsedTfOperation(parser, node)
    {
    }

    // Invoked only when the MatMul was not fused with a following Add; emits a
    // FullyConnected layer with a null bias.
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
    }
};
2154
2155ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2156{
telsoa01c577f2c2018-08-31 09:22:23 +01002157 // Defers the creation of the layer (see ParsedMatMulTfOperation).
surmeh01bceff2f2018-03-29 16:29:27 +01002158 return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
2159}
2160
/// A ParsedTfOperation for a Mul node.
/// Creation of the armnn Mul layer is deferred until it is actually needed, because Mul nodes
/// are also used for the first part of a leaky relu activation function (Mul followed by Maximum)
/// and in these cases armnn doesn't need a separate layer for the Mul.
///
class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    ParsedMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
        : DeferredSingleLayerParsedTfOperation(parser, node)
    {
    }

    // Invoked only when the Mul was not fused into a leaky-relu pattern; emits a
    // standalone multiplication layer.
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
    }
};
2180
surmeh01bceff2f2018-03-29 16:29:27 +01002181ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2182{
2183 boost::ignore_unused(graphDef);
2184
telsoa01c577f2c2018-08-31 09:22:23 +01002185 return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002186}
2187
2188ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
2189 const tensorflow::GraphDef& graphDef)
2190{
2191 boost::ignore_unused(graphDef);
2192
2193 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
2194
2195 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
2196
2197 auto it = m_InputShapes.find(nodeDef.name());
2198 if (it == m_InputShapes.end())
2199 {
telsoa01c577f2c2018-08-31 09:22:23 +01002200 throw ParseException(
2201 boost::str(
2202 boost::format(
2203 "Missing input shape for Placeholder '%1%' %2%")
2204 % nodeDef.name()
2205 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002206 }
2207 TensorInfo tensorInfo(it->second, DataType::Float32);
2208
2209 IConnectableLayer* const layer = m_Network->AddInputLayer(layerId, nodeDef.name().c_str());
2210
2211 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2212
2213 TrackInputBinding(layer, layerId, tensorInfo);
2214
2215 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2216}
2217
saoste01bbd40612018-08-28 15:41:51 +01002218ParsedTfOperationPtr TfParser::ParseRealDiv(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2219{
2220 boost::ignore_unused(graphDef);
2221 return AddRealDivLayer(nodeDef);
2222}
2223
surmeh01bceff2f2018-03-29 16:29:27 +01002224ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
2225 const tensorflow::GraphDef& graphDef)
2226{
2227 boost::ignore_unused(graphDef);
2228
2229 ActivationDescriptor activationDesc;
2230 activationDesc.m_Function = ActivationFunction::ReLu;
2231 return AddActivationLayer(nodeDef, activationDesc);
2232}
2233
2234ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
2235 const tensorflow::GraphDef& graphDef)
2236{
2237 boost::ignore_unused(graphDef);
2238
2239 ActivationDescriptor activationDesc;
2240 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2241 activationDesc.m_A = 6.0f;
2242 activationDesc.m_B = 0.0f;
2243
2244 return AddActivationLayer(nodeDef, activationDesc);
2245}
2246
2247ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
2248 const tensorflow::GraphDef& graphDef)
2249{
2250 boost::ignore_unused(graphDef);
2251
2252 ActivationDescriptor activationDesc;
2253 activationDesc.m_Function = ActivationFunction::Sigmoid;
2254
2255 return AddActivationLayer(nodeDef, activationDesc);
2256}
2257
2258ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
2259 const tensorflow::GraphDef& graphDef)
2260{
2261 boost::ignore_unused(graphDef);
2262
2263 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2264
2265 SoftmaxDescriptor softmaxDescriptor;
2266 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(softmaxDescriptor, nodeDef.name().c_str());
2267
2268 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2269 prevLayerSlot.Connect(layer->GetInputSlot(0));
2270 layer->GetOutputSlot(0).SetTensorInfo(prevLayerSlot.GetTensorInfo());
2271
2272 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2273}
2274
2275ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
2276 const tensorflow::GraphDef& graphDef)
2277{
2278 boost::ignore_unused(graphDef);
2279
2280 ActivationDescriptor activationDesc;
2281 activationDesc.m_Function = ActivationFunction::SoftReLu;
2282
2283 return AddActivationLayer(nodeDef, activationDesc);
2284}
2285
2286ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2287{
2288 boost::ignore_unused(graphDef);
2289
2290 ActivationDescriptor activationDesc;
2291 activationDesc.m_Function = ActivationFunction::TanH;
2292 activationDesc.m_A = 1.0f;
2293 activationDesc.m_B = 1.0f;
2294
2295 return AddActivationLayer(nodeDef, activationDesc);
2296}
2297
2298ParsedTfOperationPtr TfParser::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
2299 ActivationDescriptor& activationDesc)
2300{
2301 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2302
2303 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, nodeDef.name().c_str());
2304
2305 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2306 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2307 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2308 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2309}
2310
2311ParsedTfOperationPtr TfParser::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
2312 const tensorflow::GraphDef& graphDef)
2313{
2314 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
2315}
2316
// Parses a TensorFlow AvgPool node by delegating to the shared 2D pooling
// parser with the average-pooling algorithm selected.
ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
                                            const tensorflow::GraphDef& graphDef)
{
    return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
}
2322
2323ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
2324 const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
2325{
2326 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2327 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2328 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2329
2330 if (inputs.size() != 1)
2331 {
telsoa01c577f2c2018-08-31 09:22:23 +01002332 throw ParseException(
2333 boost::str(
2334 boost::format(
2335 "2D Pooling expects one input!. Got %1% for Node %2% %3%")
2336 % inputs.size()
2337 % nodeDef.name()
2338 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002339 }
2340
2341 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
2342 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
2343 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
2344 std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef, "ksize"); // size of pool windows
2345
2346 Pooling2dDescriptor pooling2dDescriptor;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002347 pooling2dDescriptor.m_PoolType = pooltype;
2348 pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
surmeh01bceff2f2018-03-29 16:29:27 +01002349 pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;
2350
telsoa01c577f2c2018-08-31 09:22:23 +01002351 CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
FrancisMurtaghf005e312018-12-06 15:26:04 +00002352 DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
2353 pooling2dDescriptor.m_DataLayout = dataLayout;
2354 DataLayoutIndexed dataLayoutIndexed(dataLayout);
telsoa01c577f2c2018-08-31 09:22:23 +01002355
FrancisMurtaghf005e312018-12-06 15:26:04 +00002356 pooling2dDescriptor.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
2357 pooling2dDescriptor.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
2358 pooling2dDescriptor.m_PoolWidth = ksize[dataLayoutIndexed.GetWidthIndex()];
2359 pooling2dDescriptor.m_PoolHeight = ksize[dataLayoutIndexed.GetHeightIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01002360
FrancisMurtaghf005e312018-12-06 15:26:04 +00002361 uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
2362 uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01002363
2364 bool padding = false;
2365 TensorInfo outputInfo;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002366 unsigned int outputHeight = 0;
2367 unsigned int outputWidth = 0;
telsoa01c577f2c2018-08-31 09:22:23 +01002368
2369 CHECK_PADDING_TYPE(nodeDef, paddingString);
2370
surmeh01bceff2f2018-03-29 16:29:27 +01002371 if (paddingString == "SAME")
2372 {
2373 padding = true;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002374
2375 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
2376 static_cast<float>(pooling2dDescriptor.m_StrideY)));
2377 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
2378 static_cast<float>(pooling2dDescriptor.m_StrideX)));
surmeh01bceff2f2018-03-29 16:29:27 +01002379 }
2380 else if (paddingString == "VALID")
2381 {
2382 padding = false;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002383
2384 outputHeight = static_cast<uint32_t>(ceil(
2385 static_cast<float>(inputHeight - pooling2dDescriptor.m_PoolHeight + 1) /
2386 static_cast<float>(pooling2dDescriptor.m_StrideY)));
2387 outputWidth = static_cast<uint32_t>(ceil(
2388 static_cast<float>(inputWidth - pooling2dDescriptor.m_PoolWidth + 1) /
2389 static_cast<float>(pooling2dDescriptor.m_StrideX)));
2390 }
2391
2392 switch (dataLayout)
2393 {
2394 case DataLayout::NHWC:
2395 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
2396 outputHeight,
2397 outputWidth,
2398 inputTensorInfo.GetShape()[3] },
2399 DataType::Float32);
2400 break;
2401 case DataLayout::NCHW:
2402 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
2403 inputTensorInfo.GetShape()[1],
2404 outputHeight,
2405 outputWidth },
2406 DataType::Float32);
2407 break;
surmeh01bceff2f2018-03-29 16:29:27 +01002408 }
surmeh01bceff2f2018-03-29 16:29:27 +01002409
2410 CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX,
FrancisMurtaghf005e312018-12-06 15:26:04 +00002411 pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01002412 CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY,
FrancisMurtaghf005e312018-12-06 15:26:04 +00002413 pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01002414
2415
2416 IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
2417 if (layer == nullptr)
2418 {
telsoa01c577f2c2018-08-31 09:22:23 +01002419 throw ParseException(
2420 boost::str(
2421 boost::format(
2422 "Failed to add pooling2d layer for %1% %2%")
2423 % nodeDef.name()
2424 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002425 }
2426
2427 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2428
FrancisMurtaghf005e312018-12-06 15:26:04 +00002429 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01002430
2431 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2432}
2433
// Parses an Add (isBiasAdd == false) or BiasAdd (isBiasAdd == true) node into
// an ArmNN addition layer. BiasAdd requires its second input to be a 1D bias,
// which is reshaped according to the node's data_format so it broadcasts
// correctly; plain Add reshapes whichever operand is 1D (NHWC is assumed).
ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);

    // NOTE: these references describe the ORIGINAL inputs. They are deliberately
    // still consulted below even after the slot pointers may have been redirected
    // to broadcast-reshape layers.
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    if (isBiasAdd)
    {
        // BiasAdd takes bias as a 1D tensor. We need to add a reshape layer to create a 4D tensor
        // with the same data in the correct dimension for broadcast in addition.
        if(input1Info.GetNumDimensions() != 1)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Unsupported bias for BiasAdd. It should be a 1D vector. "
                        "Got %1% dimensions for input %2%. Node %3% %4%")
                    % input1Info.GetNumDimensions()
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }

        const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");

        CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
        input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
    }
    else
    {
        // Plain Add: reshape a 1D operand so it broadcasts against the other
        // operand; NHWC layout is assumed for the reshape.
        if (input0Info.GetNumDimensions() == 1)
        {
            const bool isNHWC = true;
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
        }

        if (input1Info.GetNumDimensions() == 1)
        {
            const bool isNHWC = true;
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
        }
    }

    IConnectableLayer* const layer = m_Network->AddAdditionLayer(nodeDef.name().c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // The output takes the shape of the non-1D operand: input1 only when this
    // was a plain Add whose first operand was the 1D one, input0 otherwise.
    if (input0Info.GetNumDimensions() == 1 && isBiasAdd == false)
    {
        layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
    }
    else
    {
        layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2497
saoste01bbd40612018-08-28 15:41:51 +01002498ParsedTfOperationPtr TfParser::AddRealDivLayer(const tensorflow::NodeDef& nodeDef)
2499{
2500 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2501
2502 IConnectableLayer* const layer = m_Network->AddDivisionLayer(nodeDef.name().c_str());
2503 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2504 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2505
2506 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2507 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2508
2509
2510 if (input0NumDims < input1NumDims)
2511 {
2512 const bool isNHWC = true;
2513 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
2514 }
2515 if (input1NumDims < input0NumDims)
2516 {
2517 const bool isNHWC = true;
2518 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
2519 }
2520
2521 input0Slot->Connect(layer->GetInputSlot(0));
2522 input1Slot->Connect(layer->GetInputSlot(1));
2523
2524 if (input0NumDims < input1NumDims)
2525 {
2526 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2527 }
2528 else
2529 {
2530 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2531
2532 }
2533 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2534}
2535
Sadik Armagan975c09a2018-12-04 10:02:08 +00002536ParsedTfOperationPtr TfParser::AddMaximumLayer(const tensorflow::NodeDef& nodeDef)
2537{
2538 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2539
2540 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2541 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2542
2543 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2544 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2545
2546 if (input0NumDims < input1NumDims)
2547 {
2548 const bool isNHWC = true;
2549 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
2550 }
2551 if (input1NumDims < input0NumDims)
2552 {
2553 const bool isNHWC = true;
2554 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
2555 }
2556
2557 IConnectableLayer* const layer = m_Network->AddMaximumLayer(nodeDef.name().c_str());
2558
2559 input0Slot->Connect(layer->GetInputSlot(0));
2560 input1Slot->Connect(layer->GetInputSlot(1));
2561
2562 TensorInfo outputInfo = input0Slot->GetTensorInfo();
2563 std::vector<unsigned int> outputShape;
2564
2565 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
2566 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
2567
2568 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
2569 {
2570 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
2571 }
2572
2573 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
2574 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2575
2576 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2577}
2578
telsoa01c577f2c2018-08-31 09:22:23 +01002579IConnectableLayer* TfParser::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
2580{
2581 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2582
2583 IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
2584 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2585 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2586
2587 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2588 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2589
2590 if (input0NumDims < input1NumDims)
2591 {
2592 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002593 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01002594 }
2595 if (input1NumDims < input0NumDims)
2596 {
2597 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002598 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01002599 }
2600
2601 input0Slot->Connect(layer->GetInputSlot(0));
2602 input1Slot->Connect(layer->GetInputSlot(1));
2603
2604 if (input0NumDims < input1NumDims)
2605 {
2606 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2607 }
2608 else
2609 {
2610 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2611 }
2612 return layer;
2613}
2614
// Fuses a MatMul node, and optionally a following Add node supplying the bias,
// into a single ArmNN fully-connected layer. Both the weights and (when
// present) the bias must be constant tensors; the non-constant MatMul operand
// becomes the layer's data input.
IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
    const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
{
    // Finds bias const (if applicable). The bias may appear on either side of
    // the Add node.
    ParsedConstTfOperation<float>* biasNode = nullptr;
    if (addNodeDef != nullptr)
    {
        std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
        // Finds our inputs.
        if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
        {
            biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
        }
        else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
        {
            biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
        }
        else
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "ArmNN only supports fully connected layers with constant bias. "
                        "Inputs %1% and %2%. AddNode %3%. MatMulNode %4% %5%")
                    % addInputs[0].m_IndexedValue->GetNode().name()
                    % addInputs[1].m_IndexedValue->GetNode().name()
                    % addNodeDef->name()
                    % matMulNodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }
    }

    // Finds matmul inputs: the constant operand becomes the weights, the other
    // operand (either side) becomes the data input.
    ParsedConstTfOperation<float>* weightNode = nullptr;
    ParsedTfOperation* inputNode = nullptr;
    unsigned int inputIdx = 0;
    std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
    if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
    {
        weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
        inputNode = mulInputs[1].m_IndexedValue;
        inputIdx = mulInputs[1].m_Index;
    }
    else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
    {
        weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
        inputNode = mulInputs[0].m_IndexedValue;
        inputIdx = mulInputs[0].m_Index;
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports fully connected layers with constant weights. "
                    "Inputs %1% and %2%. MatMulNode %3% %4%")
                % mulInputs[0].m_IndexedValue->GetNode().name()
                % mulInputs[1].m_IndexedValue->GetNode().name()
                % matMulNodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    std::vector<float> weightTensorData;
    // Handles weight.
    ConstTensor weights = weightNode->GetConstTensor(false, weightTensorData);

    FullyConnectedDescriptor desc;
    desc.m_BiasEnabled = addNodeDef != nullptr;

    IConnectableLayer* layer = nullptr;
    // Makes the layer.
    if (addNodeDef != nullptr)
    {
        std::vector<float> biasTensorData;
        ConstTensor biases = biasNode->GetConstTensor(false, biasTensorData);

        // Weight shape is [inputUnits, outputUnits]; the bias must have one
        // entry per output unit.
        if (weights.GetShape()[1] != biases.GetShape()[0])
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Shape of matmul weights and bias do not match. "
                        "AddNode %1%. MatMulNode %2% %3%")
                    % addNodeDef->name()
                    % matMulNodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }

        layer = m_Network->AddFullyConnectedLayer(desc, weights, biases, armnnLayerName);
    }
    else
    {
        layer = m_Network->AddFullyConnectedLayer(desc, weights, armnnLayerName);
    }

    BOOST_ASSERT(layer != nullptr);

    inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
    unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];

    // Handles output: [batches, outputUnits], always Float32.
    TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    return layer;
}
2720
2721void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2722{
telsoa01c577f2c2018-08-31 09:22:23 +01002723 // Gets the type of the node (assume float).
surmeh01bceff2f2018-03-29 16:29:27 +01002724 tensorflow::DataType type = tensorflow::DT_FLOAT;
2725 if (nodeDef.attr().count("T") != 0)
2726 {
2727 auto attr = nodeDef.attr().at("T");
2728 type = attr.type();
2729 }
2730 else if (nodeDef.attr().count("dtype") != 0)
2731 {
2732 auto attr = nodeDef.attr().at("dtype");
2733 type = attr.type();
2734 }
2735
2736 if (type != tensorflow::DT_FLOAT && nodeDef.op() != "Const")
2737 {
telsoa01c577f2c2018-08-31 09:22:23 +01002738 throw ParseException(
2739 boost::str(
2740 boost::format(
2741 "Currently only FLOAT is supported for tensorflow nodes (apart from Const). "
2742 "Got %1% for Node %2% %3%")
2743 % tensorflow::DataType_Name(type)
2744 % nodeDef.name()
2745 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002746 }
2747
2748 const std::string& operation = nodeDef.op();
2749 auto it = ms_OperationNameToParsingFunctions.find(operation);
2750 if (it != ms_OperationNameToParsingFunctions.end())
2751 {
2752 auto func = it->second;
2753 ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
2754 ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
2755
telsoa01c577f2c2018-08-31 09:22:23 +01002756 // Stores the parsed operation so that dependent layers can connect to it.
surmeh01bceff2f2018-03-29 16:29:27 +01002757 auto it = m_ParsedTfOperations.find(nodeDef.name());
2758 if (it != m_ParsedTfOperations.end())
2759 {
2760 throw ParseException(boost::str(boost::format("Name %1% used by more than one node") % nodeDef.name()));
2761 }
2762 m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
2763
telsoa01c577f2c2018-08-31 09:22:23 +01002764 // If this node was requested as an output from the network, then adds an ArmNN output layer.
surmeh01bceff2f2018-03-29 16:29:27 +01002765 if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
2766 m_RequestedOutputs.end())
2767 {
2768 auto outId = ParseOutputId(nodeDef.name());
2769 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
2770 IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
2771
2772 TensorInfo tensorInfo = prevSlot.GetTensorInfo();
2773
2774 IConnectableLayer* outputLayer = m_Network->AddOutputLayer(layerId, nodeDef.name().c_str());
2775
2776 prevSlot.Connect(outputLayer->GetInputSlot(0));
2777
2778 TrackOutputBinding(outputLayer, layerId, tensorInfo);
2779 }
2780 }
2781 else
2782 {
telsoa01c577f2c2018-08-31 09:22:23 +01002783 throw ParseException(
2784 boost::str(
2785 boost::format(
2786 "Unsupported operation %1% in tensorflow::GraphDef %2%")
2787 % operation
2788 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002789 }
2790}
2791
2792void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
2793{
telsoa01c577f2c2018-08-31 09:22:23 +01002794 // Adds all nodes to our map.
surmeh01bceff2f2018-03-29 16:29:27 +01002795 m_NodesByName.clear();
2796 m_NetworkInputsBindingInfo.clear();
2797 m_NetworkOutputsBindingInfo.clear();
2798
2799 for (int i = 0; i < graphDef.node_size(); ++i)
2800 {
2801 const tensorflow::NodeDef& node = graphDef.node(i);
2802 m_NodesByName[node.name()] = &node;
2803 }
2804
telsoa01c577f2c2018-08-31 09:22:23 +01002805 // Finds the output nodes the user requested.
surmeh01bceff2f2018-03-29 16:29:27 +01002806 std::vector<const tensorflow::NodeDef*> targetNodes;
2807 for (const std::string& requestedOutputName : m_RequestedOutputs)
2808 {
2809 auto nodeIt = m_NodesByName.find(requestedOutputName);
2810 if (nodeIt == m_NodesByName.end())
2811 {
telsoa01c577f2c2018-08-31 09:22:23 +01002812 throw ParseException(
2813 boost::str(
2814 boost::format(
2815 "Couldn't find requested output node '%1%' in graph %2%")
2816 % requestedOutputName
2817 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002818 }
2819 targetNodes.push_back(nodeIt->second);
2820 }
2821
telsoa01c577f2c2018-08-31 09:22:23 +01002822 // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01002823 std::vector<const tensorflow::NodeDef*> sortedNodes;
2824 if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
2825 targetNodes,
2826 [this](const tensorflow::NodeDef* node)
2827 {
2828 auto outputs = GetTfInputNodes(*node);
2829 std::vector<const tensorflow::NodeDef*> nodesOnly;
2830 for (const auto & o : outputs) {
2831 nodesOnly.push_back(o.m_IndexedValue);
2832 }
2833 return nodesOnly;
2834 },
2835 sortedNodes))
2836 {
telsoa01c577f2c2018-08-31 09:22:23 +01002837 throw ParseException(
2838 boost::str(
2839 boost::format(
2840 "Cycle detected in graph %1%")
2841 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002842 }
2843
telsoa01c577f2c2018-08-31 09:22:23 +01002844 // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01002845 for (const auto& it : sortedNodes)
2846 {
2847 const tensorflow::NodeDef& currentNode = *it;
2848 LoadNodeDef(currentNode, graphDef);
2849 }
2850}
2851
2852INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
2853 const std::map<std::string, TensorShape>& inputShapes,
2854 const std::vector<std::string>& requestedOutputs)
2855{
2856 FILE* fd = fopen(graphFile, "r");
2857
2858 if (fd == nullptr)
2859 {
telsoa01c577f2c2018-08-31 09:22:23 +01002860 throw FileNotFoundException(
2861 boost::str(
2862 boost::format(
2863 "Graph file %1% failed to open %2%")
2864 % graphFile
2865 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002866 }
2867
telsoa01c577f2c2018-08-31 09:22:23 +01002868 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01002869 tensorflow::GraphDef graphDef;
2870 auto input = new google::protobuf::io::FileInputStream(fileno(fd));
2871 bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
2872 delete input;
2873 fclose(fd);
2874
2875 if (!success)
2876 {
telsoa01c577f2c2018-08-31 09:22:23 +01002877 throw ParseException(
2878 boost::str(
2879 boost::format(
2880 "Failed to parse graph file %1%")
2881 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002882 }
2883
2884 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
2885}
2886
2887INetworkPtr TfParser::CreateNetworkFromString(const char* protoText,
2888 const std::map<std::string, TensorShape>& inputShapes,
2889 const std::vector<std::string>& requestedOutputs)
2890{
telsoa01c577f2c2018-08-31 09:22:23 +01002891 // Parses the string into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01002892 tensorflow::GraphDef graphDef;
2893 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
2894
2895 if (!success)
2896 {
telsoa01c577f2c2018-08-31 09:22:23 +01002897 throw ParseException(
2898 boost::str(
2899 boost::format(
2900 "Failed to parse graph file %1%")
2901 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002902 }
2903
2904 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
2905}
2906
// Loads a binary-format protobuf GraphDef from disk and builds a network from
// it. Throws FileNotFoundException if the file cannot be opened and
// ParseException if the protobuf cannot be parsed.
INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
    const std::map<std::string, TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    FILE* fd = fopen(graphFile, "rb");

    if (fd == nullptr)
    {
        throw FileNotFoundException(
            boost::str(
                boost::format(
                    "Graph file %1% failed to open %2%")
                % graphFile
                % CHECK_LOCATION().AsString()));
    }

    // Parses the file into a message.
    tensorflow::GraphDef graphDef;

    google::protobuf::io::FileInputStream inStream(fileno(fd));
    google::protobuf::io::CodedInputStream codedStream(&inStream);
    // Raise protobuf's default message-size limit so large frozen models
    // (> 64MB by default) can be read.
    codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
    bool success = graphDef.ParseFromCodedStream(&codedStream);
    fclose(fd);

    if (!success)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Failed to parse protobuf file %1% %2%")
                % graphFile
                % CHECK_LOCATION().AsString()));
    }

    return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
}
2944
2945INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
2946 const std::map<std::string, TensorShape>& inputShapes,
2947 const std::vector<std::string>& requestedOutputs)
2948{
2949 m_Network = INetwork::Create();
2950
2951 m_InputShapes = inputShapes;
2952 if (requestedOutputs.size() == 0)
2953 {
telsoa01c577f2c2018-08-31 09:22:23 +01002954 throw ParseException(
2955 boost::str(
2956 boost::format(
2957 "requestedOutputs must have at least one entry %1%")
2958 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002959 }
2960 m_RequestedOutputs = requestedOutputs;
2961
2962 try
2963 {
2964 LoadGraphDef(graphDef);
2965 }
2966 catch (const ParseException& e)
2967 {
2968 Cleanup();
2969 throw e;
2970 }
2971
2972 Cleanup();
2973
2974 return std::move(m_Network);
2975}
2976
void TfParser::Cleanup()
{
    // Cleanup, in case we reuse this parser.
    // Note: m_Network and the input/output binding-info maps are deliberately
    // left untouched here — the network is handed back to the caller and the
    // binding info must stay queryable via GetNetworkInput/OutputBindingInfo.
    m_InputShapes.clear();
    m_RequestedOutputs.clear();
    m_NodesByName.clear();
    m_ParsedTfOperations.clear();
}
2985
// Returns the (LayerBindingId, TensorInfo) pair recorded for a network input
// layer by name. Throws InvalidArgumentException (via GetBindingInfo) when the
// name is unknown.
BindingPointInfo TfParser::GetNetworkInputBindingInfo(const std::string& name) const
{
    return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
}
2990
// Returns the (LayerBindingId, TensorInfo) pair recorded for a network output
// layer by name. Throws InvalidArgumentException (via GetBindingInfo) when the
// name is unknown.
BindingPointInfo TfParser::GetNetworkOutputBindingInfo(const std::string& name) const
{
    return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
}
2995
2996std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(const std::string& layerName,
2997 const char* bindingPointDesc,
2998 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
2999{
3000 auto it = nameToBindingInfo.find(layerName);
3001 if (it == nameToBindingInfo.end())
3002 {
telsoa01c577f2c2018-08-31 09:22:23 +01003003 throw InvalidArgumentException(
3004 boost::str(
3005 boost::format(
3006 "Unknown %1% '%2%' %3%")
3007 % bindingPointDesc
3008 % layerName
3009 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003010 }
3011 return it->second;
3012}
3013
3014void TfParser::TrackInputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
3015{
3016 return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
3017}
3018
3019void TfParser::TrackOutputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
3020{
3021 return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
3022}
3023
3024void TfParser::TrackBindingPoint(IConnectableLayer* layer,
3025 LayerBindingId id,
3026 const TensorInfo& tensorInfo,
3027 const char* bindingPointDesc,
3028 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3029{
3030 const std::string layerName = layer->GetName();
3031 auto it = nameToBindingInfo.find(layerName);
3032 if (it == nameToBindingInfo.end())
3033 {
3034 nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
3035 }
3036 else
3037 {
telsoa01c577f2c2018-08-31 09:22:23 +01003038 throw ParseException(
3039 boost::str(
3040 boost::format(
3041 "Id %1% used by more than one %2% layer %3%")
3042 % id
3043 % bindingPointDesc
3044 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003045 }
3046}
3047
3048} // namespace armnnTfParser