blob: 0d425257e87a3bd0241566347d445e361ab6f496 [file] [log] [blame]
surmeh01bceff2f2018-03-29 16:29:27 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
surmeh01bceff2f2018-03-29 16:29:27 +01004//
5#include "TfParser.hpp"
6
7#include <armnn/INetwork.hpp>
8#include <armnn/Utils.hpp>
9#include <armnn/TypesUtils.hpp>
10#include <armnn/Exceptions.hpp>
11#include <armnn/Descriptors.hpp>
12
13#include <GraphTopologicalSort.hpp>
Sadik Armagan479045b2018-10-01 11:51:37 +010014#include <ParserHelper.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010015#include <Permute.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010016#include <VerificationHelpers.hpp>
Matteo Martincigh46315822018-11-28 16:22:36 +000017#include <DataLayoutIndexed.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010018
19#include <google/protobuf/io/zero_copy_stream_impl.h>
20#include <google/protobuf/text_format.h>
21
22#include "tensorflow/core/framework/graph.pb.h"
23#include "tensorflow/core/framework/node_def.pb.h"
24#include "tensorflow/core/framework/types.pb.h"
25#include "tensorflow/core/framework/tensor.pb.h"
26#include "tensorflow/core/framework/tensor_shape.pb.h"
27
28#include <boost/assert.hpp>
29#include <boost/format.hpp>
30#include <boost/core/ignore_unused.hpp>
31#include <boost/log/trivial.hpp>
32#include <boost/numeric/conversion/cast.hpp>
33#include <boost/polymorphic_cast.hpp>
34
35#include <memory>
36#include <sstream>
37#include <numeric>
38#include <functional>
39
Matteo Martincigh46315822018-11-28 16:22:36 +000040using namespace armnnUtils;
surmeh01bceff2f2018-03-29 16:29:27 +010041using namespace armnn;
42
43namespace armnnTfParser
44{
45namespace
46{
47
// Permutation vectors for moving tensors between TensorFlow's NHWC data layout
// and ArmNN's NCHW layout (mapping[i] gives the destination of source dim i).
const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
50
51IConnectableLayer* AddSwizzleLayer(INetwork& network, IOutputSlot& input, const PermutationVector& mapping,
52 const std::string& name)
53{
telsoa01c577f2c2018-08-31 09:22:23 +010054 // Adds swizzle layer.
surmeh01bceff2f2018-03-29 16:29:27 +010055 IConnectableLayer* const layer = network.AddPermuteLayer(mapping, name.c_str());
56
telsoa01c577f2c2018-08-31 09:22:23 +010057 // Connects intput to swizzle layer.
surmeh01bceff2f2018-03-29 16:29:27 +010058 input.Connect(layer->GetInputSlot(0));
59
telsoa01c577f2c2018-08-31 09:22:23 +010060 // Sets up swizzled output.
surmeh01bceff2f2018-03-29 16:29:27 +010061 const TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mapping);
62 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
63
64 return layer;
65}
66
/// Wraps 'layer' so that its input is converted NHWC -> NCHW and its output is
/// converted back NCHW -> NHWC. Returns the final (deswizzle) layer, whose
/// output slot carries data in the original NHWC layout.
IConnectableLayer* SwizzleInDeswizzleOut(INetwork& network, IOutputSlot& input, IConnectableLayer& layer,
                                         const std::string& name)
{
    // Adds swizzle layer (NHWC -> ArmNN's NCHW).
    IConnectableLayer* const swizzleLayer = AddSwizzleLayer(network, input, NHWCToArmNN, "swizzle_for-" + name);

    // Connects swizzled input to layer.
    swizzleLayer->GetOutputSlot(0).Connect(layer.GetInputSlot(0));

    // Adds deswizzle layer (NCHW -> NHWC) after 'layer'.
    IConnectableLayer* const deswizzleLayer = AddSwizzleLayer(network, layer.GetOutputSlot(0), ArmNNToNHWC,
                                                              "deswizzle_for-" + name);

    return deswizzleLayer;
}
82
/// Looks up attribute 'attribName' on 'nodeDef' and, when it exists with the
/// expected protobuf value case, invokes 'callable' with the AttrValue.
/// Throws ParseException if the attribute is missing or holds a different
/// value case than 'expectedValueCase'.
template <typename Callable>
void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
                                    const std::string& attribName,
                                    tensorflow::AttrValue::ValueCase expectedValueCase,
                                    Callable callable)
{
    auto iter = nodeDef.attr().find(attribName);
    if (iter != nodeDef.attr().end())
    {
        const auto& attrValue = iter->second;
        if (attrValue.value_case() == expectedValueCase)
        {
            callable(attrValue);
        }
        else
        {
            // Attribute exists but holds a different value type than expected.
            throw ParseException(
                boost::str(
                    boost::format(
                        "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
                        "but found %4% instead %5%")
                        % attribName
                        % nodeDef.name()
                        % static_cast<int>(expectedValueCase)
                        % static_cast<int>(attrValue.value_case())
                        % CHECK_LOCATION().AsString()));
        }
    }
    else
    {
        // Mandatory attribute was not present at all.
        throw ParseException(
            boost::str(
                boost::format(
                    "Could not find required attribute %1% in node %2% %3%")
                    % attribName
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
}
122
/// Like ReadMandatoryNodeAttributeImpl, but a missing attribute is silently
/// ignored (no callable invocation, no exception). A present attribute with
/// the wrong value case still throws ParseException.
template <typename Callable>
void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
                                   const std::string& attribName,
                                   tensorflow::AttrValue::ValueCase expectedValueCase,
                                   Callable callable)
{
    auto iter = nodeDef.attr().find(attribName);
    if (iter != nodeDef.attr().end())
    {
        const auto& attrValue = iter->second;
        if (attrValue.value_case() == expectedValueCase)
        {
            callable(attrValue);
        }
        else
        {
            // Attribute exists but holds a different value type than expected.
            throw ParseException(
                boost::str(
                    boost::format(
                        "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
                        "but found %4% instead %5%")
                        % attribName
                        % nodeDef.name()
                        % static_cast<int>(expectedValueCase)
                        % static_cast<int>(attrValue.value_case())
                        % CHECK_LOCATION().AsString()));
        }
    }
}
152
153float ReadMandatoryNodeFloatAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
154{
155 float attribValue = 0.0f;
156 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
157 [&attribValue](const tensorflow::AttrValue& attrValue)
158 {
159 attribValue = attrValue.f();
160 });
161 return attribValue;
162}
163
Conor Kennedyc2130a02018-12-05 11:05:54 +0000164int32_t ReadMandatoryNodeInt32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
165{
166 int32_t attribValue = 0u;
167 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
168 [&attribValue](const tensorflow::AttrValue& attrValue)
169 {
170 attribValue = static_cast<int32_t>(attrValue.i());
171 });
172 return attribValue;
173}
174
/// Reads the mandatory integer attribute 'name' from 'nodeDef', narrowed to
/// uint32_t. Throws (via ReadMandatoryNodeAttributeImpl) if absent or not kI.
uint32_t ReadMandatoryNodeUint32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    uint32_t attribValue = 0u;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
                                   [&attribValue](const tensorflow::AttrValue& attrValue)
                                   {
                                       // TF stores integers as int64; narrow explicitly.
                                       attribValue = static_cast<uint32_t>(attrValue.i());
                                   });
    return attribValue;
}
185
186std::string ReadMandatoryNodeStringAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
187{
188 std::string attribValue = "";
189 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
190 [&attribValue](const tensorflow::AttrValue& attrValue)
191 {
192 attribValue = attrValue.s();
193 });
194 return attribValue;
195}
196
/// Reads the mandatory integer-list attribute 'name' from 'nodeDef', each
/// element narrowed to uint32_t. Throws if the attribute is absent or not kList.
std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
                                                           const std::string& name)
{
    std::vector<uint32_t> attriList;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
                                   [&attriList](const tensorflow::AttrValue& attrValue)
                                   {
                                       for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
                                       {
                                           attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
                                       }
                                   });

    return attriList;
}
212
/// Reads the optional integer-list attribute 'name' from 'nodeDef'; returns an
/// empty vector when the attribute is missing.
std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
                                                          const std::string& name)
{
    std::vector<uint32_t> attriList;
    ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
                                  [&attriList](const tensorflow::AttrValue& attrValue)
                                  {
                                      for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
                                      {
                                          attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
                                      }
                                  });

    return attriList;
}
228
229bool ReadOptionalNodeBoolAttribute(const tensorflow::NodeDef& nodeDef,
230 const std::string& name,
231 bool defaultValue = false)
232{
233 bool attribValue = defaultValue;
234 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
235 [&attribValue](const tensorflow::AttrValue& attrValue)
236 {
237 attribValue = attrValue.b();
238 });
239 return attribValue;
240}
241
/// Reads the mandatory tensorflow::DataType attribute 'name' from 'nodeDef'.
/// Throws (via ReadMandatoryNodeAttributeImpl) if absent or not kType.
tensorflow::DataType ReadMandatoryNodeTypeAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    tensorflow::DataType attribValue = tensorflow::DT_INVALID;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
                                   [&attribValue](const tensorflow::AttrValue& attrValue)
                                   {
                                       attribValue = attrValue.type();
                                   });
    return attribValue;
}
252
253TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& targetDims)
254{
255 std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
256 const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);
257
258 if (stretchDim != targetDims.end())
259 {
260 if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
261 {
telsoa01c577f2c2018-08-31 09:22:23 +0100262 throw ParseException(
263 boost::str(
264 boost::format(
265 "At most one component of shape can be -1 %1%")
266 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100267 }
268
telsoa01c577f2c2018-08-31 09:22:23 +0100269 auto targetNumElements =
270 boost::numeric_cast<unsigned int>(
271 std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
surmeh01bceff2f2018-03-29 16:29:27 +0100272 auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
273 outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
274 }
275
276 TensorInfo reshapeInfo = input;
277 reshapeInfo.SetShape(TensorShape{ static_cast<unsigned int>(outDims.size()), outDims.data() });
278
279 return reshapeInfo;
280}
281
// We need the input0Slot to guide the reshape for input1Slot.
// Inserts a Reshape layer after input1 so that it can broadcast against
// input0: input1's first dimension (only GetShape()[0] is read here, so this
// assumes input1 is effectively 1-D — TODO confirm callers guarantee that) is
// placed at input0's channel dimension, with every other dimension set to 1.
// Returns the output slot of the new Reshape layer.
IOutputSlot* AddBroadcastReshapeLayer(IOutputSlot* input0Slot, IOutputSlot* input1Slot, bool isNHWC,
                                      INetwork& m_Network, const tensorflow::NodeDef& nodeDef)
{
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();
    const TensorInfo inputTensorInfo = input0Slot->GetTensorInfo();
    // Channel dimension index: the last dim for NHWC, (rank - 3) for NCHW.
    const unsigned int matchDim = inputTensorInfo.GetNumDimensions() - (isNHWC ? 1 : 3);
    std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
    std::fill_n(reshapedDimensions.begin(), inputTensorInfo.GetNumDimensions(), 1);
    reshapedDimensions[matchDim] = input1Info.GetShape()[0];

    armnn::TensorInfo reshapedInfo = input1Info;
    reshapedInfo.SetShape(TensorShape{ inputTensorInfo.GetNumDimensions(), reshapedDimensions.data() });

    const std::string reshapeLayerName = "reshape_for-" + nodeDef.name();
    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
    IConnectableLayer* const reshapeLayer = m_Network.AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());

    // Rewire input1 through the reshape layer.
    input1Slot->Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    input1Slot = &reshapeLayer->GetOutputSlot(0);

    return input1Slot;
}
308
309OutputId ParseOutputId(const std::string & name)
310{
311 unsigned int outputNum = 0;
312 size_t colonPos = name.find_last_of(":");
313 if (colonPos != std::string::npos)
314 {
315 int n = std::stoi(name.substr(colonPos+1));
316 if (n<0 || n>100)
317 {
telsoa01c577f2c2018-08-31 09:22:23 +0100318 throw ParseException(
319 boost::str(
320 boost::format(
321 "Output tensor id is out of range for %1% %2%")
322 % name
323 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100324 }
325 outputNum = static_cast<unsigned int>(n);
326 }
327 return OutputId(name.substr(0,colonPos),outputNum);
328}
329
// Validates that a node's data_format attribute is one of the two layouts the
// parser understands ("NHWC" or "NCHW"); throws ParseException otherwise.
// FORMAT must be a std::string (compared with operator!=).
#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \
    if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Unsupported data format %1% passed for %2% node %3%. " \
                    "Only NHWC and NCHW supported %4%") \
                    % FORMAT \
                    % NODE_TYPE \
                    % NODE_DEF.name() \
                    % CHECK_LOCATION().AsString())); \
    }
343
// Validates that a node's padding attribute is "SAME" or "VALID"; throws
// ParseException otherwise. PADDING must be a std::string.
#define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
    if(PADDING != "SAME" && PADDING != "VALID" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Only 'SAME' and 'VALID' padding supported. Got %1% for %2% %3%") \
                    % PADDING \
                    % NODE_DEF.name() \
                    % CHECK_LOCATION().AsString())); \
    } \
355
surmeh01bceff2f2018-03-29 16:29:27 +0100356} // namespace
357
/// Lookup table mapping a TensorFlow op name to the TfParser member function
/// that converts a node of that type into ArmNN layers. Ops not listed here
/// are unsupported by this parser.
const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
    { "Const", &TfParser::ParseConst },
    { "Add", &TfParser::ParseAdd },
    { "BiasAdd", &TfParser::ParseBiasAdd },
    { "Identity", &TfParser::ParseIdentity },
    { "Conv2D", &TfParser::ParseConv2D },
    { "DepthwiseConv2dNative", &TfParser::ParseDepthwiseConv2D },
    { "ExpandDims", &TfParser::ParseExpandDims },
    { "FusedBatchNorm", &TfParser::ParseFusedBatchNorm },
    { "ConcatV2", &TfParser::ParseConcat },
    { "LRN", &TfParser::ParseLrn },
    { "MatMul", &TfParser::ParseMatMul },
    { "Mul", &TfParser::ParseMul },
    { "Placeholder", &TfParser::ParsePlaceholder },
    { "RealDiv", &TfParser::ParseRealDiv },
    { "Relu", &TfParser::ParseRelu },
    { "Relu6", &TfParser::ParseRelu6 },
    { "Reshape", &TfParser::ParseReshape },
    { "ResizeBilinear", &TfParser::ParseResizeBilinear },
    { "Shape", &TfParser::ParseShape },
    { "Squeeze", &TfParser::ParseSqueeze },
    { "Sigmoid", &TfParser::ParseSigmoid },
    { "Softmax", &TfParser::ParseSoftmax },
    { "Softplus", &TfParser::ParseSoftplus },
    { "Tanh", &TfParser::ParseTanh },
    { "MaxPool", &TfParser::ParseMaxPool },
    { "AvgPool", &TfParser::ParseAvgPool },
    { "Maximum", &TfParser::ParseMaximum },
    { "Minimum", &TfParser::ParseMinimum },
    { "Pad", &TfParser::ParsePad },
};
389
/// Creates a raw, caller-owned parser; pair with ITfParser::Destroy.
ITfParser* ITfParser::CreateRaw()
{
    return new TfParser();
}
394
/// Creates a parser wrapped in a smart pointer that calls Destroy on release.
ITfParserPtr ITfParser::Create()
{
    return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
}
399
/// Deletes a parser created via CreateRaw/Create.
void ITfParser::Destroy(ITfParser* parser)
{
    delete parser;
}
404
/// Computes TensorFlow-style 'SAME' padding for one spatial dimension.
/// When samePadding is false (i.e. 'VALID' padding) both outputs are zero.
/// Any extra padding is split evenly, with the odd element going to the back.
inline void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
                                 uint32_t filterSize, bool samePadding,
                                 uint32_t* paddingFront, uint32_t* paddingBack) {
    *paddingFront = 0;
    *paddingBack = 0;

    if (!samePadding) {
        return;
    }

    // Ceiling division: SAME padding keeps ceil(inputSize / stride) outputs.
    const uint32_t outputSize = (inputSize + stride - 1) / stride;
    // Total input extent the filter applications must cover.
    const uint32_t requiredInput = (outputSize - 1) * stride + filterSize;
    if (requiredInput > inputSize) {
        const uint32_t totalPadding = requiredInput - inputSize;
        *paddingFront = totalPadding / 2;
        *paddingBack = totalPadding - *paddingFront;
    }
}
420
/// Thin wrapper around CalculateSamePadding using the parser's head/tail
/// naming and reference out-parameters.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 bool samePadding)
{
    CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
}
426
/// An abstract base class which represents a single tensorflow operation (node)
/// that has been (potentially partially) converted to Armnn.
/// It may not yet have been fully converted into actual Armnn layers.
class ParsedTfOperation
{
public:
    ParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
    : m_Parser(parser)
    , m_Node(node)
    {
    }

    virtual ~ParsedTfOperation() {};

    /// The TensorFlow node this operation was created from.
    const tensorflow::NodeDef& GetNode() const { return m_Node; }

    /// Gets the ArmNN IOutputSlot corresponding to the given output index of the Tensorflow operation.
    /// This may result in the creation of Armnn layers if this was deferred (e.g. see ParsedConstTfOperation).
    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) = 0;

    /// If this operation is an Identity then this will recursively return the 'parent' operation;
    /// otherwise it returns this operation itself.
    virtual ParsedTfOperation* ResolveIdentityOperations()
    {
        return this;
    }

protected:
    // Non-owning: the parser outlives all ParsedTfOperations it creates.
    TfParser* m_Parser;
    const tensorflow::NodeDef& m_Node;
};
457
/// A ParsedTfOperation where the Armnn equivalent is a single layer,
/// with output slots that correspond directly to the Tf node outputs.
class SingleLayerParsedTfOperation : public ParsedTfOperation
{
public:
    SingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node, IConnectableLayer* layer)
    : ParsedTfOperation(parser, node)
    , m_Layer(layer)
    {
    }

    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        BOOST_ASSERT(m_Layer);
        // Assumes one-to-one mapping between Tf and armnn output slots.
        unsigned int armnnOutputSlotIdx = tfOutputIndex;
        if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "The requested output slot #%1% "
                        "for %2% does not exist %3%")
                        % armnnOutputSlotIdx
                        % m_Layer->GetName()
                        % CHECK_LOCATION().AsString()));
        }
        return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
    }

protected:
    // The single ArmNN layer backing this operation (may be null for deferred subclasses).
    IConnectableLayer* m_Layer;
};
491
/// A SingleLayerParsedTfOperation for deferred layer creation: the ArmNN layer
/// is only built, via CreateLayerDeferred(), the first time an output slot is requested.
class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
{
public:
    DeferredSingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
    : SingleLayerParsedTfOperation(parser, node, nullptr)
    {
    }

    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        if (!m_Layer)
        {
            // Lazily create the layer on first access.
            CreateLayerDeferred();
        }
        return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
    }

private:
    /// Subclasses populate m_Layer here; called at most once.
    virtual void CreateLayerDeferred() = 0;
};
513
514
/// Constructs a parser with no network; m_Network starts out null and is
/// populated later during parsing.
TfParser::TfParser()
    : m_Network(nullptr, nullptr)
{
}
519
520
/// Follows chains of "Identity" nodes recursively until a non-Identity node is
/// reached and returns it. Non-Identity nodes are returned unchanged.
/// Throws ParseException if an Identity node has other than one input or if an
/// input references an unknown node.
/// NOTE(review): the recursion assumes the graph contains no Identity cycles — confirm upstream validation.
const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
{
    if (nodeDef->op() != "Identity")
    {
        return nodeDef;
    }

    if (nodeDef->input_size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Identity node should have a single input! %1% has %2% inputs %3%")
                    % nodeDef->name()
                    % nodeDef->input_size()
                    % CHECK_LOCATION().AsString()));
    }

    auto it = m_NodesByName.find(nodeDef->input(0));
    if (it != m_NodesByName.end())
    {
        const tensorflow::NodeDef* inputNode = it->second;
        return ResolveIdentityNode(inputNode);
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Cannot find what the Identity node %1% is linked to! %2%")
                    % nodeDef->name()
                    % CHECK_LOCATION().AsString()));
    }
}
555
/// Returns the TensorFlow nodes (with their output indices) connected as
/// inputs of 'nodeDef'. Const nodes are treated as having no inputs. Throws
/// ParseException for control inputs ('^name') and for inputs referencing
/// nodes not present in m_NodesByName.
std::vector<OutputOfConstNodeDef>
TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
{
    std::vector<OutputOfConstNodeDef> ret;

    if (nodeDef.op() == "Const")
    {
        // For some reason const node can have "Control Inputs". We ignore them for now.
        return ret;
    }

    ret.reserve(boost::numeric_cast<size_t>(nodeDef.input_size()));
    for (int j = 0; j < nodeDef.input_size(); ++j)
    {
        OutputId outputId = ParseOutputId(nodeDef.input(j));

        if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Node '%1%' has Control Input '%2%' for input #%3% which is unsupported. %4%")
                        % nodeDef.name()
                        % nodeDef.input(j)
                        % j
                        % CHECK_LOCATION().AsString()));
        }

        auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
        if (inputIt == m_NodesByName.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Can't find node '%1%', which is listed as an input of '%2%' %3%")
                        % nodeDef.input(j)
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
        ret.push_back(OutputOfConstNodeDef(inputIt->second,outputId.m_Index));
    }

    return ret;
}
600
/// Fetches the already-parsed operations feeding 'nodeDef', after validating
/// that the number of TensorFlow inputs equals 'expectedNumInputs'. Identity
/// operations are resolved transparently, so callers see the real producers.
/// Throws ParseException on a count mismatch or an unparsed input node.
std::vector<OutputOfParsedTfOperation>
TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
                                            std::size_t expectedNumInputs)
{
    // Fetches the tensorflow nodes connected as inputs and validate the size.
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
    const std::size_t numInputs = nodes.size();
    if (numInputs != expectedNumInputs)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Unexpected number of inputs for node %1%. Expected %2%, found %3% %4%")
                    % nodeDef.name()
                    % expectedNumInputs
                    % numInputs
                    % CHECK_LOCATION().AsString()));
    }
    // Fetches the corresponding ParsedTfOperation operations
    std::vector<OutputOfParsedTfOperation> result;
    for (auto&& node : nodes)
    {
        auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
        if (it == m_ParsedTfOperations.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Node with name '%1%' has not been parsed %2%")
                        % node.m_IndexedValue->name()
                        % CHECK_LOCATION().AsString()));
        }
        ParsedTfOperation* parsedOp = it->second.get();
        // Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
        parsedOp = parsedOp->ResolveIdentityOperations();
        result.push_back(OutputOfParsedTfOperation(parsedOp,node.m_Index));
    }
    return result;
}
640
/// Parses an Add node. A MatMul input paired with a float Const input is fused
/// into a single FullyConnected layer; otherwise a plain addition is emitted.
ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
    // together as FullyConnected.
    if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
        HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        IConnectableLayer* layer =
            AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
                                   &nodeDef,nodeDef.name().c_str());
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
    }
    else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
             inputs[1].m_IndexedValue->GetNode().op() == "MatMul")
    {
        // Same fusion, with the operands in the opposite order.
        IConnectableLayer* layer =
            AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
                                   &nodeDef,nodeDef.name().c_str());
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
    }
    else
    {
        // Otherwise it's just a regular addition.
        return AddAdditionLayer(nodeDef);
    }
}
669
/// Parses a BiasAdd node as an addition; the second argument flags the
/// BiasAdd variant to AddAdditionLayer.
ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    return AddAdditionLayer(nodeDef, true);
}
674
/// A ParsedTfOperation which forwards to another (used for Identity nodes).
class ParsedIdentityTfOperation : public ParsedTfOperation
{
public:
    ParsedIdentityTfOperation(TfParser* parser, const tensorflow::NodeDef& node, ParsedTfOperation* representative)
        : ParsedTfOperation(parser, node)
        , m_Representative(representative)
    {
    }

    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        BOOST_ASSERT(m_Representative);
        return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
    }

    virtual ParsedTfOperation* ResolveIdentityOperations() override
    {
        // Keep following the chain in case the representative is itself an Identity.
        return m_Representative->ResolveIdentityOperations();
    }

private:
    // Non-owning: the operation this Identity forwards to.
    ParsedTfOperation* m_Representative;
};
699
/// Parses an Identity node by forwarding all output-slot requests to its single input.
ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
    // Any requests for the output slots of this node should be forwarded to the node connected as input.
    return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
}
706
/// A ParsedTfOperation for a Const node.
/// Creation of the armnn ConstLayer is deferred until it is actually needed, because Const nodes are mostly used
/// for weight inputs to MatMul/Conv2D nodes and in these cases armnn doesn't need a ConstLayer.
template <typename T>
class ParsedConstTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    ParsedConstTfOperation(TfParser* parser, const tensorflow::NodeDef& node,
        const T* tensorData, const TensorInfo& tensorInfo)
        : DeferredSingleLayerParsedTfOperation(parser, node),
        m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
        m_TensorInfo(tensorInfo)
    {
        // The template parameter must match the tensor's element type.
        BOOST_ASSERT(tensorInfo.GetDataType() == GetDataType<T>());
    }

    /// Creates the ConstantLayer on first use (see DeferredSingleLayerParsedTfOperation).
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
        m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
    }

    /// Returns the constant data as a ConstTensor backed by the caller-provided
    /// 'outputTensorData'. When swizzleForConvolutionWeights is true the data
    /// is permuted from TensorFlow's filter layout to ArmNN's.
    ConstTensor GetConstTensor(bool swizzleForConvolutionWeights, std::vector<T>& outputTensorData) const
    {
        // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
        // Tensorflow weights are [H, W, In, Out].
        // ArmNN weights are [Out, In, H, W].
        static const PermutationVector HWIOToOIHW = {2, 3, 1, 0};

        const TensorInfo outInfo = swizzleForConvolutionWeights
                                   ? armnnUtils::Permuted(m_TensorInfo, HWIOToOIHW)
                                   : m_TensorInfo;

        outputTensorData.resize(m_TensorInfo.GetNumElements());

        // Copies or swizzles from the permanent storage into the storage the caller provided.
        if (swizzleForConvolutionWeights)
        {
            armnnUtils::Permute(outInfo.GetShape(), HWIOToOIHW, m_Storage.data(), outputTensorData.data());
        }
        else
        {
            memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.GetNumBytes());
        }
        // Updates the result to point to the user provided storage.
        ConstTensor constTensor(outInfo, outputTensorData);
        return constTensor;
    }

    /// Read-only access to the raw constant data held by this operation.
    const T* GetStorage() const
    {
        return m_Storage.data();
    }

    const TensorInfo& GetTensorInfo() const
    {
        return m_TensorInfo;
    }

private:
    ///< Manages the lifetime of the tensor data.
    std::vector<T> m_Storage;
    ///< Describes the layout of the tensor and points to the data in m_Storage.
    TensorInfo m_TensorInfo;
};
773
telsoa01c577f2c2018-08-31 09:22:23 +0100774DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
775 const tensorflow::NodeDef& nodeDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100776{
777 switch (tfDataType)
778 {
779 case tensorflow::DT_FLOAT:
780 return DataType::Float32;
781 break;
782 case tensorflow::DT_INT32:
783 return DataType::Signed32;
784 break;
785 default:
telsoa01c577f2c2018-08-31 09:22:23 +0100786 throw ParseException(
787 boost::str(
788 boost::format(
789 "Unknown DataType %1% for node %2% %3%")
790 % tensorflow::DataType_Name(tfDataType)
791 % nodeDef.name()
792 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100793 }
794}
795
/// Helper for reading a TensorProto's flat value list into a raw byte buffer.
/// When fewer source values than destination elements are supplied, the last
/// source value is repeated to fill the remainder.
struct ParseTfTensorValueList
{
    // Type-specific entry points; specializations are defined below.
    template<typename DataType>
    static void Parse(
        const tensorflow::TensorProto& tfTensor,
        unsigned int dstElements,
        std::vector<int8_t>& outputData);

    /// Copies numSrcElements values of DataType from srcData into dstData
    /// (resized as needed, sized in bytes), padding with the last source value
    /// when numDstElements exceeds numSrcElements.
    template <typename DataType>
    static void ReadData(const void* srcData, unsigned int numSrcElements,
        std::vector<int8_t>& dstData, unsigned int numDstElements)
    {
        // If there are no entries in the list, perform no action.
        if (numSrcElements == 0)
        {
            return;
        }

        // If no size was provided, use the length of the value list.
        if (numDstElements == 0)
        {
            numDstElements = numSrcElements;
        }

        // Allocates memory.
        dstData.resize(std::max(numSrcElements, numDstElements) * sizeof(DataType));

        const DataType* srcTensor = reinterpret_cast<const DataType*>(srcData);
        DataType* dstTensor = reinterpret_cast<DataType*>(dstData.data());

        // Copies the value list entries into the destination.
        std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);

        if (numDstElements > numSrcElements)
        {
            // Uses the last element in the list to fill the remaining entries.
            std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
        }
    }

};
837
/// Reads the float value list of 'tfTensor' into 'outputData' (see ReadData).
template <>
void ParseTfTensorValueList::Parse<float>(const tensorflow::TensorProto& tfTensor,
                                          unsigned int dstElements, std::vector<int8_t>& outputData)
{
    ReadData<float>(tfTensor.float_val().data(), static_cast<unsigned int>(tfTensor.float_val_size()),
        outputData, dstElements);
}
845
/// Reads the int32 value list of 'tfTensor' into 'outputData' (see ReadData).
template <>
void ParseTfTensorValueList::Parse<int32_t>(const tensorflow::TensorProto& tfTensor,
                                            unsigned int dstElements, std::vector<int8_t>& outputData)
{
    ReadData<int32_t>(tfTensor.int_val().data(), static_cast<unsigned int>(tfTensor.int_val_size()),
        outputData, dstElements);
}
853
/// Generic factory that constructs an OperatorType<DataType> as a unique_ptr,
/// forwarding all extra arguments; dispatched by InvokeParseFunction below.
template <template<typename> class OperatorType, typename T = int8_t>
struct MakeTfOperation
{
    template<typename DataType, class... Args>
    inline static std::unique_ptr<OperatorType<DataType>> Parse(TfParser* parser, const tensorflow::NodeDef& node,
        Args&&... args)
    {
        return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
    }
};
864
865template <>
866struct MakeTfOperation<ParsedConstTfOperation>
867{
868 template<typename DataType, class... Args>
869 inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(TfParser* parser,
870 const tensorflow::NodeDef& node, const std::vector<int8_t>& tensorData, const TensorInfo& tensorInfo)
871 {
872 return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
873 reinterpret_cast<const DataType*>(tensorData.data()), tensorInfo);
874 }
875};
876
877template <class FuncType>
878struct InvokeParseFunction
879{
880 template<class ResType, class... Args>
881 inline static ResType Result(DataType dataType, Args&&... args)
882 {
883 if (dataType == DataType::Float32)
884 {
885 return FuncType::template Parse<float>(std::forward<Args>(args)...);
886 }
887 else if (dataType == DataType::Signed32)
888 {
889 return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
890 }
891
892 return ResType();
893 }
894
895 template<class... Args>
896 inline static void Result(DataType dataType, Args&&... args)
897 {
898 if (dataType == DataType::Float32)
899 {
900 FuncType::template Parse<float>(std::forward<Args>(args)...);
901 }
902 else if (dataType == DataType::Signed32)
903 {
904 FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
905 }
906 }
907};
908
// Parses a TensorFlow "Const" node into a ParsedConstTfOperation of the matching
// data type. The tensor data is taken either from the proto's packed
// "tensor_content" bytes or, if that is empty, from its typed value list
// (float_val / int_val via ParseTfTensorValueList).
// Throws ParseException if the node has no value, no data, no shape for packed
// content, or more data than the declared shape allows.
ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    BOOST_ASSERT(nodeDef.op() == "Const");

    if (nodeDef.attr().count("value") == 0)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Value not found for Const node - %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
    const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
    const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "dtype");

    // Extracts the size of each dimension from the proto's shape message.
    const auto GetDimensionSize = [](auto& d) { return d.size(); };

    std::vector<unsigned int> dimensionSizes;
    std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
        std::back_inserter(dimensionSizes), GetDimensionSize);

    // Calculates number of elements.
    const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
    unsigned int numElements = 0U;

    // numElements stays 0 when the proto declares no dimensions; the value-list
    // branch below uses that as a signal that the shape must be inferred.
    if (!dimensionSizes.empty())
    {
        numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
                                      1U, std::multiplies<unsigned int>());
    }

    std::vector<int8_t> tensorData;

    // Get tensor data from the list of values attribute.
    if (tfTensor.tensor_content().empty())
    {
        InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);

        // If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
        // tensor of the provided number of elements.
        if (numElements == 0)
        {
            const unsigned int tfNumElements =
                static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
            dimensionSizes.push_back(tfNumElements);
        }
    }
    // Gets tensor data from tensor content attribute.
    else
    {
        tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());

        // Checks if a tensor shape is defined for the tensor content.
        // Unlike the value-list case, packed content cannot infer its own shape.
        if (numElements == 0)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "No tensor shape found for Const node - %1% %2%")
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
    }

    // Const node requires at least a list of values or a content attribute.
    if (tensorData.empty())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "No tensor data found for Const node - %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
                                dimensionSizes.data(),
                                dataType);

    // If we have a list of values, then the length of the list must be
    // less than or equal to the number of elements implied by the shape argument.
    // (Shorter lists are allowed: ReadData pads them by repeating the last value.)
    if (tensorData.size() > tensorInfo.GetNumBytes())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Number of elements (%1%) should be less than or equal "
                    "to the number of elements implied by the shape argument (%2%) for Const node - %3% %4%")
                    % (tensorData.size() / GetDataTypeSize(dataType))
                    % tensorInfo.GetNumElements()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
        dataType, this, nodeDef, tensorData, tensorInfo);
}
1009
1010template<typename Type>
1011bool TfParser::HasParsedConstTensor(const std::string & nodeName) const
1012{
1013 auto it = m_ParsedTfOperations.find(nodeName);
jimfly01f6ba7472018-12-04 10:09:52 +00001014 if (it == m_ParsedTfOperations.end())
surmeh01bceff2f2018-03-29 16:29:27 +01001015 {
1016 return false;
1017 }
jimfly01f6ba7472018-12-04 10:09:52 +00001018 return dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) != nullptr;
1019}
1020
1021template<typename Type>
1022bool TfParser::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const
1023{
1024 return dynamic_cast<ParsedConstTfOperation<Type>*>(parsedTfOpPtr) != nullptr;
surmeh01bceff2f2018-03-29 16:29:27 +01001025}
1026
// Parses a TensorFlow "Conv2D" node into an ArmNN Convolution2d layer.
// Requirements enforced here: the weight input must be a parsed constant
// tensor, and any "dilations" attribute must be all-ones. Supports both NHWC
// and NCHW data formats via DataLayoutIndexed.
ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    // Input 1 carries the weights; they must be constant so they can be
    // permuted here at parse time.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Convolution layers with constant weights for %1%, input %2% %3%")
                    % nodeDef.name()
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    // Read the dilations, if present - only [1,1,1,1] (the default) is supported.
    std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
    if (!dilations.empty())
    {
        for (auto dilation : dilations)
        {
            if (dilation != 1u)
            {
                throw ParseException(
                    boost::str(
                        boost::format(
                            "ArmNN only supports Convolution layers with dilations [1,1,1,1] for %1% %2%")
                            % nodeDef.name()
                            % CHECK_LOCATION().AsString()));
            }
        }
    }

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    // DataLayoutIndexed maps H/W to the right positions for the chosen layout,
    // used below to index the strides list and the input/weight shapes.
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth  = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // Tensorflow weights are [H, W, In, Out].
    // ArmNN weights have to be [Out, H, W, In] when the data layout is NHWC,
    // and [Out, In, H, W] when the data layout is NCHW.
    PermutationVector permutationVector =
        dataLayout == DataLayout::NHWC ?
            std::initializer_list<unsigned int>{ 1, 2, 3, 0 } : // NHWC: [H, W, In, Out] -> [Out, H, W, In]
            std::initializer_list<unsigned int>{ 2, 3, 1, 0 };  // NCHW: [H, W, In, Out] -> [Out, In, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data());

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    uint32_t weightHeight = weightTensor.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t weightWidth  = weightTensor.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    if (paddingString == "SAME")
    {
        padding = true;

        // SAME padding: output spatial size is ceil(input / stride).
        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth  = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                  static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        // VALID padding: output spatial size is ceil((input - kernel + 1) / stride).
        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth  = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                  static_cast<float>(desc.m_StrideX)));
    }

    // The output channel count is the weights' Out dimension, which after the
    // swizzle above is always at index 0.
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      weightTensor.GetShape()[0] },
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
        default:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      weightTensor.GetShape()[0],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc, weightTensor, nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1164
// Parses a TensorFlow "DepthwiseConv2dNative" node into an ArmNN
// DepthwiseConvolution2d layer. The weight input must be a parsed constant
// tensor. Unlike ParseConv2D, NHWC inputs are handled by permuting into the
// NCHW-style layout (NHWCToArmNN) and wrapping the layer with
// SwizzleInDeswizzleOut; all shape arithmetic below is done in that layout.
ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
                                                    const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Depthwise Convolution layer with constant weights. "
                    "Non const input found %1% for node %2% %3%")
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);


    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");

    if (dataFormat == "NHWC")
    {
        // NHWC strides are [N, H, W, C].
        desc.m_StrideX = strides[2];
        desc.m_StrideY = strides[1];
        // Swizzles input to supported memory layout.
        inputTensorInfo = armnnUtils::Permuted(inputSlot.GetTensorInfo(), NHWCToArmNN);
    }
    else if (dataFormat == "NCHW")
    {
        // NCHW strides are [N, C, H, W].
        desc.m_StrideX = strides[3];
        desc.m_StrideY = strides[2];
    }

    // From here on inputTensorInfo is in NCHW order: indices 2/3 are H/W.
    uint32_t inputHeight = inputTensorInfo.GetShape()[2];
    uint32_t inputWidth = inputTensorInfo.GetShape()[3];

    std::vector<float> outputTensorData;

    ConstTensor weightTensor = weightNode->GetConstTensor(true, outputTensorData);

    uint32_t weightHeight = weightTensor.GetShape()[2];
    uint32_t weightWidth = weightTensor.GetShape()[3];

    bool padding = false;
    TensorInfo outputInfo;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    // Output channels = shape[0] * shape[1] of the weight tensor (depthwise
    // channel multiplier times input channels); spatial size follows the
    // SAME (ceil(in/stride)) / VALID (ceil((in-kernel+1)/stride)) formulas.
    if (paddingString == "SAME")
    {
        padding = true;
        outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                  weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
                                  static_cast<uint32_t>(ceil(
                                      static_cast<float>(inputHeight) /
                                      static_cast<float>(desc.m_StrideY))),
                                  static_cast<uint32_t>(ceil(
                                      static_cast<float>(inputWidth) /
                                      static_cast<float>(desc.m_StrideX)))
                                }, DataType::Float32);
    }
    else if (paddingString == "VALID")
    {
        padding = false;
        outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                  weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
                                  static_cast<uint32_t>(ceil(
                                      static_cast<float>(inputHeight - weightHeight + 1) /
                                      static_cast<float>(desc.m_StrideY))),
                                  static_cast<uint32_t>(ceil(
                                      static_cast<float>(inputWidth - weightWidth + 1) /
                                      static_cast<float>(desc.m_StrideX)))
                                }, DataType::Float32);
    }

    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc, weightTensor, nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    if (dataFormat == "NHWC")
    {
        // Wraps the layer so the network still consumes/produces NHWC data.
        layer = SwizzleInDeswizzleOut(*m_Network, inputSlot, *layer, nodeDef.name());
    }
    else
    {
        inputSlot.Connect(layer->GetInputSlot(0));
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1268
Conor Kennedyc2130a02018-12-05 11:05:54 +00001269TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
1270{
1271 BOOST_ASSERT(nodeDef.op() == "ExpandDims");
1272
1273 if (inputTensorInfo.GetNumDimensions() > 4) {
1274 throw ParseException(
1275 boost::str(
1276 boost::format(
1277 "Unsupported number of dimensions: %1% for input shape for ExpandDims %2% %3%")
1278 % inputTensorInfo.GetNumDimensions()
1279 % nodeDef.name()
1280 % CHECK_LOCATION().AsString()));
1281 }
1282
1283 std::int32_t expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");
1284
1285 std::int32_t inputDimSize = boost::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
1286 std::vector<uint32_t> outputDims;
1287
1288 // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
1289 if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1290 {
1291 // add current input shape to outputDims
1292 for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
1293 auto currentDimension = inputTensorInfo.GetShape()[i];
1294 outputDims.push_back(currentDimension);
1295 }
1296
1297 // insert a dimension of 1 at index 'expandDim' of inputs shape
1298 if (expandDim >= 0)
1299 {
1300 auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1301 outputDims.insert(getPosition, 1);
1302 }
1303
1304 // if negative number for 'expandDim' then count backwards from the last element
1305 // and insert 1 dimension at index 'expandDim'
1306 if (expandDim < 0)
1307 {
Matteo Martincighd7cceeb2018-12-06 09:06:29 +00001308 int outputDimSize = boost::numeric_cast<int>(outputDims.size() + 1);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001309 auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1310 outputDims.insert(getPosition, 1);
1311 }
1312 }
1313 else
1314 {
1315 throw InvalidArgumentException(
1316 boost::str(
1317 boost::format(
1318 "Cannot expand dimension %1% in input tensor with %2% dimension %3%")
1319 % expandDim
1320 % inputDimSize
1321 % CHECK_LOCATION().AsString()));
1322 }
1323
1324 if (outputDims.size() > 4)
1325 {
1326 throw ParseException(
1327 boost::str(
1328 boost::format(
1329 "Unsupported number of dimensions: %1% for output shape for ExpandDims %2% %3%")
1330 % outputDims.size()
1331 % nodeDef.name()
1332 % CHECK_LOCATION().AsString()));
1333 }
1334
1335 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1336 outputDims.data());
1337
1338 TensorInfo outTensorInfo = inputTensorInfo;
1339 outTensorInfo.SetShape(outShape);
1340
1341 return outTensorInfo;
1342}
1343
1344ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1345{
1346 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1347
1348 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1349 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1350
1351 TensorInfo outputInfo;
1352 outputInfo = OutputShapeOfExpandDims(nodeDef, inputTensorInfo);
1353
1354 ReshapeDescriptor reshapeDesc;
1355 reshapeDesc.m_TargetShape = outputInfo.GetShape();
1356 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1357 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1358 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1359
1360 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1361}
1362
// Parses a TensorFlow "FusedBatchNorm" node into an ArmNN BatchNormalization
// layer. Inputs 1-4 (scale, offset, mean, variance) must all be parsed
// constant float tensors; input 0 is the data tensor. The layer's data layout
// follows the node's "data_format" attribute and the output tensor info is
// taken directly from the input slot.
ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
                                                   const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);

    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant scale. "
                    "Input %1%. Node %2% %3%")
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* scaleNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant offset. "
                    "Input %1%. Node %2% %3%")
                    % inputs[2].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* offsetNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);

    if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant mean. "
                    "Input %1%. Node %2% %3%")
                    % inputs[3].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* meanNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);

    if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant variance. "
                    "Input %1%. Node %2% %3%")
                    % inputs[4].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* varianceNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);

    const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "FusedBatchNorm");

    // The descriptor only has the epsilon attribute.
    BatchNormalizationDescriptor desc;
    desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
    desc.m_DataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
    // locally until the layer is added (AddBatchNormalizationLayer copies them).
    std::vector<float> scaleTensorData;
    ConstTensor scaleTensor = scaleNode->GetConstTensor(false, scaleTensorData);

    std::vector<float> offsetTensorData;
    ConstTensor offsetTensor = offsetNode->GetConstTensor(false, offsetTensorData);

    std::vector<float> meanTensorData;
    ConstTensor meanTensor = meanNode->GetConstTensor(false, meanTensorData);

    std::vector<float> varianceTensorData;
    ConstTensor varianceTensor = varianceNode->GetConstTensor(false, varianceTensorData);

    IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
                                                                     meanTensor,
                                                                     varianceTensor,
                                                                     offsetTensor,
                                                                     scaleTensor,
                                                                     nodeDef.name().c_str());

    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);

    // Batch norm is shape-preserving: output info equals input info.
    layer->GetOutputSlot(0).SetTensorInfo(inputSlot.GetTensorInfo());
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1461
// Checks whether mulNodeDef, combined with otherOp, forms the LeakyRelu pattern
// max(mul(alpha, x), x), where alpha is a scalar constant.
//
// mulNodeDef        - candidate elementwise multiplication ("Mul") node.
// alphaLayerIndex   - which input (0 or 1) of the Mul is expected to be the
//                     scalar constant alpha; the other input must be x.
// otherOp           - the other operand of the enclosing Maximum node; must be
//                     the same x that feeds the Mul.
// outputOfLeakyRelu - on a match, set to the output slot of x (the slot the
//                     fused activation layer should consume).
// desc              - on a match, filled in as a LeakyReLu activation with
//                     m_A = alpha.
// Returns true only when the full pattern is matched.
bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
                                           size_t alphaLayerIndex,
                                           const OutputOfParsedTfOperation& otherOp,
                                           armnn::IOutputSlot** outputOfLeakyRelu,
                                           armnn::ActivationDescriptor & desc)
{
    const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();

    // Verifying all these assumptions hold:
    //
    // 1, the mulNodeDef is an elementwise multiplication node "Mul"
    // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
    // 3, the inputLayerIndex selects a layer which has the same name as otherNodeDef
    //

    if (mulNodeDef.op() == "Mul")
    {
        size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
        std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);

        BOOST_ASSERT(inputs.size() == 2);
        BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
        BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
        BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));

        // The non-alpha input of the Mul must be the same node as the other
        // operand of the Maximum (i.e. both sides see the same x).
        if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
        {
            if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
            {
                ParsedConstTfOperation<float>* alpha =
                    boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(
                        inputs[alphaLayerIndex].m_IndexedValue);

                std::vector<float> const_data;
                ConstTensor const_tensor = alpha->GetConstTensor(false, const_data);

                // alpha must be a single scalar value.
                if (const_data.size() == 1)
                {
                    desc.m_Function = ActivationFunction::LeakyReLu;
                    desc.m_A = const_data[0];

                    *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
                    return true;
                }
            }
        }
    }
    return false;
}
1511
telsoa01c577f2c2018-08-31 09:22:23 +01001512ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
1513 const tensorflow::GraphDef& graphDef)
1514{
1515 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
Sadik Armagan975c09a2018-12-04 10:02:08 +00001516 if (inputs.size() != 2)
1517 {
1518 throw ParseException(
1519 boost::str(
1520 boost::format(
1521 "Maximum expects two inputs!. Got %1% for Node %2% %3%")
1522 % inputs.size()
1523 % nodeDef.name()
1524 % CHECK_LOCATION().AsString()));
1525 }
1526
telsoa01c577f2c2018-08-31 09:22:23 +01001527 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1528 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1529 IOutputSlot* outputOfLeakyRelu = nullptr;
1530
1531 ActivationDescriptor desc;
1532
Sadik Armagan975c09a2018-12-04 10:02:08 +00001533 // A max node may be part of a LeakyRelu, with one input as a multiplication with a scalar constant,
1534 // i.e. one of the four possible scenarios:
1535 // 1, max(mul(a, x), x)
1536 // 2, max(mul(x, a), x)
1537 // 3, max(x, mul(a, x))
1538 // 4, max(x, mul(x, a))
1539 // These are handled by an activation layer.
telsoa01c577f2c2018-08-31 09:22:23 +01001540
1541 if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1542 IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1543 IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1544 IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1545 {
1546 BOOST_ASSERT(outputOfLeakyRelu != nullptr);
1547
1548 IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
1549 outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
1550 layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
1551 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1552 }
1553 else
1554 {
Sadik Armagan975c09a2018-12-04 10:02:08 +00001555 // Anything else is just a maximum layer.
1556
1557 return AddMaximumLayer(nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001558 }
1559}
1560
// Parses a TensorFlow "Minimum" node into an ArmNN Minimum layer. When the two
// inputs differ in rank, only the 1D-vs-4D pairing is supported; the 1D input
// is reshaped for broadcasting via AddBroadcastReshapeLayer. The output shape
// is the element-wise maximum of the two (possibly reshaped) input shapes.
ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
                                            const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
    const unsigned int input0Dim = input0Slot->GetTensorInfo().GetNumDimensions();
    const unsigned int input1Dim = input1Slot->GetTensorInfo().GetNumDimensions();

    if (input0Dim != input1Dim)
    {
        // broadcasting where input0 and input1 have different number of dimensions
        // is only supported for 1D and 4D tensors pair
        if (input0Dim == 1 && input1Dim == 4)
        {
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
        }
        else if (input0Dim == 4 && input1Dim == 1)
        {
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
        }
        else
        {
            throw ParseException(
                boost::str(
                    boost::format("Unsupported broadcast configuration for Minimum operation %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }
    }

    IConnectableLayer* const layer = m_Network->AddMinimumLayer(nodeDef.name().c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    TensorInfo outputInfo = input0Slot->GetTensorInfo();
    std::vector<unsigned int> outputShape;

    // Both slots now have the same rank (equal ranks, or equalized by the
    // broadcast reshape above), so per-dimension max gives the broadcast shape.
    const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
    const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();

    for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
    {
        outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
    }

    outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1614
jimfly01f6ba7472018-12-04 10:09:52 +00001615unsigned int CheckPaddingTensor(const ConstTensor& paddingTensor,
1616 const TensorInfo& inputTensorInfo,
1617 const std::string& nodeName)
1618{
1619 unsigned int rank = paddingTensor.GetShape()[0];
1620 unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
1621 if (rank != expectedRank)
1622 {
1623 throw ParseException(
1624 boost::str(
1625 boost::format(
1626 "Expected the padding tensor to be of rank %1 not %2 on Node %3 %4.")
1627 % expectedRank
1628 % rank
1629 % nodeName
1630 % CHECK_LOCATION().AsString()));
1631 }
1632 unsigned int second = paddingTensor.GetShape()[1];
1633 if (second != 2)
1634 {
1635 throw ParseException(
1636 boost::str(
1637 boost::format(
1638 "Expected the padding tensor to be of dimensions [%1, 2] not [%1, %2] on Node %3 %4.")
1639 % rank
1640 % second
1641 % nodeName
1642 % CHECK_LOCATION().AsString()));
1643 }
1644 return rank;
1645}
1646
1647TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
1648 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
1649{
1650 unsigned int numDims = inputTensorInfo.GetNumDimensions();
1651 std::vector<unsigned int> outDims;
1652 for (unsigned int i = 0; i < numDims; ++i)
1653 {
1654 unsigned int dimSize = inputTensorInfo.GetShape()[i];
1655 const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
1656 dimSize += dimPadding.first;
1657 dimSize += dimPadding.second;
1658 outDims.push_back(dimSize);
1659 }
1660 TensorInfo paddedTensorInfo = inputTensorInfo;
1661 unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
1662 paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
1663 return paddedTensorInfo;
1664}
1665
1666ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
1667 const tensorflow::GraphDef& graphDef)
1668{
1669 // input consists of:
1670 // input[0] the tensor which will be padded
1671 // input[1] the tensor holding the padding values
1672 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1673 IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1674 TensorInfo inputTensorInfo = previousLayerOutputSlot.GetTensorInfo();
1675 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
1676 {
1677 throw ParseException(
1678 boost::str(
1679 boost::format(
1680 "ArmNN only supports Pad with constant padding. "
1681 "Input %1%. Node %2% %3%")
1682 % inputs[1].m_IndexedValue->GetNode().name()
1683 % nodeDef.name()
1684 % CHECK_LOCATION().AsString()));
1685
1686 }
1687 ParsedConstTfOperation<int32_t>* paddingTensorOp =
1688 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1689
1690 std::vector<int32_t> paddingTensorData;
1691 ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(false, paddingTensorData);
1692 // paddings is an integer tensor with shape [n, 2], where n is the rank of tensor
1693 // and should match the rank of the input tensor that is being padded.
1694 // For each dimension D of input, paddings[D, 0] indicates how many values to add
1695 // before the contents of tensor in that dimension, and paddings[D, 1] indicates how
1696 // many values to add after the contents of tensor in that dimension
1697 // This needs to be translated into a padList for ACL
1698 std::vector<std::pair<unsigned int, unsigned int>> padList;
1699 unsigned int rank = CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
1700 for (unsigned int i = 0; i < rank; ++i)
1701 {
1702 std::pair<unsigned int, unsigned int> paddingForDim;
1703 for (unsigned int j = 0; j < 2; j++)
1704 {
1705 unsigned int index = (i * 2) + j;
1706 int paddingAmount = paddingTensorData[index];
1707 // make sure we can cast to an unsigned value
1708 if (paddingAmount < 0)
1709 {
1710 throw ParseException(
1711 boost::str(
1712 boost::format(
1713 "Negative amount %1 specified at [%2, %3] of padding tensor on Node %4 %5.")
1714 % paddingAmount
1715 % i
1716 % j
1717 % nodeDef.name()
1718 % CHECK_LOCATION().AsString()));
1719 }
1720 if (j == 0)
1721 {
1722 paddingForDim.first = static_cast<unsigned int>(paddingAmount);
1723 }
1724 else
1725 {
1726 paddingForDim.second = static_cast<unsigned int>(paddingAmount);
1727 }
1728 }
1729 padList.push_back(paddingForDim);
1730 }
1731 PadDescriptor padDescriptor(padList);
1732 IConnectableLayer* layer = m_Network->AddPadLayer(padDescriptor, nodeDef.name().c_str());
1733 previousLayerOutputSlot.Connect(layer->GetInputSlot(0));
1734 // Use the padding to calculate the new output tensor shape
1735 TensorInfo outputTensorInfo = CalculatePaddedOutputTensorInfo(inputTensorInfo, padList);
1736 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1737 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1738}
1739
ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    // Parses a Concat node into an ArmNN Merger layer. The merge always runs
    // along the channel dimension internally (NCHW channel index 1), so NHWC
    // inputs (axis 3) are swizzled to NCHW around the merger.
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
    // In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
    unsigned int numInputs = static_cast<unsigned int>(nodes.size());
    unsigned int numConcatView = numInputs - 1;

    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), MaxNumOfTensorDimensions);
    std::vector<unsigned int>mergeDimSizes(MaxNumOfTensorDimensions, 0u);

    // Accumulated size of the merge dimension across all concatenated views.
    unsigned int mergeDim = 0;
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);

    // The last input is the axis for concatenation; it must be a parse-time constant.
    if (!HasParsedConstTensor<int32_t>(inputs[numInputs - 1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Concat with constant axis. "
                    "Input %1%. Node %2% %3%")
                % inputs[numInputs - 1].m_IndexedValue->GetNode().name()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<int32_t>* shapeNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[numInputs - 1].m_IndexedValue);

    std::vector<int32_t> axisTensorData;
    ConstTensor axisTensor = shapeNode->GetConstTensor(false, axisTensorData);

    // This concatDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
    const unsigned int concatDimInput = static_cast<unsigned int>(axisTensorData[0]);

    // Armnn supports concatenation along the channel dimension for data formats NHWC and NCHW.
    if (concatDimInput == 0 || concatDimInput == 2)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Dimension %1% for concatenation is not supported by Armnn. "
                    "Node %2% %3%")
                % concatDimInput
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    // This is the only concatDim we support in armnn.
    const unsigned int concatDim = 1;
    for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
    {
        // Need to double check whether it should be
        IOutputSlot& inputSlot =
            inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
        TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

        // Process the input tensor info: fills in the view origins of
        // concatDescriptor and updates mergeDimSizes/mergeDim.
        armnnUtils::ProcessConcatInputTensorInfo(inputTensorInfo, concatDescriptor,
            concatDimInput, viewIndex, mergeDimSizes, mergeDim);
    }

    mergeDimSizes[concatDim] = mergeDim;
    armnn::IConnectableLayer *layer = m_Network->AddMergerLayer(concatDescriptor, nodeDef.name().c_str());

    layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(MaxNumOfTensorDimensions, mergeDimSizes.data(),
                                                            DataType::Float32));

    // Connect each view, inserting an NHWC->NCHW swizzle per input when the
    // TF concat axis was the NHWC channel axis (3).
    for (unsigned int v = 0; v < numConcatView; ++v)
    {
        IOutputSlot& inputSlot = inputs[v].m_IndexedValue->ResolveArmnnOutputSlot(inputs[v].m_Index);
        if (concatDimInput == 3)
        {
            IConnectableLayer* const swizzleLayer = AddSwizzleLayer(*m_Network, inputSlot, NHWCToArmNN,
                                                                    "swizzle_for-" + nodeDef.name());
            swizzleLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(v));
        }
        else
        {
            inputSlot.Connect(layer->GetInputSlot(v));
        }
    }

    // For NHWC inputs, swizzle the merged result back to NHWC so downstream
    // layers see the original layout.
    if (concatDimInput == 3)
    {
        IConnectableLayer* const deswizzleLayer = AddSwizzleLayer(*m_Network, layer->GetOutputSlot(0), ArmNNToNHWC,
                                                                  "deswizzle_for-" + nodeDef.name());
        layer = deswizzleLayer;
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1832
1833ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
1834 const tensorflow::GraphDef& graphDef)
1835{
telsoa01c577f2c2018-08-31 09:22:23 +01001836 // Note: the Shape layer is handled in a special way, because:
1837 // 1. ARMNN doesn't support int32 tensors which it outputs.
1838 // 2. ARMNN works with statically shaped tensors which are known at parse time.
surmeh01bceff2f2018-03-29 16:29:27 +01001839 // 3. because of 1. and 2. we treat the output of Shape as a temporary const int32
telsoa01c577f2c2018-08-31 09:22:23 +01001840 // tensor which may be used as an input to other ops, most likely a Reshape.
surmeh01bceff2f2018-03-29 16:29:27 +01001841
1842 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
1843 if (tfDataType != tensorflow::DT_INT32)
1844 {
telsoa01c577f2c2018-08-31 09:22:23 +01001845 throw ParseException(
1846 boost::str(
1847 boost::format(
1848 "Armnn only supports DT_INT32 as out_type. Got %1% for Node %2% %3%")
1849 % tensorflow::DataType_Name(tfDataType)
1850 % nodeDef.name()
1851 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001852 }
1853
1854 const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1855 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1856 const TensorInfo& prevLayerTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1857 unsigned int prevLayerDimensions = prevLayerTensorInfo.GetNumDimensions();
1858
1859 std::vector<int32_t> shapeTensorData;
1860 shapeTensorData.reserve(prevLayerDimensions);
1861
1862 for (unsigned int i=0; i<prevLayerDimensions; ++i)
1863 {
1864 shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.GetShape()[i]));
1865 }
1866
1867 TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
1868
1869 return std::make_unique<ParsedConstTfOperation<int32_t>>(this,
1870 nodeDef,
1871 &shapeTensorData[0],
1872 shapeTensorInfo);
1873}
1874
1875ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
1876 const tensorflow::GraphDef& graphDef)
1877{
1878 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1879 ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
1880
1881 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
1882 {
telsoa01c577f2c2018-08-31 09:22:23 +01001883 throw ParseException(
1884 boost::str(
1885 boost::format(
1886 "ArmNN only supports Reshape layers with constant shapes. "
1887 "Input %1% Node %2% %3%")
1888 % inputs[1].m_IndexedValue->GetNode().name()
1889 % nodeDef.name()
1890 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001891 }
1892 ParsedConstTfOperation<int32_t>* shapeNode =
1893 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1894
1895 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
1896 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1897
1898 std::vector<int32_t> shapeTensorData;
1899 ConstTensor shapeTensor = shapeNode->GetConstTensor(false, shapeTensorData);
1900 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
1901
1902 TensorShape targetShape = outputTensorInfo.GetShape();
1903 ReshapeDescriptor reshapeDesc;
1904 reshapeDesc.m_TargetShape = targetShape;
1905
1906 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1907 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1908 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1909
1910 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1911}
1912
1913ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
1914 const tensorflow::GraphDef& graphDef)
1915{
1916 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1917
1918 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
1919 {
telsoa01c577f2c2018-08-31 09:22:23 +01001920 throw ParseException(
1921 boost::str(
1922 boost::format(
1923 "ArmNN only supports ResizeBilinear layers with constant sizes. "
1924 "Input %1%. Node %2% %3%")
1925 % inputs[1].m_IndexedValue->GetNode().name()
1926 % nodeDef.name()
1927 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001928 }
1929 ParsedConstTfOperation<int32_t>* sizeNode =
1930 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1931
telsoa01c577f2c2018-08-31 09:22:23 +01001932 // Checks the align_corners attribute is not set.
surmeh01bceff2f2018-03-29 16:29:27 +01001933 if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
1934 {
telsoa01c577f2c2018-08-31 09:22:23 +01001935 throw ParseException(
1936 boost::str(
1937 boost::format(
1938 "ArmNN only supports ResizeBilinear layers with align_corners set to false. "
1939 "Node %1% %2%")
1940 % nodeDef.name()
1941 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001942 }
1943
telsoa01c577f2c2018-08-31 09:22:23 +01001944 // Data for the parsed tensor args (size) must be stored locally.
surmeh01bceff2f2018-03-29 16:29:27 +01001945 std::vector<int32_t> sizeTensorData;
1946 ConstTensor sizeTensor = sizeNode->GetConstTensor(false, sizeTensorData);
1947
telsoa01c577f2c2018-08-31 09:22:23 +01001948 // The descriptor only has target height and width attributes, which we get from the size tensor.
surmeh01bceff2f2018-03-29 16:29:27 +01001949 ResizeBilinearDescriptor desc;
1950 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
1951 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
jimfly018a121502018-12-06 16:19:52 +00001952 desc.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01001953
1954 IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, nodeDef.name().c_str());
1955
1956 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1957 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
telsoa01c577f2c2018-08-31 09:22:23 +01001958 // The input shape is always in BHWC format, this will be swizzled below; for now,
1959 // get the batch and channels to make up the ArmNN output shape with the target size.
surmeh01bceff2f2018-03-29 16:29:27 +01001960 unsigned int outBatch = inputTensorInfo.GetShape()[0];
1961 unsigned int outChannels = inputTensorInfo.GetShape()[3];
1962 unsigned int outHeight = desc.m_TargetHeight;
1963 unsigned int outWidth = desc.m_TargetWidth;
jimfly018a121502018-12-06 16:19:52 +00001964 TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
telsoa01c577f2c2018-08-31 09:22:23 +01001965 // The output DataType is always Float32, regardless of the input DataType.
surmeh01bceff2f2018-03-29 16:29:27 +01001966 const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
1967 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1968
jimfly018a121502018-12-06 16:19:52 +00001969 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01001970
1971 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1972}
1973
1974TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
1975{
1976 BOOST_ASSERT(nodeDef.op() == "Squeeze");
1977 tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
1978
1979 DataType type;
1980 if (tfDataType == tensorflow::DT_FLOAT)
1981 {
1982 type = DataType::Float32;
1983 }
1984 else if (tfDataType == tensorflow::DT_INT32)
1985 {
1986 type = DataType::Signed32;
1987 }
1988 else
1989 {
telsoa01c577f2c2018-08-31 09:22:23 +01001990 throw ParseException(
1991 boost::str(
1992 boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
1993 % tensorflow::DataType_Name(tfDataType)
1994 % nodeDef.name()
1995 % CHECK_LOCATION().AsString()));
1996 }
1997
1998
1999 if (inputTensorInfo.GetNumDimensions() > 4)
2000 {
2001 throw ParseException(
2002 boost::str(
2003 boost::format(
2004 "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
2005 % inputTensorInfo.GetNumDimensions()
2006 % nodeDef.name()
2007 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002008 }
2009
2010 std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
telsoa01c577f2c2018-08-31 09:22:23 +01002011 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2012
surmeh01bceff2f2018-03-29 16:29:27 +01002013 if (squeezeDims.empty())
2014 {
telsoa01c577f2c2018-08-31 09:22:23 +01002015 squeezeDims.assign(dimensionSequence,
2016 dimensionSequence+inputTensorInfo.GetNumDimensions());
surmeh01bceff2f2018-03-29 16:29:27 +01002017 }
2018
2019 std::vector<uint32_t> outputDims;
2020 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2021 {
telsoa01c577f2c2018-08-31 09:22:23 +01002022 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2023 auto currentDimension = inputTensorInfo.GetShape()[i];
2024 if (skipSqueeze || currentDimension != 1)
surmeh01bceff2f2018-03-29 16:29:27 +01002025 {
telsoa01c577f2c2018-08-31 09:22:23 +01002026 outputDims.push_back(currentDimension);
surmeh01bceff2f2018-03-29 16:29:27 +01002027 }
2028 }
2029
2030 if (outputDims.size() > 4)
2031 {
telsoa01c577f2c2018-08-31 09:22:23 +01002032 throw ParseException(
2033 boost::str(
2034 boost::format(
2035 "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
2036 % outputDims.size()
2037 % nodeDef.name()
2038 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002039 }
2040
telsoa01c577f2c2018-08-31 09:22:23 +01002041 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2042 outputDims.data());
2043
2044 TensorInfo outTensorInfo = inputTensorInfo;
2045 outTensorInfo.SetShape(outShape);
2046 outTensorInfo.SetDataType(type);
surmeh01bceff2f2018-03-29 16:29:27 +01002047
2048 return outTensorInfo;
2049}
2050
2051ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2052{
2053 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2054
2055 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2056 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2057
2058 TensorInfo outputInfo;
2059 outputInfo = OutputShapeOfSqueeze(nodeDef, inputTensorInfo);
2060
2061 ReshapeDescriptor reshapeDesc;
2062 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2063 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2064 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2065 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2066
2067 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2068}
2069
2070ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2071{
2072 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2073
2074 NormalizationDescriptor normalizationDescriptor;
2075 normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2076 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
2077 normalizationDescriptor.m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef, "alpha");
2078 normalizationDescriptor.m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef, "beta");
2079 normalizationDescriptor.m_K = ReadMandatoryNodeFloatAttribute(nodeDef, "bias");
2080 normalizationDescriptor.m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef, "depth_radius");
ruoyan018174f362018-12-04 18:24:08 +00002081 normalizationDescriptor.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002082
2083 // The window size must be an odd value. For a window size of (2 * n + 1), TensorFlow defines depth_radius = n.
2084 normalizationDescriptor.m_NormSize = normalizationDescriptor.m_NormSize * 2 + 1;
2085
2086 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002087 IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor,
2088 nodeDef.name().c_str());
ruoyan018174f362018-12-04 18:24:08 +00002089 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2090 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
surmeh01bceff2f2018-03-29 16:29:27 +01002091
2092 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2093}
2094
/// A ParsedTfOperation for a MatMul node.
/// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because
/// MatMul nodes are often used for the first part of a biased FullyConnected (MatMul followed
/// by Add) and in these cases armnn doesn't need a separate layer for the MatMul.
///
class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    ParsedMatMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
        : DeferredSingleLayerParsedTfOperation(parser, node)
    {
    }

    void CreateLayerDeferred() override
    {
        // Reached only when no fusing consumer claimed this MatMul: create a
        // standalone FullyConnected layer with no bias weights.
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
    }
};
2114
ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    // Defers the creation of the layer (see ParsedMatMulTfOperation): a
    // following Add node may absorb this MatMul into a biased FullyConnected.
    return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
}
2120
/// A ParsedTfOperation for a Mul node.
/// Creation of the armnn Mul layer is deferred until it is actually needed, because Mul nodes
/// are also used for the first part of a leaky relu activation function (Mul followed by Maximum)
/// and in these cases armnn doesn't need a separate layer for the Mul.
///
class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    ParsedMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
        : DeferredSingleLayerParsedTfOperation(parser, node)
    {
    }

    void CreateLayerDeferred() override
    {
        // Reached only when no fusing consumer (e.g. a leaky-relu Maximum)
        // claimed this Mul: create a standalone multiplication layer.
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
    }
};
2140
ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);

    // Defers layer creation (see ParsedMulTfOperation): a following Maximum
    // node may absorb this Mul into a leaky-relu activation.
    return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
}
2147
2148ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
2149 const tensorflow::GraphDef& graphDef)
2150{
2151 boost::ignore_unused(graphDef);
2152
2153 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
2154
2155 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
2156
2157 auto it = m_InputShapes.find(nodeDef.name());
2158 if (it == m_InputShapes.end())
2159 {
telsoa01c577f2c2018-08-31 09:22:23 +01002160 throw ParseException(
2161 boost::str(
2162 boost::format(
2163 "Missing input shape for Placeholder '%1%' %2%")
2164 % nodeDef.name()
2165 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002166 }
2167 TensorInfo tensorInfo(it->second, DataType::Float32);
2168
2169 IConnectableLayer* const layer = m_Network->AddInputLayer(layerId, nodeDef.name().c_str());
2170
2171 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2172
2173 TrackInputBinding(layer, layerId, tensorInfo);
2174
2175 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2176}
2177
ParsedTfOperationPtr TfParser::ParseRealDiv(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    // RealDiv maps directly onto an ArmNN division layer.
    boost::ignore_unused(graphDef);
    return AddRealDivLayer(nodeDef);
}
2183
surmeh01bceff2f2018-03-29 16:29:27 +01002184ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
2185 const tensorflow::GraphDef& graphDef)
2186{
2187 boost::ignore_unused(graphDef);
2188
2189 ActivationDescriptor activationDesc;
2190 activationDesc.m_Function = ActivationFunction::ReLu;
2191 return AddActivationLayer(nodeDef, activationDesc);
2192}
2193
2194ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
2195 const tensorflow::GraphDef& graphDef)
2196{
2197 boost::ignore_unused(graphDef);
2198
2199 ActivationDescriptor activationDesc;
2200 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2201 activationDesc.m_A = 6.0f;
2202 activationDesc.m_B = 0.0f;
2203
2204 return AddActivationLayer(nodeDef, activationDesc);
2205}
2206
2207ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
2208 const tensorflow::GraphDef& graphDef)
2209{
2210 boost::ignore_unused(graphDef);
2211
2212 ActivationDescriptor activationDesc;
2213 activationDesc.m_Function = ActivationFunction::Sigmoid;
2214
2215 return AddActivationLayer(nodeDef, activationDesc);
2216}
2217
2218ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
2219 const tensorflow::GraphDef& graphDef)
2220{
2221 boost::ignore_unused(graphDef);
2222
2223 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2224
2225 SoftmaxDescriptor softmaxDescriptor;
2226 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(softmaxDescriptor, nodeDef.name().c_str());
2227
2228 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2229 prevLayerSlot.Connect(layer->GetInputSlot(0));
2230 layer->GetOutputSlot(0).SetTensorInfo(prevLayerSlot.GetTensorInfo());
2231
2232 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2233}
2234
2235ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
2236 const tensorflow::GraphDef& graphDef)
2237{
2238 boost::ignore_unused(graphDef);
2239
2240 ActivationDescriptor activationDesc;
2241 activationDesc.m_Function = ActivationFunction::SoftReLu;
2242
2243 return AddActivationLayer(nodeDef, activationDesc);
2244}
2245
2246ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2247{
2248 boost::ignore_unused(graphDef);
2249
2250 ActivationDescriptor activationDesc;
2251 activationDesc.m_Function = ActivationFunction::TanH;
2252 activationDesc.m_A = 1.0f;
2253 activationDesc.m_B = 1.0f;
2254
2255 return AddActivationLayer(nodeDef, activationDesc);
2256}
2257
2258ParsedTfOperationPtr TfParser::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
2259 ActivationDescriptor& activationDesc)
2260{
2261 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2262
2263 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, nodeDef.name().c_str());
2264
2265 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2266 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2267 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2268 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2269}
2270
ParsedTfOperationPtr TfParser::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
                                            const tensorflow::GraphDef& graphDef)
{
    // MaxPool delegates to the shared 2D pooling parser with Max algorithm.
    return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
}
2276
ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
                                            const tensorflow::GraphDef& graphDef)
{
    // AvgPool delegates to the shared 2D pooling parser with Average algorithm.
    return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
}
2282
2283ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
2284 const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
2285{
2286 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2287 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2288 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2289
2290 if (inputs.size() != 1)
2291 {
telsoa01c577f2c2018-08-31 09:22:23 +01002292 throw ParseException(
2293 boost::str(
2294 boost::format(
2295 "2D Pooling expects one input!. Got %1% for Node %2% %3%")
2296 % inputs.size()
2297 % nodeDef.name()
2298 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002299 }
2300
2301 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
2302 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
2303 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
2304 std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef, "ksize"); // size of pool windows
2305
2306 Pooling2dDescriptor pooling2dDescriptor;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002307 pooling2dDescriptor.m_PoolType = pooltype;
2308 pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
surmeh01bceff2f2018-03-29 16:29:27 +01002309 pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;
2310
telsoa01c577f2c2018-08-31 09:22:23 +01002311 CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
FrancisMurtaghf005e312018-12-06 15:26:04 +00002312 DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
2313 pooling2dDescriptor.m_DataLayout = dataLayout;
2314 DataLayoutIndexed dataLayoutIndexed(dataLayout);
telsoa01c577f2c2018-08-31 09:22:23 +01002315
FrancisMurtaghf005e312018-12-06 15:26:04 +00002316 pooling2dDescriptor.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
2317 pooling2dDescriptor.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
2318 pooling2dDescriptor.m_PoolWidth = ksize[dataLayoutIndexed.GetWidthIndex()];
2319 pooling2dDescriptor.m_PoolHeight = ksize[dataLayoutIndexed.GetHeightIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01002320
FrancisMurtaghf005e312018-12-06 15:26:04 +00002321 uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
2322 uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01002323
2324 bool padding = false;
2325 TensorInfo outputInfo;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002326 unsigned int outputHeight = 0;
2327 unsigned int outputWidth = 0;
telsoa01c577f2c2018-08-31 09:22:23 +01002328
2329 CHECK_PADDING_TYPE(nodeDef, paddingString);
2330
surmeh01bceff2f2018-03-29 16:29:27 +01002331 if (paddingString == "SAME")
2332 {
2333 padding = true;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002334
2335 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
2336 static_cast<float>(pooling2dDescriptor.m_StrideY)));
2337 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
2338 static_cast<float>(pooling2dDescriptor.m_StrideX)));
surmeh01bceff2f2018-03-29 16:29:27 +01002339 }
2340 else if (paddingString == "VALID")
2341 {
2342 padding = false;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002343
2344 outputHeight = static_cast<uint32_t>(ceil(
2345 static_cast<float>(inputHeight - pooling2dDescriptor.m_PoolHeight + 1) /
2346 static_cast<float>(pooling2dDescriptor.m_StrideY)));
2347 outputWidth = static_cast<uint32_t>(ceil(
2348 static_cast<float>(inputWidth - pooling2dDescriptor.m_PoolWidth + 1) /
2349 static_cast<float>(pooling2dDescriptor.m_StrideX)));
2350 }
2351
2352 switch (dataLayout)
2353 {
2354 case DataLayout::NHWC:
2355 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
2356 outputHeight,
2357 outputWidth,
2358 inputTensorInfo.GetShape()[3] },
2359 DataType::Float32);
2360 break;
2361 case DataLayout::NCHW:
2362 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
2363 inputTensorInfo.GetShape()[1],
2364 outputHeight,
2365 outputWidth },
2366 DataType::Float32);
2367 break;
surmeh01bceff2f2018-03-29 16:29:27 +01002368 }
surmeh01bceff2f2018-03-29 16:29:27 +01002369
2370 CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX,
FrancisMurtaghf005e312018-12-06 15:26:04 +00002371 pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01002372 CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY,
FrancisMurtaghf005e312018-12-06 15:26:04 +00002373 pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01002374
2375
2376 IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
2377 if (layer == nullptr)
2378 {
telsoa01c577f2c2018-08-31 09:22:23 +01002379 throw ParseException(
2380 boost::str(
2381 boost::format(
2382 "Failed to add pooling2d layer for %1% %2%")
2383 % nodeDef.name()
2384 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002385 }
2386
2387 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2388
FrancisMurtaghf005e312018-12-06 15:26:04 +00002389 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01002390
2391 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2392}
2393
2394ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
2395{
2396 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2397
2398 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2399 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2400
2401 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
2402 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
2403
2404 if (isBiasAdd)
2405 {
2406 // BiasAdd takes bias as a 1D tensor. We need to add a reshape layer to create a 4D tensor
2407 // with the same data in the correct dimension for broadcast in addition.
2408 if(input1Info.GetNumDimensions() != 1)
2409 {
telsoa01c577f2c2018-08-31 09:22:23 +01002410 throw ParseException(
2411 boost::str(
2412 boost::format(
2413 "Unsupported bias for BiasAdd. It should be a 1D vector. "
2414 "Got %1% dimensions for input %2%. Node %3% %4%")
2415 % input1Info.GetNumDimensions()
2416 % inputs[1].m_IndexedValue->GetNode().name()
2417 % nodeDef.name()
2418 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002419 }
2420
2421 const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
surmeh01bceff2f2018-03-29 16:29:27 +01002422
telsoa01c577f2c2018-08-31 09:22:23 +01002423 CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
saoste01bbd40612018-08-28 15:41:51 +01002424 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002425 }
2426 else
2427 {
2428 if (input0Info.GetNumDimensions() == 1)
2429 {
2430 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002431 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002432 }
2433
2434 if (input1Info.GetNumDimensions() == 1)
2435 {
2436 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002437 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002438 }
2439 }
2440
2441 IConnectableLayer* const layer = m_Network->AddAdditionLayer(nodeDef.name().c_str());
2442
2443 input0Slot->Connect(layer->GetInputSlot(0));
2444 input1Slot->Connect(layer->GetInputSlot(1));
2445
2446 if (input0Info.GetNumDimensions() == 1 && isBiasAdd == false)
2447 {
2448 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2449 }
2450 else
2451 {
2452 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2453 }
2454
2455 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2456}
2457
saoste01bbd40612018-08-28 15:41:51 +01002458ParsedTfOperationPtr TfParser::AddRealDivLayer(const tensorflow::NodeDef& nodeDef)
2459{
2460 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2461
2462 IConnectableLayer* const layer = m_Network->AddDivisionLayer(nodeDef.name().c_str());
2463 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2464 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2465
2466 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2467 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2468
2469
2470 if (input0NumDims < input1NumDims)
2471 {
2472 const bool isNHWC = true;
2473 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
2474 }
2475 if (input1NumDims < input0NumDims)
2476 {
2477 const bool isNHWC = true;
2478 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
2479 }
2480
2481 input0Slot->Connect(layer->GetInputSlot(0));
2482 input1Slot->Connect(layer->GetInputSlot(1));
2483
2484 if (input0NumDims < input1NumDims)
2485 {
2486 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2487 }
2488 else
2489 {
2490 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2491
2492 }
2493 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2494}
2495
Sadik Armagan975c09a2018-12-04 10:02:08 +00002496ParsedTfOperationPtr TfParser::AddMaximumLayer(const tensorflow::NodeDef& nodeDef)
2497{
2498 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2499
2500 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2501 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2502
2503 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2504 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2505
2506 if (input0NumDims < input1NumDims)
2507 {
2508 const bool isNHWC = true;
2509 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
2510 }
2511 if (input1NumDims < input0NumDims)
2512 {
2513 const bool isNHWC = true;
2514 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
2515 }
2516
2517 IConnectableLayer* const layer = m_Network->AddMaximumLayer(nodeDef.name().c_str());
2518
2519 input0Slot->Connect(layer->GetInputSlot(0));
2520 input1Slot->Connect(layer->GetInputSlot(1));
2521
2522 TensorInfo outputInfo = input0Slot->GetTensorInfo();
2523 std::vector<unsigned int> outputShape;
2524
2525 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
2526 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
2527
2528 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
2529 {
2530 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
2531 }
2532
2533 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
2534 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2535
2536 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2537}
2538
telsoa01c577f2c2018-08-31 09:22:23 +01002539IConnectableLayer* TfParser::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
2540{
2541 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2542
2543 IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
2544 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2545 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2546
2547 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2548 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2549
2550 if (input0NumDims < input1NumDims)
2551 {
2552 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002553 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01002554 }
2555 if (input1NumDims < input0NumDims)
2556 {
2557 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002558 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01002559 }
2560
2561 input0Slot->Connect(layer->GetInputSlot(0));
2562 input1Slot->Connect(layer->GetInputSlot(1));
2563
2564 if (input0NumDims < input1NumDims)
2565 {
2566 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2567 }
2568 else
2569 {
2570 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2571 }
2572 return layer;
2573}
2574
surmeh01bceff2f2018-03-29 16:29:27 +01002575IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
2576 const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
2577{
telsoa01c577f2c2018-08-31 09:22:23 +01002578 // Finds bias const (if applicable).
surmeh01bceff2f2018-03-29 16:29:27 +01002579 ParsedConstTfOperation<float>* biasNode = nullptr;
2580 if (addNodeDef != nullptr)
2581 {
2582 std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
telsoa01c577f2c2018-08-31 09:22:23 +01002583 // Finds our inputs.
surmeh01bceff2f2018-03-29 16:29:27 +01002584 if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
2585 {
2586 biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
2587 }
2588 else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
2589 {
2590 biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
2591 }
2592 else
2593 {
telsoa01c577f2c2018-08-31 09:22:23 +01002594 throw ParseException(
2595 boost::str(
2596 boost::format(
2597 "ArmNN only supports fully connected layers with constant bias. "
2598 "Inputs %1% and %2%. AddNode %3%. MatMulNode %4% %5%")
2599 % addInputs[0].m_IndexedValue->GetNode().name()
2600 % addInputs[1].m_IndexedValue->GetNode().name()
2601 % addNodeDef->name()
2602 % matMulNodeDef.name()
2603 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002604 }
2605 }
2606
telsoa01c577f2c2018-08-31 09:22:23 +01002607 // Finds matmul inputs.
surmeh01bceff2f2018-03-29 16:29:27 +01002608 ParsedConstTfOperation<float>* weightNode = nullptr;
2609 ParsedTfOperation* inputNode = nullptr;
2610 unsigned int inputIdx = 0;
2611 std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
2612 if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
2613 {
2614 weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
2615 inputNode = mulInputs[1].m_IndexedValue;
2616 inputIdx = mulInputs[1].m_Index;
2617 }
2618 else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
2619 {
2620 weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
2621 inputNode = mulInputs[0].m_IndexedValue;
2622 inputIdx = mulInputs[0].m_Index;
2623 }
2624 else
2625 {
telsoa01c577f2c2018-08-31 09:22:23 +01002626 throw ParseException(
2627 boost::str(
2628 boost::format(
2629 "ArmNN only supports fully connected layers with constant weights. "
2630 "Inputs %1% and %2%. MatMulNode %3% %4%")
2631 % mulInputs[0].m_IndexedValue->GetNode().name()
2632 % mulInputs[1].m_IndexedValue->GetNode().name()
2633 % matMulNodeDef.name()
2634 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002635 }
2636
2637 std::vector<float> weightTensorData;
telsoa01c577f2c2018-08-31 09:22:23 +01002638 // Handles weight.
surmeh01bceff2f2018-03-29 16:29:27 +01002639 ConstTensor weights = weightNode->GetConstTensor(false, weightTensorData);
2640
2641 FullyConnectedDescriptor desc;
2642 desc.m_BiasEnabled = addNodeDef != nullptr;
2643
2644 IConnectableLayer* layer = nullptr;
telsoa01c577f2c2018-08-31 09:22:23 +01002645 // Makes the layer.
surmeh01bceff2f2018-03-29 16:29:27 +01002646 if (addNodeDef != nullptr)
2647 {
2648 std::vector<float> biasTensorData;
2649 ConstTensor biases = biasNode->GetConstTensor(false, biasTensorData);
2650
2651 if (weights.GetShape()[1] != biases.GetShape()[0])
2652 {
telsoa01c577f2c2018-08-31 09:22:23 +01002653 throw ParseException(
2654 boost::str(
2655 boost::format(
2656 "Shape of matmul weights and bias do not match. "
2657 "AddNode %1%. MatMulNode %2% %3%")
2658 % addNodeDef->name()
2659 % matMulNodeDef.name()
2660 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002661 }
2662
2663 layer = m_Network->AddFullyConnectedLayer(desc, weights, biases, armnnLayerName);
2664 }
2665 else
2666 {
2667 layer = m_Network->AddFullyConnectedLayer(desc, weights, armnnLayerName);
2668 }
2669
2670 BOOST_ASSERT(layer != nullptr);
2671
2672 inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
2673 unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];
2674
telsoa01c577f2c2018-08-31 09:22:23 +01002675 // Handles output.
surmeh01bceff2f2018-03-29 16:29:27 +01002676 TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
2677 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2678 return layer;
2679}
2680
2681void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2682{
telsoa01c577f2c2018-08-31 09:22:23 +01002683 // Gets the type of the node (assume float).
surmeh01bceff2f2018-03-29 16:29:27 +01002684 tensorflow::DataType type = tensorflow::DT_FLOAT;
2685 if (nodeDef.attr().count("T") != 0)
2686 {
2687 auto attr = nodeDef.attr().at("T");
2688 type = attr.type();
2689 }
2690 else if (nodeDef.attr().count("dtype") != 0)
2691 {
2692 auto attr = nodeDef.attr().at("dtype");
2693 type = attr.type();
2694 }
2695
2696 if (type != tensorflow::DT_FLOAT && nodeDef.op() != "Const")
2697 {
telsoa01c577f2c2018-08-31 09:22:23 +01002698 throw ParseException(
2699 boost::str(
2700 boost::format(
2701 "Currently only FLOAT is supported for tensorflow nodes (apart from Const). "
2702 "Got %1% for Node %2% %3%")
2703 % tensorflow::DataType_Name(type)
2704 % nodeDef.name()
2705 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002706 }
2707
2708 const std::string& operation = nodeDef.op();
2709 auto it = ms_OperationNameToParsingFunctions.find(operation);
2710 if (it != ms_OperationNameToParsingFunctions.end())
2711 {
2712 auto func = it->second;
2713 ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
2714 ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
2715
telsoa01c577f2c2018-08-31 09:22:23 +01002716 // Stores the parsed operation so that dependent layers can connect to it.
surmeh01bceff2f2018-03-29 16:29:27 +01002717 auto it = m_ParsedTfOperations.find(nodeDef.name());
2718 if (it != m_ParsedTfOperations.end())
2719 {
2720 throw ParseException(boost::str(boost::format("Name %1% used by more than one node") % nodeDef.name()));
2721 }
2722 m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
2723
telsoa01c577f2c2018-08-31 09:22:23 +01002724 // If this node was requested as an output from the network, then adds an ArmNN output layer.
surmeh01bceff2f2018-03-29 16:29:27 +01002725 if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
2726 m_RequestedOutputs.end())
2727 {
2728 auto outId = ParseOutputId(nodeDef.name());
2729 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
2730 IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
2731
2732 TensorInfo tensorInfo = prevSlot.GetTensorInfo();
2733
2734 IConnectableLayer* outputLayer = m_Network->AddOutputLayer(layerId, nodeDef.name().c_str());
2735
2736 prevSlot.Connect(outputLayer->GetInputSlot(0));
2737
2738 TrackOutputBinding(outputLayer, layerId, tensorInfo);
2739 }
2740 }
2741 else
2742 {
telsoa01c577f2c2018-08-31 09:22:23 +01002743 throw ParseException(
2744 boost::str(
2745 boost::format(
2746 "Unsupported operation %1% in tensorflow::GraphDef %2%")
2747 % operation
2748 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002749 }
2750}
2751
2752void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
2753{
telsoa01c577f2c2018-08-31 09:22:23 +01002754 // Adds all nodes to our map.
surmeh01bceff2f2018-03-29 16:29:27 +01002755 m_NodesByName.clear();
2756 m_NetworkInputsBindingInfo.clear();
2757 m_NetworkOutputsBindingInfo.clear();
2758
2759 for (int i = 0; i < graphDef.node_size(); ++i)
2760 {
2761 const tensorflow::NodeDef& node = graphDef.node(i);
2762 m_NodesByName[node.name()] = &node;
2763 }
2764
telsoa01c577f2c2018-08-31 09:22:23 +01002765 // Finds the output nodes the user requested.
surmeh01bceff2f2018-03-29 16:29:27 +01002766 std::vector<const tensorflow::NodeDef*> targetNodes;
2767 for (const std::string& requestedOutputName : m_RequestedOutputs)
2768 {
2769 auto nodeIt = m_NodesByName.find(requestedOutputName);
2770 if (nodeIt == m_NodesByName.end())
2771 {
telsoa01c577f2c2018-08-31 09:22:23 +01002772 throw ParseException(
2773 boost::str(
2774 boost::format(
2775 "Couldn't find requested output node '%1%' in graph %2%")
2776 % requestedOutputName
2777 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002778 }
2779 targetNodes.push_back(nodeIt->second);
2780 }
2781
telsoa01c577f2c2018-08-31 09:22:23 +01002782 // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01002783 std::vector<const tensorflow::NodeDef*> sortedNodes;
2784 if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
2785 targetNodes,
2786 [this](const tensorflow::NodeDef* node)
2787 {
2788 auto outputs = GetTfInputNodes(*node);
2789 std::vector<const tensorflow::NodeDef*> nodesOnly;
2790 for (const auto & o : outputs) {
2791 nodesOnly.push_back(o.m_IndexedValue);
2792 }
2793 return nodesOnly;
2794 },
2795 sortedNodes))
2796 {
telsoa01c577f2c2018-08-31 09:22:23 +01002797 throw ParseException(
2798 boost::str(
2799 boost::format(
2800 "Cycle detected in graph %1%")
2801 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002802 }
2803
telsoa01c577f2c2018-08-31 09:22:23 +01002804 // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01002805 for (const auto& it : sortedNodes)
2806 {
2807 const tensorflow::NodeDef& currentNode = *it;
2808 LoadNodeDef(currentNode, graphDef);
2809 }
2810}
2811
2812INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
2813 const std::map<std::string, TensorShape>& inputShapes,
2814 const std::vector<std::string>& requestedOutputs)
2815{
2816 FILE* fd = fopen(graphFile, "r");
2817
2818 if (fd == nullptr)
2819 {
telsoa01c577f2c2018-08-31 09:22:23 +01002820 throw FileNotFoundException(
2821 boost::str(
2822 boost::format(
2823 "Graph file %1% failed to open %2%")
2824 % graphFile
2825 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002826 }
2827
telsoa01c577f2c2018-08-31 09:22:23 +01002828 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01002829 tensorflow::GraphDef graphDef;
2830 auto input = new google::protobuf::io::FileInputStream(fileno(fd));
2831 bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
2832 delete input;
2833 fclose(fd);
2834
2835 if (!success)
2836 {
telsoa01c577f2c2018-08-31 09:22:23 +01002837 throw ParseException(
2838 boost::str(
2839 boost::format(
2840 "Failed to parse graph file %1%")
2841 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002842 }
2843
2844 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
2845}
2846
2847INetworkPtr TfParser::CreateNetworkFromString(const char* protoText,
2848 const std::map<std::string, TensorShape>& inputShapes,
2849 const std::vector<std::string>& requestedOutputs)
2850{
telsoa01c577f2c2018-08-31 09:22:23 +01002851 // Parses the string into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01002852 tensorflow::GraphDef graphDef;
2853 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
2854
2855 if (!success)
2856 {
telsoa01c577f2c2018-08-31 09:22:23 +01002857 throw ParseException(
2858 boost::str(
2859 boost::format(
2860 "Failed to parse graph file %1%")
2861 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002862 }
2863
2864 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
2865}
2866
2867INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
2868 const std::map<std::string, TensorShape>& inputShapes,
2869 const std::vector<std::string>& requestedOutputs)
2870{
2871 FILE* fd = fopen(graphFile, "rb");
2872
2873 if (fd == nullptr)
2874 {
telsoa01c577f2c2018-08-31 09:22:23 +01002875 throw FileNotFoundException(
2876 boost::str(
2877 boost::format(
2878 "Graph file %1% failed to open %2%")
2879 % graphFile
2880 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002881 }
2882
telsoa01c577f2c2018-08-31 09:22:23 +01002883 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01002884 tensorflow::GraphDef graphDef;
2885
2886 google::protobuf::io::FileInputStream inStream(fileno(fd));
2887 google::protobuf::io::CodedInputStream codedStream(&inStream);
2888 codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
2889 bool success = graphDef.ParseFromCodedStream(&codedStream);
2890 fclose(fd);
2891
2892 if (!success)
2893 {
telsoa01c577f2c2018-08-31 09:22:23 +01002894 throw ParseException(
2895 boost::str(
2896 boost::format(
2897 "Failed to parse protobuf file %1% %2%")
2898 % graphFile
2899 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002900 }
2901
2902 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
2903}
2904
2905INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
2906 const std::map<std::string, TensorShape>& inputShapes,
2907 const std::vector<std::string>& requestedOutputs)
2908{
2909 m_Network = INetwork::Create();
2910
2911 m_InputShapes = inputShapes;
2912 if (requestedOutputs.size() == 0)
2913 {
telsoa01c577f2c2018-08-31 09:22:23 +01002914 throw ParseException(
2915 boost::str(
2916 boost::format(
2917 "requestedOutputs must have at least one entry %1%")
2918 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002919 }
2920 m_RequestedOutputs = requestedOutputs;
2921
2922 try
2923 {
2924 LoadGraphDef(graphDef);
2925 }
2926 catch (const ParseException& e)
2927 {
2928 Cleanup();
2929 throw e;
2930 }
2931
2932 Cleanup();
2933
2934 return std::move(m_Network);
2935}
2936
2937void TfParser::Cleanup()
2938{
telsoa01c577f2c2018-08-31 09:22:23 +01002939 // Cleanup, in case we reuse this parser.
surmeh01bceff2f2018-03-29 16:29:27 +01002940 m_InputShapes.clear();
2941 m_RequestedOutputs.clear();
2942 m_NodesByName.clear();
2943 m_ParsedTfOperations.clear();
2944}
2945
BindingPointInfo TfParser::GetNetworkInputBindingInfo(const std::string& name) const
{
    // Looks up the binding id and tensor info of a network input layer by name.
    // Throws InvalidArgumentException if no input with that name was registered.
    return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
}
2950
BindingPointInfo TfParser::GetNetworkOutputBindingInfo(const std::string& name) const
{
    // Looks up the binding id and tensor info of a network output layer by name.
    // Throws InvalidArgumentException if no output with that name was registered.
    return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
}
2955
2956std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(const std::string& layerName,
2957 const char* bindingPointDesc,
2958 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
2959{
2960 auto it = nameToBindingInfo.find(layerName);
2961 if (it == nameToBindingInfo.end())
2962 {
telsoa01c577f2c2018-08-31 09:22:23 +01002963 throw InvalidArgumentException(
2964 boost::str(
2965 boost::format(
2966 "Unknown %1% '%2%' %3%")
2967 % bindingPointDesc
2968 % layerName
2969 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002970 }
2971 return it->second;
2972}
2973
2974void TfParser::TrackInputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
2975{
2976 return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
2977}
2978
2979void TfParser::TrackOutputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
2980{
2981 return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
2982}
2983
2984void TfParser::TrackBindingPoint(IConnectableLayer* layer,
2985 LayerBindingId id,
2986 const TensorInfo& tensorInfo,
2987 const char* bindingPointDesc,
2988 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
2989{
2990 const std::string layerName = layer->GetName();
2991 auto it = nameToBindingInfo.find(layerName);
2992 if (it == nameToBindingInfo.end())
2993 {
2994 nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
2995 }
2996 else
2997 {
telsoa01c577f2c2018-08-31 09:22:23 +01002998 throw ParseException(
2999 boost::str(
3000 boost::format(
3001 "Id %1% used by more than one %2% layer %3%")
3002 % id
3003 % bindingPointDesc
3004 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003005 }
3006}
3007
3008} // namespace armnnTfParser