blob: eca393663b0e0fbed46b55d894f56aa6581cdaf1 [file] [log] [blame]
surmeh01bceff2f2018-03-29 16:29:27 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
surmeh01bceff2f2018-03-29 16:29:27 +01004//
5#include "TfParser.hpp"
6
7#include <armnn/INetwork.hpp>
8#include <armnn/Utils.hpp>
9#include <armnn/TypesUtils.hpp>
10#include <armnn/Exceptions.hpp>
11#include <armnn/Descriptors.hpp>
12
13#include <GraphTopologicalSort.hpp>
14#include <Permute.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010015#include <VerificationHelpers.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010016
17#include <google/protobuf/io/zero_copy_stream_impl.h>
18#include <google/protobuf/text_format.h>
19
20#include "tensorflow/core/framework/graph.pb.h"
21#include "tensorflow/core/framework/node_def.pb.h"
22#include "tensorflow/core/framework/types.pb.h"
23#include "tensorflow/core/framework/tensor.pb.h"
24#include "tensorflow/core/framework/tensor_shape.pb.h"
25
26#include <boost/assert.hpp>
27#include <boost/format.hpp>
28#include <boost/core/ignore_unused.hpp>
29#include <boost/log/trivial.hpp>
30#include <boost/numeric/conversion/cast.hpp>
31#include <boost/polymorphic_cast.hpp>
32
33#include <memory>
34#include <sstream>
35#include <numeric>
36#include <functional>
37
38using namespace armnn;
39
40namespace armnnTfParser
41{
42namespace
43{
44
45const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
46const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
47
48IConnectableLayer* AddSwizzleLayer(INetwork& network, IOutputSlot& input, const PermutationVector& mapping,
49 const std::string& name)
50{
telsoa01c577f2c2018-08-31 09:22:23 +010051 // Adds swizzle layer.
surmeh01bceff2f2018-03-29 16:29:27 +010052 IConnectableLayer* const layer = network.AddPermuteLayer(mapping, name.c_str());
53
telsoa01c577f2c2018-08-31 09:22:23 +010054 // Connects intput to swizzle layer.
surmeh01bceff2f2018-03-29 16:29:27 +010055 input.Connect(layer->GetInputSlot(0));
56
telsoa01c577f2c2018-08-31 09:22:23 +010057 // Sets up swizzled output.
surmeh01bceff2f2018-03-29 16:29:27 +010058 const TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mapping);
59 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
60
61 return layer;
62}
63
64IConnectableLayer* SwizzleInDeswizzleOut(INetwork& network, IOutputSlot& input, IConnectableLayer& layer,
65 const std::string& name)
66{
telsoa01c577f2c2018-08-31 09:22:23 +010067 // Adds swizzle layer.
surmeh01bceff2f2018-03-29 16:29:27 +010068 IConnectableLayer* const swizzleLayer = AddSwizzleLayer(network, input, NHWCToArmNN, "swizzle_for-" + name);
69
telsoa01c577f2c2018-08-31 09:22:23 +010070 // Connects swizzledInput to layer.
surmeh01bceff2f2018-03-29 16:29:27 +010071 swizzleLayer->GetOutputSlot(0).Connect(layer.GetInputSlot(0));
72
telsoa01c577f2c2018-08-31 09:22:23 +010073 // Adds deswizzle layer.
surmeh01bceff2f2018-03-29 16:29:27 +010074 IConnectableLayer* const deswizzleLayer = AddSwizzleLayer(network, layer.GetOutputSlot(0), ArmNNToNHWC,
75 "deswizzle_for-" + name);
76
77 return deswizzleLayer;
78}
79
80template <typename Callable>
81void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
82 const std::string& attribName,
83 tensorflow::AttrValue::ValueCase expectedValueCase,
84 Callable callable)
85{
86 auto iter = nodeDef.attr().find(attribName);
87 if (iter != nodeDef.attr().end())
88 {
89 const auto& attrValue = iter->second;
90 if (attrValue.value_case() == expectedValueCase)
91 {
92 callable(attrValue);
93 }
94 else
95 {
telsoa01c577f2c2018-08-31 09:22:23 +010096 throw ParseException(
97 boost::str(
98 boost::format(
99 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
100 "but found %4% instead %5%")
101 % attribName
102 % nodeDef.name()
103 % static_cast<int>(expectedValueCase)
104 % static_cast<int>(attrValue.value_case())
105 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100106 }
107 }
108 else
109 {
telsoa01c577f2c2018-08-31 09:22:23 +0100110 throw ParseException(
111 boost::str(
112 boost::format(
113 "Could not find required attribute %1% in node %2% %3%")
114 % attribName
115 % nodeDef.name()
116 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100117 }
118}
119
120template <typename Callable>
121void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
122 const std::string& attribName,
123 tensorflow::AttrValue::ValueCase expectedValueCase,
124 Callable callable)
125{
126 auto iter = nodeDef.attr().find(attribName);
127 if (iter != nodeDef.attr().end())
128 {
129 const auto& attrValue = iter->second;
130 if (attrValue.value_case() == expectedValueCase)
131 {
132 callable(attrValue);
133 }
134 else
135 {
telsoa01c577f2c2018-08-31 09:22:23 +0100136 throw ParseException(
137 boost::str(
138 boost::format(
139 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
140 "but found %4% instead %5%")
141 % attribName
142 % nodeDef.name()
143 % static_cast<int>(expectedValueCase)
144 % static_cast<int>(attrValue.value_case())
145 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100146 }
147 }
148}
149
150float ReadMandatoryNodeFloatAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
151{
152 float attribValue = 0.0f;
153 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
154 [&attribValue](const tensorflow::AttrValue& attrValue)
155 {
156 attribValue = attrValue.f();
157 });
158 return attribValue;
159}
160
161uint32_t ReadMandatoryNodeUint32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
162{
163 uint32_t attribValue = 0u;
164 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
165 [&attribValue](const tensorflow::AttrValue& attrValue)
166 {
167 attribValue = static_cast<uint32_t>(attrValue.i());
168 });
169 return attribValue;
170}
171
172std::string ReadMandatoryNodeStringAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
173{
174 std::string attribValue = "";
175 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
176 [&attribValue](const tensorflow::AttrValue& attrValue)
177 {
178 attribValue = attrValue.s();
179 });
180 return attribValue;
181}
182
183std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
184 const std::string& name)
185{
186 std::vector<uint32_t> attriList;
187 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
188 [&attriList](const tensorflow::AttrValue& attrValue)
189 {
190 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
191 {
192 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
193 }
194 });
195
196 return attriList;
197}
198
199std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
200 const std::string& name)
201{
202 std::vector<uint32_t> attriList;
203 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
204 [&attriList](const tensorflow::AttrValue& attrValue)
205 {
206 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
207 {
208 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
209 }
210 });
211
212 return attriList;
213}
214
215bool ReadOptionalNodeBoolAttribute(const tensorflow::NodeDef& nodeDef,
216 const std::string& name,
217 bool defaultValue = false)
218{
219 bool attribValue = defaultValue;
220 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
221 [&attribValue](const tensorflow::AttrValue& attrValue)
222 {
223 attribValue = attrValue.b();
224 });
225 return attribValue;
226}
227
228tensorflow::DataType ReadMandatoryNodeTypeAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
229{
230 tensorflow::DataType attribValue = tensorflow::DT_INVALID;
231 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
232 [&attribValue](const tensorflow::AttrValue& attrValue)
233 {
234 attribValue = attrValue.type();
235 });
236 return attribValue;
237}
238
239TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& targetDims)
240{
241 std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
242 const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);
243
244 if (stretchDim != targetDims.end())
245 {
246 if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
247 {
telsoa01c577f2c2018-08-31 09:22:23 +0100248 throw ParseException(
249 boost::str(
250 boost::format(
251 "At most one component of shape can be -1 %1%")
252 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100253 }
254
telsoa01c577f2c2018-08-31 09:22:23 +0100255 auto targetNumElements =
256 boost::numeric_cast<unsigned int>(
257 std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
surmeh01bceff2f2018-03-29 16:29:27 +0100258 auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
259 outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
260 }
261
262 TensorInfo reshapeInfo = input;
263 reshapeInfo.SetShape(TensorShape{ static_cast<unsigned int>(outDims.size()), outDims.data() });
264
265 return reshapeInfo;
266}
267
telsoa01c577f2c2018-08-31 09:22:23 +0100268// We need the input0Slot to guide the reshape for input1Slot.
saoste01bbd40612018-08-28 15:41:51 +0100269IOutputSlot* AddBroadcastReshapeLayer(IOutputSlot* input0Slot, IOutputSlot* input1Slot, bool isNHWC,
270 INetwork& m_Network, const tensorflow::NodeDef& nodeDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100271{
272 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
273 const TensorInfo inputTensorInfo = input0Slot->GetTensorInfo();
274 const unsigned int matchDim = inputTensorInfo.GetNumDimensions() - (isNHWC ? 1 : 3);
275 std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
276 std::fill_n(reshapedDimensions.begin(), inputTensorInfo.GetNumDimensions(), 1);
277 reshapedDimensions[matchDim] = input1Info.GetShape()[0];
278
279 armnn::TensorInfo reshapedInfo = input1Info;
280 reshapedInfo.SetShape(TensorShape{ inputTensorInfo.GetNumDimensions(), reshapedDimensions.data() });
281
282 const std::string reshapeLayerName = "reshape_for-" + nodeDef.name();
283 ReshapeDescriptor reshapeDesc;
284 reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
285 IConnectableLayer* const reshapeLayer = m_Network.AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());
286
287 input1Slot->Connect(reshapeLayer->GetInputSlot(0));
288 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
289
290 input1Slot = &reshapeLayer->GetOutputSlot(0);
291
292 return input1Slot;
293}
294
295OutputId ParseOutputId(const std::string & name)
296{
297 unsigned int outputNum = 0;
298 size_t colonPos = name.find_last_of(":");
299 if (colonPos != std::string::npos)
300 {
301 int n = std::stoi(name.substr(colonPos+1));
302 if (n<0 || n>100)
303 {
telsoa01c577f2c2018-08-31 09:22:23 +0100304 throw ParseException(
305 boost::str(
306 boost::format(
307 "Output tensor id is out of range for %1% %2%")
308 % name
309 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100310 }
311 outputNum = static_cast<unsigned int>(n);
312 }
313 return OutputId(name.substr(0,colonPos),outputNum);
314}
315
telsoa01c577f2c2018-08-31 09:22:23 +0100316#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \
317 if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
318 { \
319 throw ParseException( \
320 boost::str( \
321 boost::format( \
322 "Unsupported data format %1% passed for %2% node %3%. " \
323 "Only NHWC and NCHW supported %4%") \
324 % FORMAT \
325 % NODE_TYPE \
326 % NODE_DEF.name() \
327 % CHECK_LOCATION().AsString())); \
328 }
329
330#define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
331 if(PADDING != "SAME" && PADDING != "VALID" ) \
332 { \
333 throw ParseException( \
334 boost::str( \
335 boost::format( \
336 "Only 'SAME' and 'VALID' padding supported. Got %1% for %2% %3%") \
337 % PADDING \
338 % NODE_DEF.name() \
339 % CHECK_LOCATION().AsString())); \
340 } \
341
surmeh01bceff2f2018-03-29 16:29:27 +0100342} // namespace
343
344const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
345 { "Const", &TfParser::ParseConst },
346 { "Add", &TfParser::ParseAdd },
347 { "BiasAdd", &TfParser::ParseBiasAdd },
348 { "Identity", &TfParser::ParseIdentity },
349 { "Conv2D", &TfParser::ParseConv2D },
350 { "DepthwiseConv2dNative", &TfParser::ParseDepthwiseConv2D },
351 { "FusedBatchNorm", &TfParser::ParseFusedBatchNorm },
352 { "ConcatV2", &TfParser::ParseConcat },
353 { "LRN", &TfParser::ParseLrn },
354 { "MatMul", &TfParser::ParseMatMul },
355 { "Mul", &TfParser::ParseMul },
356 { "Placeholder", &TfParser::ParsePlaceholder },
saoste01bbd40612018-08-28 15:41:51 +0100357 { "RealDiv", &TfParser::ParseRealDiv },
surmeh01bceff2f2018-03-29 16:29:27 +0100358 { "Relu", &TfParser::ParseRelu },
359 { "Relu6", &TfParser::ParseRelu6 },
360 { "Reshape", &TfParser::ParseReshape },
361 { "ResizeBilinear", &TfParser::ParseResizeBilinear },
362 { "Shape", &TfParser::ParseShape },
363 { "Squeeze", &TfParser::ParseSqueeze },
364 { "Sigmoid", &TfParser::ParseSigmoid },
365 { "Softmax", &TfParser::ParseSoftmax },
366 { "Softplus", &TfParser::ParseSoftplus },
367 { "Tanh", &TfParser::ParseTanh },
368 { "MaxPool", &TfParser::ParseMaxPool },
369 { "AvgPool", &TfParser::ParseAvgPool },
telsoa01c577f2c2018-08-31 09:22:23 +0100370 { "Maximum", &TfParser::ParseMaximum },
surmeh01bceff2f2018-03-29 16:29:27 +0100371};
372
373ITfParser* ITfParser::CreateRaw()
374{
375 return new TfParser();
376}
377
378ITfParserPtr ITfParser::Create()
379{
380 return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
381}
382
383void ITfParser::Destroy(ITfParser* parser)
384{
385 delete parser;
386}
387
388inline void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
389 uint32_t filterSize, bool samePadding,
390 uint32_t* paddingFront, uint32_t* paddingBack) {
391 *paddingFront = 0;
392 *paddingBack = 0;
393
394 if (samePadding) {
395 uint32_t outputSize = (inputSize + stride - 1) / stride;
396 uint32_t temp = (outputSize - 1) * stride + filterSize;
397 if (temp > inputSize) {
398 *paddingFront = (temp - inputSize) / 2;
399 *paddingBack = (temp - inputSize) - *paddingFront;
400 }
401 }
402}
403
404void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
405 bool samePadding)
406{
407 CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
408}
409
410/// An Abstract base class which represents a single tensorflow operation (node)
411/// that has been (potentially partially) converted to Armnn.
412/// It may not yet have been fully converted into actual Armnn layers.
413class ParsedTfOperation
414{
415public:
416 ParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
417 : m_Parser(parser)
418 , m_Node(node)
419 {
420 }
421
422 virtual ~ParsedTfOperation() {};
423
424 const tensorflow::NodeDef& GetNode() const { return m_Node; }
425
426 /// Gets the ArmNN IOutputSlot corresponding to the given output index of the Tensorflow operation.
427 /// This may result in the creation of Armnn layers if this was deferred (e.g. see ParsedConstTfOperation).
428 virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) = 0;
429
430 /// If this operation is an Identity then this will follow return the 'parent' operation (recursively).
431 virtual ParsedTfOperation* ResolveIdentityOperations()
432 {
433 return this;
434 }
435
436protected:
437 TfParser* m_Parser;
438 const tensorflow::NodeDef& m_Node;
439};
440
441/// An ParsedTfOperation where the Armnn equivalent is a single layer,
442/// with output slots that correspond directly to the Tf node outputs.
443class SingleLayerParsedTfOperation : public ParsedTfOperation
444{
445public:
446 SingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node, IConnectableLayer* layer)
447 : ParsedTfOperation(parser, node)
448 , m_Layer(layer)
449 {
450 }
451
452 IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
453 {
454 BOOST_ASSERT(m_Layer);
telsoa01c577f2c2018-08-31 09:22:23 +0100455 // Assumes one-to-one mapping between Tf and armnn output slots.
surmeh01bceff2f2018-03-29 16:29:27 +0100456 unsigned int armnnOutputSlotIdx = tfOutputIndex;
457 if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
458 {
459 throw ParseException(
telsoa01c577f2c2018-08-31 09:22:23 +0100460 boost::str(
461 boost::format(
462 "The requested output slot #%1% "
463 "for %2% does not exist %3%")
464 % armnnOutputSlotIdx
465 % m_Layer->GetName()
466 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100467 }
468 return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
469 }
470
471protected:
472 IConnectableLayer* m_Layer;
473};
474
telsoa01c577f2c2018-08-31 09:22:23 +0100475/// A SingleLayerParsedTfOperation for deferred layer creation.
surmeh01bceff2f2018-03-29 16:29:27 +0100476class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
477{
478public:
479 DeferredSingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
480 : SingleLayerParsedTfOperation(parser, node, nullptr)
481 {
482 }
483
484 IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
485 {
486 if (!m_Layer)
487 {
488 CreateLayerDeferred();
489 }
490 return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
491 }
492
493private:
494 virtual void CreateLayerDeferred() = 0;
495};
496
497
498TfParser::TfParser()
499 : m_Network(nullptr, nullptr)
500{
501}
502
503
504const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
505{
506 if (nodeDef->op() != "Identity")
507 {
508 return nodeDef;
509 }
510
511 if (nodeDef->input_size() != 1)
512 {
telsoa01c577f2c2018-08-31 09:22:23 +0100513 throw ParseException(
514 boost::str(
515 boost::format(
516 "Identity node should have a single input! %1% has %2% inputs %3%")
517 % nodeDef->name()
518 % nodeDef->input_size()
519 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100520 }
521
522 auto it = m_NodesByName.find(nodeDef->input(0));
523 if (it != m_NodesByName.end())
524 {
525 const tensorflow::NodeDef* inputNode = it->second;
526 return ResolveIdentityNode(inputNode);
527 }
528 else
529 {
telsoa01c577f2c2018-08-31 09:22:23 +0100530 throw ParseException(
531 boost::str(
532 boost::format(
533 "Cannot find what the Identity node %1% is linked to! %2%")
534 % nodeDef->name()
535 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100536 }
537}
538
539std::vector<OutputOfConstNodeDef>
540TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
541{
542 std::vector<OutputOfConstNodeDef> ret;
543
surmeh013537c2c2018-05-18 16:31:43 +0100544 if (nodeDef.op() == "Const")
545 {
546 // For some reason const node can have "Control Inputs". We ignore them for now.
547 return ret;
548 }
549
surmeh01bceff2f2018-03-29 16:29:27 +0100550 ret.reserve(boost::numeric_cast<size_t>(nodeDef.input_size()));
551 for (int j = 0; j < nodeDef.input_size(); ++j)
552 {
553 OutputId outputId = ParseOutputId(nodeDef.input(j));
surmeh013537c2c2018-05-18 16:31:43 +0100554
555 if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
556 {
557 throw ParseException(
telsoa01c577f2c2018-08-31 09:22:23 +0100558 boost::str(
559 boost::format(
560 "Node '%1%' has Control Input '%2%' for input #%3% which is unsupported. %4%")
561 % nodeDef.name()
562 % nodeDef.input(j)
563 % j
564 % CHECK_LOCATION().AsString()));
surmeh013537c2c2018-05-18 16:31:43 +0100565 }
566
surmeh01bceff2f2018-03-29 16:29:27 +0100567 auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
568 if (inputIt == m_NodesByName.end())
569 {
570 throw ParseException(
telsoa01c577f2c2018-08-31 09:22:23 +0100571 boost::str(
572 boost::format(
573 "Can't find node '%1%', which is listed as an input of '%2%' %3%")
574 % nodeDef.input(j)
575 % nodeDef.name()
576 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100577 }
578 ret.push_back(OutputOfConstNodeDef(inputIt->second,outputId.m_Index));
579 }
580
581 return ret;
582}
583
584std::vector<OutputOfParsedTfOperation>
585TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
586 std::size_t expectedNumInputs)
587{
telsoa01c577f2c2018-08-31 09:22:23 +0100588 // Fetches the tensorflow nodes connected as inputs and validate the size.
surmeh01bceff2f2018-03-29 16:29:27 +0100589 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
590 const std::size_t numInputs = nodes.size();
591 if (numInputs != expectedNumInputs)
592 {
telsoa01c577f2c2018-08-31 09:22:23 +0100593 throw ParseException(
594 boost::str(
595 boost::format(
596 "Unexpected number of inputs for node %1%. Expected %2%, found %3% %4%")
597 % nodeDef.name()
598 % expectedNumInputs
599 % numInputs
600 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100601 }
telsoa01c577f2c2018-08-31 09:22:23 +0100602 // Fetches the corresponding ParsedTfOperation operations
surmeh01bceff2f2018-03-29 16:29:27 +0100603 std::vector<OutputOfParsedTfOperation> result;
604 for (auto&& node : nodes)
605 {
606 auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
607 if (it == m_ParsedTfOperations.end())
608 {
telsoa01c577f2c2018-08-31 09:22:23 +0100609 throw ParseException(
610 boost::str(
611 boost::format(
612 "Node with name '%1%' has not been parsed %2%")
613 % node.m_IndexedValue->name()
614 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100615 }
616 ParsedTfOperation* parsedOp = it->second.get();
617 // Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
618 parsedOp = parsedOp->ResolveIdentityOperations();
619 result.push_back(OutputOfParsedTfOperation(parsedOp,node.m_Index));
620 }
621 return result;
622}
623
624ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
625{
626 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
627
telsoa01c577f2c2018-08-31 09:22:23 +0100628 // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
629 // together as FullyConnected.
surmeh01bceff2f2018-03-29 16:29:27 +0100630 if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
631 HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
632 {
633 IConnectableLayer* layer =
634 AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
635 &nodeDef,nodeDef.name().c_str());
636 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
637 }
638 else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
639 inputs[1].m_IndexedValue->GetNode().op() == "MatMul")
640 {
641 IConnectableLayer* layer =
642 AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
643 &nodeDef,nodeDef.name().c_str());
644 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
645 }
646 else
647 {
telsoa01c577f2c2018-08-31 09:22:23 +0100648 // Otherwise it's just a regular addition.
surmeh01bceff2f2018-03-29 16:29:27 +0100649 return AddAdditionLayer(nodeDef);
650 }
651}
652
653ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
654{
655 return AddAdditionLayer(nodeDef, true);
656}
657
658/// An ParsedTfOperation which forwards to another (used for Identity nodes).
659class ParsedIdentityTfOperation : public ParsedTfOperation
660{
661public:
662 ParsedIdentityTfOperation(TfParser* parser, const tensorflow::NodeDef& node, ParsedTfOperation* representative)
663 : ParsedTfOperation(parser, node)
664 , m_Representative(representative)
665 {
666 }
667
668 virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
669 {
670 BOOST_ASSERT(m_Representative);
671 return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
672 }
673
674 virtual ParsedTfOperation* ResolveIdentityOperations() override
675 {
676 return m_Representative->ResolveIdentityOperations();
677 }
678
679private:
680 ParsedTfOperation* m_Representative;
681};
682
683ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
684{
685 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
686 // Any requests for the output slots of this node should be forwarded to the node connected as input.
687 return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
688}
689
690/// An ParsedTfOperation for a Const node.
691/// Creation of the armnn ConstLayer is deferred until it is actually needed, because Const nodes are mostly used
692/// for weight inputs to MatMul/Conv2D nodes and in these cases armnn doesn't need a ConstLayer.
693template <typename T>
694class ParsedConstTfOperation : public DeferredSingleLayerParsedTfOperation
695{
696public:
697 ParsedConstTfOperation(TfParser* parser, const tensorflow::NodeDef& node,
698 const T* tensorData, const TensorInfo& tensorInfo)
699 : DeferredSingleLayerParsedTfOperation(parser, node),
700 m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
701 m_TensorInfo(tensorInfo)
702 {
703 BOOST_ASSERT(tensorInfo.GetDataType() == GetDataType<T>());
704 }
705
706 void CreateLayerDeferred() override
707 {
708 BOOST_ASSERT(m_Layer == nullptr);
709 m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
710 m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
711 }
712
713 ConstTensor GetConstTensor(bool swizzleForConvolutionWeights, std::vector<T>& outputTensorData) const
714 {
715 // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
telsoa01c577f2c2018-08-31 09:22:23 +0100716 // Tensorflow weights are [H, W, In, Out].
717 // ArmNN weights are [Out, In, H, W].
surmeh01bceff2f2018-03-29 16:29:27 +0100718 static const PermutationVector HWIOToOIHW = {2, 3, 1, 0};
719
720 const TensorInfo outInfo = swizzleForConvolutionWeights
721 ? armnnUtils::Permuted(m_TensorInfo, HWIOToOIHW)
722 : m_TensorInfo;
723
724 outputTensorData.resize(m_TensorInfo.GetNumElements());
725
telsoa01c577f2c2018-08-31 09:22:23 +0100726 // Copies or swizzles from the permanent storage into the storage the caller provided.
surmeh01bceff2f2018-03-29 16:29:27 +0100727 if (swizzleForConvolutionWeights)
728 {
729 armnnUtils::Permute(outInfo.GetShape(), HWIOToOIHW, m_Storage.data(), outputTensorData.data());
730 }
731 else
732 {
733 memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.GetNumBytes());
734 }
telsoa01c577f2c2018-08-31 09:22:23 +0100735 // Updates the result to point to the user provided storage.
surmeh01bceff2f2018-03-29 16:29:27 +0100736 ConstTensor constTensor(outInfo, outputTensorData);
737 return constTensor;
738 }
739
740private:
741 ///< Manages the lifetime of the tensor data.
742 std::vector<T> m_Storage;
743 ///< Describes the layout of the tensor and points to the data in m_Storage.
744 TensorInfo m_TensorInfo;
745};
746
telsoa01c577f2c2018-08-31 09:22:23 +0100747DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
748 const tensorflow::NodeDef& nodeDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100749{
750 switch (tfDataType)
751 {
752 case tensorflow::DT_FLOAT:
753 return DataType::Float32;
754 break;
755 case tensorflow::DT_INT32:
756 return DataType::Signed32;
757 break;
758 default:
telsoa01c577f2c2018-08-31 09:22:23 +0100759 throw ParseException(
760 boost::str(
761 boost::format(
762 "Unknown DataType %1% for node %2% %3%")
763 % tensorflow::DataType_Name(tfDataType)
764 % nodeDef.name()
765 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100766 }
767}
768
769struct ParseTfTensorValueList
770{
771 template<typename DataType>
772 static void Parse(
773 const tensorflow::TensorProto& tfTensor,
774 unsigned int dstElements,
775 std::vector<int8_t>& outputData);
776
777 template <typename DataType>
778 static void ReadData(const void* srcData, unsigned int numSrcElements,
779 std::vector<int8_t>& dstData, unsigned int numDstElements)
780 {
telsoa01c577f2c2018-08-31 09:22:23 +0100781 // If there are no entries in the list, perform no action.
surmeh01bceff2f2018-03-29 16:29:27 +0100782 if (numSrcElements == 0)
783 {
784 return;
785 }
786
telsoa01c577f2c2018-08-31 09:22:23 +0100787 // If no size was provided, use the length of the value list.
surmeh01bceff2f2018-03-29 16:29:27 +0100788 if (numDstElements == 0)
789 {
790 numDstElements = numSrcElements;
791 }
792
telsoa01c577f2c2018-08-31 09:22:23 +0100793 // Allocates memory.
surmeh01bceff2f2018-03-29 16:29:27 +0100794 dstData.resize(std::max(numSrcElements, numDstElements) * sizeof(DataType));
795
796 const DataType* srcTensor = reinterpret_cast<const DataType*>(srcData);
797 DataType* dstTensor = reinterpret_cast<DataType*>(dstData.data());
798
telsoa01c577f2c2018-08-31 09:22:23 +0100799 // Copies the value list entries into the destination.
surmeh01bceff2f2018-03-29 16:29:27 +0100800 std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);
801
802 if (numDstElements > numSrcElements)
803 {
telsoa01c577f2c2018-08-31 09:22:23 +0100804 // Uses the last element in the list to fill the remaining entries.
surmeh01bceff2f2018-03-29 16:29:27 +0100805 std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
806 }
807 }
808
809};
810
811template <>
812void ParseTfTensorValueList::Parse<float>(const tensorflow::TensorProto& tfTensor,
813 unsigned int dstElements, std::vector<int8_t>& outputData)
814{
815 ReadData<float>(tfTensor.float_val().data(), static_cast<unsigned int>(tfTensor.float_val_size()),
816 outputData, dstElements);
817}
818
819template <>
820void ParseTfTensorValueList::Parse<int32_t>(const tensorflow::TensorProto& tfTensor,
821 unsigned int dstElements, std::vector<int8_t>& outputData)
822{
823 ReadData<int32_t>(tfTensor.int_val().data(), static_cast<unsigned int>(tfTensor.int_val_size()),
824 outputData, dstElements);
825}
826
827template <template<typename> class OperatorType, typename T = int8_t>
828struct MakeTfOperation
829{
830 template<typename DataType, class... Args>
831 inline static std::unique_ptr<OperatorType<DataType>> Parse(TfParser* parser, const tensorflow::NodeDef& node,
832 Args&&... args)
833 {
834 return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
835 }
836};
837
838template <>
839struct MakeTfOperation<ParsedConstTfOperation>
840{
841 template<typename DataType, class... Args>
842 inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(TfParser* parser,
843 const tensorflow::NodeDef& node, const std::vector<int8_t>& tensorData, const TensorInfo& tensorInfo)
844 {
845 return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
846 reinterpret_cast<const DataType*>(tensorData.data()), tensorInfo);
847 }
848};
849
850template <class FuncType>
851struct InvokeParseFunction
852{
853 template<class ResType, class... Args>
854 inline static ResType Result(DataType dataType, Args&&... args)
855 {
856 if (dataType == DataType::Float32)
857 {
858 return FuncType::template Parse<float>(std::forward<Args>(args)...);
859 }
860 else if (dataType == DataType::Signed32)
861 {
862 return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
863 }
864
865 return ResType();
866 }
867
868 template<class... Args>
869 inline static void Result(DataType dataType, Args&&... args)
870 {
871 if (dataType == DataType::Float32)
872 {
873 FuncType::template Parse<float>(std::forward<Args>(args)...);
874 }
875 else if (dataType == DataType::Signed32)
876 {
877 FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
878 }
879 }
880};
881
882ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
883{
884 BOOST_ASSERT(nodeDef.op() == "Const");
885
886 if (nodeDef.attr().count("value") == 0)
887 {
telsoa01c577f2c2018-08-31 09:22:23 +0100888 throw ParseException(
889 boost::str(
890 boost::format(
891 "Value not found for Const node - %1% %2%")
892 % nodeDef.name()
893 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100894 }
895
896 const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
897 const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
898 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "dtype");
899
900 const auto GetDimensionSize = [](auto& d) { return d.size(); };
901
902 std::vector<unsigned int> dimensionSizes;
903 std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
904 std::back_inserter(dimensionSizes), GetDimensionSize);
905
telsoa01c577f2c2018-08-31 09:22:23 +0100906 // Calculates number of elements.
907 const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +0100908 unsigned int numElements = 0U;
909
910 if (!dimensionSizes.empty())
911 {
912 numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
913 1U, std::multiplies<unsigned int>());
914 }
915
916 std::vector<int8_t> tensorData;
917
telsoa01c577f2c2018-08-31 09:22:23 +0100918 // Get tensor data from the list of values attribute.
surmeh01bceff2f2018-03-29 16:29:27 +0100919 if (tfTensor.tensor_content().empty())
920 {
921 InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);
922
923 // If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
telsoa01c577f2c2018-08-31 09:22:23 +0100924 // tensor of the provided number of elements.
surmeh01bceff2f2018-03-29 16:29:27 +0100925 if (numElements == 0)
926 {
telsoa01c577f2c2018-08-31 09:22:23 +0100927 const unsigned int tfNumElements =
928 static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
surmeh01bceff2f2018-03-29 16:29:27 +0100929 dimensionSizes.push_back(tfNumElements);
930 }
931 }
telsoa01c577f2c2018-08-31 09:22:23 +0100932 // Gets tensor data from tensor content attribute.
surmeh01bceff2f2018-03-29 16:29:27 +0100933 else
934 {
935 tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());
936
telsoa01c577f2c2018-08-31 09:22:23 +0100937 // Checks if a tensor shape is defined for the tensor content.
surmeh01bceff2f2018-03-29 16:29:27 +0100938 if (numElements == 0)
939 {
telsoa01c577f2c2018-08-31 09:22:23 +0100940 throw ParseException(
941 boost::str(
942 boost::format(
943 "No tensor shape found for Const node - %1% %2%")
944 % nodeDef.name()
945 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100946 }
947 }
948
telsoa01c577f2c2018-08-31 09:22:23 +0100949 // Const node requires at least a list of values or a content attribute.
surmeh01bceff2f2018-03-29 16:29:27 +0100950 if (tensorData.empty())
951 {
telsoa01c577f2c2018-08-31 09:22:23 +0100952 throw ParseException(
953 boost::str(
954 boost::format(
955 "No tensor data found for Const node - %1% %2%")
956 % nodeDef.name()
957 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100958 }
959
telsoa01c577f2c2018-08-31 09:22:23 +0100960 const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
961 dimensionSizes.data(),
962 dataType);
surmeh01bceff2f2018-03-29 16:29:27 +0100963
964 // If we have a list of values, then the length of the list must be
telsoa01c577f2c2018-08-31 09:22:23 +0100965 // less than or equal to the number of elements implied by the shape argument.
surmeh01bceff2f2018-03-29 16:29:27 +0100966 if (tensorData.size() > tensorInfo.GetNumBytes())
967 {
telsoa01c577f2c2018-08-31 09:22:23 +0100968 throw ParseException(
969 boost::str(
970 boost::format(
971 "Number of elements (%1%) should be less than or equal "
972 "to the number of elements implied by the shape argument (%2%) for Const node - %3% %4%")
973 % (tensorData.size() / GetDataTypeSize(dataType))
974 % tensorInfo.GetNumElements()
975 % nodeDef.name()
976 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100977 }
978
979 return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
980 dataType, this, nodeDef, tensorData, tensorInfo);
981}
982
983template<typename Type>
984bool TfParser::HasParsedConstTensor(const std::string & nodeName) const
985{
986 auto it = m_ParsedTfOperations.find(nodeName);
987 if (it == m_ParsedTfOperations.end() ||
988 dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) == nullptr)
989 {
990 return false;
991 }
992 else
993 {
994 return true;
995 }
996}
997
998ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
999 const tensorflow::GraphDef& graphDef)
1000{
1001 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1002 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1003 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
1004
1005 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1006 {
telsoa01c577f2c2018-08-31 09:22:23 +01001007 throw ParseException(
1008 boost::str(
1009 boost::format(
1010 "ArmNN only supports Convolution layers with constant weights for %1%, input %2% %3%")
1011 % nodeDef.name()
1012 % inputs[1].m_IndexedValue->GetNode().name()
1013 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001014 }
1015 ParsedConstTfOperation<float>* weightNode =
1016 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1017
1018 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
1019 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
1020 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
1021
telsoa01c577f2c2018-08-31 09:22:23 +01001022 // Read the dilations, if present - only [1,1,1,1] (the default) is supported.
surmeh01bceff2f2018-03-29 16:29:27 +01001023 std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
1024 if (!dilations.empty())
1025 {
1026 for (auto dilation : dilations)
1027 {
1028 if (dilation != 1u)
1029 {
telsoa01c577f2c2018-08-31 09:22:23 +01001030 throw ParseException(
1031 boost::str(
1032 boost::format(
1033 "ArmNN only supports Convolution layers with dilations [1,1,1,1] for %1% %2%")
1034 % nodeDef.name()
1035 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001036 }
1037 }
1038 }
1039
1040 Convolution2dDescriptor desc;
1041 desc.m_BiasEnabled = false;
1042
telsoa01c577f2c2018-08-31 09:22:23 +01001043 CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");
1044
surmeh01bceff2f2018-03-29 16:29:27 +01001045 if (dataFormat == "NHWC")
1046 {
1047 desc.m_StrideX = strides[2];
1048 desc.m_StrideY = strides[1];
telsoa01c577f2c2018-08-31 09:22:23 +01001049 // Swizzles input to supported memory layout.
surmeh01bceff2f2018-03-29 16:29:27 +01001050 inputTensorInfo = armnnUtils::Permuted(inputSlot.GetTensorInfo(), NHWCToArmNN);
1051 }
1052 else if (dataFormat == "NCHW")
1053 {
1054 desc.m_StrideX = strides[3];
1055 desc.m_StrideY = strides[2];
1056 }
surmeh01bceff2f2018-03-29 16:29:27 +01001057
1058 uint32_t inputHeight = inputTensorInfo.GetShape()[2];
1059 uint32_t inputWidth = inputTensorInfo.GetShape()[3];
1060
1061 std::vector<float> outputTensorData;
1062
1063 ConstTensor weightTensor = weightNode->GetConstTensor(true, outputTensorData);
1064
1065 uint32_t weightHeight = weightTensor.GetShape()[2];
1066 uint32_t weightWidth = weightTensor.GetShape()[3];
1067
1068 bool padding = false;
1069 TensorInfo outputInfo;
telsoa01c577f2c2018-08-31 09:22:23 +01001070
1071 CHECK_PADDING_TYPE(nodeDef, paddingString);
1072
surmeh01bceff2f2018-03-29 16:29:27 +01001073 if (paddingString == "SAME")
1074 {
1075 padding = true;
1076 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1077 weightTensor.GetShape()[0],
1078 static_cast<uint32_t>(ceil(
1079 static_cast<float>(inputHeight) /
1080 static_cast<float>(desc.m_StrideY))),
1081 static_cast<uint32_t>(ceil(
1082 static_cast<float>(inputWidth) /
1083 static_cast<float>(desc.m_StrideX)))
1084 }, DataType::Float32);
1085 }
1086 else if (paddingString == "VALID")
1087 {
1088 padding = false;
1089 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1090 weightTensor.GetShape()[0],
1091 static_cast<uint32_t>(ceil(
1092 static_cast<float>(inputHeight - weightHeight + 1) /
1093 static_cast<float>(desc.m_StrideY))),
1094 static_cast<uint32_t>(ceil(
1095 static_cast<float>(inputWidth - weightWidth + 1) /
1096 static_cast<float>(desc.m_StrideX)))
1097 }, DataType::Float32);
1098 }
surmeh01bceff2f2018-03-29 16:29:27 +01001099
1100 CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
1101 CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);
1102
1103 IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc, weightTensor, nodeDef.name().c_str());
1104 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1105
1106 if (dataFormat == "NHWC")
1107 {
1108 layer = SwizzleInDeswizzleOut(*m_Network, inputSlot, *layer, nodeDef.name());
1109 }
1110 else
1111 {
1112 inputSlot.Connect(layer->GetInputSlot(0));
1113 }
1114
1115 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1116}
1117
1118ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
telsoa01c577f2c2018-08-31 09:22:23 +01001119 const tensorflow::GraphDef& graphDef)
surmeh01bceff2f2018-03-29 16:29:27 +01001120{
1121 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1122 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1123 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
1124
1125 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1126 {
telsoa01c577f2c2018-08-31 09:22:23 +01001127 throw ParseException(
1128 boost::str(
1129 boost::format(
1130 "ArmNN only supports Depthwise Convolution layer with constant weights. "
1131 "Non const input found %1% for node %2% %3%")
1132 % inputs[1].m_IndexedValue->GetNode().name()
1133 % nodeDef.name()
1134 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001135 }
1136 ParsedConstTfOperation<float>* weightNode =
1137 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1138
1139
1140 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
1141 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
1142 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
1143
1144 DepthwiseConvolution2dDescriptor desc;
1145 desc.m_BiasEnabled = false;
1146
telsoa01c577f2c2018-08-31 09:22:23 +01001147 CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");
1148
surmeh01bceff2f2018-03-29 16:29:27 +01001149 if (dataFormat == "NHWC")
1150 {
1151 desc.m_StrideX = strides[2];
1152 desc.m_StrideY = strides[1];
telsoa01c577f2c2018-08-31 09:22:23 +01001153 // Swizzles input to supported memory layout.
surmeh01bceff2f2018-03-29 16:29:27 +01001154 inputTensorInfo = armnnUtils::Permuted(inputSlot.GetTensorInfo(), NHWCToArmNN);
1155 }
1156 else if (dataFormat == "NCHW")
1157 {
1158 desc.m_StrideX = strides[3];
1159 desc.m_StrideY = strides[2];
1160 }
surmeh01bceff2f2018-03-29 16:29:27 +01001161
1162 uint32_t inputHeight = inputTensorInfo.GetShape()[2];
1163 uint32_t inputWidth = inputTensorInfo.GetShape()[3];
1164
1165 std::vector<float> outputTensorData;
1166
1167 ConstTensor weightTensor = weightNode->GetConstTensor(true, outputTensorData);
1168
1169 uint32_t weightHeight = weightTensor.GetShape()[2];
1170 uint32_t weightWidth = weightTensor.GetShape()[3];
1171
1172 bool padding = false;
1173 TensorInfo outputInfo;
telsoa01c577f2c2018-08-31 09:22:23 +01001174
1175 CHECK_PADDING_TYPE(nodeDef, paddingString);
1176
surmeh01bceff2f2018-03-29 16:29:27 +01001177 if (paddingString == "SAME")
1178 {
1179 padding = true;
1180 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1181 weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
1182 static_cast<uint32_t>(ceil(
1183 static_cast<float>(inputHeight) /
1184 static_cast<float>(desc.m_StrideY))),
1185 static_cast<uint32_t>(ceil(
1186 static_cast<float>(inputWidth) /
1187 static_cast<float>(desc.m_StrideX)))
1188 }, DataType::Float32);
1189 }
1190 else if (paddingString == "VALID")
1191 {
1192 padding = false;
1193 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1194 weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
1195 static_cast<uint32_t>(ceil(
1196 static_cast<float>(inputHeight - weightHeight + 1) /
1197 static_cast<float>(desc.m_StrideY))),
1198 static_cast<uint32_t>(ceil(
1199 static_cast<float>(inputWidth - weightWidth + 1) /
1200 static_cast<float>(desc.m_StrideX)))
1201 }, DataType::Float32);
1202 }
surmeh01bceff2f2018-03-29 16:29:27 +01001203
1204 CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
1205 CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);
1206
1207 IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc, weightTensor, nodeDef.name().c_str());
1208 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1209
1210 if (dataFormat == "NHWC")
1211 {
1212 layer = SwizzleInDeswizzleOut(*m_Network, inputSlot, *layer, nodeDef.name());
1213 }
1214 else
1215 {
1216 inputSlot.Connect(layer->GetInputSlot(0));
1217 }
1218
1219 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1220}
1221
1222ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
1223 const tensorflow::GraphDef& graphDef)
1224{
1225 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
1226
1227 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1228 {
telsoa01c577f2c2018-08-31 09:22:23 +01001229 throw ParseException(
1230 boost::str(
1231 boost::format(
1232 "ArmNN only supports FusedBatchNormalization layers with constant scale. "
1233 "Input %1%. Node %2% %3%")
1234 % inputs[1].m_IndexedValue->GetNode().name()
1235 % nodeDef.name()
1236 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001237 }
1238 ParsedConstTfOperation<float>* scaleNode =
1239 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1240
1241 if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
1242 {
telsoa01c577f2c2018-08-31 09:22:23 +01001243 throw ParseException(
1244 boost::str(
1245 boost::format(
1246 "ArmNN only supports FusedBatchNormalization layers with constant offset. "
1247 "Input %1%. Node %2% %3%")
1248 % inputs[2].m_IndexedValue->GetNode().name()
1249 % nodeDef.name()
1250 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001251 }
1252 ParsedConstTfOperation<float>* offsetNode =
1253 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);
1254
1255 if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
1256 {
telsoa01c577f2c2018-08-31 09:22:23 +01001257 throw ParseException(
1258 boost::str(
1259 boost::format(
1260 "ArmNN only supports FusedBatchNormalization layers with constant mean. "
1261 "Input %1%. Node %2% %3%")
1262 % inputs[3].m_IndexedValue->GetNode().name()
1263 % nodeDef.name()
1264 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001265 }
1266 ParsedConstTfOperation<float>* meanNode =
1267 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);
1268
1269 if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
1270 {
telsoa01c577f2c2018-08-31 09:22:23 +01001271 throw ParseException(
1272 boost::str(
1273 boost::format(
1274 "ArmNN only supports FusedBatchNormalization layers with constant variance. "
1275 "Input %1%. Node %2% %3%")
1276 % inputs[4].m_IndexedValue->GetNode().name()
1277 % nodeDef.name()
1278 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001279 }
1280 ParsedConstTfOperation<float>* varianceNode =
1281 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);
1282
telsoa01c577f2c2018-08-31 09:22:23 +01001283 // The descriptor only has the epsilon attribute.
surmeh01bceff2f2018-03-29 16:29:27 +01001284 BatchNormalizationDescriptor desc;
1285 desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
1286
telsoa01c577f2c2018-08-31 09:22:23 +01001287 // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
1288 // locally until the layer is added.
surmeh01bceff2f2018-03-29 16:29:27 +01001289 std::vector<float> scaleTensorData;
1290 ConstTensor scaleTensor = scaleNode->GetConstTensor(false, scaleTensorData);
1291
1292 std::vector<float> offsetTensorData;
1293 ConstTensor offsetTensor = offsetNode->GetConstTensor(false, offsetTensorData);
1294
1295 std::vector<float> meanTensorData;
1296 ConstTensor meanTensor = meanNode->GetConstTensor(false, meanTensorData);
1297
1298 std::vector<float> varianceTensorData;
1299 ConstTensor varianceTensor = varianceNode->GetConstTensor(false, varianceTensorData);
1300
1301 IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
1302 meanTensor,
1303 varianceTensor,
1304 offsetTensor,
1305 scaleTensor,
1306 nodeDef.name().c_str());
1307
1308 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1309
1310 const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
1311
1312 if (dataFormat == "NHWC")
1313 {
1314 const TensorInfo outputTensorInfo = armnnUtils::Permuted(inputSlot.GetTensorInfo(), NHWCToArmNN);
1315 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1316 layer = SwizzleInDeswizzleOut(*m_Network, inputSlot, *layer, nodeDef.name());
1317 }
1318 else
1319 {
1320 layer->GetOutputSlot(0).SetTensorInfo(inputSlot.GetTensorInfo());
1321 inputSlot.Connect(layer->GetInputSlot(0));
1322 }
1323
1324 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1325}
1326
telsoa01c577f2c2018-08-31 09:22:23 +01001327bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
1328 size_t alphaLayerIndex,
1329 const OutputOfParsedTfOperation& otherOp,
1330 armnn::IOutputSlot** outputOfLeakyRelu,
1331 armnn::ActivationDescriptor & desc)
1332{
1333 const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();
1334
1335 // Verifying all these assumptions hold:
1336 //
1337 // 1, the mulNodeDef is an elementwise multiplication node "Mul"
1338 // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
1339 // 3, the inputLayerIndex selects a layer which has the same name as otherNodeDef
1340 //
1341
1342 if (mulNodeDef.op() == "Mul")
1343 {
1344 size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
1345 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
1346
1347 BOOST_ASSERT(inputs.size() == 2);
1348 BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
1349 BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
1350 BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
1351
1352 if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
1353 {
1354 if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
1355 {
1356 ParsedConstTfOperation<float>* alpha =
1357 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(
1358 inputs[alphaLayerIndex].m_IndexedValue);
1359
1360 std::vector<float> const_data;
1361 ConstTensor const_tensor = alpha->GetConstTensor(false, const_data);
1362
1363 if (const_data.size() == 1)
1364 {
1365 desc.m_Function = ActivationFunction::LeakyReLu;
1366 desc.m_A = const_data[0];
1367
1368 *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
1369 return true;
1370 }
1371 }
1372 }
1373 }
1374 return false;
1375}
1376
1377// For max nodes, we only support those as part of a leaky relu, i.e.,
1378// as part for a max(mul(a, x), x) expression. We thus need to
1379// identify one input as a multiplication with a scalar constant,
1380// extract the constant and the two inputs, verify that the two other
1381// inputs are the same node, and then create a leaky relu node.
1382
1383ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
1384 const tensorflow::GraphDef& graphDef)
1385{
1386 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1387 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1388 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1389 IOutputSlot* outputOfLeakyRelu = nullptr;
1390
1391 ActivationDescriptor desc;
1392
1393 // There are four possible scenarios we need to support (respectively below):
1394 // 1, max(mul(a, x), x)
1395 // 2, max(mul(x, a), x)
1396 // 3, max(x, mul(a, x))
1397 // 4, max(x, mul(x, a))
1398
1399 if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1400 IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1401 IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1402 IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1403 {
1404 BOOST_ASSERT(outputOfLeakyRelu != nullptr);
1405
1406 IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
1407 outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
1408 layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
1409 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1410 }
1411 else
1412 {
1413 throw ParseException(
1414 boost::str(
1415 boost::format(
1416 "ArmNN currenly offers limited support for Maximum node when it can be fused to "
1417 "form a LeakyRelu activation as leakyrelu=max(mul(alpha, X), X). "
1418 "Node: %1% %2%")
1419 % nodeDef.name()
1420 % CHECK_LOCATION().AsString()));
1421 }
1422}
1423
surmeh01bceff2f2018-03-29 16:29:27 +01001424ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
1425 const tensorflow::GraphDef& graphDef)
1426{
1427 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001428 // In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
surmeh01bceff2f2018-03-29 16:29:27 +01001429 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
1430 unsigned int numConcatView = numInputs - 1;
1431
1432 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), MaxNumOfTensorDimensions);
1433 std::vector<unsigned int>mergeDimSizes(MaxNumOfTensorDimensions, 0u);
1434
1435 unsigned int mergeDim = 0;
1436 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
1437
telsoa01c577f2c2018-08-31 09:22:23 +01001438 // The last input is the axis for concatenation.
surmeh01bceff2f2018-03-29 16:29:27 +01001439 if (!HasParsedConstTensor<int32_t>(inputs[numInputs - 1].m_IndexedValue->GetNode().name()))
1440 {
telsoa01c577f2c2018-08-31 09:22:23 +01001441 throw ParseException(
1442 boost::str(
1443 boost::format(
1444 "ArmNN only supports Concat with constant axis. "
1445 "Input %1%. Node %2% %3%")
1446 % inputs[numInputs - 1].m_IndexedValue->GetNode().name()
1447 % nodeDef.name()
1448 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001449 }
1450 ParsedConstTfOperation<int32_t>* shapeNode =
1451 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[numInputs - 1].m_IndexedValue);
1452
1453 std::vector<int32_t> axisTensorData;
1454 ConstTensor axisTensor = shapeNode->GetConstTensor(false, axisTensorData);
1455
telsoa01c577f2c2018-08-31 09:22:23 +01001456 // This concatDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
surmeh01bceff2f2018-03-29 16:29:27 +01001457 const unsigned int concatDimInput = static_cast<unsigned int>(axisTensorData[0]);
1458
telsoa01c577f2c2018-08-31 09:22:23 +01001459 // ArmNN supports concatenation only along the channel dimension, whether the data format is NHWC or NCHW.
surmeh01bceff2f2018-03-29 16:29:27 +01001460 if (concatDimInput == 0 || concatDimInput == 2)
1461 {
telsoa01c577f2c2018-08-31 09:22:23 +01001462 throw ParseException(
1463 boost::str(
1464 boost::format(
1465 "Dimension %1% for concatenation is not supported by Armnn. "
1466 "Node %2% %3%")
1467 % concatDimInput
1468 % nodeDef.name()
1469 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001470 }
1471
telsoa01c577f2c2018-08-31 09:22:23 +01001472 // This is the only concatDim we support in armnn.
surmeh01bceff2f2018-03-29 16:29:27 +01001473 const unsigned int concatDim = 1;
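    // For each view, record its shape and set its origin so that the views are stacked along concatDim.
    // Illustrative example (hypothetical shapes): concatenating two NCHW tensors of shape [1,2,4,4] and
    // [1,3,4,4] along the channel axis gives view origins 0 and 2 on that axis and an output of [1,5,4,4].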
1474 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
1475 {
telsoa01c577f2c2018-08-31 09:22:23 +01001476 // Resolves the output slot that provides this view's input tensor.
surmeh01bceff2f2018-03-29 16:29:27 +01001477 IOutputSlot& inputSlot =
1478 inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
1479 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
1480
1481 if (inputTensorInfo.GetNumDimensions() != MaxNumOfTensorDimensions)
1482 {
telsoa01c577f2c2018-08-31 09:22:23 +01001483 throw ParseException(
1484 boost::str(
1485 boost::format(
1486 "The number of dimensions: %1% for input tensors of the "
1487 "concatenation op should be %2% for Node %3% %4%")
1488 % inputTensorInfo.GetNumDimensions()
1489 % MaxNumOfTensorDimensions
1490 % nodeDef.name()
1491 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001492 }
1493
1494 if (concatDimInput == 3)
1495 {
1496 inputTensorInfo = armnnUtils::Permuted(inputTensorInfo, NHWCToArmNN);
1497 }
1498
1499 for (unsigned int dim = 0; dim < MaxNumOfTensorDimensions; ++dim)
1500 {
1501 mergeDimSizes[dim] = inputTensorInfo.GetShape()[dim];
1502 }
1503
1504 for (unsigned int j = 0; j < concatDim; ++j)
1505 {
1506 concatDescriptor.SetViewOriginCoord(viewIndex, j, 0);
1507 }
1508
1509 concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
1510 mergeDim += mergeDimSizes[concatDim];
1511
1512 for (unsigned int j = concatDim+1; j < MaxNumOfTensorDimensions; ++j)
1513 {
1514 concatDescriptor.SetViewOriginCoord(viewIndex, j, 0);
1515 }
1516 }
1517
1518 mergeDimSizes[concatDim] = mergeDim;
1519 armnn::IConnectableLayer *layer = m_Network->AddMergerLayer(concatDescriptor, nodeDef.name().c_str());
1520
1521 layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(MaxNumOfTensorDimensions, mergeDimSizes.data(),
1522 DataType::Float32));
1523
1524 for (unsigned int v = 0; v < numConcatView; ++v)
1525 {
1526 IOutputSlot& inputSlot = inputs[v].m_IndexedValue->ResolveArmnnOutputSlot(inputs[v].m_Index);
1527 if (concatDimInput == 3)
1528 {
1529 IConnectableLayer* const swizzleLayer = AddSwizzleLayer(*m_Network, inputSlot, NHWCToArmNN,
1530 "swizzle_for-" + nodeDef.name());
1531 swizzleLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(v));
1532 }
1533 else
1534 {
1535 inputSlot.Connect(layer->GetInputSlot(v));
1536 }
1537 }
1538
1539 if (concatDimInput == 3)
1540 {
1541 IConnectableLayer* const deswizzleLayer = AddSwizzleLayer(*m_Network, layer->GetOutputSlot(0), ArmNNToNHWC,
1542 "deswizzle_for-" + nodeDef.name());
1543 layer = deswizzleLayer;
1544 }
1545
1546 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1547}
1548
1549ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
1550 const tensorflow::GraphDef& graphDef)
1551{
telsoa01c577f2c2018-08-31 09:22:23 +01001552 // Note: the Shape node is handled in a special way, because:
1553 // 1. ArmNN doesn't support the int32 tensors that Shape outputs.
1554 // 2. ArmNN works with statically shaped tensors, which are known at parse time.
surmeh01bceff2f2018-03-29 16:29:27 +01001555 // 3. Because of 1. and 2. we treat the output of Shape as a temporary const int32
telsoa01c577f2c2018-08-31 09:22:23 +01001556 //    tensor, which may be used as an input to other ops, most likely a Reshape.
surmeh01bceff2f2018-03-29 16:29:27 +01001557
1558 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
1559 if (tfDataType != tensorflow::DT_INT32)
1560 {
telsoa01c577f2c2018-08-31 09:22:23 +01001561 throw ParseException(
1562 boost::str(
1563 boost::format(
1564 "Armnn only supports DT_INT32 as out_type. Got %1% for Node %2% %3%")
1565 % tensorflow::DataType_Name(tfDataType)
1566 % nodeDef.name()
1567 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001568 }
1569
1570 const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1571 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1572 const TensorInfo& prevLayerTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1573 unsigned int prevLayerDimensions = prevLayerTensorInfo.GetNumDimensions();
1574
1575 std::vector<int32_t> shapeTensorData;
1576 shapeTensorData.reserve(prevLayerDimensions);
1577
1578 for (unsigned int i=0; i<prevLayerDimensions; ++i)
1579 {
1580 shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.GetShape()[i]));
1581 }
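    // Illustrative example (hypothetical shape): an input of shape [1, 224, 224, 3] produces the constant
    // int32 tensor { 1, 224, 224, 3 } with shape [4].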
1582
1583 TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
1584
1585 return std::make_unique<ParsedConstTfOperation<int32_t>>(this,
1586 nodeDef,
1587 &shapeTensorData[0],
1588 shapeTensorInfo);
1589}
1590
1591ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
1592 const tensorflow::GraphDef& graphDef)
1593{
1594 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1595 ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
1596
1597 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
1598 {
telsoa01c577f2c2018-08-31 09:22:23 +01001599 throw ParseException(
1600 boost::str(
1601 boost::format(
1602 "ArmNN only supports Reshape layers with constant shapes. "
1603 "Input %1% Node %2% %3%")
1604 % inputs[1].m_IndexedValue->GetNode().name()
1605 % nodeDef.name()
1606 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001607 }
1608 ParsedConstTfOperation<int32_t>* shapeNode =
1609 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1610
1611 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
1612 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1613
1614 std::vector<int32_t> shapeTensorData;
1615 ConstTensor shapeTensor = shapeNode->GetConstTensor(false, shapeTensorData);
1616 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
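    // PrepareReshape derives the concrete output shape from the constant target shape. TensorFlow allows a
    // single -1 wildcard in the target shape; assuming PrepareReshape follows that convention, reshaping a
    // [1, 2, 3, 4] input with target { -1, 12 } would (illustratively) give [2, 12].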
1617
1618 TensorShape targetShape = outputTensorInfo.GetShape();
1619 ReshapeDescriptor reshapeDesc;
1620 reshapeDesc.m_TargetShape = targetShape;
1621
1622 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1623 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1624 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1625
1626 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1627}
1628
1629ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
1630 const tensorflow::GraphDef& graphDef)
1631{
1632 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1633
1634 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
1635 {
telsoa01c577f2c2018-08-31 09:22:23 +01001636 throw ParseException(
1637 boost::str(
1638 boost::format(
1639 "ArmNN only supports ResizeBilinear layers with constant sizes. "
1640 "Input %1%. Node %2% %3%")
1641 % inputs[1].m_IndexedValue->GetNode().name()
1642 % nodeDef.name()
1643 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001644 }
1645 ParsedConstTfOperation<int32_t>* sizeNode =
1646 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1647
telsoa01c577f2c2018-08-31 09:22:23 +01001648 // Checks the align_corners attribute is not set.
surmeh01bceff2f2018-03-29 16:29:27 +01001649 if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
1650 {
telsoa01c577f2c2018-08-31 09:22:23 +01001651 throw ParseException(
1652 boost::str(
1653 boost::format(
1654 "ArmNN only supports ResizeBilinear layers with align_corners set to false. "
1655 "Node %1% %2%")
1656 % nodeDef.name()
1657 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001658 }
1659
telsoa01c577f2c2018-08-31 09:22:23 +01001660 // Data for the parsed tensor args (size) must be stored locally.
surmeh01bceff2f2018-03-29 16:29:27 +01001661 std::vector<int32_t> sizeTensorData;
1662 ConstTensor sizeTensor = sizeNode->GetConstTensor(false, sizeTensorData);
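    // The size input of TensorFlow's ResizeBilinear is a 1-D int32 tensor of two elements: { new_height, new_width }.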
1663
telsoa01c577f2c2018-08-31 09:22:23 +01001664 // The descriptor only has target height and width attributes, which we get from the size tensor.
surmeh01bceff2f2018-03-29 16:29:27 +01001665 ResizeBilinearDescriptor desc;
1666 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
1667 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
1668
1669 IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, nodeDef.name().c_str());
1670
1671 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1672 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
telsoa01c577f2c2018-08-31 09:22:23 +01001673 // The input shape is always in NHWC format and will be swizzled below; for now,
1674 // take the batch and channels from it to make up the ArmNN (NCHW) output shape with the target size.
surmeh01bceff2f2018-03-29 16:29:27 +01001675 unsigned int outBatch = inputTensorInfo.GetShape()[0];
1676 unsigned int outChannels = inputTensorInfo.GetShape()[3];
1677 unsigned int outHeight = desc.m_TargetHeight;
1678 unsigned int outWidth = desc.m_TargetWidth;
1679 TensorShape outShape({outBatch, outChannels, outHeight, outWidth});
telsoa01c577f2c2018-08-31 09:22:23 +01001680 // The output DataType is always Float32, regardless of the input DataType.
surmeh01bceff2f2018-03-29 16:29:27 +01001681 const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
1682 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1683
telsoa01c577f2c2018-08-31 09:22:23 +01001684 // TensorFlow ResizeBilinear input is always in NHWC format, so add swizzle and deswizzle layers.
surmeh01bceff2f2018-03-29 16:29:27 +01001685 layer = SwizzleInDeswizzleOut(*m_Network, inputSlot, *layer, nodeDef.name());
1686
1687 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1688}
1689
1690TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
1691{
1692 BOOST_ASSERT(nodeDef.op() == "Squeeze");
1693 tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
1694
1695 DataType type;
1696 if (tfDataType == tensorflow::DT_FLOAT)
1697 {
1698 type = DataType::Float32;
1699 }
1700 else if (tfDataType == tensorflow::DT_INT32)
1701 {
1702 type = DataType::Signed32;
1703 }
1704 else
1705 {
telsoa01c577f2c2018-08-31 09:22:23 +01001706 throw ParseException(
1707 boost::str(
1708 boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
1709 % tensorflow::DataType_Name(tfDataType)
1710 % nodeDef.name()
1711 % CHECK_LOCATION().AsString()));
1712 }
1713
1714
1715 if (inputTensorInfo.GetNumDimensions() > 4)
1716 {
1717 throw ParseException(
1718 boost::str(
1719 boost::format(
1720 "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
1721 % inputTensorInfo.GetNumDimensions()
1722 % nodeDef.name()
1723 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001724 }
1725
1726 std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
telsoa01c577f2c2018-08-31 09:22:23 +01001727 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1728
surmeh01bceff2f2018-03-29 16:29:27 +01001729 if (squeezeDims.empty())
1730 {
telsoa01c577f2c2018-08-31 09:22:23 +01001731 squeezeDims.assign(dimensionSequence,
1732 dimensionSequence+inputTensorInfo.GetNumDimensions());
surmeh01bceff2f2018-03-29 16:29:27 +01001733 }
1734
1735 std::vector<uint32_t> outputDims;
1736 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1737 {
telsoa01c577f2c2018-08-31 09:22:23 +01001738 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1739 auto currentDimension = inputTensorInfo.GetShape()[i];
1740 if (skipSqueeze || currentDimension != 1)
surmeh01bceff2f2018-03-29 16:29:27 +01001741 {
telsoa01c577f2c2018-08-31 09:22:23 +01001742 outputDims.push_back(currentDimension);
surmeh01bceff2f2018-03-29 16:29:27 +01001743 }
1744 }
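    // Illustrative example (hypothetical shape): input [1, 3, 1, 5] with squeeze_dims { 0, 2 } gives
    // output [3, 5]; a listed dimension is only removed when its size is 1.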
1745
1746 if (outputDims.size() > 4)
1747 {
telsoa01c577f2c2018-08-31 09:22:23 +01001748 throw ParseException(
1749 boost::str(
1750 boost::format(
1751 "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
1752 % outputDims.size()
1753 % nodeDef.name()
1754 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001755 }
1756
telsoa01c577f2c2018-08-31 09:22:23 +01001757 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1758 outputDims.data());
1759
1760 TensorInfo outTensorInfo = inputTensorInfo;
1761 outTensorInfo.SetShape(outShape);
1762 outTensorInfo.SetDataType(type);
surmeh01bceff2f2018-03-29 16:29:27 +01001763
1764 return outTensorInfo;
1765}
1766
1767ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1768{
1769 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1770
1771 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1772 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1773
1774 TensorInfo outputInfo;
1775 outputInfo = OutputShapeOfSqueeze(nodeDef, inputTensorInfo);
1776
1777 ReshapeDescriptor reshapeDesc;
1778 reshapeDesc.m_TargetShape = outputInfo.GetShape();
1779 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1780 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1781 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1782
1783 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1784}
1785
1786ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1787{
1788 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1789
1790 NormalizationDescriptor normalizationDescriptor;
1791 normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
1792 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
1793 normalizationDescriptor.m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef, "alpha");
1794 normalizationDescriptor.m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef, "beta");
1795 normalizationDescriptor.m_K = ReadMandatoryNodeFloatAttribute(nodeDef, "bias");
1796 normalizationDescriptor.m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef, "depth_radius");
1797
1798 // The window size must be an odd value. For a window size of (2 * n + 1), TensorFlow defines depth_radius = n.
1799 normalizationDescriptor.m_NormSize = normalizationDescriptor.m_NormSize * 2 + 1;
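    // Illustrative example: TensorFlow depth_radius = 2 corresponds to an ArmNN normalization window of size 5.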
1800
1801 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1802
1803 IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor,
1804 nodeDef.name().c_str());
1805
1806 const TensorInfo permutedInfo = armnnUtils::Permuted(prevLayerOutputSlot.GetTensorInfo(), NHWCToArmNN);
1807 layer->GetOutputSlot(0).SetTensorInfo(permutedInfo);
1808
1809 layer = SwizzleInDeswizzleOut(*m_Network, prevLayerOutputSlot, *layer, nodeDef.name());
1810
1811 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1812}
1813
1814/// A ParsedTfOperation for a MatMul node.
telsoa01c577f2c2018-08-31 09:22:23 +01001815/// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because
1816/// MatMul nodes are often used for the first part of a biased FullyConnected (MatMul followed
1817/// by Add) and in these cases armnn doesn't need a separate layer for the MatMul.
1818///
surmeh01bceff2f2018-03-29 16:29:27 +01001819class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
1820{
1821public:
1822 ParsedMatMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
1823 : DeferredSingleLayerParsedTfOperation(parser, node)
1824 {
1825 }
1826
1827 void CreateLayerDeferred() override
1828 {
1829 BOOST_ASSERT(m_Layer == nullptr);
1830 m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
1831 }
1832};
1833
1834ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1835{
telsoa01c577f2c2018-08-31 09:22:23 +01001836 // Defers the creation of the layer (see ParsedMatMulTfOperation).
surmeh01bceff2f2018-03-29 16:29:27 +01001837 return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
1838}
1839
telsoa01c577f2c2018-08-31 09:22:23 +01001840/// A ParsedTfOperation for a Mul node.
1841/// Creation of the armnn Mul layer is deferred until it is actually needed, because Mul nodes
1842/// are also used for the first part of a leaky relu activation function (Mul followed by Maximum)
1843/// and in these cases armnn doesn't need a separate layer for the Mul.
1844///
1845class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
1846{
1847public:
1848 ParsedMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
1849 : DeferredSingleLayerParsedTfOperation(parser, node)
1850 {
1851 }
1852
1853 void CreateLayerDeferred() override
1854 {
1855 BOOST_ASSERT(m_Layer == nullptr);
1856 m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
1857 }
1858};
1859
surmeh01bceff2f2018-03-29 16:29:27 +01001860ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1861{
1862 boost::ignore_unused(graphDef);
1863
telsoa01c577f2c2018-08-31 09:22:23 +01001864 return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01001865}
1866
1867ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
1868 const tensorflow::GraphDef& graphDef)
1869{
1870 boost::ignore_unused(graphDef);
1871
1872 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
1873
1874 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
1875
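    // The shape of a Placeholder must be supplied by the caller via the inputShapes map given to
    // CreateNetworkFrom*(); it is not read from the GraphDef here.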
1876 auto it = m_InputShapes.find(nodeDef.name());
1877 if (it == m_InputShapes.end())
1878 {
telsoa01c577f2c2018-08-31 09:22:23 +01001879 throw ParseException(
1880 boost::str(
1881 boost::format(
1882 "Missing input shape for Placeholder '%1%' %2%")
1883 % nodeDef.name()
1884 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001885 }
1886 TensorInfo tensorInfo(it->second, DataType::Float32);
1887
1888 IConnectableLayer* const layer = m_Network->AddInputLayer(layerId, nodeDef.name().c_str());
1889
1890 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1891
1892 TrackInputBinding(layer, layerId, tensorInfo);
1893
1894 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1895}
1896
saoste01bbd40612018-08-28 15:41:51 +01001897ParsedTfOperationPtr TfParser::ParseRealDiv(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1898{
1899 boost::ignore_unused(graphDef);
1900 return AddRealDivLayer(nodeDef);
1901}
1902
surmeh01bceff2f2018-03-29 16:29:27 +01001903ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
1904 const tensorflow::GraphDef& graphDef)
1905{
1906 boost::ignore_unused(graphDef);
1907
1908 ActivationDescriptor activationDesc;
1909 activationDesc.m_Function = ActivationFunction::ReLu;
1910 return AddActivationLayer(nodeDef, activationDesc);
1911}
1912
1913ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
1914 const tensorflow::GraphDef& graphDef)
1915{
1916 boost::ignore_unused(graphDef);
1917
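    // Relu6 is clamp(x, 0, 6), which maps onto ArmNN's BoundedReLu with upper bound A = 6 and lower bound B = 0.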
1918 ActivationDescriptor activationDesc;
1919 activationDesc.m_Function = ActivationFunction::BoundedReLu;
1920 activationDesc.m_A = 6.0f;
1921 activationDesc.m_B = 0.0f;
1922
1923 return AddActivationLayer(nodeDef, activationDesc);
1924}
1925
1926ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
1927 const tensorflow::GraphDef& graphDef)
1928{
1929 boost::ignore_unused(graphDef);
1930
1931 ActivationDescriptor activationDesc;
1932 activationDesc.m_Function = ActivationFunction::Sigmoid;
1933
1934 return AddActivationLayer(nodeDef, activationDesc);
1935}
1936
1937ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
1938 const tensorflow::GraphDef& graphDef)
1939{
1940 boost::ignore_unused(graphDef);
1941
1942 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1943
1944 SoftmaxDescriptor softmaxDescriptor;
1945 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(softmaxDescriptor, nodeDef.name().c_str());
1946
1947 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1948 prevLayerSlot.Connect(layer->GetInputSlot(0));
1949 layer->GetOutputSlot(0).SetTensorInfo(prevLayerSlot.GetTensorInfo());
1950
1951 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1952}
1953
1954ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
1955 const tensorflow::GraphDef& graphDef)
1956{
1957 boost::ignore_unused(graphDef);
1958
1959 ActivationDescriptor activationDesc;
1960 activationDesc.m_Function = ActivationFunction::SoftReLu;
1961
1962 return AddActivationLayer(nodeDef, activationDesc);
1963}
1964
1965ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1966{
1967 boost::ignore_unused(graphDef);
1968
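    // ArmNN's TanH activation computes A * tanh(B * x); A = B = 1 gives the standard tanh used by TensorFlow.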
1969 ActivationDescriptor activationDesc;
1970 activationDesc.m_Function = ActivationFunction::TanH;
1971 activationDesc.m_A = 1.0f;
1972 activationDesc.m_B = 1.0f;
1973
1974 return AddActivationLayer(nodeDef, activationDesc);
1975}
1976
1977ParsedTfOperationPtr TfParser::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
1978 ActivationDescriptor& activationDesc)
1979{
1980 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1981
1982 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, nodeDef.name().c_str());
1983
1984 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1985 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1986 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
1987 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1988}
1989
1990ParsedTfOperationPtr TfParser::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
1991 const tensorflow::GraphDef& graphDef)
1992{
1993 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
1994}
1995
1996ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
1997 const tensorflow::GraphDef& graphDef)
1998{
1999 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
2000}
2001
2002ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
2003 const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
2004{
2005 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2006 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2007 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2008
2009 if (inputs.size() != 1)
2010 {
telsoa01c577f2c2018-08-31 09:22:23 +01002011 throw ParseException(
2012 boost::str(
2013 boost::format(
2014 "2D Pooling expects one input!. Got %1% for Node %2% %3%")
2015 % inputs.size()
2016 % nodeDef.name()
2017 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002018 }
2019
2020 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
2021 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
2022 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
2023 std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef, "ksize"); // size of pool windows
2024
2025 Pooling2dDescriptor pooling2dDescriptor;
2026 pooling2dDescriptor.m_PoolType = pooltype;
2027 pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
2028 pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;
2029
telsoa01c577f2c2018-08-31 09:22:23 +01002030 CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
2031
surmeh01bceff2f2018-03-29 16:29:27 +01002032 if (dataFormat == "NHWC")
2033 {
2034 pooling2dDescriptor.m_StrideX = strides[2];
2035 pooling2dDescriptor.m_StrideY = strides[1];
2036 pooling2dDescriptor.m_PoolWidth = ksize[2];
2037 pooling2dDescriptor.m_PoolHeight = ksize[1];
telsoa01c577f2c2018-08-31 09:22:23 +01002038 // Swizzles input to supported memory layout.
surmeh01bceff2f2018-03-29 16:29:27 +01002039 inputTensorInfo = armnnUtils::Permuted(inputSlot.GetTensorInfo(), NHWCToArmNN);
2040 }
2041 else if (dataFormat == "NCHW")
2042 {
2043 pooling2dDescriptor.m_StrideX = strides[3];
2044 pooling2dDescriptor.m_StrideY = strides[2];
2045 pooling2dDescriptor.m_PoolWidth = ksize[3];
2046 pooling2dDescriptor.m_PoolHeight = ksize[2];
2047 }
surmeh01bceff2f2018-03-29 16:29:27 +01002048
2049 uint32_t inputHeight = inputTensorInfo.GetShape()[2];
2050 uint32_t inputWidth = inputTensorInfo.GetShape()[3];
2051
2052 bool padding = false;
2053 TensorInfo outputInfo;
telsoa01c577f2c2018-08-31 09:22:23 +01002054
2055 CHECK_PADDING_TYPE(nodeDef, paddingString);
2056
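    // Output spatial size per TensorFlow's rules: SAME -> ceil(input / stride), VALID -> ceil((input - pool + 1) / stride).
    // Illustrative example: a 7x7 input with a 3x3 pool and stride 2 gives 4x4 for SAME and 3x3 for VALID.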
surmeh01bceff2f2018-03-29 16:29:27 +01002057 if (paddingString == "SAME")
2058 {
2059 padding = true;
2060 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
2061 inputTensorInfo.GetShape()[1],
2062 static_cast<uint32_t>(ceil(
2063 static_cast<float>(inputHeight) /
2064 static_cast<float>(pooling2dDescriptor.m_StrideY))),
2065 static_cast<uint32_t>(ceil(
2066 static_cast<float>(inputWidth) /
2067 static_cast<float>(pooling2dDescriptor.m_StrideX)))
2068 }, DataType::Float32);
2069 }
2070 else if (paddingString == "VALID")
2071 {
2072 padding = false;
2073 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
2074 inputTensorInfo.GetShape()[1],
2075 static_cast<uint32_t>(ceil(
2076 static_cast<float>(inputHeight - pooling2dDescriptor.m_PoolHeight + 1) /
2077 static_cast<float>(pooling2dDescriptor.m_StrideY))),
2078 static_cast<uint32_t>(ceil(
2079 static_cast<float>(inputWidth - pooling2dDescriptor.m_PoolWidth + 1) /
2080 static_cast<float>(pooling2dDescriptor.m_StrideX)))
2081 }, DataType::Float32);
2082 }
surmeh01bceff2f2018-03-29 16:29:27 +01002083
2084 CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX,
2085 pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
2086 CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY,
2087 pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);
2088
2089
2090 IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
2091 if (layer == nullptr)
2092 {
telsoa01c577f2c2018-08-31 09:22:23 +01002093 throw ParseException(
2094 boost::str(
2095 boost::format(
2096 "Failed to add pooling2d layer for %1% %2%")
2097 % nodeDef.name()
2098 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002099 }
2100
2101 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2102
2103 if (dataFormat == "NHWC")
2104 {
2105 layer = SwizzleInDeswizzleOut(*m_Network, inputSlot, *layer, nodeDef.name());
2106 }
2107 else
2108 {
2109 inputSlot.Connect(layer->GetInputSlot(0));
2110 }
2111
2112 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2113}
2114
2115ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
2116{
2117 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2118
2119 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2120 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2121
2122 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
2123 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
2124
2125 if (isBiasAdd)
2126 {
2127 // BiasAdd takes the bias as a 1D tensor. We need to add a reshape layer to create a 4D tensor
2128 // with the same data, placed along the correct dimension, so it broadcasts correctly in the addition.
2129 if(input1Info.GetNumDimensions() != 1)
2130 {
telsoa01c577f2c2018-08-31 09:22:23 +01002131 throw ParseException(
2132 boost::str(
2133 boost::format(
2134 "Unsupported bias for BiasAdd. It should be a 1D vector. "
2135 "Got %1% dimensions for input %2%. Node %3% %4%")
2136 % input1Info.GetNumDimensions()
2137 % inputs[1].m_IndexedValue->GetNode().name()
2138 % nodeDef.name()
2139 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002140 }
2141
2142 const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
surmeh01bceff2f2018-03-29 16:29:27 +01002143
telsoa01c577f2c2018-08-31 09:22:23 +01002144 CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
saoste01bbd40612018-08-28 15:41:51 +01002145 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002146 }
2147 else
2148 {
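        // For a plain Add, a 1D operand is reshaped so that it broadcasts against the other (assumed NHWC) operand.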
2149 if (input0Info.GetNumDimensions() == 1)
2150 {
2151 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002152 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002153 }
2154
2155 if (input1Info.GetNumDimensions() == 1)
2156 {
2157 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002158 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002159 }
2160 }
2161
2162 IConnectableLayer* const layer = m_Network->AddAdditionLayer(nodeDef.name().c_str());
2163
2164 input0Slot->Connect(layer->GetInputSlot(0));
2165 input1Slot->Connect(layer->GetInputSlot(1));
2166
2167 if (input0Info.GetNumDimensions() == 1 && isBiasAdd == false)
2168 {
2169 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2170 }
2171 else
2172 {
2173 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2174 }
2175
2176 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2177}
2178
saoste01bbd40612018-08-28 15:41:51 +01002179ParsedTfOperationPtr TfParser::AddRealDivLayer(const tensorflow::NodeDef& nodeDef)
2180{
2181 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2182
2183 IConnectableLayer* const layer = m_Network->AddDivisionLayer(nodeDef.name().c_str());
2184 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2185 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2186
2187 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2188 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2189
2190
2191 if (input0NumDims < input1NumDims)
2192 {
2193 const bool isNHWC = true;
2194 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
2195 }
2196 if (input1NumDims < input0NumDims)
2197 {
2198 const bool isNHWC = true;
2199 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
2200 }
2201
2202 input0Slot->Connect(layer->GetInputSlot(0));
2203 input1Slot->Connect(layer->GetInputSlot(1));
2204
2205 if (input0NumDims < input1NumDims)
2206 {
2207 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2208 }
2209 else
2210 {
2211 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2212
2213 }
2214 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2215}
2216
telsoa01c577f2c2018-08-31 09:22:23 +01002217IConnectableLayer* TfParser::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
2218{
2219 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2220
2221 IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
2222 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2223 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2224
2225 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2226 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2227
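    // If the operands have different ranks, reshape the lower-rank one for broadcasting; the output then
    // takes the shape of the higher-rank operand.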
2228 if (input0NumDims < input1NumDims)
2229 {
2230 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002231 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01002232 }
2233 if (input1NumDims < input0NumDims)
2234 {
2235 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002236 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01002237 }
2238
2239 input0Slot->Connect(layer->GetInputSlot(0));
2240 input1Slot->Connect(layer->GetInputSlot(1));
2241
2242 if (input0NumDims < input1NumDims)
2243 {
2244 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2245 }
2246 else
2247 {
2248 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2249 }
2250 return layer;
2251}
2252
surmeh01bceff2f2018-03-29 16:29:27 +01002253IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
2254 const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
2255{
telsoa01c577f2c2018-08-31 09:22:23 +01002256 // Finds bias const (if applicable).
surmeh01bceff2f2018-03-29 16:29:27 +01002257 ParsedConstTfOperation<float>* biasNode = nullptr;
2258 if (addNodeDef != nullptr)
2259 {
2260 std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
telsoa01c577f2c2018-08-31 09:22:23 +01002261 // Finds our inputs.
surmeh01bceff2f2018-03-29 16:29:27 +01002262 if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
2263 {
2264 biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
2265 }
2266 else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
2267 {
2268 biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
2269 }
2270 else
2271 {
telsoa01c577f2c2018-08-31 09:22:23 +01002272 throw ParseException(
2273 boost::str(
2274 boost::format(
2275 "ArmNN only supports fully connected layers with constant bias. "
2276 "Inputs %1% and %2%. AddNode %3%. MatMulNode %4% %5%")
2277 % addInputs[0].m_IndexedValue->GetNode().name()
2278 % addInputs[1].m_IndexedValue->GetNode().name()
2279 % addNodeDef->name()
2280 % matMulNodeDef.name()
2281 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002282 }
2283 }
2284
telsoa01c577f2c2018-08-31 09:22:23 +01002285 // Finds matmul inputs.
surmeh01bceff2f2018-03-29 16:29:27 +01002286 ParsedConstTfOperation<float>* weightNode = nullptr;
2287 ParsedTfOperation* inputNode = nullptr;
2288 unsigned int inputIdx = 0;
2289 std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
2290 if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
2291 {
2292 weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
2293 inputNode = mulInputs[1].m_IndexedValue;
2294 inputIdx = mulInputs[1].m_Index;
2295 }
2296 else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
2297 {
2298 weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
2299 inputNode = mulInputs[0].m_IndexedValue;
2300 inputIdx = mulInputs[0].m_Index;
2301 }
2302 else
2303 {
telsoa01c577f2c2018-08-31 09:22:23 +01002304 throw ParseException(
2305 boost::str(
2306 boost::format(
2307 "ArmNN only supports fully connected layers with constant weights. "
2308 "Inputs %1% and %2%. MatMulNode %3% %4%")
2309 % mulInputs[0].m_IndexedValue->GetNode().name()
2310 % mulInputs[1].m_IndexedValue->GetNode().name()
2311 % matMulNodeDef.name()
2312 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002313 }
2314
2315 std::vector<float> weightTensorData;
telsoa01c577f2c2018-08-31 09:22:23 +01002316 // Handles weight.
surmeh01bceff2f2018-03-29 16:29:27 +01002317 ConstTensor weights = weightNode->GetConstTensor(false, weightTensorData);
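    // With TensorFlow's default (non-transposed) MatMul, the weights are laid out as [inputSize, outputSize],
    // so the bias (if present) must have outputSize (= weights.GetShape()[1]) elements; this is checked below.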
2318
2319 FullyConnectedDescriptor desc;
2320 desc.m_BiasEnabled = addNodeDef != nullptr;
2321
2322 IConnectableLayer* layer = nullptr;
telsoa01c577f2c2018-08-31 09:22:23 +01002323 // Makes the layer.
surmeh01bceff2f2018-03-29 16:29:27 +01002324 if (addNodeDef != nullptr)
2325 {
2326 std::vector<float> biasTensorData;
2327 ConstTensor biases = biasNode->GetConstTensor(false, biasTensorData);
2328
2329 if (weights.GetShape()[1] != biases.GetShape()[0])
2330 {
telsoa01c577f2c2018-08-31 09:22:23 +01002331 throw ParseException(
2332 boost::str(
2333 boost::format(
2334 "Shape of matmul weights and bias do not match. "
2335 "AddNode %1%. MatMulNode %2% %3%")
2336 % addNodeDef->name()
2337 % matMulNodeDef.name()
2338 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002339 }
2340
2341 layer = m_Network->AddFullyConnectedLayer(desc, weights, biases, armnnLayerName);
2342 }
2343 else
2344 {
2345 layer = m_Network->AddFullyConnectedLayer(desc, weights, armnnLayerName);
2346 }
2347
2348 BOOST_ASSERT(layer != nullptr);
2349
2350 inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
2351 unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];
2352
telsoa01c577f2c2018-08-31 09:22:23 +01002353 // Handles output.
surmeh01bceff2f2018-03-29 16:29:27 +01002354 TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
2355 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2356 return layer;
2357}
2358
2359void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2360{
telsoa01c577f2c2018-08-31 09:22:23 +01002361 // Gets the type of the node (assume float).
surmeh01bceff2f2018-03-29 16:29:27 +01002362 tensorflow::DataType type = tensorflow::DT_FLOAT;
2363 if (nodeDef.attr().count("T") != 0)
2364 {
2365 auto attr = nodeDef.attr().at("T");
2366 type = attr.type();
2367 }
2368 else if (nodeDef.attr().count("dtype") != 0)
2369 {
2370 auto attr = nodeDef.attr().at("dtype");
2371 type = attr.type();
2372 }
2373
2374 if (type != tensorflow::DT_FLOAT && nodeDef.op() != "Const")
2375 {
telsoa01c577f2c2018-08-31 09:22:23 +01002376 throw ParseException(
2377 boost::str(
2378 boost::format(
2379 "Currently only FLOAT is supported for tensorflow nodes (apart from Const). "
2380 "Got %1% for Node %2% %3%")
2381 % tensorflow::DataType_Name(type)
2382 % nodeDef.name()
2383 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002384 }
2385
2386 const std::string& operation = nodeDef.op();
2387 auto it = ms_OperationNameToParsingFunctions.find(operation);
2388 if (it != ms_OperationNameToParsingFunctions.end())
2389 {
2390 auto func = it->second;
2391 ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
2392 ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
2393
telsoa01c577f2c2018-08-31 09:22:23 +01002394 // Stores the parsed operation so that dependent layers can connect to it.
surmeh01bceff2f2018-03-29 16:29:27 +01002395 auto it = m_ParsedTfOperations.find(nodeDef.name());
2396 if (it != m_ParsedTfOperations.end())
2397 {
2398 throw ParseException(boost::str(boost::format("Name %1% used by more than one node") % nodeDef.name()));
2399 }
2400 m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
2401
telsoa01c577f2c2018-08-31 09:22:23 +01002402 // If this node was requested as an output from the network, then adds an ArmNN output layer.
surmeh01bceff2f2018-03-29 16:29:27 +01002403 if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
2404 m_RequestedOutputs.end())
2405 {
2406 auto outId = ParseOutputId(nodeDef.name());
2407 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
2408 IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
2409
2410 TensorInfo tensorInfo = prevSlot.GetTensorInfo();
2411
2412 IConnectableLayer* outputLayer = m_Network->AddOutputLayer(layerId, nodeDef.name().c_str());
2413
2414 prevSlot.Connect(outputLayer->GetInputSlot(0));
2415
2416 TrackOutputBinding(outputLayer, layerId, tensorInfo);
2417 }
2418 }
2419 else
2420 {
telsoa01c577f2c2018-08-31 09:22:23 +01002421 throw ParseException(
2422 boost::str(
2423 boost::format(
2424 "Unsupported operation %1% in tensorflow::GraphDef %2%")
2425 % operation
2426 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002427 }
2428}
2429
2430void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
2431{
telsoa01c577f2c2018-08-31 09:22:23 +01002432 // Adds all nodes to our map.
surmeh01bceff2f2018-03-29 16:29:27 +01002433 m_NodesByName.clear();
2434 m_NetworkInputsBindingInfo.clear();
2435 m_NetworkOutputsBindingInfo.clear();
2436
2437 for (int i = 0; i < graphDef.node_size(); ++i)
2438 {
2439 const tensorflow::NodeDef& node = graphDef.node(i);
2440 m_NodesByName[node.name()] = &node;
2441 }
2442
telsoa01c577f2c2018-08-31 09:22:23 +01002443 // Finds the output nodes the user requested.
surmeh01bceff2f2018-03-29 16:29:27 +01002444 std::vector<const tensorflow::NodeDef*> targetNodes;
2445 for (const std::string& requestedOutputName : m_RequestedOutputs)
2446 {
2447 auto nodeIt = m_NodesByName.find(requestedOutputName);
2448 if (nodeIt == m_NodesByName.end())
2449 {
telsoa01c577f2c2018-08-31 09:22:23 +01002450 throw ParseException(
2451 boost::str(
2452 boost::format(
2453 "Couldn't find requested output node '%1%' in graph %2%")
2454 % requestedOutputName
2455 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002456 }
2457 targetNodes.push_back(nodeIt->second);
2458 }
2459
telsoa01c577f2c2018-08-31 09:22:23 +01002460 // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01002461 std::vector<const tensorflow::NodeDef*> sortedNodes;
2462 if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
2463 targetNodes,
2464 [this](const tensorflow::NodeDef* node)
2465 {
2466 auto outputs = GetTfInputNodes(*node);
2467 std::vector<const tensorflow::NodeDef*> nodesOnly;
2468 for (const auto & o : outputs) {
2469 nodesOnly.push_back(o.m_IndexedValue);
2470 }
2471 return nodesOnly;
2472 },
2473 sortedNodes))
2474 {
telsoa01c577f2c2018-08-31 09:22:23 +01002475 throw ParseException(
2476 boost::str(
2477 boost::format(
2478 "Cycle detected in graph %1%")
2479 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002480 }
2481
telsoa01c577f2c2018-08-31 09:22:23 +01002482 // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01002483 for (const auto& it : sortedNodes)
2484 {
2485 const tensorflow::NodeDef& currentNode = *it;
2486 LoadNodeDef(currentNode, graphDef);
2487 }
2488}
2489
2490INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
2491 const std::map<std::string, TensorShape>& inputShapes,
2492 const std::vector<std::string>& requestedOutputs)
2493{
2494 FILE* fd = fopen(graphFile, "r");
2495
2496 if (fd == nullptr)
2497 {
telsoa01c577f2c2018-08-31 09:22:23 +01002498 throw FileNotFoundException(
2499 boost::str(
2500 boost::format(
2501 "Graph file %1% failed to open %2%")
2502 % graphFile
2503 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002504 }
2505
telsoa01c577f2c2018-08-31 09:22:23 +01002506 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01002507 tensorflow::GraphDef graphDef;
2508 auto input = new google::protobuf::io::FileInputStream(fileno(fd));
2509 bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
2510 delete input;
2511 fclose(fd);
2512
2513 if (!success)
2514 {
telsoa01c577f2c2018-08-31 09:22:23 +01002515 throw ParseException(
2516 boost::str(
2517 boost::format(
2518 "Failed to parse graph file %1%")
2519 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002520 }
2521
2522 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
2523}
2524
2525INetworkPtr TfParser::CreateNetworkFromString(const char* protoText,
2526 const std::map<std::string, TensorShape>& inputShapes,
2527 const std::vector<std::string>& requestedOutputs)
2528{
telsoa01c577f2c2018-08-31 09:22:23 +01002529 // Parses the string into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01002530 tensorflow::GraphDef graphDef;
2531 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
2532
2533 if (!success)
2534 {
telsoa01c577f2c2018-08-31 09:22:23 +01002535 throw ParseException(
2536 boost::str(
2537 boost::format(
2538 "Failed to parse graph file %1%")
2539 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002540 }
2541
2542 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
2543}
2544
2545INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
2546 const std::map<std::string, TensorShape>& inputShapes,
2547 const std::vector<std::string>& requestedOutputs)
2548{
2549 FILE* fd = fopen(graphFile, "rb");
2550
2551 if (fd == nullptr)
2552 {
telsoa01c577f2c2018-08-31 09:22:23 +01002553 throw FileNotFoundException(
2554 boost::str(
2555 boost::format(
2556 "Graph file %1% failed to open %2%")
2557 % graphFile
2558 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002559 }
2560
telsoa01c577f2c2018-08-31 09:22:23 +01002561 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01002562 tensorflow::GraphDef graphDef;
2563
2564 google::protobuf::io::FileInputStream inStream(fileno(fd));
2565 google::protobuf::io::CodedInputStream codedStream(&inStream);
2566 codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
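    // Raises protobuf's default message size limit (64 MB) so that large frozen graphs can be parsed.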
2567 bool success = graphDef.ParseFromCodedStream(&codedStream);
2568 fclose(fd);
2569
2570 if (!success)
2571 {
telsoa01c577f2c2018-08-31 09:22:23 +01002572 throw ParseException(
2573 boost::str(
2574 boost::format(
2575 "Failed to parse protobuf file %1% %2%")
2576 % graphFile
2577 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002578 }
2579
2580 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
2581}
2582
2583INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
2584 const std::map<std::string, TensorShape>& inputShapes,
2585 const std::vector<std::string>& requestedOutputs)
2586{
2587 m_Network = INetwork::Create();
2588
2589 m_InputShapes = inputShapes;
2590 if (requestedOutputs.size() == 0)
2591 {
telsoa01c577f2c2018-08-31 09:22:23 +01002592 throw ParseException(
2593 boost::str(
2594 boost::format(
2595 "requestedOutputs must have at least one entry %1%")
2596 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002597 }
2598 m_RequestedOutputs = requestedOutputs;
2599
2600 try
2601 {
2602 LoadGraphDef(graphDef);
2603 }
2604 catch (const ParseException& e)
2605 {
2606 Cleanup();
2607 throw; // Rethrows without copying, preserving the exception's dynamic type.
2608 }
2609
2610 Cleanup();
2611
2612 return std::move(m_Network);
2613}
2614
2615void TfParser::Cleanup()
2616{
telsoa01c577f2c2018-08-31 09:22:23 +01002617 // Cleanup, in case we reuse this parser.
surmeh01bceff2f2018-03-29 16:29:27 +01002618 m_InputShapes.clear();
2619 m_RequestedOutputs.clear();
2620 m_NodesByName.clear();
2621 m_ParsedTfOperations.clear();
2622}
2623
2624BindingPointInfo TfParser::GetNetworkInputBindingInfo(const std::string& name) const
2625{
2626 return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
2627}
2628
2629BindingPointInfo TfParser::GetNetworkOutputBindingInfo(const std::string& name) const
2630{
2631 return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
2632}
2633
2634std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(const std::string& layerName,
2635 const char* bindingPointDesc,
2636 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
2637{
2638 auto it = nameToBindingInfo.find(layerName);
2639 if (it == nameToBindingInfo.end())
2640 {
telsoa01c577f2c2018-08-31 09:22:23 +01002641 throw InvalidArgumentException(
2642 boost::str(
2643 boost::format(
2644 "Unknown %1% '%2%' %3%")
2645 % bindingPointDesc
2646 % layerName
2647 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002648 }
2649 return it->second;
2650}
2651
2652void TfParser::TrackInputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
2653{
2654 return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
2655}
2656
2657void TfParser::TrackOutputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
2658{
2659 return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
2660}
2661
2662void TfParser::TrackBindingPoint(IConnectableLayer* layer,
2663 LayerBindingId id,
2664 const TensorInfo& tensorInfo,
2665 const char* bindingPointDesc,
2666 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
2667{
2668 const std::string layerName = layer->GetName();
2669 auto it = nameToBindingInfo.find(layerName);
2670 if (it == nameToBindingInfo.end())
2671 {
2672 nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
2673 }
2674 else
2675 {
telsoa01c577f2c2018-08-31 09:22:23 +01002676 throw ParseException(
2677 boost::str(
2678 boost::format(
2679 "Id %1% used by more than one %2% layer %3%")
2680 % id
2681 % bindingPointDesc
2682 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002683 }
2684}
2685
2686} // namespace armnnTfParser