blob: 90bd992a2bb2b3ed968d7d601dab8f950b7adeb2 [file] [log] [blame]
surmeh01bceff2f2018-03-29 16:29:27 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
surmeh01bceff2f2018-03-29 16:29:27 +01004//
5#include "TfParser.hpp"
6
7#include <armnn/INetwork.hpp>
8#include <armnn/Utils.hpp>
9#include <armnn/TypesUtils.hpp>
10#include <armnn/Exceptions.hpp>
11#include <armnn/Descriptors.hpp>
12
13#include <GraphTopologicalSort.hpp>
Sadik Armagan479045b2018-10-01 11:51:37 +010014#include <ParserHelper.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010015#include <Permute.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010016#include <VerificationHelpers.hpp>
Matteo Martincigh46315822018-11-28 16:22:36 +000017#include <DataLayoutIndexed.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010018
19#include <google/protobuf/io/zero_copy_stream_impl.h>
20#include <google/protobuf/text_format.h>
21
22#include "tensorflow/core/framework/graph.pb.h"
23#include "tensorflow/core/framework/node_def.pb.h"
24#include "tensorflow/core/framework/types.pb.h"
25#include "tensorflow/core/framework/tensor.pb.h"
26#include "tensorflow/core/framework/tensor_shape.pb.h"
27
28#include <boost/assert.hpp>
29#include <boost/format.hpp>
30#include <boost/core/ignore_unused.hpp>
31#include <boost/log/trivial.hpp>
32#include <boost/numeric/conversion/cast.hpp>
33#include <boost/polymorphic_cast.hpp>
34
35#include <memory>
36#include <sstream>
37#include <numeric>
38#include <functional>
39
Matteo Martincigh46315822018-11-28 16:22:36 +000040using namespace armnnUtils;
surmeh01bceff2f2018-03-29 16:29:27 +010041using namespace armnn;
42
43namespace armnnTfParser
44{
45namespace
46{
47
// Permutation vectors for converting tensors between the NHWC layout used by
// TensorFlow and the NCHW layout ArmNN operates in, and back again.
const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
50
surmeh01bceff2f2018-03-29 16:29:27 +010051
/// Looks up the attribute 'attribName' on the given node and, if it exists and has the
/// expected AttrValue case, invokes 'callable' with it.
/// Throws ParseException if the attribute is missing or has the wrong value case.
template <typename Callable>
void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
    const std::string& attribName,
    tensorflow::AttrValue::ValueCase expectedValueCase,
    Callable callable)
{
    auto iter = nodeDef.attr().find(attribName);
    if (iter != nodeDef.attr().end())
    {
        const auto& attrValue = iter->second;
        if (attrValue.value_case() == expectedValueCase)
        {
            callable(attrValue);
        }
        else
        {
            // Attribute present but of the wrong protobuf oneof case.
            throw ParseException(
                boost::str(
                    boost::format(
                        "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
                        "but found %4% instead %5%")
                    % attribName
                    % nodeDef.name()
                    % static_cast<int>(expectedValueCase)
                    % static_cast<int>(attrValue.value_case())
                    % CHECK_LOCATION().AsString()));
        }
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Could not find required attribute %1% in node %2% %3%")
                % attribName
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }
}
91
/// Looks up the attribute 'attribName' on the given node and, if it exists and has the
/// expected AttrValue case, invokes 'callable' with it.
/// Unlike ReadMandatoryNodeAttributeImpl, a missing attribute is silently ignored;
/// an attribute that is present but has the wrong value case still throws.
template <typename Callable>
void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
    const std::string& attribName,
    tensorflow::AttrValue::ValueCase expectedValueCase,
    Callable callable)
{
    auto iter = nodeDef.attr().find(attribName);
    if (iter != nodeDef.attr().end())
    {
        const auto& attrValue = iter->second;
        if (attrValue.value_case() == expectedValueCase)
        {
            callable(attrValue);
        }
        else
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
                        "but found %4% instead %5%")
                    % attribName
                    % nodeDef.name()
                    % static_cast<int>(expectedValueCase)
                    % static_cast<int>(attrValue.value_case())
                    % CHECK_LOCATION().AsString()));
        }
    }
}
121
122float ReadMandatoryNodeFloatAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
123{
124 float attribValue = 0.0f;
125 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
126 [&attribValue](const tensorflow::AttrValue& attrValue)
127 {
128 attribValue = attrValue.f();
129 });
130 return attribValue;
131}
132
Conor Kennedyc2130a02018-12-05 11:05:54 +0000133int32_t ReadMandatoryNodeInt32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
134{
135 int32_t attribValue = 0u;
136 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
137 [&attribValue](const tensorflow::AttrValue& attrValue)
138 {
139 attribValue = static_cast<int32_t>(attrValue.i());
140 });
141 return attribValue;
142}
143
surmeh01bceff2f2018-03-29 16:29:27 +0100144uint32_t ReadMandatoryNodeUint32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
145{
146 uint32_t attribValue = 0u;
147 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
148 [&attribValue](const tensorflow::AttrValue& attrValue)
149 {
150 attribValue = static_cast<uint32_t>(attrValue.i());
151 });
152 return attribValue;
153}
154
155std::string ReadMandatoryNodeStringAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
156{
157 std::string attribValue = "";
158 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
159 [&attribValue](const tensorflow::AttrValue& attrValue)
160 {
161 attribValue = attrValue.s();
162 });
163 return attribValue;
164}
165
166std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
167 const std::string& name)
168{
169 std::vector<uint32_t> attriList;
170 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
171 [&attriList](const tensorflow::AttrValue& attrValue)
172 {
173 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
174 {
175 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
176 }
177 });
178
179 return attriList;
180}
181
182std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
183 const std::string& name)
184{
185 std::vector<uint32_t> attriList;
186 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
187 [&attriList](const tensorflow::AttrValue& attrValue)
188 {
189 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
190 {
191 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
192 }
193 });
194
195 return attriList;
196}
197
198bool ReadOptionalNodeBoolAttribute(const tensorflow::NodeDef& nodeDef,
199 const std::string& name,
200 bool defaultValue = false)
201{
202 bool attribValue = defaultValue;
203 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
204 [&attribValue](const tensorflow::AttrValue& attrValue)
205 {
206 attribValue = attrValue.b();
207 });
208 return attribValue;
209}
210
211tensorflow::DataType ReadMandatoryNodeTypeAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
212{
213 tensorflow::DataType attribValue = tensorflow::DT_INVALID;
214 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
215 [&attribValue](const tensorflow::AttrValue& attrValue)
216 {
217 attribValue = attrValue.type();
218 });
219 return attribValue;
220}
221
/// Builds the TensorInfo for a Reshape of 'input' to 'targetDims', resolving at
/// most one -1 ("stretch") component in the TensorFlow/NumPy style.
/// Throws ParseException if more than one component is -1.
TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& targetDims)
{
    std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
    const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);

    if (stretchDim != targetDims.end())
    {
        if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "At most one component of shape can be -1 %1%")
                    % CHECK_LOCATION().AsString()));
        }

        // Seeding the product with -1 cancels the single -1 in targetDims, so
        // targetNumElements ends up as the (positive) product of the known components.
        auto targetNumElements =
            boost::numeric_cast<unsigned int>(
                std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
        auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
        // The stretch dimension absorbs whatever is left of the element count.
        outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
    }

    TensorInfo reshapeInfo = input;
    reshapeInfo.SetShape(TensorShape{ static_cast<unsigned int>(outDims.size()), outDims.data() });

    return reshapeInfo;
}
250
// We need the input0Slot to guide the reshape for input1Slot.
/// Inserts a Reshape layer that broadcasts the tensor on input1Slot up to the rank of
/// the tensor on input0Slot, placing its size on the channel axis (last axis for NHWC,
/// axis 1 for NCHW) and 1 everywhere else. Returns the Reshape layer's output slot.
/// NOTE(review): only input1's first dimension is used, so this assumes input1 is 1D.
IOutputSlot* AddBroadcastReshapeLayer(IOutputSlot* input0Slot, IOutputSlot* input1Slot, bool isNHWC,
    INetwork& m_Network, const tensorflow::NodeDef& nodeDef)
{
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();
    const TensorInfo inputTensorInfo = input0Slot->GetTensorInfo();
    // Channel axis: last dimension for NHWC, third-from-last (axis 1 of NCHW) otherwise.
    const unsigned int matchDim = inputTensorInfo.GetNumDimensions() - (isNHWC ? 1 : 3);
    std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
    std::fill_n(reshapedDimensions.begin(), inputTensorInfo.GetNumDimensions(), 1);
    reshapedDimensions[matchDim] = input1Info.GetShape()[0];

    armnn::TensorInfo reshapedInfo = input1Info;
    reshapedInfo.SetShape(TensorShape{ inputTensorInfo.GetNumDimensions(), reshapedDimensions.data() });

    const std::string reshapeLayerName = "reshape_for-" + nodeDef.name();
    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
    IConnectableLayer* const reshapeLayer = m_Network.AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());

    input1Slot->Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    input1Slot = &reshapeLayer->GetOutputSlot(0);

    return input1Slot;
}
277
278OutputId ParseOutputId(const std::string & name)
279{
280 unsigned int outputNum = 0;
281 size_t colonPos = name.find_last_of(":");
282 if (colonPos != std::string::npos)
283 {
284 int n = std::stoi(name.substr(colonPos+1));
285 if (n<0 || n>100)
286 {
telsoa01c577f2c2018-08-31 09:22:23 +0100287 throw ParseException(
288 boost::str(
289 boost::format(
290 "Output tensor id is out of range for %1% %2%")
291 % name
292 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100293 }
294 outputNum = static_cast<unsigned int>(n);
295 }
296 return OutputId(name.substr(0,colonPos),outputNum);
297}
298
// Validates that FORMAT is either "NHWC" or "NCHW"; anything else throws a
// ParseException naming the offending node and its type.
#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \
    if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Unsupported data format %1% passed for %2% node %3%. " \
                    "Only NHWC and NCHW supported %4%") \
                % FORMAT \
                % NODE_TYPE \
                % NODE_DEF.name() \
                % CHECK_LOCATION().AsString())); \
    }

// Validates that PADDING is either "SAME" or "VALID"; anything else throws a
// ParseException naming the offending node.
#define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
    if(PADDING != "SAME" && PADDING != "VALID" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Only 'SAME' and 'VALID' padding supported. Got %1% for %2% %3%") \
                % PADDING \
                % NODE_DEF.name() \
                % CHECK_LOCATION().AsString())); \
    } \
324
surmeh01bceff2f2018-03-29 16:29:27 +0100325} // namespace
326
/// Dispatch table mapping TensorFlow op names to the TfParser member function
/// that converts a node of that op type into ArmNN layers.
const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
    { "Const", &TfParser::ParseConst },
    { "Add", &TfParser::ParseAdd },
    { "AddN", &TfParser::ParseAddN },
    { "BiasAdd", &TfParser::ParseBiasAdd },
    { "Identity", &TfParser::ParseIdentity },
    { "Conv2D", &TfParser::ParseConv2D },
    { "DepthwiseConv2dNative", &TfParser::ParseDepthwiseConv2D },
    { "ExpandDims", &TfParser::ParseExpandDims },
    { "FusedBatchNorm", &TfParser::ParseFusedBatchNorm },
    { "Greater", &TfParser::ParseGreater},
    { "ConcatV2", &TfParser::ParseConcat },
    { "LRN", &TfParser::ParseLrn },
    { "MatMul", &TfParser::ParseMatMul },
    { "Mul", &TfParser::ParseMul },
    { "Placeholder", &TfParser::ParsePlaceholder },
    { "RealDiv", &TfParser::ParseRealDiv },
    { "Relu", &TfParser::ParseRelu },
    { "Relu6", &TfParser::ParseRelu6 },
    { "Reshape", &TfParser::ParseReshape },
    { "ResizeBilinear", &TfParser::ParseResizeBilinear },
    { "Rsqrt", &TfParser::ParseRsqrt },
    { "Shape", &TfParser::ParseShape },
    { "Squeeze", &TfParser::ParseSqueeze },
    { "Sigmoid", &TfParser::ParseSigmoid },
    { "Softmax", &TfParser::ParseSoftmax },
    { "Softplus", &TfParser::ParseSoftplus },
    { "Split", &TfParser::ParseSplit },
    { "Tanh", &TfParser::ParseTanh },
    { "MaxPool", &TfParser::ParseMaxPool },
    { "AvgPool", &TfParser::ParseAvgPool },
    { "Maximum", &TfParser::ParseMaximum },
    { "Minimum", &TfParser::ParseMinimum },
    { "Equal", &TfParser::ParseEqual },
    { "Pad", &TfParser::ParsePad },
    { "Sub", &TfParser::ParseSub }
};
364
/// Op types that only ever appear as control inputs; nodes of these types are
/// tolerated in the graph but produce no ArmNN layers.
const std::list<std::string> TfParser::m_ControlInputs = {
    "Assert"
};
368
/// Creates a heap-allocated TfParser; the caller owns it and must release it
/// via ITfParser::Destroy.
ITfParser* ITfParser::CreateRaw()
{
    return new TfParser();
}
373
/// Creates a TfParser wrapped in a smart pointer whose deleter is ITfParser::Destroy.
ITfParserPtr ITfParser::Create()
{
    return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
}
378
/// Deletes a parser obtained from CreateRaw()/Create().
void ITfParser::Destroy(ITfParser* parser)
{
    delete parser;
}
383
/// Computes the front/back padding required by TensorFlow's SAME padding scheme
/// (output size = ceil(input / stride)); with samePadding == false both paddings
/// are zero (VALID). Odd total padding puts the extra element at the back.
inline void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
                                 uint32_t filterSize, bool samePadding,
                                 uint32_t* paddingFront, uint32_t* paddingBack)
{
    *paddingFront = 0;
    *paddingBack = 0;
    if (!samePadding)
    {
        return;
    }

    const uint32_t outputSize = (inputSize + stride - 1) / stride; // ceil(input / stride)
    const uint32_t neededInput = (outputSize - 1) * stride + filterSize;
    if (neededInput > inputSize)
    {
        const uint32_t totalPadding = neededInput - inputSize;
        *paddingFront = totalPadding / 2;
        *paddingBack = totalPadding - *paddingFront;
    }
}
399
/// Reference-out-parameter wrapper around CalculateSamePadding();
/// samePadding == true selects TensorFlow's SAME scheme, false yields zero padding (VALID).
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
    bool samePadding)
{
    CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
}
405
406/// An Abstract base class which represents a single tensorflow operation (node)
407/// that has been (potentially partially) converted to Armnn.
408/// It may not yet have been fully converted into actual Armnn layers.
409class ParsedTfOperation
410{
411public:
412 ParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
413 : m_Parser(parser)
414 , m_Node(node)
415 {
416 }
417
418 virtual ~ParsedTfOperation() {};
419
420 const tensorflow::NodeDef& GetNode() const { return m_Node; }
421
422 /// Gets the ArmNN IOutputSlot corresponding to the given output index of the Tensorflow operation.
423 /// This may result in the creation of Armnn layers if this was deferred (e.g. see ParsedConstTfOperation).
424 virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) = 0;
425
426 /// If this operation is an Identity then this will follow return the 'parent' operation (recursively).
427 virtual ParsedTfOperation* ResolveIdentityOperations()
428 {
429 return this;
430 }
431
432protected:
433 TfParser* m_Parser;
434 const tensorflow::NodeDef& m_Node;
435};
436
/// An ParsedTfOperation where the Armnn equivalent is a single layer,
/// with output slots that correspond directly to the Tf node outputs.
class SingleLayerParsedTfOperation : public ParsedTfOperation
{
public:
    SingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node, IConnectableLayer* layer)
    : ParsedTfOperation(parser, node)
    , m_Layer(layer)
    {
    }

    /// Returns the layer's output slot matching the requested TF output index.
    /// Throws ParseException if the index exceeds the layer's output slot count.
    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        BOOST_ASSERT(m_Layer);
        // Assumes one-to-one mapping between Tf and armnn output slots.
        unsigned int armnnOutputSlotIdx = tfOutputIndex;
        if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "The requested output slot #%1% "
                        "for %2% does not exist %3%")
                    % armnnOutputSlotIdx
                    % m_Layer->GetName()
                    % CHECK_LOCATION().AsString()));
        }
        return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
    }

protected:
    ///< The single ArmNN layer backing this operation (may be null for deferred subclasses).
    IConnectableLayer* m_Layer;
};
470
/// A SingleLayerParsedTfOperation for deferred layer creation.
/// The ArmNN layer is only built (via CreateLayerDeferred) the first time one of
/// its output slots is actually requested.
class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
{
public:
    DeferredSingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
    : SingleLayerParsedTfOperation(parser, node, nullptr)
    {
    }

    /// Lazily creates the layer on first access, then delegates to the base class.
    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        if (!m_Layer)
        {
            CreateLayerDeferred();
        }
        return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
    }

private:
    /// Implemented by subclasses to build m_Layer on demand.
    virtual void CreateLayerDeferred() = 0;
};
492
493
/// Constructs a parser with a null network pointer (and null deleter);
/// m_Network is populated later when a graph is parsed.
TfParser::TfParser()
    : m_Network(nullptr, nullptr)
{
}
498
499
/// Follows chains of Identity nodes back to the first non-Identity source node;
/// any other op type is returned unchanged.
/// Throws ParseException if an Identity node does not have exactly one input or
/// its input node is not present in m_NodesByName.
/// NOTE(review): the recursion assumes the graph contains no Identity cycles.
const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
{
    if (nodeDef->op() != "Identity")
    {
        return nodeDef;
    }

    if (nodeDef->input_size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Identity node should have a single input! %1% has %2% inputs %3%")
                % nodeDef->name()
                % nodeDef->input_size()
                % CHECK_LOCATION().AsString()));
    }

    auto it = m_NodesByName.find(nodeDef->input(0));
    if (it != m_NodesByName.end())
    {
        const tensorflow::NodeDef* inputNode = it->second;
        return ResolveIdentityNode(inputNode);
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Cannot find what the Identity node %1% is linked to! %2%")
                % nodeDef->name()
                % CHECK_LOCATION().AsString()));
    }
}
534
/// Returns the nodes connected as inputs of the given node, paired with the
/// output index used on each of them. Control inputs (names starting with '^')
/// are skipped, and Const nodes report no inputs at all.
/// Throws ParseException if a named input is not present in m_NodesByName.
std::vector<OutputOfConstNodeDef>
TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
{
    std::vector<OutputOfConstNodeDef> ret;

    if (nodeDef.op() == "Const")
    {
        // For some reason const node can have "Control Inputs". We ignore them for now.
        return ret;
    }

    ret.reserve(boost::numeric_cast<size_t>(nodeDef.input_size()));
    for (int j = 0; j < nodeDef.input_size(); ++j)
    {
        OutputId outputId = ParseOutputId(nodeDef.input(j));

        if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
        {
            // We currently allow Control Input from TensorFlow graph but we ignore them from ArmNN graph.
            continue;
        }

        auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
        if (inputIt == m_NodesByName.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Can't find node '%1%', which is listed as an input of '%2%' %3%")
                    % nodeDef.input(j)
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }
        ret.push_back(OutputOfConstNodeDef(inputIt->second,outputId.m_Index));
    }

    return ret;
}
573
/// Returns the already-parsed operations feeding the given node, paired with the
/// output index used on each. Identity operations are transparently resolved to
/// their source. Throws ParseException if the input count differs from
/// expectedNumInputs or an input has not yet been parsed.
std::vector<OutputOfParsedTfOperation>
TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
                                            std::size_t expectedNumInputs)
{
    // Fetches the tensorflow nodes connected as inputs and validate the size.
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
    const std::size_t numInputs = nodes.size();
    if (numInputs != expectedNumInputs)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Unexpected number of inputs for node %1%. Expected %2%, found %3% %4%")
                % nodeDef.name()
                % expectedNumInputs
                % numInputs
                % CHECK_LOCATION().AsString()));
    }
    // Fetches the corresponding ParsedTfOperation operations
    std::vector<OutputOfParsedTfOperation> result;
    for (auto&& node : nodes)
    {
        auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
        if (it == m_ParsedTfOperations.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Node with name '%1%' has not been parsed %2%")
                    % node.m_IndexedValue->name()
                    % CHECK_LOCATION().AsString()));
        }
        ParsedTfOperation* parsedOp = it->second.get();
        // Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
        parsedOp = parsedOp->ResolveIdentityOperations();
        result.push_back(OutputOfParsedTfOperation(parsedOp,node.m_Index));
    }
    return result;
}
613
/// Creates an ArmNN Addition layer fed by the two given output slots.
/// If the inputs have different ranks, a broadcast Reshape is inserted first
/// (only 1D-vs-4D pairs are supported; anything else throws ParseException).
/// The output tensor shape is the element-wise max of the two (post-reshape)
/// input shapes, so broadcasting produces the correct output dimensions.
IConnectableLayer* TfParser::CreateAdditionLayer(
            const tensorflow::NodeDef& nodeDef,
            IOutputSlot* input0Slot,
            IOutputSlot* input1Slot,
            const std::string& layerName)
{
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    const unsigned int input0Dim = input0Info.GetNumDimensions();
    const unsigned int input1Dim = input1Info.GetNumDimensions();
    if (input0Dim != input1Dim)
    {
        // broadcasting where input0 and input1 have different number of dimensions
        // is only supported for 1D and 4D tensors pair
        if (input0Dim == 1 && input1Dim == 4)
        {
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
        }
        else if (input0Dim == 4 && input1Dim == 1)
        {
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
        }
        else
        {
            throw ParseException(
                    boost::str(
                            boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
                            % layerName
                            % nodeDef.name()
                            % CHECK_LOCATION().AsString()));
        }
    }
    IConnectableLayer* const layer = m_Network->AddAdditionLayer(layerName.c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // Ensure the output tensor has the correct dimensions even if a broadcast has been done
    TensorInfo outputInfo = input0Slot->GetTensorInfo();
    std::vector<unsigned int> outputShape;

    const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
    const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();

    for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
    {
        outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
    }

    outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return layer;
}
669
670IConnectableLayer* TfParser::CreateAdditionLayer(
671 const tensorflow::NodeDef& nodeDef,
672 IConnectableLayer* layerOne,
673 IConnectableLayer* layerTwo,
674 unsigned int numberOfAddition,
675 unsigned long numberOfLayersToConnect,
676 bool isOdd)
677{
678 IOutputSlot* input0Slot = &layerOne->GetOutputSlot(0);
679 IOutputSlot* input1Slot = &layerTwo->GetOutputSlot(0);
680 std::string layerName(nodeDef.name());
681 if (isOdd || numberOfLayersToConnect != 2)
682 {
683 // we are not connecting the final layer
684 layerName.append("_addN_").append(std::to_string(numberOfAddition));
685 }
686 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
687}
688
689IConnectableLayer* TfParser::CreateAdditionLayer(
690 const tensorflow::NodeDef& nodeDef,
691 const OutputOfParsedTfOperation& opOne,
692 const OutputOfParsedTfOperation& opTwo,
693 unsigned int numberOfAddition)
694{
695 IOutputSlot* input0Slot = &opOne.m_IndexedValue->ResolveArmnnOutputSlot(opOne.m_Index);
696 IOutputSlot* input1Slot = &opTwo.m_IndexedValue->ResolveArmnnOutputSlot(opTwo.m_Index);
697 std::string layerName(nodeDef.name());
698 layerName.append("_addN_").append(std::to_string(numberOfAddition));
699 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
700}
701
702IConnectableLayer* TfParser::CreateAdditionLayer(
703 const tensorflow::NodeDef& nodeDef,
704 const OutputOfParsedTfOperation& op,
705 IConnectableLayer* layer)
706{
707 IOutputSlot* input0Slot = &op.m_IndexedValue->ResolveArmnnOutputSlot(op.m_Index);
708 IOutputSlot* input1Slot = &layer->GetOutputSlot(0);
709 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, nodeDef.name());
710}
711
712ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
713{
714 uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
715 if (numberOfInputs < 2)
716 {
717 // should never happen
718 throw ParseException(
719 boost::str(
720 boost::format(
721 "AddN Node with name '%1%' has less than two (%2) inputs %3%")
722 % nodeDef.name()
723 % std::to_string(numberOfInputs)
724 % CHECK_LOCATION().AsString()));
725 }
726 else if (numberOfInputs == 2)
727 {
728 //this is the same as a simple Add operation
729 return AddAdditionLayer(nodeDef, false);
730 }
731 else
732 {
733 // build a binary tree of Add layers and return the final Add as the return from the function
734 // if we have an odd number of inputs then the final Add will consist of a layer connecting to an
735 // OutputOfParsedTfOperation, otherwise it will be two layers being added together
736 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numberOfInputs);
737 unsigned int numberOfAdditions = 0;
738 std::vector<IConnectableLayer*> layers;
739 // NOTE: at this point we will have a minimum of three inputs
740 for (unsigned int i = 0; i < numberOfInputs; ++i)
741 {
742 // every time i is odd we have two inputs to process.
743 bool onSecondItem = i % 2;
744 if (onSecondItem)
745 {
746 ++numberOfAdditions;
747 IConnectableLayer* newLayer = CreateAdditionLayer(
748 nodeDef, inputs[ i - 1], inputs[i], numberOfAdditions);
749 layers.push_back(newLayer);
750 }
751 }
752
753 std::vector<IConnectableLayer*> layersToConnect(layers);
754 unsigned long numberOfLayersToConnect = layersToConnect.size();
755 bool isOdd = numberOfInputs % 2;
756
757 while (numberOfLayersToConnect > 1)
758 {
759 layers.clear();
760 for (unsigned long i = 0; i < numberOfLayersToConnect; ++i) {
761 bool onSecondItem = i % 2;
762 if (onSecondItem) {
763 ++numberOfAdditions;
764 IConnectableLayer* newLayer = CreateAdditionLayer(
765 nodeDef,
766 layersToConnect[i - 1],
767 layersToConnect[i],
768 numberOfAdditions,
769 numberOfLayersToConnect,
770 isOdd);
771 layers.push_back(newLayer);
772 }
773 }
774 //OK... need to go again... maybe
775 layersToConnect = layers;
776 numberOfLayersToConnect = layersToConnect.size();
777 }
778 IConnectableLayer* finalLayer = layersToConnect[0];
779 // if we had an odd number of inputs we need to connect the final layer to the
780 // last OutputOfParsedTfOperation in order to create the last Add layer we will
781 // be handing back.
782 if (isOdd)
783 {
784 // connect the final layer to the last op
785 finalLayer = CreateAdditionLayer(nodeDef, inputs[numberOfInputs - 1], finalLayer);
786 }
787 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, finalLayer);
788 }
789}
790
/// Parses an Add node. A MatMul input paired with a float Const input is fused
/// into a single FullyConnected layer (the Const becomes the bias); otherwise a
/// plain Addition layer is created.
ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
    // together as FullyConnected.
    if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
        HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        IConnectableLayer* layer =
            AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
                                   &nodeDef,nodeDef.name().c_str());
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
    }
    else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
                                         inputs[1].m_IndexedValue->GetNode().op() == "MatMul")
    {
        IConnectableLayer* layer =
            AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
                                   &nodeDef,nodeDef.name().c_str());
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
    }
    else
    {
        // Otherwise it's just a regular addition.
        return AddAdditionLayer(nodeDef);
    }
}
819
/// Parses a BiasAdd node as an Addition layer; the 'true' flag marks it as a
/// bias addition for AddAdditionLayer.
ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    return AddAdditionLayer(nodeDef, true);
}
824
/// An ParsedTfOperation which forwards to another (used for Identity nodes).
/// Holds no layer of its own; all slot resolution is delegated to the
/// representative operation it wraps.
class ParsedIdentityTfOperation : public ParsedTfOperation
{
public:
    ParsedIdentityTfOperation(TfParser* parser, const tensorflow::NodeDef& node, ParsedTfOperation* representative)
        : ParsedTfOperation(parser, node)
        , m_Representative(representative)
    {
    }

    /// Forwards slot resolution to the wrapped operation.
    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        BOOST_ASSERT(m_Representative);
        return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
    }

    /// Recursively resolves through chains of Identity operations.
    virtual ParsedTfOperation* ResolveIdentityOperations() override
    {
        return m_Representative->ResolveIdentityOperations();
    }

private:
    ///< The operation this Identity forwards to (non-owning).
    ParsedTfOperation* m_Representative;
};
849
/// Parses an Identity node by wrapping its single input in a forwarding
/// ParsedIdentityTfOperation; no ArmNN layer is created.
ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
    // Any requests for the output slots of this node should be forwarded to the node connected as input.
    return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
}
856
/// An ParsedTfOperation for a Const node.
/// Creation of the armnn ConstLayer is deferred until it is actually needed, because Const nodes are mostly used
/// for weight inputs to MatMul/Conv2D nodes and in these cases armnn doesn't need a ConstLayer.
template <typename T>
class ParsedConstTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    /// Copies tensorInfo.GetNumElements() elements from tensorData into internal storage.
    ParsedConstTfOperation(TfParser* parser, const tensorflow::NodeDef& node,
        const T* tensorData, const TensorInfo& tensorInfo)
        : DeferredSingleLayerParsedTfOperation(parser, node),
        m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
        m_TensorInfo(tensorInfo)
    {
        BOOST_ASSERT(tensorInfo.GetDataType() == GetDataType<T>());
    }

    /// Builds the ArmNN ConstantLayer on first use (see DeferredSingleLayerParsedTfOperation).
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
        m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
    }

    /// Copies the tensor data into outputTensorData (resized as needed) and returns a
    /// ConstTensor that refers to that caller-owned storage.
    ConstTensor GetConstTensor(std::vector<T>& outputTensorData) const
    {
        outputTensorData.resize(m_TensorInfo.GetNumElements());

        memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.GetNumBytes());

        // Updates the result to point to the user provided storage.
        ConstTensor constTensor(m_TensorInfo, outputTensorData);
        return constTensor;
    }

    /// Read-only access to the raw tensor data held by this operation.
    const T* GetStorage() const
    {
        return m_Storage.data();
    }

    /// Shape/type information describing the data in m_Storage.
    const TensorInfo& GetTensorInfo() const
    {
        return m_TensorInfo;
    }

private:
    ///< Manages the lifetime of the tensor data.
    std::vector<T> m_Storage;
    ///< Describes the layout of the tensor and points to the data in m_Storage.
    TensorInfo m_TensorInfo;
};
907
telsoa01c577f2c2018-08-31 09:22:23 +0100908DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
909 const tensorflow::NodeDef& nodeDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100910{
911 switch (tfDataType)
912 {
913 case tensorflow::DT_FLOAT:
914 return DataType::Float32;
915 break;
916 case tensorflow::DT_INT32:
917 return DataType::Signed32;
918 break;
919 default:
telsoa01c577f2c2018-08-31 09:22:23 +0100920 throw ParseException(
921 boost::str(
922 boost::format(
923 "Unknown DataType %1% for node %2% %3%")
924 % tensorflow::DataType_Name(tfDataType)
925 % nodeDef.name()
926 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100927 }
928}
929
930struct ParseTfTensorValueList
931{
932 template<typename DataType>
933 static void Parse(
934 const tensorflow::TensorProto& tfTensor,
935 unsigned int dstElements,
936 std::vector<int8_t>& outputData);
937
938 template <typename DataType>
939 static void ReadData(const void* srcData, unsigned int numSrcElements,
940 std::vector<int8_t>& dstData, unsigned int numDstElements)
941 {
telsoa01c577f2c2018-08-31 09:22:23 +0100942 // If there are no entries in the list, perform no action.
surmeh01bceff2f2018-03-29 16:29:27 +0100943 if (numSrcElements == 0)
944 {
945 return;
946 }
947
telsoa01c577f2c2018-08-31 09:22:23 +0100948 // If no size was provided, use the length of the value list.
surmeh01bceff2f2018-03-29 16:29:27 +0100949 if (numDstElements == 0)
950 {
951 numDstElements = numSrcElements;
952 }
953
telsoa01c577f2c2018-08-31 09:22:23 +0100954 // Allocates memory.
surmeh01bceff2f2018-03-29 16:29:27 +0100955 dstData.resize(std::max(numSrcElements, numDstElements) * sizeof(DataType));
956
957 const DataType* srcTensor = reinterpret_cast<const DataType*>(srcData);
958 DataType* dstTensor = reinterpret_cast<DataType*>(dstData.data());
959
telsoa01c577f2c2018-08-31 09:22:23 +0100960 // Copies the value list entries into the destination.
surmeh01bceff2f2018-03-29 16:29:27 +0100961 std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);
962
963 if (numDstElements > numSrcElements)
964 {
telsoa01c577f2c2018-08-31 09:22:23 +0100965 // Uses the last element in the list to fill the remaining entries.
surmeh01bceff2f2018-03-29 16:29:27 +0100966 std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
967 }
968 }
969
970};
971
972template <>
973void ParseTfTensorValueList::Parse<float>(const tensorflow::TensorProto& tfTensor,
974 unsigned int dstElements, std::vector<int8_t>& outputData)
975{
976 ReadData<float>(tfTensor.float_val().data(), static_cast<unsigned int>(tfTensor.float_val_size()),
977 outputData, dstElements);
978}
979
980template <>
981void ParseTfTensorValueList::Parse<int32_t>(const tensorflow::TensorProto& tfTensor,
982 unsigned int dstElements, std::vector<int8_t>& outputData)
983{
984 ReadData<int32_t>(tfTensor.int_val().data(), static_cast<unsigned int>(tfTensor.int_val_size()),
985 outputData, dstElements);
986}
987
988template <template<typename> class OperatorType, typename T = int8_t>
989struct MakeTfOperation
990{
991 template<typename DataType, class... Args>
992 inline static std::unique_ptr<OperatorType<DataType>> Parse(TfParser* parser, const tensorflow::NodeDef& node,
993 Args&&... args)
994 {
995 return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
996 }
997};
998
999template <>
1000struct MakeTfOperation<ParsedConstTfOperation>
1001{
1002 template<typename DataType, class... Args>
1003 inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(TfParser* parser,
1004 const tensorflow::NodeDef& node, const std::vector<int8_t>& tensorData, const TensorInfo& tensorInfo)
1005 {
1006 return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
1007 reinterpret_cast<const DataType*>(tensorData.data()), tensorInfo);
1008 }
1009};
1010
1011template <class FuncType>
1012struct InvokeParseFunction
1013{
1014 template<class ResType, class... Args>
1015 inline static ResType Result(DataType dataType, Args&&... args)
1016 {
1017 if (dataType == DataType::Float32)
1018 {
1019 return FuncType::template Parse<float>(std::forward<Args>(args)...);
1020 }
1021 else if (dataType == DataType::Signed32)
1022 {
1023 return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1024 }
1025
1026 return ResType();
1027 }
1028
1029 template<class... Args>
1030 inline static void Result(DataType dataType, Args&&... args)
1031 {
1032 if (dataType == DataType::Float32)
1033 {
1034 FuncType::template Parse<float>(std::forward<Args>(args)...);
1035 }
1036 else if (dataType == DataType::Signed32)
1037 {
1038 FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1039 }
1040 }
1041};
1042
// Parses a TensorFlow Const node into a (deferred) ParsedConstTfOperation.
// The tensor payload may arrive either as a typed value list (float_val/int_val)
// or as a raw tensor_content byte string; exactly one of the two must be present.
// Throws ParseException when the "value" attribute is missing, when tensor_content
// has no accompanying shape, when no data is found at all, or when the data is
// larger than the shape implies.
ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    BOOST_ASSERT(nodeDef.op() == "Const");

    if (nodeDef.attr().count("value") == 0)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Value not found for Const node - %1% %2%")
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
    const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
    const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "dtype");

    // Extracts each dimension's size from the proto shape.
    const auto GetDimensionSize = [](auto& d) { return d.size(); };

    std::vector<unsigned int> dimensionSizes;
    std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
                   std::back_inserter(dimensionSizes), GetDimensionSize);

    // Calculates number of elements (0 when the proto declares no dimensions).
    const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
    unsigned int numElements = 0U;

    if (!dimensionSizes.empty())
    {
        numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
                                      1U, std::multiplies<unsigned int>());
    }

    std::vector<int8_t> tensorData;

    // Get tensor data from the list of values attribute.
    if (tfTensor.tensor_content().empty())
    {
        InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);

        // If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
        // tensor of the provided number of elements.
        if (numElements == 0)
        {
            const unsigned int tfNumElements =
                static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
            dimensionSizes.push_back(tfNumElements);
        }
    }
    // Gets tensor data from tensor content attribute (raw bytes).
    else
    {
        tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());

        // Checks if a tensor shape is defined for the tensor content.
        // Unlike the value-list path, raw content carries no element count of its
        // own, so the shape attribute is mandatory here.
        if (numElements == 0)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "No tensor shape found for Const node - %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }
    }

    // Const node requires at least a list of values or a content attribute.
    if (tensorData.empty())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "No tensor data found for Const node - %1% %2%")
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
                                dimensionSizes.data(),
                                dataType);

    // If we have a list of values, then the length of the list must be
    // less than or equal to the number of elements implied by the shape argument.
    // (A shorter list is allowed: ReadData pads it with its last value.)
    if (tensorData.size() > tensorInfo.GetNumBytes())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Number of elements (%1%) should be less than or equal "
                    "to the number of elements implied by the shape argument (%2%) for Const node - %3% %4%")
                % (tensorData.size() / GetDataTypeSize(dataType))
                % tensorInfo.GetNumElements()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    // Dispatch on dataType to build a ParsedConstTfOperation of the right element type.
    return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
        dataType, this, nodeDef, tensorData, tensorInfo);
}
1143
1144template<typename Type>
1145bool TfParser::HasParsedConstTensor(const std::string & nodeName) const
1146{
1147 auto it = m_ParsedTfOperations.find(nodeName);
jimfly01f6ba7472018-12-04 10:09:52 +00001148 if (it == m_ParsedTfOperations.end())
surmeh01bceff2f2018-03-29 16:29:27 +01001149 {
1150 return false;
1151 }
jimfly01f6ba7472018-12-04 10:09:52 +00001152 return dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) != nullptr;
1153}
1154
1155template<typename Type>
1156bool TfParser::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const
1157{
1158 return dynamic_cast<ParsedConstTfOperation<Type>*>(parsedTfOpPtr) != nullptr;
surmeh01bceff2f2018-03-29 16:29:27 +01001159}
1160
// Parses a TensorFlow Conv2D node into an ArmNN Convolution2dLayer.
// Constraints enforced here:
//  - the weight (second) input must be a previously-parsed constant float tensor,
//  - dilations, if present, must all be 1,
//  - data_format must pass CHECK_DATA_FORMAT (branches below handle NHWC vs NCHW).
// Bias is never enabled; TensorFlow expresses bias as a separate BiasAdd node.
ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Convolution layers with constant weights for %1%, input %2% %3%")
                % nodeDef.name()
                % inputs[1].m_IndexedValue->GetNode().name()
                % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    // Read the dilations, if present - only [1,1,1,1] (the default) is supported.
    std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
    if (!dilations.empty())
    {
        for (auto dilation : dilations)
        {
            if (dilation != 1u)
            {
                throw ParseException(
                    boost::str(
                        boost::format(
                            "ArmNN only supports Convolution layers with dilations [1,1,1,1] for %1% %2%")
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
            }
        }
    }

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    // DataLayoutIndexed maps the H/W axes to their positions in the chosen layout,
    // so the stride and shape lookups below work for both NHWC and NCHW.
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // Tensorflow weights are [H, W, In, Out].
    // ArmNN weights have to be [Out, H, W, In] when the data layout is NHWC,
    // and [Out, In, H, W] when the data layout is NCHW.
    PermutationVector permutationVector =
        dataLayout == DataLayout::NHWC ?
        std::initializer_list<unsigned int>{ 1, 2, 3, 0 } : // NHWC: [H, W, In, Out] -> [Out, H, W, In]
        std::initializer_list<unsigned int>{ 2, 3, 1, 0 };  // NCHW: [H, W, In, Out] -> [Out, In, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    // After the swizzle, dimension 0 is Out and H/W sit at the layout's H/W indices
    // (NHWC: [Out, H, W, In] -> indices 1/2; NCHW: [Out, In, H, W] -> indices 2/3).
    uint32_t weightHeight = weightTensor.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t weightWidth = weightTensor.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    // Output spatial size follows the TensorFlow padding conventions:
    // SAME:  ceil(in / stride); VALID: ceil((in - kernel + 1) / stride).
    if (paddingString == "SAME")
    {
        padding = true;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                 static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                 static_cast<float>(desc.m_StrideX)));
    }

    // The output channel count is the weight tensor's Out dimension (index 0 after
    // swizzling); its position in the output shape depends on the data layout.
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      weightTensor.GetShape()[0] },
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
        default:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      weightTensor.GetShape()[0],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

    // CalcPadding fills in the descriptor's pad fields; presumably no padding is
    // applied when `padding` is false (VALID) -- see CalcPadding's definition.
    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc, weightTensor, nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1298
// Parses a TensorFlow DepthwiseConv2dNative node into an ArmNN
// DepthwiseConvolution2dLayer. The weight (second) input must be a
// previously-parsed constant float tensor; bias is never enabled here.
// The output channel count is In * ChannelMultiplier (shape[0] * shape[1]
// of the swizzled [M, I, H, W] weights).
ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
                                                    const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Depthwise Convolution layer with constant weights. "
                    "Non const input found %1% for node %2% %3%")
                % inputs[1].m_IndexedValue->GetNode().name()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    // DataLayoutIndexed maps the H/W axes to their positions in the chosen layout,
    // so the stride and shape lookups below work for both NHWC and NCHW.
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // Tensorflow weights come in the format [H, W, I, M].
    // ArmNN weights have to be [M, I, H, W].
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    // Swizzled weights are [M, I, H, W], so H and W are always at indices 2 and 3.
    uint32_t weightHeight = weightTensor.GetShape()[2];
    uint32_t weightWidth = weightTensor.GetShape()[3];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    // Output spatial size follows the TensorFlow padding conventions:
    // SAME:  ceil(in / stride); VALID: ceil((in - kernel + 1) / stride).
    if (paddingString == "SAME")
    {
        padding = true;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                 static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                 static_cast<float>(desc.m_StrideX)));
    }

    // Output channels = M * I (channel multiplier times input channels).
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      weightTensor.GetShape()[0] * weightTensor.GetShape()[1]},
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
        default:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

    // CalcPadding fills in the descriptor's pad fields; presumably no padding is
    // applied when `padding` is false (VALID) -- see CalcPadding's definition.
    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc, weightTensor, nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1416
Conor Kennedyc2130a02018-12-05 11:05:54 +00001417TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
1418{
1419 BOOST_ASSERT(nodeDef.op() == "ExpandDims");
1420
1421 if (inputTensorInfo.GetNumDimensions() > 4) {
1422 throw ParseException(
1423 boost::str(
1424 boost::format(
1425 "Unsupported number of dimensions: %1% for input shape for ExpandDims %2% %3%")
1426 % inputTensorInfo.GetNumDimensions()
1427 % nodeDef.name()
1428 % CHECK_LOCATION().AsString()));
1429 }
1430
1431 std::int32_t expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");
1432
1433 std::int32_t inputDimSize = boost::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
1434 std::vector<uint32_t> outputDims;
1435
1436 // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
1437 if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1438 {
1439 // add current input shape to outputDims
1440 for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
1441 auto currentDimension = inputTensorInfo.GetShape()[i];
1442 outputDims.push_back(currentDimension);
1443 }
1444
1445 // insert a dimension of 1 at index 'expandDim' of inputs shape
1446 if (expandDim >= 0)
1447 {
1448 auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1449 outputDims.insert(getPosition, 1);
1450 }
1451
1452 // if negative number for 'expandDim' then count backwards from the last element
1453 // and insert 1 dimension at index 'expandDim'
1454 if (expandDim < 0)
1455 {
Matteo Martincighd7cceeb2018-12-06 09:06:29 +00001456 int outputDimSize = boost::numeric_cast<int>(outputDims.size() + 1);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001457 auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1458 outputDims.insert(getPosition, 1);
1459 }
1460 }
1461 else
1462 {
1463 throw InvalidArgumentException(
1464 boost::str(
1465 boost::format(
1466 "Cannot expand dimension %1% in input tensor with %2% dimension %3%")
1467 % expandDim
1468 % inputDimSize
1469 % CHECK_LOCATION().AsString()));
1470 }
1471
1472 if (outputDims.size() > 4)
1473 {
1474 throw ParseException(
1475 boost::str(
1476 boost::format(
1477 "Unsupported number of dimensions: %1% for output shape for ExpandDims %2% %3%")
1478 % outputDims.size()
1479 % nodeDef.name()
1480 % CHECK_LOCATION().AsString()));
1481 }
1482
1483 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1484 outputDims.data());
1485
1486 TensorInfo outTensorInfo = inputTensorInfo;
1487 outTensorInfo.SetShape(outShape);
1488
1489 return outTensorInfo;
1490}
1491
1492ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1493{
1494 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1495
1496 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1497 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1498
1499 TensorInfo outputInfo;
1500 outputInfo = OutputShapeOfExpandDims(nodeDef, inputTensorInfo);
1501
1502 ReshapeDescriptor reshapeDesc;
1503 reshapeDesc.m_TargetShape = outputInfo.GetShape();
1504 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1505 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1506 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1507
1508 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1509}
1510
// Parses a TensorFlow FusedBatchNorm node into an ArmNN BatchNormalizationLayer.
// Inputs are (data, scale, offset, mean, variance); the four parameter inputs
// must each be previously-parsed constant float tensors.
// The output tensor info is taken unchanged from the data input, since batch
// normalization is element-wise and preserves shape.
ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
                                                   const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);

    // Input 1: scale (gamma) - must be constant.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant scale. "
                    "Input %1%. Node %2% %3%")
                % inputs[1].m_IndexedValue->GetNode().name()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* scaleNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    // Input 2: offset (beta) - must be constant.
    if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant offset. "
                    "Input %1%. Node %2% %3%")
                % inputs[2].m_IndexedValue->GetNode().name()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* offsetNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);

    // Input 3: mean - must be constant.
    if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant mean. "
                    "Input %1%. Node %2% %3%")
                % inputs[3].m_IndexedValue->GetNode().name()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* meanNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);

    // Input 4: variance - must be constant.
    if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant variance. "
                    "Input %1%. Node %2% %3%")
                % inputs[4].m_IndexedValue->GetNode().name()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* varianceNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);

    const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "FusedBatchNorm");

    // The descriptor only has the epsilon attribute.
    BatchNormalizationDescriptor desc;
    desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
    desc.m_DataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
    // locally until the layer is added: the ConstTensors returned by GetConstTensor
    // point into these vectors.
    std::vector<float> scaleTensorData;
    ConstTensor scaleTensor = scaleNode->GetConstTensor(scaleTensorData);

    std::vector<float> offsetTensorData;
    ConstTensor offsetTensor = offsetNode->GetConstTensor(offsetTensorData);

    std::vector<float> meanTensorData;
    ConstTensor meanTensor = meanNode->GetConstTensor(meanTensorData);

    std::vector<float> varianceTensorData;
    ConstTensor varianceTensor = varianceNode->GetConstTensor(varianceTensorData);

    IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
                                                                     meanTensor,
                                                                     varianceTensor,
                                                                     offsetTensor,
                                                                     scaleTensor,
                                                                     nodeDef.name().c_str());

    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);

    // Shape-preserving op: reuse the input's tensor info for the output.
    layer->GetOutputSlot(0).SetTensorInfo(inputSlot.GetTensorInfo());
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1609
// Tries to recognise the multiplication half of the TensorFlow LeakyRelu idiom
// max(mul(alpha, x), x). Returns true when mulNodeDef is a "Mul" node whose input
// at alphaLayerIndex is a single-element float constant and whose other input is
// the node referred to by otherOp. On success it fills in:
//   *outputOfLeakyRelu - the output slot of the non-constant ("x") branch
//   desc               - configured as LeakyReLu with m_A set to the alpha scalar
// Returns false (leaving the out-params untouched) if the pattern does not match.
bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
                                           size_t alphaLayerIndex,
                                           const OutputOfParsedTfOperation& otherOp,
                                           armnn::IOutputSlot** outputOfLeakyRelu,
                                           armnn::ActivationDescriptor & desc)
{
    const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();

    // Verifying all these assumptions hold:
    //
    // 1, the mulNodeDef is an elementwise multiplication node "Mul"
    // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
    // 3, the inputLayerIndex selects a layer which has the same name as otherNodeDef
    //

    if (mulNodeDef.op() == "Mul")
    {
        // The non-alpha input of the Mul is the one at the other index of the two.
        size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
        std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);

        BOOST_ASSERT(inputs.size() == 2);
        BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
        BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
        BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));

        if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
        {
            if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
            {
                ParsedConstTfOperation<float>* alpha =
                    boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(
                        inputs[alphaLayerIndex].m_IndexedValue);

                std::vector<float> const_data;
                ConstTensor const_tensor = alpha->GetConstTensor(const_data);

                // Only a scalar (single-element) alpha matches the LeakyRelu pattern.
                if (const_data.size() == 1)
                {
                    desc.m_Function = ActivationFunction::LeakyReLu;
                    desc.m_A = const_data[0];

                    *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
                    return true;
                }
            }
        }
    }
    return false;
}
1659
telsoa01c577f2c2018-08-31 09:22:23 +01001660ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
1661 const tensorflow::GraphDef& graphDef)
1662{
1663 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
Sadik Armagan975c09a2018-12-04 10:02:08 +00001664 if (inputs.size() != 2)
1665 {
1666 throw ParseException(
1667 boost::str(
1668 boost::format(
1669 "Maximum expects two inputs!. Got %1% for Node %2% %3%")
1670 % inputs.size()
1671 % nodeDef.name()
1672 % CHECK_LOCATION().AsString()));
1673 }
1674
telsoa01c577f2c2018-08-31 09:22:23 +01001675 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1676 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1677 IOutputSlot* outputOfLeakyRelu = nullptr;
1678
1679 ActivationDescriptor desc;
1680
Sadik Armagan975c09a2018-12-04 10:02:08 +00001681 // A max node may be part of a LeakyRelu, with one input as a multiplication with a scalar constant,
1682 // i.e. one of the four possible scenarios:
1683 // 1, max(mul(a, x), x)
1684 // 2, max(mul(x, a), x)
1685 // 3, max(x, mul(a, x))
1686 // 4, max(x, mul(x, a))
1687 // These are handled by an activation layer.
telsoa01c577f2c2018-08-31 09:22:23 +01001688
1689 if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1690 IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1691 IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1692 IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1693 {
1694 BOOST_ASSERT(outputOfLeakyRelu != nullptr);
1695
1696 IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
1697 outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
1698 layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
1699 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1700 }
1701 else
1702 {
Sadik Armagan975c09a2018-12-04 10:02:08 +00001703 // Anything else is just a maximum layer.
1704
1705 return AddMaximumLayer(nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001706 }
1707}
1708
jimfly0184c70e62018-12-19 13:14:46 +00001709std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> TfParser::ProcessElementwiseInputSlots(
1710 const tensorflow::NodeDef& nodeDef, const std::string& layerName)
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001711{
1712 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1713
1714 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1715 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1716 const unsigned int input0Dim = input0Slot->GetTensorInfo().GetNumDimensions();
1717 const unsigned int input1Dim = input1Slot->GetTensorInfo().GetNumDimensions();
1718
1719 if (input0Dim != input1Dim)
1720 {
1721 // broadcasting where input0 and input1 have different number of dimensions
1722 // is only supported for 1D and 4D tensors pair
1723 if (input0Dim == 1 && input1Dim == 4)
1724 {
1725 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
1726 }
1727 else if (input0Dim == 4 && input1Dim == 1)
1728 {
1729 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
1730 }
1731 else
1732 {
1733 throw ParseException(
jimfly0184c70e62018-12-19 13:14:46 +00001734 boost::str(
1735 boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
1736 % layerName
1737 % nodeDef.name()
1738 % CHECK_LOCATION().AsString()));
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001739 }
1740 }
jimfly0184c70e62018-12-19 13:14:46 +00001741 return {input0Slot, input1Slot};
1742}
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001743
jimfly0184c70e62018-12-19 13:14:46 +00001744ParsedTfOperationPtr TfParser::ProcessElementwiseLayer(
1745 IOutputSlot* input0Slot,
1746 IOutputSlot* input1Slot,
1747 IConnectableLayer* const layer,
1748 const tensorflow::NodeDef& nodeDef)
1749{
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001750 input0Slot->Connect(layer->GetInputSlot(0));
1751 input1Slot->Connect(layer->GetInputSlot(1));
1752
1753 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1754 std::vector<unsigned int> outputShape;
1755
1756 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1757 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1758
1759 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1760 {
1761 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1762 }
1763
1764 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1765 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1766
1767 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1768}
1769
jimfly01a06bf312018-12-18 16:24:51 +00001770ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
1771 const tensorflow::GraphDef& graphDef)
1772{
1773 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
1774 IOutputSlot* input0Slot = inputLayers.first;
1775 IOutputSlot* input1Slot = inputLayers.second;
1776
1777 IConnectableLayer* const layer = m_Network->AddGreaterLayer(nodeDef.name().c_str());
1778
1779 return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
1780}
1781
jimfly0184c70e62018-12-19 13:14:46 +00001782ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
1783 const tensorflow::GraphDef& graphDef)
1784{
1785 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
1786 IOutputSlot* input0Slot = inputLayers.first;
1787 IOutputSlot* input1Slot = inputLayers.second;
1788
1789 IConnectableLayer* const layer = m_Network->AddEqualLayer(nodeDef.name().c_str());
1790
1791 return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
1792}
1793
1794ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
1795 const tensorflow::GraphDef& graphDef)
1796{
1797 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
1798 IOutputSlot* input0Slot = inputLayers.first;
1799 IOutputSlot* input1Slot = inputLayers.second;
1800
1801 IConnectableLayer* const layer = m_Network->AddMinimumLayer(nodeDef.name().c_str());
1802
1803 return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
1804}
1805
// Parses a TensorFlow "Sub" node into an ArmNN Subtraction layer.
// Supports broadcasting a 1D input against the other input (assumed NHWC)
// by inserting a broadcast reshape layer in front of the 1D side.
ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);

    // NOTE: these references are bound to the ORIGINAL slots' infos before any
    // broadcast reshape replaces the slot pointers below; the rank checks that
    // follow intentionally look at the pre-broadcast shapes.
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    if (input0Info.GetNumDimensions() == 1)
    {
        const bool isNHWC = true;
        input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
    }

    if (input1Info.GetNumDimensions() == 1)
    {
        const bool isNHWC = true;
        input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
    }

    IConnectableLayer* const layer = m_Network->AddSubtractionLayer(nodeDef.name().c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // The output shape follows the non-broadcast (higher-rank) side: if input0
    // was the 1D one, input1's (post-reshape) info defines the output.
    if (input0Info.GetNumDimensions() == 1)
    {
        layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
    }
    else
    {
        layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1844
jimfly01f6ba7472018-12-04 10:09:52 +00001845unsigned int CheckPaddingTensor(const ConstTensor& paddingTensor,
1846 const TensorInfo& inputTensorInfo,
1847 const std::string& nodeName)
1848{
1849 unsigned int rank = paddingTensor.GetShape()[0];
1850 unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
1851 if (rank != expectedRank)
1852 {
1853 throw ParseException(
1854 boost::str(
1855 boost::format(
1856 "Expected the padding tensor to be of rank %1 not %2 on Node %3 %4.")
1857 % expectedRank
1858 % rank
1859 % nodeName
1860 % CHECK_LOCATION().AsString()));
1861 }
1862 unsigned int second = paddingTensor.GetShape()[1];
1863 if (second != 2)
1864 {
1865 throw ParseException(
1866 boost::str(
1867 boost::format(
1868 "Expected the padding tensor to be of dimensions [%1, 2] not [%1, %2] on Node %3 %4.")
1869 % rank
1870 % second
1871 % nodeName
1872 % CHECK_LOCATION().AsString()));
1873 }
1874 return rank;
1875}
1876
1877TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
1878 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
1879{
1880 unsigned int numDims = inputTensorInfo.GetNumDimensions();
1881 std::vector<unsigned int> outDims;
1882 for (unsigned int i = 0; i < numDims; ++i)
1883 {
1884 unsigned int dimSize = inputTensorInfo.GetShape()[i];
1885 const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
1886 dimSize += dimPadding.first;
1887 dimSize += dimPadding.second;
1888 outDims.push_back(dimSize);
1889 }
1890 TensorInfo paddedTensorInfo = inputTensorInfo;
1891 unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
1892 paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
1893 return paddedTensorInfo;
1894}
1895
1896ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
1897 const tensorflow::GraphDef& graphDef)
1898{
1899 // input consists of:
1900 // input[0] the tensor which will be padded
1901 // input[1] the tensor holding the padding values
1902 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1903 IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1904 TensorInfo inputTensorInfo = previousLayerOutputSlot.GetTensorInfo();
1905 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
1906 {
1907 throw ParseException(
1908 boost::str(
1909 boost::format(
1910 "ArmNN only supports Pad with constant padding. "
1911 "Input %1%. Node %2% %3%")
1912 % inputs[1].m_IndexedValue->GetNode().name()
1913 % nodeDef.name()
1914 % CHECK_LOCATION().AsString()));
1915
1916 }
1917 ParsedConstTfOperation<int32_t>* paddingTensorOp =
1918 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1919
1920 std::vector<int32_t> paddingTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001921 ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(paddingTensorData);
jimfly01f6ba7472018-12-04 10:09:52 +00001922 // paddings is an integer tensor with shape [n, 2], where n is the rank of tensor
1923 // and should match the rank of the input tensor that is being padded.
1924 // For each dimension D of input, paddings[D, 0] indicates how many values to add
1925 // before the contents of tensor in that dimension, and paddings[D, 1] indicates how
1926 // many values to add after the contents of tensor in that dimension
1927 // This needs to be translated into a padList for ACL
1928 std::vector<std::pair<unsigned int, unsigned int>> padList;
1929 unsigned int rank = CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
1930 for (unsigned int i = 0; i < rank; ++i)
1931 {
1932 std::pair<unsigned int, unsigned int> paddingForDim;
1933 for (unsigned int j = 0; j < 2; j++)
1934 {
1935 unsigned int index = (i * 2) + j;
1936 int paddingAmount = paddingTensorData[index];
1937 // make sure we can cast to an unsigned value
1938 if (paddingAmount < 0)
1939 {
1940 throw ParseException(
1941 boost::str(
1942 boost::format(
1943 "Negative amount %1 specified at [%2, %3] of padding tensor on Node %4 %5.")
1944 % paddingAmount
1945 % i
1946 % j
1947 % nodeDef.name()
1948 % CHECK_LOCATION().AsString()));
1949 }
1950 if (j == 0)
1951 {
1952 paddingForDim.first = static_cast<unsigned int>(paddingAmount);
1953 }
1954 else
1955 {
1956 paddingForDim.second = static_cast<unsigned int>(paddingAmount);
1957 }
1958 }
1959 padList.push_back(paddingForDim);
1960 }
1961 PadDescriptor padDescriptor(padList);
1962 IConnectableLayer* layer = m_Network->AddPadLayer(padDescriptor, nodeDef.name().c_str());
1963 previousLayerOutputSlot.Connect(layer->GetInputSlot(0));
1964 // Use the padding to calculate the new output tensor shape
1965 TensorInfo outputTensorInfo = CalculatePaddedOutputTensorInfo(inputTensorInfo, padList);
1966 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1967 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1968}
1969
// Parses a TensorFlow "ConcatV2" style node into an ArmNN Merger layer.
// The last input is a constant int32 scalar giving the concatenation axis;
// the remaining inputs are the tensors to concatenate. All input tensors must
// have exactly MaxNumOfTensorDimensions dimensions, and concatenation along
// axes 0 and 2 is rejected.
ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);

    // In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
    unsigned int numInputs = static_cast<unsigned int>(nodes.size());

    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);

    // The last input is the axis for concatenation.
    if (!HasParsedConstTensor<int32_t>(inputs[numInputs - 1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Concat with constant axis. "
                    "Input %1%. Node %2% %3%")
                    % inputs[numInputs - 1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<int32_t>* shapeNode =
            boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[numInputs - 1].m_IndexedValue);

    // Get the axis tensor data
    std::vector<int32_t> axisTensorData;
    shapeNode->GetConstTensor(axisTensorData);

    // This concatDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
    const unsigned int concatDim = static_cast<unsigned int>(axisTensorData[0]);

    // Armnn supports concatenation along the channel dimension for data formats NHWC and NCHW.
    if (concatDim == 0 || concatDim == 2)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Dimension %1% for concatenation is not supported by Armnn. "
                    "Node %2% %3%")
                    % concatDim
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    // One "view" per concatenated tensor (the axis input is excluded).
    unsigned int numConcatViews = numInputs - 1;
    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatViews), MaxNumOfTensorDimensions);
    concatDescriptor.SetConcatAxis(concatDim);
    TensorShape mergeDims(MaxNumOfTensorDimensions);
    // Running offset along the concatenation axis; each view starts where the
    // previous one ended.
    unsigned int mergeDim = 0;
    for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
    {
        // Need to double check whether it should be
        IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
        TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

        // Double check dimensions of the tensors
        if (inputTensorInfo.GetNumDimensions() != MaxNumOfTensorDimensions)
        {
            throw armnn::ParseException(
                boost::str(
                    boost::format(
                        "The number of dimensions: %1% for input tensors of the "
                        "concatenation op should be %2% %3%")
                        % inputTensorInfo.GetNumDimensions()
                        % MaxNumOfTensorDimensions
                        % CHECK_LOCATION().AsString()));
        }

        // Copy the input tensor shape to mergeDimSizes and initialize the view origin coordinates for the current input
        mergeDims = inputTensorInfo.GetShape();
        unsigned int* viewOrigin = const_cast<unsigned int*>(concatDescriptor.GetViewOrigin(viewIndex));
        std::fill(viewOrigin, viewOrigin + MaxNumOfTensorDimensions, 0);

        // Update the view origin coordinates and the merge dimension value
        concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
        mergeDim += mergeDims[concatDim];
    }

    // Update the output shape: the concat axis is the sum of all view sizes,
    // the other dimensions are taken from the last input's shape.
    mergeDims[concatDim] = mergeDim;
    armnn::IConnectableLayer *layer = m_Network->AddMergerLayer(concatDescriptor, nodeDef.name().c_str());

    layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(mergeDims, DataType::Float32));

    for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
    {
        IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
        inputSlot.Connect(layer->GetInputSlot(viewIndex));
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2063
2064ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
2065 const tensorflow::GraphDef& graphDef)
2066{
telsoa01c577f2c2018-08-31 09:22:23 +01002067 // Note: the Shape layer is handled in a special way, because:
2068 // 1. ARMNN doesn't support int32 tensors which it outputs.
2069 // 2. ARMNN works with statically shaped tensors which are known at parse time.
surmeh01bceff2f2018-03-29 16:29:27 +01002070 // 3. because of 1. and 2. we treat the output of Shape as a temporary const int32
telsoa01c577f2c2018-08-31 09:22:23 +01002071 // tensor which may be used as an input to other ops, most likely a Reshape.
surmeh01bceff2f2018-03-29 16:29:27 +01002072
2073 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
2074 if (tfDataType != tensorflow::DT_INT32)
2075 {
telsoa01c577f2c2018-08-31 09:22:23 +01002076 throw ParseException(
2077 boost::str(
2078 boost::format(
2079 "Armnn only supports DT_INT32 as out_type. Got %1% for Node %2% %3%")
2080 % tensorflow::DataType_Name(tfDataType)
2081 % nodeDef.name()
2082 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002083 }
2084
2085 const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2086 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2087 const TensorInfo& prevLayerTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2088 unsigned int prevLayerDimensions = prevLayerTensorInfo.GetNumDimensions();
2089
2090 std::vector<int32_t> shapeTensorData;
2091 shapeTensorData.reserve(prevLayerDimensions);
2092
2093 for (unsigned int i=0; i<prevLayerDimensions; ++i)
2094 {
2095 shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.GetShape()[i]));
2096 }
2097
2098 TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
2099
2100 return std::make_unique<ParsedConstTfOperation<int32_t>>(this,
2101 nodeDef,
2102 &shapeTensorData[0],
2103 shapeTensorInfo);
2104}
2105
2106ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
2107 const tensorflow::GraphDef& graphDef)
2108{
2109 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2110 ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
2111
2112 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2113 {
telsoa01c577f2c2018-08-31 09:22:23 +01002114 throw ParseException(
2115 boost::str(
2116 boost::format(
2117 "ArmNN only supports Reshape layers with constant shapes. "
2118 "Input %1% Node %2% %3%")
2119 % inputs[1].m_IndexedValue->GetNode().name()
2120 % nodeDef.name()
2121 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002122 }
2123 ParsedConstTfOperation<int32_t>* shapeNode =
2124 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2125
2126 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
2127 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2128
2129 std::vector<int32_t> shapeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002130 ConstTensor shapeTensor = shapeNode->GetConstTensor(shapeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002131 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
2132
2133 TensorShape targetShape = outputTensorInfo.GetShape();
2134 ReshapeDescriptor reshapeDesc;
2135 reshapeDesc.m_TargetShape = targetShape;
2136
2137 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2138 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2139 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2140
2141 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2142}
2143
// Parses a TensorFlow "ResizeBilinear" node into an ArmNN ResizeBilinear layer
// operating in NHWC. The target size (input[1]) must be a constant int32
// tensor of {height, width}, and the align_corners attribute must be false.
ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
                                                   const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports ResizeBilinear layers with constant sizes. "
                    "Input %1%. Node %2% %3%")
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<int32_t>* sizeNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);

    // Checks the align_corners attribute is not set.
    if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports ResizeBilinear layers with align_corners set to false. "
                    "Node %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    // Data for the parsed tensor args (size) must be stored locally.
    std::vector<int32_t> sizeTensorData;
    ConstTensor sizeTensor = sizeNode->GetConstTensor(sizeTensorData);

    // The descriptor only has target height and width attributes, which we get from the size tensor.
    ResizeBilinearDescriptor desc;
    desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
    desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, nodeDef.name().c_str());

    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
    // The input shape is always in BHWC format, this will be swizzled below; for now,
    // get the batch and channels to make up the ArmNN output shape with the target size.
    unsigned int outBatch = inputTensorInfo.GetShape()[0];
    unsigned int outChannels = inputTensorInfo.GetShape()[3];
    unsigned int outHeight = desc.m_TargetHeight;
    unsigned int outWidth = desc.m_TargetWidth;
    TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
    // The output DataType is always Float32, regardless of the input DataType.
    const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2204
// Computes the output TensorInfo of a TensorFlow "Squeeze" node: removes the
// dimensions listed in the optional "squeeze_dims" attribute that have size 1
// (all size-1 dimensions when the attribute is absent). The output data type
// comes from the node's "T" attribute (float32 or int32 only); input and
// output ranks must not exceed 4.
TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
{
    BOOST_ASSERT(nodeDef.op() == "Squeeze");
    tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");

    DataType type;
    if (tfDataType == tensorflow::DT_FLOAT)
    {
        type = DataType::Float32;
    }
    else if (tfDataType == tensorflow::DT_INT32)
    {
        type = DataType::Signed32;
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
                % tensorflow::DataType_Name(tfDataType)
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }


    if (inputTensorInfo.GetNumDimensions() > 4)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
                    % inputTensorInfo.GetNumDimensions()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
    static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };

    // No explicit squeeze_dims: default to considering every input dimension.
    if (squeezeDims.empty())
    {
        squeezeDims.assign(dimensionSequence,
                           dimensionSequence+inputTensorInfo.GetNumDimensions());
    }

    std::vector<uint32_t> outputDims;
    for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
    {
        // Keep a dimension if it is not listed in squeezeDims, or if its size
        // is not 1 (TensorFlow only squeezes size-1 dimensions).
        bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
        auto currentDimension = inputTensorInfo.GetShape()[i];
        if (skipSqueeze || currentDimension != 1)
        {
            outputDims.push_back(currentDimension);
        }
    }

    if (outputDims.size() > 4)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
                    % outputDims.size()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
                                       outputDims.data());

    // Preserve all other properties of the input info; only shape and data
    // type change.
    TensorInfo outTensorInfo = inputTensorInfo;
    outTensorInfo.SetShape(outShape);
    outTensorInfo.SetDataType(type);

    return outTensorInfo;
}
2281
2282ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2283{
2284 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2285
2286 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2287 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2288
2289 TensorInfo outputInfo;
2290 outputInfo = OutputShapeOfSqueeze(nodeDef, inputTensorInfo);
2291
2292 ReshapeDescriptor reshapeDesc;
2293 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2294 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2295 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2296 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2297
2298 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2299}
2300
2301ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2302{
2303 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2304
2305 NormalizationDescriptor normalizationDescriptor;
2306 normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2307 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
2308 normalizationDescriptor.m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef, "alpha");
2309 normalizationDescriptor.m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef, "beta");
2310 normalizationDescriptor.m_K = ReadMandatoryNodeFloatAttribute(nodeDef, "bias");
2311 normalizationDescriptor.m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef, "depth_radius");
ruoyan018174f362018-12-04 18:24:08 +00002312 normalizationDescriptor.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002313
2314 // The window size must be an odd value. For a window size of (2 * n + 1), TensorFlow defines depth_radius = n.
2315 normalizationDescriptor.m_NormSize = normalizationDescriptor.m_NormSize * 2 + 1;
2316
2317 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002318 IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor,
2319 nodeDef.name().c_str());
ruoyan018174f362018-12-04 18:24:08 +00002320 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2321 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
surmeh01bceff2f2018-03-29 16:29:27 +01002322
2323 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2324}
2325
/// A ParsedTfOperation for a MatMul node.
/// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because
/// MatMul nodes are often used for the first part of a biased FullyConnected (MatMul followed
/// by Add) and in these cases armnn doesn't need a separate layer for the MatMul.
///
class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    ParsedMatMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
        : DeferredSingleLayerParsedTfOperation(parser, node)
    {
    }

    /// Materializes the deferred layer: a FullyConnected layer with no bias
    /// (nullptr Add node), named after the MatMul node itself.
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
    }
};
2345
2346ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2347{
telsoa01c577f2c2018-08-31 09:22:23 +01002348 // Defers the creation of the layer (see ParsedMatMulTfOperation).
surmeh01bceff2f2018-03-29 16:29:27 +01002349 return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
2350}
2351
telsoa01c577f2c2018-08-31 09:22:23 +01002352/// An ParsedTfOperation for a Mul node.
2353/// Creation of the armnn Mul layer is deferred until it is actually needed, because Mul nodes
2354/// are also used for the first part of a leaky relu activation function (Mul followed by Maximum)
2355/// and in these cases armnn doesn't need a separate layer for the Mul.
2356///
2357class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
2358{
2359public:
2360 ParsedMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
2361 : DeferredSingleLayerParsedTfOperation(parser, node)
2362 {
2363 }
2364
2365 void CreateLayerDeferred() override
2366 {
2367 BOOST_ASSERT(m_Layer == nullptr);
2368 m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
2369 }
2370};
2371
surmeh01bceff2f2018-03-29 16:29:27 +01002372ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2373{
2374 boost::ignore_unused(graphDef);
2375
telsoa01c577f2c2018-08-31 09:22:23 +01002376 return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002377}
2378
2379ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
2380 const tensorflow::GraphDef& graphDef)
2381{
2382 boost::ignore_unused(graphDef);
2383
2384 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
2385
2386 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
2387
2388 auto it = m_InputShapes.find(nodeDef.name());
2389 if (it == m_InputShapes.end())
2390 {
telsoa01c577f2c2018-08-31 09:22:23 +01002391 throw ParseException(
2392 boost::str(
2393 boost::format(
2394 "Missing input shape for Placeholder '%1%' %2%")
2395 % nodeDef.name()
2396 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002397 }
2398 TensorInfo tensorInfo(it->second, DataType::Float32);
2399
2400 IConnectableLayer* const layer = m_Network->AddInputLayer(layerId, nodeDef.name().c_str());
2401
2402 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2403
2404 TrackInputBinding(layer, layerId, tensorInfo);
2405
2406 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2407}
2408
saoste01bbd40612018-08-28 15:41:51 +01002409ParsedTfOperationPtr TfParser::ParseRealDiv(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2410{
2411 boost::ignore_unused(graphDef);
2412 return AddRealDivLayer(nodeDef);
2413}
2414
surmeh01bceff2f2018-03-29 16:29:27 +01002415ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
2416 const tensorflow::GraphDef& graphDef)
2417{
2418 boost::ignore_unused(graphDef);
2419
2420 ActivationDescriptor activationDesc;
2421 activationDesc.m_Function = ActivationFunction::ReLu;
2422 return AddActivationLayer(nodeDef, activationDesc);
2423}
2424
2425ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
2426 const tensorflow::GraphDef& graphDef)
2427{
2428 boost::ignore_unused(graphDef);
2429
2430 ActivationDescriptor activationDesc;
2431 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2432 activationDesc.m_A = 6.0f;
2433 activationDesc.m_B = 0.0f;
2434
2435 return AddActivationLayer(nodeDef, activationDesc);
2436}
2437
2438ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
2439 const tensorflow::GraphDef& graphDef)
2440{
2441 boost::ignore_unused(graphDef);
2442
2443 ActivationDescriptor activationDesc;
2444 activationDesc.m_Function = ActivationFunction::Sigmoid;
2445
2446 return AddActivationLayer(nodeDef, activationDesc);
2447}
2448
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +00002449ParsedTfOperationPtr TfParser::ParseRsqrt(const tensorflow::NodeDef &nodeDef,
2450 const tensorflow::GraphDef &graphDef)
2451{
2452 boost::ignore_unused(graphDef);
2453
2454 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2455
2456 IConnectableLayer* const layer = m_Network->AddRsqrtLayer(nodeDef.name().c_str());
2457
2458 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2459 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2460 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2461
2462 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2463}
2464
surmeh01bceff2f2018-03-29 16:29:27 +01002465ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
2466 const tensorflow::GraphDef& graphDef)
2467{
2468 boost::ignore_unused(graphDef);
2469
2470 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2471
2472 SoftmaxDescriptor softmaxDescriptor;
2473 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(softmaxDescriptor, nodeDef.name().c_str());
2474
2475 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2476 prevLayerSlot.Connect(layer->GetInputSlot(0));
2477 layer->GetOutputSlot(0).SetTensorInfo(prevLayerSlot.GetTensorInfo());
2478
2479 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2480}
2481
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002482ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef,
2483 const tensorflow::GraphDef& graphDef)
2484{
2485 boost::ignore_unused(graphDef);
2486
2487 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2488 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2489 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2490
2491 // The last input is the axis for split operation.
2492 if (!HasParsedConstTensor<int32_t>(inputs[numInputs - 1].m_IndexedValue->GetNode().name()))
2493 {
2494 throw ParseException(
2495 boost::str(
2496 boost::format(
2497 "ArmNN only supports split with constant axis. "
2498 "Input %1%. Node %2% %3%")
2499 % inputs[numInputs - 1].m_IndexedValue->GetNode().name()
2500 % nodeDef.name()
2501 % CHECK_LOCATION().AsString()));
2502 }
2503 ParsedConstTfOperation<int32_t>* shapeNode =
2504 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[numInputs - 1].m_IndexedValue);
2505
2506 // Get the axis tensor data
2507 std::vector<int32_t> axisTensorData;
2508 shapeNode->GetConstTensor(axisTensorData);
2509
2510 // This splitDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
2511 const unsigned int splitDim = static_cast<unsigned int>(axisTensorData[0]);
2512
2513 // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
2514 if (splitDim == 0 || splitDim == 2)
2515 {
2516 throw ParseException(
2517 boost::str(
2518 boost::format(
2519 "Dimension %1% for split is not supported by Armnn. "
2520 "Node %2% %3%")
2521 % splitDim
2522 % nodeDef.name()
2523 % CHECK_LOCATION().AsString()));
2524 }
2525
2526 // As Armnn only supports splitter outputs of the same shape, therefore num_splits will be limited to an integer.
2527 uint32_t num_split = ReadMandatoryNodeUint32Attribute(nodeDef, "num_or_size_splits");
2528
2529 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2530 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2531
2532 if (inputTensorInfo.GetNumDimensions() != MaxNumOfTensorDimensions)
2533 {
2534 throw armnn::ParseException(
2535 boost::str(
2536 boost::format(
2537 "The number of dimensions: %1% for input tensors of the "
2538 "splitter op should be %2% %3%")
2539 % inputTensorInfo.GetNumDimensions()
2540 % MaxNumOfTensorDimensions
2541 % CHECK_LOCATION().AsString()));
2542 }
2543 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2544
2545 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2546
2547 // Add current input shape to splitterDimSizes
2548 for (unsigned int i = 0; i < inputDimSize; ++i)
2549 {
2550 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2551 }
2552
2553 if (splitterDimSizes[splitDim] % num_split != 0)
2554 {
2555 throw ParseException("Number of splits must evenly divide the dimension");
2556 }
2557 splitterDimSizes[splitDim] /= num_split;
2558
2559 SplitterDescriptor splitDesc(num_split);
2560 for (unsigned int g = 0; g < num_split; ++g)
2561 {
2562 // Set the size of the views.
2563 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2564 {
2565 splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
2566 }
2567 splitDesc.SetViewOriginCoord(g, splitDim, splitterDimSizes[splitDim] * g);
2568 }
2569
2570 IConnectableLayer *layer = m_Network->AddSplitterLayer(splitDesc, nodeDef.name().c_str());
2571
2572 inputSlot.Connect(layer->GetInputSlot(0));
2573
2574 TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
2575 splitterDimSizes.data());
2576
2577 for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
2578 {
2579 layer->GetOutputSlot(i).SetTensorInfo(armnn::TensorInfo(outShape, inputTensorInfo.GetDataType()));
2580 }
2581
2582 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2583}
2584
surmeh01bceff2f2018-03-29 16:29:27 +01002585ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
2586 const tensorflow::GraphDef& graphDef)
2587{
2588 boost::ignore_unused(graphDef);
2589
2590 ActivationDescriptor activationDesc;
2591 activationDesc.m_Function = ActivationFunction::SoftReLu;
2592
2593 return AddActivationLayer(nodeDef, activationDesc);
2594}
2595
2596ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2597{
2598 boost::ignore_unused(graphDef);
2599
2600 ActivationDescriptor activationDesc;
2601 activationDesc.m_Function = ActivationFunction::TanH;
2602 activationDesc.m_A = 1.0f;
2603 activationDesc.m_B = 1.0f;
2604
2605 return AddActivationLayer(nodeDef, activationDesc);
2606}
2607
2608ParsedTfOperationPtr TfParser::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
2609 ActivationDescriptor& activationDesc)
2610{
2611 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2612
2613 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, nodeDef.name().c_str());
2614
2615 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2616 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2617 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2618 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2619}
2620
2621ParsedTfOperationPtr TfParser::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
2622 const tensorflow::GraphDef& graphDef)
2623{
2624 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
2625}
2626
2627ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
2628 const tensorflow::GraphDef& graphDef)
2629{
2630 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
2631}
2632
/// Parses a TensorFlow MaxPool/AvgPool node into an armnn Pooling2d layer.
/// Reads padding, data_format, strides and ksize attributes, computes the
/// output shape per TF's SAME/VALID rules, and derives explicit pad values.
ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
    const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    if (inputs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "2D Pooling expects one input!. Got %1% for Node %2% %3%")
                % inputs.size()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
    std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef, "ksize"); // size of pool windows

    Pooling2dDescriptor pooling2dDescriptor;
    pooling2dDescriptor.m_PoolType            = pooltype;
    // Exclude padding values from AvgPool averages, matching TF behavior.
    pooling2dDescriptor.m_PaddingMethod       = PaddingMethod::Exclude;
    pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
    pooling2dDescriptor.m_DataLayout = dataLayout;
    // DataLayoutIndexed maps H/W positions for the chosen layout, so the
    // attribute lists can be indexed uniformly for NHWC and NCHW.
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    pooling2dDescriptor.m_StrideX    = strides[dataLayoutIndexed.GetWidthIndex()];
    pooling2dDescriptor.m_StrideY    = strides[dataLayoutIndexed.GetHeightIndex()];
    pooling2dDescriptor.m_PoolWidth  = ksize[dataLayoutIndexed.GetWidthIndex()];
    pooling2dDescriptor.m_PoolHeight = ksize[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth  = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    if (paddingString == "SAME")
    {
        padding = true;

        // SAME: output extent = ceil(input / stride), independent of window size.
        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(pooling2dDescriptor.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                 static_cast<float>(pooling2dDescriptor.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        // VALID: output extent = ceil((input - window + 1) / stride), no padding.
        outputHeight = static_cast<uint32_t>(ceil(
                           static_cast<float>(inputHeight - pooling2dDescriptor.m_PoolHeight + 1) /
                           static_cast<float>(pooling2dDescriptor.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(
                          static_cast<float>(inputWidth - pooling2dDescriptor.m_PoolWidth + 1) /
                          static_cast<float>(pooling2dDescriptor.m_StrideX)));
    }

    // Assemble the output TensorInfo in the same layout as the input.
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      inputTensorInfo.GetShape()[3] },
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      inputTensorInfo.GetShape()[1],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

    // Convert the implicit SAME/VALID padding into explicit per-edge pad values.
    CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX,
                pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
    CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY,
                pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);


    IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
    if (layer == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Failed to add pooling2d layer for %1% %2%")
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2743
/// Adds an armnn Addition layer for an Add or BiasAdd node.
/// For BiasAdd (isBiasAdd == true) the second input must be a 1D bias vector and is
/// reshaped for broadcasting per the node's data_format. For plain Add, either 1D
/// input is reshape-broadcast against the other (NHWC assumed).
ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);

    // NOTE: these references bind to the ORIGINAL slots' infos; they stay valid (and
    // deliberately still describe the pre-reshape tensors) after the slot pointers are
    // reassigned to broadcast-reshape layers below.
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    if (isBiasAdd)
    {
        // BiasAdd takes bias as a 1D tensor. We need to add a reshape layer to create a 4D tensor
        // with the same data in the correct dimension for broadcast in addition.
        if(input1Info.GetNumDimensions() != 1)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Unsupported bias for BiasAdd. It should be a 1D vector. "
                        "Got %1% dimensions for input %2%. Node %3% %4%")
                    % input1Info.GetNumDimensions()
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }

        const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");

        CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
        input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
    }
    else
    {
        // Plain Add: reshape whichever side is 1D so it broadcasts against the other.
        if (input0Info.GetNumDimensions() == 1)
        {
            const bool isNHWC = true;
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
        }

        if (input1Info.GetNumDimensions() == 1)
        {
            const bool isNHWC = true;
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
        }
    }

    IConnectableLayer* const layer = m_Network->AddAdditionLayer(nodeDef.name().c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // The output takes the higher-rank side's info: input1's when input0 was
    // originally 1D in a plain Add, otherwise input0's.
    if (input0Info.GetNumDimensions() == 1 && isBiasAdd == false)
    {
        layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
    }
    else
    {
        layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2807
saoste01bbd40612018-08-28 15:41:51 +01002808ParsedTfOperationPtr TfParser::AddRealDivLayer(const tensorflow::NodeDef& nodeDef)
2809{
2810 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2811
2812 IConnectableLayer* const layer = m_Network->AddDivisionLayer(nodeDef.name().c_str());
2813 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2814 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2815
2816 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2817 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2818
2819
2820 if (input0NumDims < input1NumDims)
2821 {
2822 const bool isNHWC = true;
2823 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
2824 }
2825 if (input1NumDims < input0NumDims)
2826 {
2827 const bool isNHWC = true;
2828 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
2829 }
2830
2831 input0Slot->Connect(layer->GetInputSlot(0));
2832 input1Slot->Connect(layer->GetInputSlot(1));
2833
2834 if (input0NumDims < input1NumDims)
2835 {
2836 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2837 }
2838 else
2839 {
2840 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2841
2842 }
2843 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2844}
2845
Sadik Armagan975c09a2018-12-04 10:02:08 +00002846ParsedTfOperationPtr TfParser::AddMaximumLayer(const tensorflow::NodeDef& nodeDef)
2847{
2848 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2849
2850 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2851 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2852
2853 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2854 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2855
2856 if (input0NumDims < input1NumDims)
2857 {
2858 const bool isNHWC = true;
2859 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
2860 }
2861 if (input1NumDims < input0NumDims)
2862 {
2863 const bool isNHWC = true;
2864 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
2865 }
2866
2867 IConnectableLayer* const layer = m_Network->AddMaximumLayer(nodeDef.name().c_str());
2868
2869 input0Slot->Connect(layer->GetInputSlot(0));
2870 input1Slot->Connect(layer->GetInputSlot(1));
2871
2872 TensorInfo outputInfo = input0Slot->GetTensorInfo();
2873 std::vector<unsigned int> outputShape;
2874
2875 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
2876 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
2877
2878 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
2879 {
2880 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
2881 }
2882
2883 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
2884 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2885
2886 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2887}
2888
telsoa01c577f2c2018-08-31 09:22:23 +01002889IConnectableLayer* TfParser::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
2890{
2891 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2892
2893 IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
2894 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2895 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2896
2897 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2898 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2899
2900 if (input0NumDims < input1NumDims)
2901 {
2902 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002903 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01002904 }
2905 if (input1NumDims < input0NumDims)
2906 {
2907 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002908 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01002909 }
2910
2911 input0Slot->Connect(layer->GetInputSlot(0));
2912 input1Slot->Connect(layer->GetInputSlot(1));
2913
2914 if (input0NumDims < input1NumDims)
2915 {
2916 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2917 }
2918 else
2919 {
2920 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2921 }
2922 return layer;
2923}
2924
surmeh01bceff2f2018-03-29 16:29:27 +01002925IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
2926 const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
2927{
telsoa01c577f2c2018-08-31 09:22:23 +01002928 // Finds bias const (if applicable).
surmeh01bceff2f2018-03-29 16:29:27 +01002929 ParsedConstTfOperation<float>* biasNode = nullptr;
2930 if (addNodeDef != nullptr)
2931 {
2932 std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
telsoa01c577f2c2018-08-31 09:22:23 +01002933 // Finds our inputs.
surmeh01bceff2f2018-03-29 16:29:27 +01002934 if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
2935 {
2936 biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
2937 }
2938 else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
2939 {
2940 biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
2941 }
2942 else
2943 {
telsoa01c577f2c2018-08-31 09:22:23 +01002944 throw ParseException(
2945 boost::str(
2946 boost::format(
2947 "ArmNN only supports fully connected layers with constant bias. "
2948 "Inputs %1% and %2%. AddNode %3%. MatMulNode %4% %5%")
2949 % addInputs[0].m_IndexedValue->GetNode().name()
2950 % addInputs[1].m_IndexedValue->GetNode().name()
2951 % addNodeDef->name()
2952 % matMulNodeDef.name()
2953 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002954 }
2955 }
2956
telsoa01c577f2c2018-08-31 09:22:23 +01002957 // Finds matmul inputs.
surmeh01bceff2f2018-03-29 16:29:27 +01002958 ParsedConstTfOperation<float>* weightNode = nullptr;
2959 ParsedTfOperation* inputNode = nullptr;
2960 unsigned int inputIdx = 0;
2961 std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
2962 if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
2963 {
2964 weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
2965 inputNode = mulInputs[1].m_IndexedValue;
2966 inputIdx = mulInputs[1].m_Index;
2967 }
2968 else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
2969 {
2970 weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
2971 inputNode = mulInputs[0].m_IndexedValue;
2972 inputIdx = mulInputs[0].m_Index;
2973 }
2974 else
2975 {
telsoa01c577f2c2018-08-31 09:22:23 +01002976 throw ParseException(
2977 boost::str(
2978 boost::format(
2979 "ArmNN only supports fully connected layers with constant weights. "
2980 "Inputs %1% and %2%. MatMulNode %3% %4%")
2981 % mulInputs[0].m_IndexedValue->GetNode().name()
2982 % mulInputs[1].m_IndexedValue->GetNode().name()
2983 % matMulNodeDef.name()
2984 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002985 }
2986
2987 std::vector<float> weightTensorData;
telsoa01c577f2c2018-08-31 09:22:23 +01002988 // Handles weight.
Matteo Martincigh482ca852018-12-12 09:20:55 +00002989 ConstTensor weights = weightNode->GetConstTensor(weightTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002990
2991 FullyConnectedDescriptor desc;
2992 desc.m_BiasEnabled = addNodeDef != nullptr;
2993
2994 IConnectableLayer* layer = nullptr;
telsoa01c577f2c2018-08-31 09:22:23 +01002995 // Makes the layer.
surmeh01bceff2f2018-03-29 16:29:27 +01002996 if (addNodeDef != nullptr)
2997 {
2998 std::vector<float> biasTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002999 ConstTensor biases = biasNode->GetConstTensor(biasTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01003000
3001 if (weights.GetShape()[1] != biases.GetShape()[0])
3002 {
telsoa01c577f2c2018-08-31 09:22:23 +01003003 throw ParseException(
3004 boost::str(
3005 boost::format(
3006 "Shape of matmul weights and bias do not match. "
3007 "AddNode %1%. MatMulNode %2% %3%")
3008 % addNodeDef->name()
3009 % matMulNodeDef.name()
3010 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003011 }
3012
3013 layer = m_Network->AddFullyConnectedLayer(desc, weights, biases, armnnLayerName);
3014 }
3015 else
3016 {
3017 layer = m_Network->AddFullyConnectedLayer(desc, weights, armnnLayerName);
3018 }
3019
3020 BOOST_ASSERT(layer != nullptr);
3021
3022 inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
3023 unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];
3024
telsoa01c577f2c2018-08-31 09:22:23 +01003025 // Handles output.
surmeh01bceff2f2018-03-29 16:29:27 +01003026 TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
3027 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3028 return layer;
3029}
3030
3031void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
3032{
telsoa01c577f2c2018-08-31 09:22:23 +01003033 // Gets the type of the node (assume float).
surmeh01bceff2f2018-03-29 16:29:27 +01003034 tensorflow::DataType type = tensorflow::DT_FLOAT;
3035 if (nodeDef.attr().count("T") != 0)
3036 {
3037 auto attr = nodeDef.attr().at("T");
3038 type = attr.type();
3039 }
3040 else if (nodeDef.attr().count("dtype") != 0)
3041 {
3042 auto attr = nodeDef.attr().at("dtype");
3043 type = attr.type();
3044 }
3045
3046 if (type != tensorflow::DT_FLOAT && nodeDef.op() != "Const")
3047 {
telsoa01c577f2c2018-08-31 09:22:23 +01003048 throw ParseException(
3049 boost::str(
3050 boost::format(
3051 "Currently only FLOAT is supported for tensorflow nodes (apart from Const). "
3052 "Got %1% for Node %2% %3%")
3053 % tensorflow::DataType_Name(type)
3054 % nodeDef.name()
3055 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003056 }
3057
3058 const std::string& operation = nodeDef.op();
narpra016f37f832018-12-21 18:30:00 +00003059 auto itControlInput = std::find(m_ControlInputs.begin(), m_ControlInputs.end(), operation);
3060 if (itControlInput != m_ControlInputs.end())
3061 {
3062 // We currently allow Control Input from TensorFlow graph but we ignore them from ArmNN graph.
3063 return;
3064 }
surmeh01bceff2f2018-03-29 16:29:27 +01003065 auto it = ms_OperationNameToParsingFunctions.find(operation);
3066 if (it != ms_OperationNameToParsingFunctions.end())
3067 {
3068 auto func = it->second;
3069 ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
3070 ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
3071
telsoa01c577f2c2018-08-31 09:22:23 +01003072 // Stores the parsed operation so that dependent layers can connect to it.
surmeh01bceff2f2018-03-29 16:29:27 +01003073 auto it = m_ParsedTfOperations.find(nodeDef.name());
3074 if (it != m_ParsedTfOperations.end())
3075 {
3076 throw ParseException(boost::str(boost::format("Name %1% used by more than one node") % nodeDef.name()));
3077 }
3078 m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
3079
telsoa01c577f2c2018-08-31 09:22:23 +01003080 // If this node was requested as an output from the network, then adds an ArmNN output layer.
surmeh01bceff2f2018-03-29 16:29:27 +01003081 if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
3082 m_RequestedOutputs.end())
3083 {
3084 auto outId = ParseOutputId(nodeDef.name());
3085 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
3086 IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
3087
3088 TensorInfo tensorInfo = prevSlot.GetTensorInfo();
3089
3090 IConnectableLayer* outputLayer = m_Network->AddOutputLayer(layerId, nodeDef.name().c_str());
3091
3092 prevSlot.Connect(outputLayer->GetInputSlot(0));
3093
3094 TrackOutputBinding(outputLayer, layerId, tensorInfo);
3095 }
3096 }
3097 else
3098 {
telsoa01c577f2c2018-08-31 09:22:23 +01003099 throw ParseException(
3100 boost::str(
3101 boost::format(
3102 "Unsupported operation %1% in tensorflow::GraphDef %2%")
3103 % operation
3104 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003105 }
3106}
3107
3108void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
3109{
telsoa01c577f2c2018-08-31 09:22:23 +01003110 // Adds all nodes to our map.
surmeh01bceff2f2018-03-29 16:29:27 +01003111 m_NodesByName.clear();
3112 m_NetworkInputsBindingInfo.clear();
3113 m_NetworkOutputsBindingInfo.clear();
3114
3115 for (int i = 0; i < graphDef.node_size(); ++i)
3116 {
3117 const tensorflow::NodeDef& node = graphDef.node(i);
3118 m_NodesByName[node.name()] = &node;
3119 }
3120
telsoa01c577f2c2018-08-31 09:22:23 +01003121 // Finds the output nodes the user requested.
surmeh01bceff2f2018-03-29 16:29:27 +01003122 std::vector<const tensorflow::NodeDef*> targetNodes;
3123 for (const std::string& requestedOutputName : m_RequestedOutputs)
3124 {
3125 auto nodeIt = m_NodesByName.find(requestedOutputName);
3126 if (nodeIt == m_NodesByName.end())
3127 {
telsoa01c577f2c2018-08-31 09:22:23 +01003128 throw ParseException(
3129 boost::str(
3130 boost::format(
3131 "Couldn't find requested output node '%1%' in graph %2%")
3132 % requestedOutputName
3133 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003134 }
3135 targetNodes.push_back(nodeIt->second);
3136 }
3137
telsoa01c577f2c2018-08-31 09:22:23 +01003138 // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01003139 std::vector<const tensorflow::NodeDef*> sortedNodes;
3140 if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
3141 targetNodes,
3142 [this](const tensorflow::NodeDef* node)
3143 {
3144 auto outputs = GetTfInputNodes(*node);
3145 std::vector<const tensorflow::NodeDef*> nodesOnly;
3146 for (const auto & o : outputs) {
3147 nodesOnly.push_back(o.m_IndexedValue);
3148 }
3149 return nodesOnly;
3150 },
3151 sortedNodes))
3152 {
telsoa01c577f2c2018-08-31 09:22:23 +01003153 throw ParseException(
3154 boost::str(
3155 boost::format(
3156 "Cycle detected in graph %1%")
3157 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003158 }
3159
telsoa01c577f2c2018-08-31 09:22:23 +01003160 // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01003161 for (const auto& it : sortedNodes)
3162 {
3163 const tensorflow::NodeDef& currentNode = *it;
3164 LoadNodeDef(currentNode, graphDef);
3165 }
3166}
3167
3168INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
3169 const std::map<std::string, TensorShape>& inputShapes,
3170 const std::vector<std::string>& requestedOutputs)
3171{
3172 FILE* fd = fopen(graphFile, "r");
3173
3174 if (fd == nullptr)
3175 {
telsoa01c577f2c2018-08-31 09:22:23 +01003176 throw FileNotFoundException(
3177 boost::str(
3178 boost::format(
3179 "Graph file %1% failed to open %2%")
3180 % graphFile
3181 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003182 }
3183
telsoa01c577f2c2018-08-31 09:22:23 +01003184 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003185 tensorflow::GraphDef graphDef;
3186 auto input = new google::protobuf::io::FileInputStream(fileno(fd));
3187 bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
3188 delete input;
3189 fclose(fd);
3190
3191 if (!success)
3192 {
telsoa01c577f2c2018-08-31 09:22:23 +01003193 throw ParseException(
3194 boost::str(
3195 boost::format(
3196 "Failed to parse graph file %1%")
3197 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003198 }
3199
3200 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3201}
3202
3203INetworkPtr TfParser::CreateNetworkFromString(const char* protoText,
3204 const std::map<std::string, TensorShape>& inputShapes,
3205 const std::vector<std::string>& requestedOutputs)
3206{
telsoa01c577f2c2018-08-31 09:22:23 +01003207 // Parses the string into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003208 tensorflow::GraphDef graphDef;
3209 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
3210
3211 if (!success)
3212 {
telsoa01c577f2c2018-08-31 09:22:23 +01003213 throw ParseException(
3214 boost::str(
3215 boost::format(
3216 "Failed to parse graph file %1%")
3217 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003218 }
3219
3220 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3221}
3222
3223INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
3224 const std::map<std::string, TensorShape>& inputShapes,
3225 const std::vector<std::string>& requestedOutputs)
3226{
3227 FILE* fd = fopen(graphFile, "rb");
3228
3229 if (fd == nullptr)
3230 {
telsoa01c577f2c2018-08-31 09:22:23 +01003231 throw FileNotFoundException(
3232 boost::str(
3233 boost::format(
3234 "Graph file %1% failed to open %2%")
3235 % graphFile
3236 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003237 }
3238
telsoa01c577f2c2018-08-31 09:22:23 +01003239 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003240 tensorflow::GraphDef graphDef;
3241
3242 google::protobuf::io::FileInputStream inStream(fileno(fd));
3243 google::protobuf::io::CodedInputStream codedStream(&inStream);
3244 codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
3245 bool success = graphDef.ParseFromCodedStream(&codedStream);
3246 fclose(fd);
3247
3248 if (!success)
3249 {
telsoa01c577f2c2018-08-31 09:22:23 +01003250 throw ParseException(
3251 boost::str(
3252 boost::format(
3253 "Failed to parse protobuf file %1% %2%")
3254 % graphFile
3255 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003256 }
3257
3258 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3259}
3260
3261INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
3262 const std::map<std::string, TensorShape>& inputShapes,
3263 const std::vector<std::string>& requestedOutputs)
3264{
3265 m_Network = INetwork::Create();
3266
3267 m_InputShapes = inputShapes;
3268 if (requestedOutputs.size() == 0)
3269 {
telsoa01c577f2c2018-08-31 09:22:23 +01003270 throw ParseException(
3271 boost::str(
3272 boost::format(
3273 "requestedOutputs must have at least one entry %1%")
3274 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003275 }
3276 m_RequestedOutputs = requestedOutputs;
3277
3278 try
3279 {
3280 LoadGraphDef(graphDef);
3281 }
3282 catch (const ParseException& e)
3283 {
3284 Cleanup();
3285 throw e;
3286 }
3287
3288 Cleanup();
3289
3290 return std::move(m_Network);
3291}
3292
3293void TfParser::Cleanup()
3294{
telsoa01c577f2c2018-08-31 09:22:23 +01003295 // Cleanup, in case we reuse this parser.
surmeh01bceff2f2018-03-29 16:29:27 +01003296 m_InputShapes.clear();
3297 m_RequestedOutputs.clear();
3298 m_NodesByName.clear();
3299 m_ParsedTfOperations.clear();
3300}
3301
/// Returns the binding id and tensor info for the named network input.
/// @throws InvalidArgumentException if no input with this name was tracked.
BindingPointInfo TfParser::GetNetworkInputBindingInfo(const std::string& name) const
{
    return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
}
3306
/// Returns the binding id and tensor info for the named network output.
/// @throws InvalidArgumentException if no output with this name was tracked.
BindingPointInfo TfParser::GetNetworkOutputBindingInfo(const std::string& name) const
{
    return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
}
3311
3312std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(const std::string& layerName,
3313 const char* bindingPointDesc,
3314 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3315{
3316 auto it = nameToBindingInfo.find(layerName);
3317 if (it == nameToBindingInfo.end())
3318 {
telsoa01c577f2c2018-08-31 09:22:23 +01003319 throw InvalidArgumentException(
3320 boost::str(
3321 boost::format(
3322 "Unknown %1% '%2%' %3%")
3323 % bindingPointDesc
3324 % layerName
3325 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003326 }
3327 return it->second;
3328}
3329
/// Records the binding point (id + tensor info) of a network input layer.
/// @throws ParseException if the layer's name is already tracked as an input.
void TfParser::TrackInputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
{
    return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
}
3334
/// Records the binding point (id + tensor info) of a network output layer.
/// @throws ParseException if the layer's name is already tracked as an output.
void TfParser::TrackOutputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
{
    return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
}
3339
3340void TfParser::TrackBindingPoint(IConnectableLayer* layer,
3341 LayerBindingId id,
3342 const TensorInfo& tensorInfo,
3343 const char* bindingPointDesc,
3344 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3345{
3346 const std::string layerName = layer->GetName();
3347 auto it = nameToBindingInfo.find(layerName);
3348 if (it == nameToBindingInfo.end())
3349 {
3350 nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
3351 }
3352 else
3353 {
telsoa01c577f2c2018-08-31 09:22:23 +01003354 throw ParseException(
3355 boost::str(
3356 boost::format(
3357 "Id %1% used by more than one %2% layer %3%")
3358 % id
3359 % bindingPointDesc
3360 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003361 }
3362}
3363
3364} // namespace armnnTfParser