surmeh01bceff2f2018-03-29 16:29:27 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
surmeh01bceff2f2018-03-29 16:29:27 +01004//
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00005
surmeh01bceff2f2018-03-29 16:29:27 +01006#include "TfParser.hpp"
7
surmeh01bceff2f2018-03-29 16:29:27 +01008#include <armnn/TypesUtils.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +01009#include <armnn/Descriptors.hpp>
10
11#include <GraphTopologicalSort.hpp>
Sadik Armagan479045b2018-10-01 11:51:37 +010012#include <ParserHelper.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010013#include <Permute.hpp>
Matteo Martincigh46315822018-11-28 16:22:36 +000014#include <DataLayoutIndexed.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010015
16#include <google/protobuf/io/zero_copy_stream_impl.h>
17#include <google/protobuf/text_format.h>
18
19#include "tensorflow/core/framework/graph.pb.h"
surmeh01bceff2f2018-03-29 16:29:27 +010020
surmeh01bceff2f2018-03-29 16:29:27 +010021#include <boost/format.hpp>
22#include <boost/core/ignore_unused.hpp>
24#include <boost/numeric/conversion/cast.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010025#include <boost/polymorphic_cast.hpp>
26
surmeh01bceff2f2018-03-29 16:29:27 +010027#include <numeric>
surmeh01bceff2f2018-03-29 16:29:27 +010028
Matteo Martincigh46315822018-11-28 16:22:36 +000029using namespace armnnUtils;
surmeh01bceff2f2018-03-29 16:29:27 +010030using namespace armnn;
31
32namespace armnnTfParser
33{
34namespace
35{
36
37const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
38const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
39
surmeh01bceff2f2018-03-29 16:29:27 +010040
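// Reads a mandatory attribute from a NodeDef: looks up 'attribName', checks that its value case
// matches 'expectedValueCase' and passes the AttrValue to 'callable'. A ParseException is thrown
// if the attribute is missing or has an unexpected value case.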
41template <typename Callable>
42void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
43 const std::string& attribName,
44 tensorflow::AttrValue::ValueCase expectedValueCase,
45 Callable callable)
46{
47 auto iter = nodeDef.attr().find(attribName);
48 if (iter != nodeDef.attr().end())
49 {
50 const auto& attrValue = iter->second;
51 if (attrValue.value_case() == expectedValueCase)
52 {
53 callable(attrValue);
54 }
55 else
56 {
telsoa01c577f2c2018-08-31 09:22:23 +010057 throw ParseException(
58 boost::str(
59 boost::format(
60 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
61 "but found %4% instead %5%")
62 % attribName
63 % nodeDef.name()
64 % static_cast<int>(expectedValueCase)
65 % static_cast<int>(attrValue.value_case())
66 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +010067 }
68 }
69 else
70 {
telsoa01c577f2c2018-08-31 09:22:23 +010071 throw ParseException(
72 boost::str(
73 boost::format(
74 "Could not find required attribute %1% in node %2% %3%")
75 % attribName
76 % nodeDef.name()
77 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +010078 }
79}
80
81template <typename Callable>
82void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
83 const std::string& attribName,
84 tensorflow::AttrValue::ValueCase expectedValueCase,
85 Callable callable)
86{
87 auto iter = nodeDef.attr().find(attribName);
88 if (iter != nodeDef.attr().end())
89 {
90 const auto& attrValue = iter->second;
91 if (attrValue.value_case() == expectedValueCase)
92 {
93 callable(attrValue);
94 }
95 else
96 {
telsoa01c577f2c2018-08-31 09:22:23 +010097 throw ParseException(
98 boost::str(
99 boost::format(
100 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
101 "but found %4% instead %5%")
102 % attribName
103 % nodeDef.name()
104 % static_cast<int>(expectedValueCase)
105 % static_cast<int>(attrValue.value_case())
106 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100107 }
108 }
109}
110
111float ReadMandatoryNodeFloatAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
112{
113 float attribValue = 0.0f;
114 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
115 [&attribValue](const tensorflow::AttrValue& attrValue)
116 {
117 attribValue = attrValue.f();
118 });
119 return attribValue;
120}
121
Conor Kennedyc2130a02018-12-05 11:05:54 +0000122int32_t ReadMandatoryNodeInt32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
123{
 124 int32_t attribValue = 0;
125 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
126 [&attribValue](const tensorflow::AttrValue& attrValue)
127 {
128 attribValue = static_cast<int32_t>(attrValue.i());
129 });
130 return attribValue;
131}
132
Ferran Balaguer51dd62f2019-01-11 19:29:18 +0000133bool ReadMandatoryNodeBoolAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
134{
135 bool attribValue = false;
136 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
137 [&attribValue](const tensorflow::AttrValue& attrValue)
138 {
139 attribValue = static_cast<bool>(attrValue.b());
140 });
141 return attribValue;
142}
143
surmeh01bceff2f2018-03-29 16:29:27 +0100144uint32_t ReadMandatoryNodeUint32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
145{
146 uint32_t attribValue = 0u;
147 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
148 [&attribValue](const tensorflow::AttrValue& attrValue)
149 {
150 attribValue = static_cast<uint32_t>(attrValue.i());
151 });
152 return attribValue;
153}
154
155std::string ReadMandatoryNodeStringAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
156{
157 std::string attribValue = "";
158 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
159 [&attribValue](const tensorflow::AttrValue& attrValue)
160 {
161 attribValue = attrValue.s();
162 });
163 return attribValue;
164}
165
166std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
167 const std::string& name)
168{
169 std::vector<uint32_t> attriList;
170 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
171 [&attriList](const tensorflow::AttrValue& attrValue)
172 {
173 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
174 {
175 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
176 }
177 });
178
179 return attriList;
180}
181
182std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
183 const std::string& name)
184{
185 std::vector<uint32_t> attriList;
186 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
187 [&attriList](const tensorflow::AttrValue& attrValue)
188 {
189 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
190 {
191 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
192 }
193 });
194
195 return attriList;
196}
197
198bool ReadOptionalNodeBoolAttribute(const tensorflow::NodeDef& nodeDef,
199 const std::string& name,
200 bool defaultValue = false)
201{
202 bool attribValue = defaultValue;
203 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
204 [&attribValue](const tensorflow::AttrValue& attrValue)
205 {
206 attribValue = attrValue.b();
207 });
208 return attribValue;
209}
210
211tensorflow::DataType ReadMandatoryNodeTypeAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
212{
213 tensorflow::DataType attribValue = tensorflow::DT_INVALID;
214 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
215 [&attribValue](const tensorflow::AttrValue& attrValue)
216 {
217 attribValue = attrValue.type();
218 });
219 return attribValue;
220}
221
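// Computes the TensorInfo of a Reshape output: copies the target dimensions and, if a single
// -1 ("stretch") dimension is present, infers its size from the input's total element count.
// For example, an input holding 12 elements reshaped to { 3, -1 } produces the shape { 3, 4 }.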
222TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& targetDims)
223{
224 std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
225 const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);
226
227 if (stretchDim != targetDims.end())
228 {
229 if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
230 {
telsoa01c577f2c2018-08-31 09:22:23 +0100231 throw ParseException(
232 boost::str(
233 boost::format(
234 "At most one component of shape can be -1 %1%")
235 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100236 }
237
telsoa01c577f2c2018-08-31 09:22:23 +0100238 auto targetNumElements =
239 boost::numeric_cast<unsigned int>(
240 std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
surmeh01bceff2f2018-03-29 16:29:27 +0100241 auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
242 outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
243 }
244
245 TensorInfo reshapeInfo = input;
246 reshapeInfo.SetShape(TensorShape{ static_cast<unsigned int>(outDims.size()), outDims.data() });
247
248 return reshapeInfo;
249}
250
telsoa01c577f2c2018-08-31 09:22:23 +0100251// We need the input0Slot to guide the reshape for input1Slot.
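// The 1-D input1 tensor is reshaped to the rank of input0, with its length placed at the channel
// dimension (the last dimension for NHWC, the third from the end for NCHW) so that it broadcasts
// against input0. Returns the output slot of the inserted Reshape layer.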
saoste01bbd40612018-08-28 15:41:51 +0100252IOutputSlot* AddBroadcastReshapeLayer(IOutputSlot* input0Slot, IOutputSlot* input1Slot, bool isNHWC,
253 INetwork& m_Network, const tensorflow::NodeDef& nodeDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100254{
255 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
256 const TensorInfo inputTensorInfo = input0Slot->GetTensorInfo();
257 const unsigned int matchDim = inputTensorInfo.GetNumDimensions() - (isNHWC ? 1 : 3);
258 std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
259 std::fill_n(reshapedDimensions.begin(), inputTensorInfo.GetNumDimensions(), 1);
260 reshapedDimensions[matchDim] = input1Info.GetShape()[0];
261
262 armnn::TensorInfo reshapedInfo = input1Info;
263 reshapedInfo.SetShape(TensorShape{ inputTensorInfo.GetNumDimensions(), reshapedDimensions.data() });
264
265 const std::string reshapeLayerName = "reshape_for-" + nodeDef.name();
266 ReshapeDescriptor reshapeDesc;
267 reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
268 IConnectableLayer* const reshapeLayer = m_Network.AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());
269
270 input1Slot->Connect(reshapeLayer->GetInputSlot(0));
271 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
272
273 input1Slot = &reshapeLayer->GetOutputSlot(0);
274
275 return input1Slot;
276}
277
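// Splits a TensorFlow input name of the form "nodeName:n" into the producing node's name and the
// output index. The index defaults to 0 when no ":n" suffix is present and must lie in [0, 100].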
278OutputId ParseOutputId(const std::string & name)
279{
280 unsigned int outputNum = 0;
281 size_t colonPos = name.find_last_of(":");
282 if (colonPos != std::string::npos)
283 {
284 int n = std::stoi(name.substr(colonPos+1));
285 if (n<0 || n>100)
286 {
telsoa01c577f2c2018-08-31 09:22:23 +0100287 throw ParseException(
288 boost::str(
289 boost::format(
290 "Output tensor id is out of range for %1% %2%")
291 % name
292 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100293 }
294 outputNum = static_cast<unsigned int>(n);
295 }
296 return OutputId(name.substr(0,colonPos),outputNum);
297}
298
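// Validation helpers used by the Parse* functions below: CHECK_DATA_FORMAT accepts only the
// "NHWC" and "NCHW" data formats, and CHECK_PADDING_TYPE accepts only "SAME" and "VALID" padding.
// Both throw a ParseException naming the offending node otherwise.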
telsoa01c577f2c2018-08-31 09:22:23 +0100299#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \
300 if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
301 { \
302 throw ParseException( \
303 boost::str( \
304 boost::format( \
305 "Unsupported data format %1% passed for %2% node %3%. " \
306 "Only NHWC and NCHW supported %4%") \
307 % FORMAT \
308 % NODE_TYPE \
309 % NODE_DEF.name() \
310 % CHECK_LOCATION().AsString())); \
311 }
312
313#define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
314 if(PADDING != "SAME" && PADDING != "VALID" ) \
315 { \
316 throw ParseException( \
317 boost::str( \
318 boost::format( \
319 "Only 'SAME' and 'VALID' padding supported. Got %1% for %2% %3%") \
320 % PADDING \
321 % NODE_DEF.name() \
322 % CHECK_LOCATION().AsString())); \
323 } \
324
surmeh01bceff2f2018-03-29 16:29:27 +0100325} // namespace
326
327const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
328 { "Const", &TfParser::ParseConst },
329 { "Add", &TfParser::ParseAdd },
Ferran Balaguerfbdad032018-12-28 18:15:24 +0000330 { "AddN", &TfParser::ParseAddN },
surmeh01bceff2f2018-03-29 16:29:27 +0100331 { "BiasAdd", &TfParser::ParseBiasAdd },
332 { "Identity", &TfParser::ParseIdentity },
333 { "Conv2D", &TfParser::ParseConv2D },
334 { "DepthwiseConv2dNative", &TfParser::ParseDepthwiseConv2D },
Conor Kennedyc2130a02018-12-05 11:05:54 +0000335 { "ExpandDims", &TfParser::ParseExpandDims },
surmeh01bceff2f2018-03-29 16:29:27 +0100336 { "FusedBatchNorm", &TfParser::ParseFusedBatchNorm },
FrancisMurtagh94412af2019-01-24 10:53:39 +0000337 { "Gather", &TfParser::ParseGather},
jimfly01a06bf312018-12-18 16:24:51 +0000338 { "Greater", &TfParser::ParseGreater},
surmeh01bceff2f2018-03-29 16:29:27 +0100339 { "ConcatV2", &TfParser::ParseConcat },
340 { "LRN", &TfParser::ParseLrn },
341 { "MatMul", &TfParser::ParseMatMul },
Ferran Balaguer51dd62f2019-01-11 19:29:18 +0000342 { "Mean", &TfParser::ParseMean },
surmeh01bceff2f2018-03-29 16:29:27 +0100343 { "Mul", &TfParser::ParseMul },
344 { "Placeholder", &TfParser::ParsePlaceholder },
saoste01bbd40612018-08-28 15:41:51 +0100345 { "RealDiv", &TfParser::ParseRealDiv },
surmeh01bceff2f2018-03-29 16:29:27 +0100346 { "Relu", &TfParser::ParseRelu },
347 { "Relu6", &TfParser::ParseRelu6 },
348 { "Reshape", &TfParser::ParseReshape },
349 { "ResizeBilinear", &TfParser::ParseResizeBilinear },
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +0000350 { "Rsqrt", &TfParser::ParseRsqrt },
surmeh01bceff2f2018-03-29 16:29:27 +0100351 { "Shape", &TfParser::ParseShape },
352 { "Squeeze", &TfParser::ParseSqueeze },
353 { "Sigmoid", &TfParser::ParseSigmoid },
354 { "Softmax", &TfParser::ParseSoftmax },
355 { "Softplus", &TfParser::ParseSoftplus },
Sadik Armagan2ad6cb42018-12-27 11:23:44 +0000356 { "Split", &TfParser::ParseSplit },
surmeh01bceff2f2018-03-29 16:29:27 +0100357 { "Tanh", &TfParser::ParseTanh },
358 { "MaxPool", &TfParser::ParseMaxPool },
359 { "AvgPool", &TfParser::ParseAvgPool },
telsoa01c577f2c2018-08-31 09:22:23 +0100360 { "Maximum", &TfParser::ParseMaximum },
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +0000361 { "Minimum", &TfParser::ParseMinimum },
jimfly0184c70e62018-12-19 13:14:46 +0000362 { "Equal", &TfParser::ParseEqual },
jimfly01f6ba7472018-12-04 10:09:52 +0000363 { "Pad", &TfParser::ParsePad },
narpra016f37f832018-12-21 18:30:00 +0000364 { "Sub", &TfParser::ParseSub }
365};
366
367const std::list<std::string> TfParser::m_ControlInputs = {
368 "Assert"
surmeh01bceff2f2018-03-29 16:29:27 +0100369};
370
371ITfParser* ITfParser::CreateRaw()
372{
373 return new TfParser();
374}
375
376ITfParserPtr ITfParser::Create()
377{
378 return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
379}
380
381void ITfParser::Destroy(ITfParser* parser)
382{
383 delete parser;
384}
385
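// Implements TensorFlow's SAME/VALID padding rule. With samePadding set, the output size is
// ceil(inputSize / stride) and the total padding (outputSize - 1) * stride + filterSize - inputSize
// is split between front and back, with the back getting the extra element when the total is odd.
// For example, inputSize = 7, stride = 2 and filterSize = 3 give an output of 4 and padding 1/1.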
386inline void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
387 uint32_t filterSize, bool samePadding,
388 uint32_t* paddingFront, uint32_t* paddingBack) {
389 *paddingFront = 0;
390 *paddingBack = 0;
391
392 if (samePadding) {
393 uint32_t outputSize = (inputSize + stride - 1) / stride;
394 uint32_t temp = (outputSize - 1) * stride + filterSize;
395 if (temp > inputSize) {
396 *paddingFront = (temp - inputSize) / 2;
397 *paddingBack = (temp - inputSize) - *paddingFront;
398 }
399 }
400}
401
402void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
403 bool samePadding)
404{
405 CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
406}
407
408/// An Abstract base class which represents a single tensorflow operation (node)
409/// that has been (potentially partially) converted to Armnn.
410/// It may not yet have been fully converted into actual Armnn layers.
411class ParsedTfOperation
412{
413public:
414 ParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
415 : m_Parser(parser)
416 , m_Node(node)
417 {
418 }
419
420 virtual ~ParsedTfOperation() {};
421
422 const tensorflow::NodeDef& GetNode() const { return m_Node; }
423
424 /// Gets the ArmNN IOutputSlot corresponding to the given output index of the Tensorflow operation.
425 /// This may result in the creation of Armnn layers if this was deferred (e.g. see ParsedConstTfOperation).
426 virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) = 0;
427
 428 /// If this operation is an Identity then this will follow the chain and return the 'parent' operation (recursively).
429 virtual ParsedTfOperation* ResolveIdentityOperations()
430 {
431 return this;
432 }
433
434protected:
435 TfParser* m_Parser;
436 const tensorflow::NodeDef& m_Node;
437};
438
 439/// A ParsedTfOperation where the Armnn equivalent is a single layer,
440/// with output slots that correspond directly to the Tf node outputs.
441class SingleLayerParsedTfOperation : public ParsedTfOperation
442{
443public:
444 SingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node, IConnectableLayer* layer)
445 : ParsedTfOperation(parser, node)
446 , m_Layer(layer)
447 {
448 }
449
450 IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
451 {
452 BOOST_ASSERT(m_Layer);
telsoa01c577f2c2018-08-31 09:22:23 +0100453 // Assumes one-to-one mapping between Tf and armnn output slots.
surmeh01bceff2f2018-03-29 16:29:27 +0100454 unsigned int armnnOutputSlotIdx = tfOutputIndex;
455 if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
456 {
457 throw ParseException(
telsoa01c577f2c2018-08-31 09:22:23 +0100458 boost::str(
459 boost::format(
460 "The requested output slot #%1% "
461 "for %2% does not exist %3%")
462 % armnnOutputSlotIdx
463 % m_Layer->GetName()
464 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100465 }
466 return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
467 }
468
469protected:
470 IConnectableLayer* m_Layer;
471};
472
telsoa01c577f2c2018-08-31 09:22:23 +0100473/// A SingleLayerParsedTfOperation for deferred layer creation.
surmeh01bceff2f2018-03-29 16:29:27 +0100474class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
475{
476public:
477 DeferredSingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
478 : SingleLayerParsedTfOperation(parser, node, nullptr)
479 {
480 }
481
482 IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
483 {
484 if (!m_Layer)
485 {
486 CreateLayerDeferred();
487 }
488 return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
489 }
490
491private:
492 virtual void CreateLayerDeferred() = 0;
493};
494
495
496TfParser::TfParser()
497 : m_Network(nullptr, nullptr)
498{
499}
500
501
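// Follows a chain of Identity nodes back to the first non-Identity producer.
// Throws if an Identity node does not have exactly one input, or if that input cannot be found.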
502const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
503{
504 if (nodeDef->op() != "Identity")
505 {
506 return nodeDef;
507 }
508
509 if (nodeDef->input_size() != 1)
510 {
telsoa01c577f2c2018-08-31 09:22:23 +0100511 throw ParseException(
512 boost::str(
513 boost::format(
514 "Identity node should have a single input! %1% has %2% inputs %3%")
515 % nodeDef->name()
516 % nodeDef->input_size()
517 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100518 }
519
520 auto it = m_NodesByName.find(nodeDef->input(0));
521 if (it != m_NodesByName.end())
522 {
523 const tensorflow::NodeDef* inputNode = it->second;
524 return ResolveIdentityNode(inputNode);
525 }
526 else
527 {
telsoa01c577f2c2018-08-31 09:22:23 +0100528 throw ParseException(
529 boost::str(
530 boost::format(
531 "Cannot find what the Identity node %1% is linked to! %2%")
532 % nodeDef->name()
533 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100534 }
535}
536
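// Collects the producing NodeDef and output index for each data input of the given node,
// parsing "name:index" input strings and skipping '^'-prefixed control inputs.
// Const nodes are treated as having no inputs.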
537std::vector<OutputOfConstNodeDef>
538TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
539{
540 std::vector<OutputOfConstNodeDef> ret;
541
surmeh013537c2c2018-05-18 16:31:43 +0100542 if (nodeDef.op() == "Const")
543 {
 544 // For some reason a Const node can have "Control Inputs". We ignore them for now.
545 return ret;
546 }
547
surmeh01bceff2f2018-03-29 16:29:27 +0100548 ret.reserve(boost::numeric_cast<size_t>(nodeDef.input_size()));
549 for (int j = 0; j < nodeDef.input_size(); ++j)
550 {
551 OutputId outputId = ParseOutputId(nodeDef.input(j));
surmeh013537c2c2018-05-18 16:31:43 +0100552
553 if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
554 {
narpra016f37f832018-12-21 18:30:00 +0000555 // We currently allow Control Inputs from the TensorFlow graph but ignore them in the ArmNN graph.
556 continue;
surmeh013537c2c2018-05-18 16:31:43 +0100557 }
558
surmeh01bceff2f2018-03-29 16:29:27 +0100559 auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
560 if (inputIt == m_NodesByName.end())
561 {
562 throw ParseException(
telsoa01c577f2c2018-08-31 09:22:23 +0100563 boost::str(
564 boost::format(
565 "Can't find node '%1%', which is listed as an input of '%2%' %3%")
566 % nodeDef.input(j)
567 % nodeDef.name()
568 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100569 }
570 ret.push_back(OutputOfConstNodeDef(inputIt->second,outputId.m_Index));
571 }
572
573 return ret;
574}
575
576std::vector<OutputOfParsedTfOperation>
577TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
578 std::size_t expectedNumInputs)
579{
telsoa01c577f2c2018-08-31 09:22:23 +0100580 // Fetches the tensorflow nodes connected as inputs and validate the size.
surmeh01bceff2f2018-03-29 16:29:27 +0100581 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
582 const std::size_t numInputs = nodes.size();
583 if (numInputs != expectedNumInputs)
584 {
telsoa01c577f2c2018-08-31 09:22:23 +0100585 throw ParseException(
586 boost::str(
587 boost::format(
588 "Unexpected number of inputs for node %1%. Expected %2%, found %3% %4%")
589 % nodeDef.name()
590 % expectedNumInputs
591 % numInputs
592 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100593 }
telsoa01c577f2c2018-08-31 09:22:23 +0100594 // Fetches the corresponding ParsedTfOperation operations
surmeh01bceff2f2018-03-29 16:29:27 +0100595 std::vector<OutputOfParsedTfOperation> result;
596 for (auto&& node : nodes)
597 {
598 auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
599 if (it == m_ParsedTfOperations.end())
600 {
telsoa01c577f2c2018-08-31 09:22:23 +0100601 throw ParseException(
602 boost::str(
603 boost::format(
604 "Node with name '%1%' has not been parsed %2%")
605 % node.m_IndexedValue->name()
606 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100607 }
608 ParsedTfOperation* parsedOp = it->second.get();
609 // Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
610 parsedOp = parsedOp->ResolveIdentityOperations();
611 result.push_back(OutputOfParsedTfOperation(parsedOp,node.m_Index));
612 }
613 return result;
614}
615
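// Creates an Addition layer for the two given output slots. If one input is 1D and the other 4D,
// a broadcast Reshape layer is inserted first; any other rank mismatch is rejected. The output
// tensor shape is the element-wise maximum of the two (possibly reshaped) input shapes.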
Ferran Balaguerfbdad032018-12-28 18:15:24 +0000616IConnectableLayer* TfParser::CreateAdditionLayer(
617 const tensorflow::NodeDef& nodeDef,
618 IOutputSlot* input0Slot,
619 IOutputSlot* input1Slot,
620 const std::string& layerName)
621{
622 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
623 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
624
625 const unsigned int input0Dim = input0Info.GetNumDimensions();
626 const unsigned int input1Dim = input1Info.GetNumDimensions();
627 if (input0Dim != input1Dim)
628 {
 629 // Broadcasting where input0 and input1 have different numbers of dimensions
 630 // is only supported for 1D and 4D tensor pairs.
631 if (input0Dim == 1 && input1Dim == 4)
632 {
633 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
634 }
635 else if (input0Dim == 4 && input1Dim == 1)
636 {
637 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
638 }
639 else
640 {
641 throw ParseException(
642 boost::str(
643 boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
644 % layerName
645 % nodeDef.name()
646 % CHECK_LOCATION().AsString()));
647 }
648 }
649 IConnectableLayer* const layer = m_Network->AddAdditionLayer(layerName.c_str());
650
651 input0Slot->Connect(layer->GetInputSlot(0));
652 input1Slot->Connect(layer->GetInputSlot(1));
653
654 // Ensure the output tensor has the correct dimensions even if a broadcast has been done
655 TensorInfo outputInfo = input0Slot->GetTensorInfo();
656 std::vector<unsigned int> outputShape;
657
658 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
659 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
660
661 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
662 {
663 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
664 }
665
666 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
667 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
668
669 return layer;
670}
671
672IConnectableLayer* TfParser::CreateAdditionLayer(
673 const tensorflow::NodeDef& nodeDef,
674 IConnectableLayer* layerOne,
675 IConnectableLayer* layerTwo,
676 unsigned int numberOfAddition,
677 unsigned long numberOfLayersToConnect,
678 bool isOdd)
679{
680 IOutputSlot* input0Slot = &layerOne->GetOutputSlot(0);
681 IOutputSlot* input1Slot = &layerTwo->GetOutputSlot(0);
682 std::string layerName(nodeDef.name());
683 if (isOdd || numberOfLayersToConnect != 2)
684 {
685 // we are not connecting the final layer
686 layerName.append("_addN_").append(std::to_string(numberOfAddition));
687 }
688 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
689}
690
691IConnectableLayer* TfParser::CreateAdditionLayer(
692 const tensorflow::NodeDef& nodeDef,
693 const OutputOfParsedTfOperation& opOne,
694 const OutputOfParsedTfOperation& opTwo,
695 unsigned int numberOfAddition)
696{
697 IOutputSlot* input0Slot = &opOne.m_IndexedValue->ResolveArmnnOutputSlot(opOne.m_Index);
698 IOutputSlot* input1Slot = &opTwo.m_IndexedValue->ResolveArmnnOutputSlot(opTwo.m_Index);
699 std::string layerName(nodeDef.name());
700 layerName.append("_addN_").append(std::to_string(numberOfAddition));
701 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
702}
703
704IConnectableLayer* TfParser::CreateAdditionLayer(
705 const tensorflow::NodeDef& nodeDef,
706 const OutputOfParsedTfOperation& op,
707 IConnectableLayer* layer)
708{
709 IOutputSlot* input0Slot = &op.m_IndexedValue->ResolveArmnnOutputSlot(op.m_Index);
710 IOutputSlot* input1Slot = &layer->GetOutputSlot(0);
711 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, nodeDef.name());
712}
713
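// Parses an AddN node by reducing its N inputs with a binary tree of Addition layers: the inputs
// are first added together in pairs, then the resulting layers are themselves paired up until a
// single layer remains; with an odd number of inputs the leftover input is added to that final
// layer (exactly two inputs fall back to a plain Add).
// For example, AddN(a, b, c, d, e) becomes ((a + b) + (c + d)) + e.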
714ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
715{
716 uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
717 if (numberOfInputs < 2)
718 {
719 // should never happen
720 throw ParseException(
721 boost::str(
722 boost::format(
723 "AddN Node with name '%1%' has less than two (%2) inputs %3%")
724 % nodeDef.name()
725 % std::to_string(numberOfInputs)
726 % CHECK_LOCATION().AsString()));
727 }
728 else if (numberOfInputs == 2)
729 {
 730 // This is the same as a simple Add operation.
731 return AddAdditionLayer(nodeDef, false);
732 }
733 else
734 {
 735 // Build a binary tree of Add layers and return the final Add from this function.
 736 // If we have an odd number of inputs then the final Add will consist of a layer connecting to an
 737 // OutputOfParsedTfOperation, otherwise it will be two layers being added together.
738 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numberOfInputs);
739 unsigned int numberOfAdditions = 0;
740 std::vector<IConnectableLayer*> layers;
741 // NOTE: at this point we will have a minimum of three inputs
742 for (unsigned int i = 0; i < numberOfInputs; ++i)
743 {
744 // every time i is odd we have two inputs to process.
745 bool onSecondItem = i % 2;
746 if (onSecondItem)
747 {
748 ++numberOfAdditions;
749 IConnectableLayer* newLayer = CreateAdditionLayer(
750 nodeDef, inputs[ i - 1], inputs[i], numberOfAdditions);
751 layers.push_back(newLayer);
752 }
753 }
754
755 std::vector<IConnectableLayer*> layersToConnect(layers);
756 unsigned long numberOfLayersToConnect = layersToConnect.size();
757 bool isOdd = numberOfInputs % 2;
758
759 while (numberOfLayersToConnect > 1)
760 {
761 layers.clear();
762 for (unsigned long i = 0; i < numberOfLayersToConnect; ++i) {
763 bool onSecondItem = i % 2;
764 if (onSecondItem) {
765 ++numberOfAdditions;
766 IConnectableLayer* newLayer = CreateAdditionLayer(
767 nodeDef,
768 layersToConnect[i - 1],
769 layersToConnect[i],
770 numberOfAdditions,
771 numberOfLayersToConnect,
772 isOdd);
773 layers.push_back(newLayer);
774 }
775 }
 776 // We may need to go round the while loop again to pair up the newly created layers.
777 layersToConnect = layers;
778 numberOfLayersToConnect = layersToConnect.size();
779 }
780 IConnectableLayer* finalLayer = layersToConnect[0];
781 // if we had an odd number of inputs we need to connect the final layer to the
782 // last OutputOfParsedTfOperation in order to create the last Add layer we will
783 // be handing back.
784 if (isOdd)
785 {
786 // connect the final layer to the last op
787 finalLayer = CreateAdditionLayer(nodeDef, inputs[numberOfInputs - 1], finalLayer);
788 }
789 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, finalLayer);
790 }
791}
792
surmeh01bceff2f2018-03-29 16:29:27 +0100793ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
794{
795 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
796
telsoa01c577f2c2018-08-31 09:22:23 +0100797 // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
798 // together as FullyConnected.
surmeh01bceff2f2018-03-29 16:29:27 +0100799 if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
800 HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
801 {
802 IConnectableLayer* layer =
803 AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
804 &nodeDef,nodeDef.name().c_str());
805 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
806 }
807 else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
808 inputs[1].m_IndexedValue->GetNode().op() == "MatMul")
809 {
810 IConnectableLayer* layer =
811 AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
812 &nodeDef,nodeDef.name().c_str());
813 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
814 }
815 else
816 {
telsoa01c577f2c2018-08-31 09:22:23 +0100817 // Otherwise it's just a regular addition.
surmeh01bceff2f2018-03-29 16:29:27 +0100818 return AddAdditionLayer(nodeDef);
819 }
820}
821
822ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
823{
824 return AddAdditionLayer(nodeDef, true);
825}
826
 827/// A ParsedTfOperation which forwards to another (used for Identity nodes).
828class ParsedIdentityTfOperation : public ParsedTfOperation
829{
830public:
831 ParsedIdentityTfOperation(TfParser* parser, const tensorflow::NodeDef& node, ParsedTfOperation* representative)
832 : ParsedTfOperation(parser, node)
833 , m_Representative(representative)
834 {
835 }
836
837 virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
838 {
839 BOOST_ASSERT(m_Representative);
840 return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
841 }
842
843 virtual ParsedTfOperation* ResolveIdentityOperations() override
844 {
845 return m_Representative->ResolveIdentityOperations();
846 }
847
848private:
849 ParsedTfOperation* m_Representative;
850};
851
852ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
853{
854 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
855 // Any requests for the output slots of this node should be forwarded to the node connected as input.
856 return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
857}
858
 859/// A ParsedTfOperation for a Const node.
860/// Creation of the armnn ConstLayer is deferred until it is actually needed, because Const nodes are mostly used
861/// for weight inputs to MatMul/Conv2D nodes and in these cases armnn doesn't need a ConstLayer.
862template <typename T>
863class ParsedConstTfOperation : public DeferredSingleLayerParsedTfOperation
864{
865public:
866 ParsedConstTfOperation(TfParser* parser, const tensorflow::NodeDef& node,
867 const T* tensorData, const TensorInfo& tensorInfo)
868 : DeferredSingleLayerParsedTfOperation(parser, node),
869 m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
870 m_TensorInfo(tensorInfo)
871 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000872 BOOST_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
surmeh01bceff2f2018-03-29 16:29:27 +0100873 }
874
875 void CreateLayerDeferred() override
876 {
877 BOOST_ASSERT(m_Layer == nullptr);
878 m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
879 m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
880 }
881
Matteo Martincigh482ca852018-12-12 09:20:55 +0000882 ConstTensor GetConstTensor(std::vector<T>& outputTensorData) const
surmeh01bceff2f2018-03-29 16:29:27 +0100883 {
surmeh01bceff2f2018-03-29 16:29:27 +0100884 outputTensorData.resize(m_TensorInfo.GetNumElements());
885
Matteo Martincigh482ca852018-12-12 09:20:55 +0000886 memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.GetNumBytes());
887
telsoa01c577f2c2018-08-31 09:22:23 +0100888 // Updates the result to point to the user provided storage.
Matteo Martincigh482ca852018-12-12 09:20:55 +0000889 ConstTensor constTensor(m_TensorInfo, outputTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +0100890 return constTensor;
891 }
892
Matteo Martincigh46315822018-11-28 16:22:36 +0000893 const T* GetStorage() const
894 {
895 return m_Storage.data();
896 }
897
898 const TensorInfo& GetTensorInfo() const
899 {
900 return m_TensorInfo;
901 }
902
surmeh01bceff2f2018-03-29 16:29:27 +0100903private:
904 ///< Manages the lifetime of the tensor data.
905 std::vector<T> m_Storage;
906 ///< Describes the layout of the tensor and points to the data in m_Storage.
907 TensorInfo m_TensorInfo;
908};
909
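// Maps a tensorflow::DataType onto the equivalent armnn::DataType.
// Only DT_FLOAT and DT_INT32 are supported; any other type raises a ParseException.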
telsoa01c577f2c2018-08-31 09:22:23 +0100910DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
911 const tensorflow::NodeDef& nodeDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100912{
913 switch (tfDataType)
914 {
915 case tensorflow::DT_FLOAT:
916 return DataType::Float32;
917 break;
918 case tensorflow::DT_INT32:
919 return DataType::Signed32;
920 break;
921 default:
telsoa01c577f2c2018-08-31 09:22:23 +0100922 throw ParseException(
923 boost::str(
924 boost::format(
925 "Unknown DataType %1% for node %2% %3%")
926 % tensorflow::DataType_Name(tfDataType)
927 % nodeDef.name()
928 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100929 }
930}
931
932struct ParseTfTensorValueList
933{
934 template<typename DataType>
935 static void Parse(
936 const tensorflow::TensorProto& tfTensor,
937 unsigned int dstElements,
938 std::vector<int8_t>& outputData);
939
940 template <typename DataType>
941 static void ReadData(const void* srcData, unsigned int numSrcElements,
942 std::vector<int8_t>& dstData, unsigned int numDstElements)
943 {
telsoa01c577f2c2018-08-31 09:22:23 +0100944 // If there are no entries in the list, perform no action.
surmeh01bceff2f2018-03-29 16:29:27 +0100945 if (numSrcElements == 0)
946 {
947 return;
948 }
949
telsoa01c577f2c2018-08-31 09:22:23 +0100950 // If no size was provided, use the length of the value list.
surmeh01bceff2f2018-03-29 16:29:27 +0100951 if (numDstElements == 0)
952 {
953 numDstElements = numSrcElements;
954 }
955
telsoa01c577f2c2018-08-31 09:22:23 +0100956 // Allocates memory.
surmeh01bceff2f2018-03-29 16:29:27 +0100957 dstData.resize(std::max(numSrcElements, numDstElements) * sizeof(DataType));
958
959 const DataType* srcTensor = reinterpret_cast<const DataType*>(srcData);
960 DataType* dstTensor = reinterpret_cast<DataType*>(dstData.data());
961
telsoa01c577f2c2018-08-31 09:22:23 +0100962 // Copies the value list entries into the destination.
surmeh01bceff2f2018-03-29 16:29:27 +0100963 std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);
964
965 if (numDstElements > numSrcElements)
966 {
telsoa01c577f2c2018-08-31 09:22:23 +0100967 // Uses the last element in the list to fill the remaining entries.
surmeh01bceff2f2018-03-29 16:29:27 +0100968 std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
969 }
970 }
971
972};
973
974template <>
975void ParseTfTensorValueList::Parse<float>(const tensorflow::TensorProto& tfTensor,
976 unsigned int dstElements, std::vector<int8_t>& outputData)
977{
978 ReadData<float>(tfTensor.float_val().data(), static_cast<unsigned int>(tfTensor.float_val_size()),
979 outputData, dstElements);
980}
981
982template <>
983void ParseTfTensorValueList::Parse<int32_t>(const tensorflow::TensorProto& tfTensor,
984 unsigned int dstElements, std::vector<int8_t>& outputData)
985{
986 ReadData<int32_t>(tfTensor.int_val().data(), static_cast<unsigned int>(tfTensor.int_val_size()),
987 outputData, dstElements);
988}
989
990template <template<typename> class OperatorType, typename T = int8_t>
991struct MakeTfOperation
992{
993 template<typename DataType, class... Args>
994 inline static std::unique_ptr<OperatorType<DataType>> Parse(TfParser* parser, const tensorflow::NodeDef& node,
995 Args&&... args)
996 {
997 return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
998 }
999};
1000
1001template <>
1002struct MakeTfOperation<ParsedConstTfOperation>
1003{
1004 template<typename DataType, class... Args>
1005 inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(TfParser* parser,
1006 const tensorflow::NodeDef& node, const std::vector<int8_t>& tensorData, const TensorInfo& tensorInfo)
1007 {
1008 return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
1009 reinterpret_cast<const DataType*>(tensorData.data()), tensorInfo);
1010 }
1011};
1012
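// Dispatches FuncType::Parse<T> on an armnn DataType: Float32 selects T = float and Signed32
// selects T = int32_t. Used below to parse Const value lists and to build the correctly typed
// ParsedConstTfOperation for a Const node.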
1013template <class FuncType>
1014struct InvokeParseFunction
1015{
1016 template<class ResType, class... Args>
1017 inline static ResType Result(DataType dataType, Args&&... args)
1018 {
1019 if (dataType == DataType::Float32)
1020 {
1021 return FuncType::template Parse<float>(std::forward<Args>(args)...);
1022 }
1023 else if (dataType == DataType::Signed32)
1024 {
1025 return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1026 }
1027
1028 return ResType();
1029 }
1030
1031 template<class... Args>
1032 inline static void Result(DataType dataType, Args&&... args)
1033 {
1034 if (dataType == DataType::Float32)
1035 {
1036 FuncType::template Parse<float>(std::forward<Args>(args)...);
1037 }
1038 else if (dataType == DataType::Signed32)
1039 {
1040 FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1041 }
1042 }
1043};
1044
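// Parses a Const node: reads the "dtype" and tensor shape, takes the tensor data either from the
// typed value list or from the raw tensor_content attribute, validates it against the declared
// shape, and wraps it in a ParsedConstTfOperation so the ConstantLayer is only created if needed.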
1045ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1046{
1047 BOOST_ASSERT(nodeDef.op() == "Const");
1048
1049 if (nodeDef.attr().count("value") == 0)
1050 {
telsoa01c577f2c2018-08-31 09:22:23 +01001051 throw ParseException(
1052 boost::str(
1053 boost::format(
1054 "Value not found for Const node - %1% %2%")
1055 % nodeDef.name()
1056 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001057 }
1058
1059 const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
1060 const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
1061 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "dtype");
1062
1063 const auto GetDimensionSize = [](auto& d) { return d.size(); };
1064
1065 std::vector<unsigned int> dimensionSizes;
1066 std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
1067 std::back_inserter(dimensionSizes), GetDimensionSize);
1068
telsoa01c577f2c2018-08-31 09:22:23 +01001069 // Calculates number of elements.
1070 const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01001071 unsigned int numElements = 0U;
1072
1073 if (!dimensionSizes.empty())
1074 {
1075 numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
1076 1U, std::multiplies<unsigned int>());
1077 }
1078
1079 std::vector<int8_t> tensorData;
1080
telsoa01c577f2c2018-08-31 09:22:23 +01001081 // Get tensor data from the list of values attribute.
surmeh01bceff2f2018-03-29 16:29:27 +01001082 if (tfTensor.tensor_content().empty())
1083 {
1084 InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);
1085
1086 // If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
telsoa01c577f2c2018-08-31 09:22:23 +01001087 // tensor of the provided number of elements.
surmeh01bceff2f2018-03-29 16:29:27 +01001088 if (numElements == 0)
1089 {
telsoa01c577f2c2018-08-31 09:22:23 +01001090 const unsigned int tfNumElements =
1091 static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
surmeh01bceff2f2018-03-29 16:29:27 +01001092 dimensionSizes.push_back(tfNumElements);
1093 }
1094 }
telsoa01c577f2c2018-08-31 09:22:23 +01001095 // Gets tensor data from tensor content attribute.
surmeh01bceff2f2018-03-29 16:29:27 +01001096 else
1097 {
1098 tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());
1099
telsoa01c577f2c2018-08-31 09:22:23 +01001100 // Checks if a tensor shape is defined for the tensor content.
surmeh01bceff2f2018-03-29 16:29:27 +01001101 if (numElements == 0)
1102 {
telsoa01c577f2c2018-08-31 09:22:23 +01001103 throw ParseException(
1104 boost::str(
1105 boost::format(
1106 "No tensor shape found for Const node - %1% %2%")
1107 % nodeDef.name()
1108 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001109 }
1110 }
1111
telsoa01c577f2c2018-08-31 09:22:23 +01001112 // Const node requires at least a list of values or a content attribute.
surmeh01bceff2f2018-03-29 16:29:27 +01001113 if (tensorData.empty())
1114 {
telsoa01c577f2c2018-08-31 09:22:23 +01001115 throw ParseException(
1116 boost::str(
1117 boost::format(
1118 "No tensor data found for Const node - %1% %2%")
1119 % nodeDef.name()
1120 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001121 }
1122
telsoa01c577f2c2018-08-31 09:22:23 +01001123 const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
1124 dimensionSizes.data(),
1125 dataType);
surmeh01bceff2f2018-03-29 16:29:27 +01001126
1127 // If we have a list of values, then the length of the list must be
telsoa01c577f2c2018-08-31 09:22:23 +01001128 // less than or equal to the number of elements implied by the shape argument.
surmeh01bceff2f2018-03-29 16:29:27 +01001129 if (tensorData.size() > tensorInfo.GetNumBytes())
1130 {
telsoa01c577f2c2018-08-31 09:22:23 +01001131 throw ParseException(
1132 boost::str(
1133 boost::format(
1134 "Number of elements (%1%) should be less than or equal "
1135 "to the number of elements implied by the shape argument (%2%) for Const node - %3% %4%")
1136 % (tensorData.size() / GetDataTypeSize(dataType))
1137 % tensorInfo.GetNumElements()
1138 % nodeDef.name()
1139 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001140 }
1141
1142 return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
1143 dataType, this, nodeDef, tensorData, tensorInfo);
1144}
1145
1146template<typename Type>
1147bool TfParser::HasParsedConstTensor(const std::string & nodeName) const
1148{
1149 auto it = m_ParsedTfOperations.find(nodeName);
jimfly01f6ba7472018-12-04 10:09:52 +00001150 if (it == m_ParsedTfOperations.end())
surmeh01bceff2f2018-03-29 16:29:27 +01001151 {
1152 return false;
1153 }
jimfly01f6ba7472018-12-04 10:09:52 +00001154 return dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) != nullptr;
1155}
1156
1157template<typename Type>
1158bool TfParser::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const
1159{
1160 return dynamic_cast<ParsedConstTfOperation<Type>*>(parsedTfOpPtr) != nullptr;
surmeh01bceff2f2018-03-29 16:29:27 +01001161}
1162
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00001163unsigned int TfParser::GetConstInputIndex(const std::vector<OutputOfParsedTfOperation>& inputs)
1164{
1165 for (unsigned int i = 0; i < inputs.size(); i++)
1166 {
1167 if (HasParsedConstTensor<int32_t>(inputs[i].m_IndexedValue->GetNode().name()))
1168 {
1169 return i;
1170 }
1171 }
1172 throw ParseException(
1173 boost::str(
1174 boost::format(
1175 "ArmNN only supports operators with constant axis. %1%")
1176 % CHECK_LOCATION().AsString()));
1177
1178}
1179
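// Parses a Conv2D node. The weights must come from a Const node; the strides, padding and
// data_format attributes are read, the TensorFlow [H, W, In, Out] weights are permuted into the
// ArmNN ordering for the chosen data layout, and each spatial output dimension is
// ceil(input / stride) for SAME padding or ceil((input - kernel + 1) / stride) for VALID padding.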
surmeh01bceff2f2018-03-29 16:29:27 +01001180ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
1181 const tensorflow::GraphDef& graphDef)
1182{
1183 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1184 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1185 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
1186
1187 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1188 {
telsoa01c577f2c2018-08-31 09:22:23 +01001189 throw ParseException(
1190 boost::str(
1191 boost::format(
1192 "ArmNN only supports Convolution layers with constant weights for %1%, input %2% %3%")
1193 % nodeDef.name()
1194 % inputs[1].m_IndexedValue->GetNode().name()
1195 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001196 }
1197 ParsedConstTfOperation<float>* weightNode =
1198 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1199
1200 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
1201 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
1202 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
1203
telsoa01c577f2c2018-08-31 09:22:23 +01001204 // Read the dilations, if present - only [1,1,1,1] (the default) is supported.
surmeh01bceff2f2018-03-29 16:29:27 +01001205 std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
1206 if (!dilations.empty())
1207 {
1208 for (auto dilation : dilations)
1209 {
1210 if (dilation != 1u)
1211 {
telsoa01c577f2c2018-08-31 09:22:23 +01001212 throw ParseException(
1213 boost::str(
1214 boost::format(
1215 "ArmNN only supports Convolution layers with dilations [1,1,1,1] for %1% %2%")
1216 % nodeDef.name()
1217 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001218 }
1219 }
1220 }
1221
1222 Convolution2dDescriptor desc;
1223 desc.m_BiasEnabled = false;
1224
telsoa01c577f2c2018-08-31 09:22:23 +01001225 CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");
1226
Matteo Martincigh46315822018-11-28 16:22:36 +00001227 DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
surmeh01bceff2f2018-03-29 16:29:27 +01001228
Matteo Martincigh46315822018-11-28 16:22:36 +00001229 desc.m_DataLayout = dataLayout;
surmeh01bceff2f2018-03-29 16:29:27 +01001230
Matteo Martincigh46315822018-11-28 16:22:36 +00001231 DataLayoutIndexed dataLayoutIndexed(dataLayout);
surmeh01bceff2f2018-03-29 16:29:27 +01001232
Matteo Martincigh46315822018-11-28 16:22:36 +00001233 desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
1234 desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01001235
Matteo Martincigh46315822018-11-28 16:22:36 +00001236 uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
1237 uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
1238
1239 // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
1240 // Tensorflow weights are [H, W, In, Out].
1241 // ArmNN weights have to be [Out, H, W, In] when the data layout is NHWC,
1242 // and [Out, In, H, W] when the data layout is NCHW.
1243 PermutationVector permutationVector =
1244 dataLayout == DataLayout::NHWC ?
1245 std::initializer_list<unsigned int>{ 1, 2, 3, 0 } : // NHWC: [H, W, In, Out] -> [Out, H, W, In]
1246 std::initializer_list<unsigned int>{ 2, 3, 1, 0 }; // NCHW: [H, W, In, Out] -> [Out, In, H, W]
1247
1248 // Swizzle the tensor using the given permutation vector.
1249 const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
1250 const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);
1251
1252 // Swizzles the content of the tensor's permanent storage into a local storage.
1253 std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
1254 armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
Matteo Martincighd5b9e642019-01-04 18:01:21 +00001255 weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));
Matteo Martincigh46315822018-11-28 16:22:36 +00001256
1257 // Create a weight tensor with the newly swizzled data.
1258 ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);
1259
1260 uint32_t weightHeight = weightTensor.GetShape()[dataLayoutIndexed.GetHeightIndex()];
1261 uint32_t weightWidth = weightTensor.GetShape()[dataLayoutIndexed.GetWidthIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01001262
1263 bool padding = false;
1264 TensorInfo outputInfo;
Matteo Martincigh46315822018-11-28 16:22:36 +00001265 unsigned int outputHeight = 0;
1266 unsigned int outputWidth = 0;
telsoa01c577f2c2018-08-31 09:22:23 +01001267
1268 CHECK_PADDING_TYPE(nodeDef, paddingString);
1269
surmeh01bceff2f2018-03-29 16:29:27 +01001270 if (paddingString == "SAME")
1271 {
1272 padding = true;
Matteo Martincigh46315822018-11-28 16:22:36 +00001273
1274 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
1275 static_cast<float>(desc.m_StrideY)));
1276 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
1277 static_cast<float>(desc.m_StrideX)));
surmeh01bceff2f2018-03-29 16:29:27 +01001278 }
1279 else if (paddingString == "VALID")
1280 {
1281 padding = false;
Matteo Martincigh46315822018-11-28 16:22:36 +00001282
1283 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
1284 static_cast<float>(desc.m_StrideY)));
1285 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
1286 static_cast<float>(desc.m_StrideX)));
1287 }
1288
1289 switch (dataLayout)
1290 {
1291 case DataLayout::NHWC:
1292 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1293 outputHeight,
1294 outputWidth,
1295 weightTensor.GetShape()[0] },
1296 DataType::Float32);
1297 break;
1298 case DataLayout::NCHW:
1299 default:
surmeh01bceff2f2018-03-29 16:29:27 +01001300 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1301 weightTensor.GetShape()[0],
Matteo Martincigh46315822018-11-28 16:22:36 +00001302 outputHeight,
1303 outputWidth },
1304 DataType::Float32);
1305 break;
surmeh01bceff2f2018-03-29 16:29:27 +01001306 }
surmeh01bceff2f2018-03-29 16:29:27 +01001307
1308 CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
1309 CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);
1310
Matteo Martincighfc598e12019-05-14 10:36:13 +01001311 IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc,
1312 weightTensor,
1313 EmptyOptional(),
1314 nodeDef.name().c_str());
surmeh01bceff2f2018-03-29 16:29:27 +01001315 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
Matteo Martincigh46315822018-11-28 16:22:36 +00001316 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01001317
1318 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1319}
1320
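// Parses a DepthwiseConv2dNative node. As with Conv2D the weights must be constant; TensorFlow
// supplies them as [H, W, I, M] and they are permuted to ArmNN's [M, I, H, W], with the output
// channel count being I * M (input channels times the channel multiplier).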
1321ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
telsoa01c577f2c2018-08-31 09:22:23 +01001322 const tensorflow::GraphDef& graphDef)
surmeh01bceff2f2018-03-29 16:29:27 +01001323{
1324 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1325 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1326 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
1327
1328 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1329 {
telsoa01c577f2c2018-08-31 09:22:23 +01001330 throw ParseException(
1331 boost::str(
1332 boost::format(
1333 "ArmNN only supports Depthwise Convolution layer with constant weights. "
1334 "Non const input found %1% for node %2% %3%")
1335 % inputs[1].m_IndexedValue->GetNode().name()
1336 % nodeDef.name()
1337 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001338 }
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001339
surmeh01bceff2f2018-03-29 16:29:27 +01001340 ParsedConstTfOperation<float>* weightNode =
1341 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1342
surmeh01bceff2f2018-03-29 16:29:27 +01001343 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
1344 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
1345 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
1346
1347 DepthwiseConvolution2dDescriptor desc;
1348 desc.m_BiasEnabled = false;
1349
telsoa01c577f2c2018-08-31 09:22:23 +01001350 CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");
1351
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001352 DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
surmeh01bceff2f2018-03-29 16:29:27 +01001353
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001354 desc.m_DataLayout = dataLayout;
surmeh01bceff2f2018-03-29 16:29:27 +01001355
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001356 DataLayoutIndexed dataLayoutIndexed(dataLayout);
surmeh01bceff2f2018-03-29 16:29:27 +01001357
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001358 desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
1359 desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01001360
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001361 uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
1362 uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
1363
1364 // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
Matteo Martincigh747ef822018-12-18 09:26:39 +00001365 // Tensorflow weights come in the format [H, W, I, M].
1366 // ArmNN weights have to be [M, I, H, W].
1367 PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001368
1369 // Swizzle the tensor using the given permutation vector.
1370 const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
1371 const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);
1372
1373 // Swizzles the content of the tensor's permanent storage into a local storage.
1374 std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
1375 armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
Matteo Martincighd5b9e642019-01-04 18:01:21 +00001376 weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001377
1378 // Create a weight tensor with the newly swizzled data.
1379 ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);
1380
Matteo Martincigh747ef822018-12-18 09:26:39 +00001381 uint32_t weightHeight = weightTensor.GetShape()[2];
1382 uint32_t weightWidth = weightTensor.GetShape()[3];
surmeh01bceff2f2018-03-29 16:29:27 +01001383
1384 bool padding = false;
1385 TensorInfo outputInfo;
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001386 unsigned int outputHeight = 0;
1387 unsigned int outputWidth = 0;
telsoa01c577f2c2018-08-31 09:22:23 +01001388
1389 CHECK_PADDING_TYPE(nodeDef, paddingString);
1390
surmeh01bceff2f2018-03-29 16:29:27 +01001391 if (paddingString == "SAME")
1392 {
1393 padding = true;
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001394
1395 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
1396 static_cast<float>(desc.m_StrideY)));
1397 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
1398 static_cast<float>(desc.m_StrideX)));
surmeh01bceff2f2018-03-29 16:29:27 +01001399 }
1400 else if (paddingString == "VALID")
1401 {
1402 padding = false;
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001403
1404 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
1405 static_cast<float>(desc.m_StrideY)));
1406 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
1407 static_cast<float>(desc.m_StrideX)));
1408 }
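    // Example with inputHeight = 7, weightHeight = 3, strideY = 2:
    //   SAME  -> outputHeight = ceil(7 / 2) = 4 (the required padding is computed by CalcPadding below)
    //   VALID -> outputHeight = ceil((7 - 3 + 1) / 2) = 3 (no padding)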
1409
1410 switch (dataLayout)
1411 {
1412 case DataLayout::NHWC:
1413 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1414 outputHeight,
1415 outputWidth,
Matteo Martincigh747ef822018-12-18 09:26:39 +00001416 weightTensor.GetShape()[0] * weightTensor.GetShape()[1]},
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001417 DataType::Float32);
1418 break;
1419 case DataLayout::NCHW:
1420 default:
1421 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1422 weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
1423 outputHeight,
1424 outputWidth },
1425 DataType::Float32);
1426 break;
surmeh01bceff2f2018-03-29 16:29:27 +01001427 }
surmeh01bceff2f2018-03-29 16:29:27 +01001428
1429 CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
1430 CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);
1431
Matteo Martincighfc598e12019-05-14 10:36:13 +01001432 IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
1433 weightTensor,
1434 EmptyOptional(),
1435 nodeDef.name().c_str());
surmeh01bceff2f2018-03-29 16:29:27 +01001436 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001437 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01001438
1439 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1440}
1441
Conor Kennedyc2130a02018-12-05 11:05:54 +00001442TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
1443{
1444 BOOST_ASSERT(nodeDef.op() == "ExpandDims");
1445
1446 if (inputTensorInfo.GetNumDimensions() > 4) {
1447 throw ParseException(
1448 boost::str(
1449 boost::format(
1450 "Unsupported number of dimensions: %1% for input shape for ExpandDims %2% %3%")
1451 % inputTensorInfo.GetNumDimensions()
1452 % nodeDef.name()
1453 % CHECK_LOCATION().AsString()));
1454 }
1455
1456 std::int32_t expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");
1457
1458 std::int32_t inputDimSize = boost::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
1459 std::vector<uint32_t> outputDims;
1460
1461 // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
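    // Example: for a 3D input the valid range for Tdim is [-4, 3];
    // Tdim = 0 inserts the new axis at the front, Tdim = -1 appends it as the last dimension.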
1462 if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1463 {
1464 // add current input shape to outputDims
1465 for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
1466 auto currentDimension = inputTensorInfo.GetShape()[i];
1467 outputDims.push_back(currentDimension);
1468 }
1469
1470 // insert a dimension of 1 at index 'expandDim' of inputs shape
1471 if (expandDim >= 0)
1472 {
1473 auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1474 outputDims.insert(getPosition, 1);
1475 }
1476
1477 // if negative number for 'expandDim' then count backwards from the last element
1478 // and insert 1 dimension at index 'expandDim'
1479 if (expandDim < 0)
1480 {
Matteo Martincighd7cceeb2018-12-06 09:06:29 +00001481 int outputDimSize = boost::numeric_cast<int>(outputDims.size() + 1);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001482            // Compute the insertion offset first so an iterator past outputDims.end() is never created.
            auto getPosition = std::next(outputDims.begin(), outputDimSize + expandDim);
1483 outputDims.insert(getPosition, 1);
1484 }
1485 }
1486 else
1487 {
1488 throw InvalidArgumentException(
1489 boost::str(
1490 boost::format(
1491 "Cannot expand dimension %1% in input tensor with %2% dimension %3%")
1492 % expandDim
1493 % inputDimSize
1494 % CHECK_LOCATION().AsString()));
1495 }
1496
1497 if (outputDims.size() > 4)
1498 {
1499 throw ParseException(
1500 boost::str(
1501 boost::format(
1502 "Unsupported number of dimensions: %1% for output shape for ExpandDims %2% %3%")
1503 % outputDims.size()
1504 % nodeDef.name()
1505 % CHECK_LOCATION().AsString()));
1506 }
1507
1508 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1509 outputDims.data());
1510
1511 TensorInfo outTensorInfo = inputTensorInfo;
1512 outTensorInfo.SetShape(outShape);
1513
1514 return outTensorInfo;
1515}
1516
1517ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1518{
1519 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1520
1521 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1522 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1523
1524 TensorInfo outputInfo;
1525 outputInfo = OutputShapeOfExpandDims(nodeDef, inputTensorInfo);
1526
1527 ReshapeDescriptor reshapeDesc;
1528 reshapeDesc.m_TargetShape = outputInfo.GetShape();
1529 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1530 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1531 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1532
1533 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1534}
1535
surmeh01bceff2f2018-03-29 16:29:27 +01001536ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
1537 const tensorflow::GraphDef& graphDef)
1538{
1539 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
1540
1541 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1542 {
telsoa01c577f2c2018-08-31 09:22:23 +01001543 throw ParseException(
1544 boost::str(
1545 boost::format(
1546 "ArmNN only supports FusedBatchNormalization layers with constant scale. "
1547 "Input %1%. Node %2% %3%")
1548 % inputs[1].m_IndexedValue->GetNode().name()
1549 % nodeDef.name()
1550 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001551 }
1552 ParsedConstTfOperation<float>* scaleNode =
1553 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1554
1555 if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
1556 {
telsoa01c577f2c2018-08-31 09:22:23 +01001557 throw ParseException(
1558 boost::str(
1559 boost::format(
1560 "ArmNN only supports FusedBatchNormalization layers with constant offset. "
1561 "Input %1%. Node %2% %3%")
1562 % inputs[2].m_IndexedValue->GetNode().name()
1563 % nodeDef.name()
1564 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001565 }
1566 ParsedConstTfOperation<float>* offsetNode =
1567 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);
1568
1569 if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
1570 {
telsoa01c577f2c2018-08-31 09:22:23 +01001571 throw ParseException(
1572 boost::str(
1573 boost::format(
1574 "ArmNN only supports FusedBatchNormalization layers with constant mean. "
1575 "Input %1%. Node %2% %3%")
1576 % inputs[3].m_IndexedValue->GetNode().name()
1577 % nodeDef.name()
1578 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001579 }
1580 ParsedConstTfOperation<float>* meanNode =
1581 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);
1582
1583 if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
1584 {
telsoa01c577f2c2018-08-31 09:22:23 +01001585 throw ParseException(
1586 boost::str(
1587 boost::format(
1588 "ArmNN only supports FusedBatchNormalization layers with constant variance. "
1589 "Input %1%. Node %2% %3%")
1590 % inputs[4].m_IndexedValue->GetNode().name()
1591 % nodeDef.name()
1592 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001593 }
1594 ParsedConstTfOperation<float>* varianceNode =
1595 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);
1596
Matteo Martincigh075c7502018-12-05 13:10:45 +00001597 const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
1598
1599 CHECK_DATA_FORMAT(nodeDef, dataFormat, "FusedBatchNorm");
1600
telsoa01c577f2c2018-08-31 09:22:23 +01001601 // The descriptor only has the epsilon attribute.
surmeh01bceff2f2018-03-29 16:29:27 +01001602 BatchNormalizationDescriptor desc;
1603 desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
Matteo Martincigh075c7502018-12-05 13:10:45 +00001604 desc.m_DataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
surmeh01bceff2f2018-03-29 16:29:27 +01001605
telsoa01c577f2c2018-08-31 09:22:23 +01001606 // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
1607 // locally until the layer is added.
surmeh01bceff2f2018-03-29 16:29:27 +01001608 std::vector<float> scaleTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001609 ConstTensor scaleTensor = scaleNode->GetConstTensor(scaleTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001610
1611 std::vector<float> offsetTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001612 ConstTensor offsetTensor = offsetNode->GetConstTensor(offsetTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001613
1614 std::vector<float> meanTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001615 ConstTensor meanTensor = meanNode->GetConstTensor(meanTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001616
1617 std::vector<float> varianceTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001618 ConstTensor varianceTensor = varianceNode->GetConstTensor(varianceTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001619
1620 IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
1621 meanTensor,
1622 varianceTensor,
1623 offsetTensor,
1624 scaleTensor,
1625 nodeDef.name().c_str());
1626
1627 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1628
Matteo Martincigh075c7502018-12-05 13:10:45 +00001629 layer->GetOutputSlot(0).SetTensorInfo(inputSlot.GetTensorInfo());
1630 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01001631
1632 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1633}
1634
telsoa01c577f2c2018-08-31 09:22:23 +01001635bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
1636 size_t alphaLayerIndex,
1637 const OutputOfParsedTfOperation& otherOp,
1638 armnn::IOutputSlot** outputOfLeakyRelu,
1639 armnn::ActivationDescriptor & desc)
1640{
1641 const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();
1642
1643 // Verifying all these assumptions hold:
1644 //
1645 // 1, the mulNodeDef is an elementwise multiplication node "Mul"
1646 // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
 1647    //      3, the other input of the "Mul" node comes from the layer that produced otherNodeDef (matched by name)
1648 //
1649
1650 if (mulNodeDef.op() == "Mul")
1651 {
1652 size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
1653 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
1654
1655 BOOST_ASSERT(inputs.size() == 2);
1656 BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
1657 BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
1658 BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
1659
1660 if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
1661 {
1662 if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
1663 {
1664 ParsedConstTfOperation<float>* alpha =
1665 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(
1666 inputs[alphaLayerIndex].m_IndexedValue);
1667
1668 std::vector<float> const_data;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001669 ConstTensor const_tensor = alpha->GetConstTensor(const_data);
telsoa01c577f2c2018-08-31 09:22:23 +01001670
1671 if (const_data.size() == 1)
1672 {
1673 desc.m_Function = ActivationFunction::LeakyReLu;
1674 desc.m_A = const_data[0];
1675
1676 *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
1677 return true;
1678 }
1679 }
1680 }
1681 }
1682 return false;
1683}
1684
telsoa01c577f2c2018-08-31 09:22:23 +01001685ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
1686 const tensorflow::GraphDef& graphDef)
1687{
1688 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
Sadik Armagan975c09a2018-12-04 10:02:08 +00001689 if (inputs.size() != 2)
1690 {
1691 throw ParseException(
1692 boost::str(
1693 boost::format(
 1694                    "Maximum expects two inputs. Got %1% for Node %2% %3%")
1695 % inputs.size()
1696 % nodeDef.name()
1697 % CHECK_LOCATION().AsString()));
1698 }
1699
telsoa01c577f2c2018-08-31 09:22:23 +01001700 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1701 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1702 IOutputSlot* outputOfLeakyRelu = nullptr;
1703
1704 ActivationDescriptor desc;
1705
Sadik Armagan975c09a2018-12-04 10:02:08 +00001706 // A max node may be part of a LeakyRelu, with one input as a multiplication with a scalar constant,
1707 // i.e. one of the four possible scenarios:
1708 // 1, max(mul(a, x), x)
1709 // 2, max(mul(x, a), x)
1710 // 3, max(x, mul(a, x))
1711 // 4, max(x, mul(x, a))
1712 // These are handled by an activation layer.
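    // Example: LeakyReLU(x) with alpha = 0.2 typically appears in a TensorFlow graph as max(x, mul(0.2, x)).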
telsoa01c577f2c2018-08-31 09:22:23 +01001713
1714 if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1715 IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1716 IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1717 IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1718 {
1719 BOOST_ASSERT(outputOfLeakyRelu != nullptr);
1720
1721 IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
1722 outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
1723 layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
1724 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1725 }
1726 else
1727 {
Sadik Armagan975c09a2018-12-04 10:02:08 +00001728 // Anything else is just a maximum layer.
1729
1730 return AddMaximumLayer(nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001731 }
1732}
1733
jimfly0184c70e62018-12-19 13:14:46 +00001734std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> TfParser::ProcessElementwiseInputSlots(
1735 const tensorflow::NodeDef& nodeDef, const std::string& layerName)
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001736{
1737 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1738
1739 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1740 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1741 const unsigned int input0Dim = input0Slot->GetTensorInfo().GetNumDimensions();
1742 const unsigned int input1Dim = input1Slot->GetTensorInfo().GetNumDimensions();
1743
1744 if (input0Dim != input1Dim)
1745 {
 1746        // Broadcasting between inputs with a different number of dimensions
 1747        // is only supported for a pair of 1D and 4D tensors
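        // Example: a bias of shape [C] can be broadcast against an NHWC tensor of shape [N, H, W, C].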
1748 if (input0Dim == 1 && input1Dim == 4)
1749 {
1750 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
1751 }
1752 else if (input0Dim == 4 && input1Dim == 1)
1753 {
1754 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
1755 }
1756 else
1757 {
1758 throw ParseException(
jimfly0184c70e62018-12-19 13:14:46 +00001759 boost::str(
1760 boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
1761 % layerName
1762 % nodeDef.name()
1763 % CHECK_LOCATION().AsString()));
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001764 }
1765 }
jimfly0184c70e62018-12-19 13:14:46 +00001766 return {input0Slot, input1Slot};
1767}
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001768
kevmay012b4d88e2019-01-24 14:05:09 +00001769ParsedTfOperationPtr TfParser::ProcessComparisonLayer(
1770 IOutputSlot* input0Slot,
1771 IOutputSlot* input1Slot,
1772 IConnectableLayer* const layer,
1773 const tensorflow::NodeDef& nodeDef)
1774{
1775 input0Slot->Connect(layer->GetInputSlot(0));
1776 input1Slot->Connect(layer->GetInputSlot(1));
1777
1778 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1779 outputInfo.SetDataType(DataType::Boolean);
1780 std::vector<unsigned int> outputShape;
1781
1782 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1783 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1784
1785 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1786 {
1787 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1788 }
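    // Example: comparing shapes [1, 2, 2, 3] and [1, 1, 1, 3] gives a Boolean output of shape [1, 2, 2, 3].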
1789
1790 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1791 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1792
1793 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1794}
1795
jimfly0184c70e62018-12-19 13:14:46 +00001796ParsedTfOperationPtr TfParser::ProcessElementwiseLayer(
1797 IOutputSlot* input0Slot,
1798 IOutputSlot* input1Slot,
1799 IConnectableLayer* const layer,
1800 const tensorflow::NodeDef& nodeDef)
1801{
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001802 input0Slot->Connect(layer->GetInputSlot(0));
1803 input1Slot->Connect(layer->GetInputSlot(1));
1804
1805 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1806 std::vector<unsigned int> outputShape;
1807
1808 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1809 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1810
1811 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1812 {
1813 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1814 }
1815
1816 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1817 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1818
1819 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1820}
1821
FrancisMurtagh94412af2019-01-24 10:53:39 +00001822ParsedTfOperationPtr TfParser::ParseGather(const tensorflow::NodeDef& nodeDef,
1823 const tensorflow::GraphDef& graphDef)
1824{
1825 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1826 IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1827 IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1828
1829 // Infer shape of output tensor
1830 unsigned int paramsDim = params.GetTensorInfo().GetNumDimensions();
1831 unsigned int indicesDim = indices.GetTensorInfo().GetNumDimensions();
1832 unsigned int outputDim = paramsDim - 1 + indicesDim;
1833
1834 std::vector<unsigned int> dimSizes;
1835
1836 for (unsigned int i = 0; i < indicesDim; ++i)
1837 {
1838 dimSizes.push_back(indices.GetTensorInfo().GetShape()[i]);
1839 }
1840 for (unsigned int i = 1; i < paramsDim; ++i)
1841 {
1842 dimSizes.push_back(params.GetTensorInfo().GetShape()[i]);
1843 }
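    // Example: params of shape [10, 4] gathered with indices of shape [3] gives an output of shape [3, 4].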
1844
1845 const TensorShape& inferredShape = TensorShape(outputDim, dimSizes.data());
1846
1847 const TensorInfo inferredOutputInfo(inferredShape, params.GetTensorInfo().GetDataType());
1848
1849 IConnectableLayer* const layer = m_Network->AddGatherLayer(nodeDef.name().c_str());
1850 layer->GetOutputSlot(0).SetTensorInfo(inferredOutputInfo);
1851
1852 params.Connect(layer->GetInputSlot(0));
1853 indices.Connect(layer->GetInputSlot(1));
1854
1855 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1856}
1857
jimfly01a06bf312018-12-18 16:24:51 +00001858ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
1859 const tensorflow::GraphDef& graphDef)
1860{
1861 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
1862 IOutputSlot* input0Slot = inputLayers.first;
1863 IOutputSlot* input1Slot = inputLayers.second;
1864
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001865 ComparisonDescriptor descriptor(ComparisonOperation::Greater);
1866 IConnectableLayer* const layer = m_Network->AddComparisonLayer(descriptor, nodeDef.name().c_str());
jimfly01a06bf312018-12-18 16:24:51 +00001867
kevmay012b4d88e2019-01-24 14:05:09 +00001868 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
jimfly01a06bf312018-12-18 16:24:51 +00001869}
1870
jimfly0184c70e62018-12-19 13:14:46 +00001871ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
1872 const tensorflow::GraphDef& graphDef)
1873{
1874 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
1875 IOutputSlot* input0Slot = inputLayers.first;
1876 IOutputSlot* input1Slot = inputLayers.second;
1877
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001878 ComparisonDescriptor descriptor(ComparisonOperation::Equal);
1879 IConnectableLayer* const layer = m_Network->AddComparisonLayer(descriptor, nodeDef.name().c_str());
jimfly0184c70e62018-12-19 13:14:46 +00001880
kevmay012b4d88e2019-01-24 14:05:09 +00001881 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
jimfly0184c70e62018-12-19 13:14:46 +00001882}
1883
1884ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
1885 const tensorflow::GraphDef& graphDef)
1886{
1887 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
1888 IOutputSlot* input0Slot = inputLayers.first;
1889 IOutputSlot* input1Slot = inputLayers.second;
1890
1891 IConnectableLayer* const layer = m_Network->AddMinimumLayer(nodeDef.name().c_str());
1892
1893 return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
1894}
1895
jimfly0123be07e2018-12-04 17:47:22 +00001896ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1897{
1898 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1899
1900 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1901 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1902
1903 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
1904 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
1905
1906 if (input0Info.GetNumDimensions() == 1)
1907 {
1908 const bool isNHWC = true;
1909 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
1910 }
1911
1912 if (input1Info.GetNumDimensions() == 1)
1913 {
1914 const bool isNHWC = true;
1915 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
1916 }
1917
1918 IConnectableLayer* const layer = m_Network->AddSubtractionLayer(nodeDef.name().c_str());
1919
1920 input0Slot->Connect(layer->GetInputSlot(0));
1921 input1Slot->Connect(layer->GetInputSlot(1));
1922
1923 if (input0Info.GetNumDimensions() == 1)
1924 {
1925 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
1926 }
1927 else
1928 {
1929 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
1930 }
1931
1932 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1933}
1934
jimfly01f6ba7472018-12-04 10:09:52 +00001935unsigned int CheckPaddingTensor(const ConstTensor& paddingTensor,
1936 const TensorInfo& inputTensorInfo,
1937 const std::string& nodeName)
1938{
1939 unsigned int rank = paddingTensor.GetShape()[0];
1940 unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
1941 if (rank != expectedRank)
1942 {
1943 throw ParseException(
1944 boost::str(
1945 boost::format(
 1946                "Expected the padding tensor to be of rank %1% not %2% on Node %3% %4%.")
1947 % expectedRank
1948 % rank
1949 % nodeName
1950 % CHECK_LOCATION().AsString()));
1951 }
1952 unsigned int second = paddingTensor.GetShape()[1];
1953 if (second != 2)
1954 {
1955 throw ParseException(
1956 boost::str(
1957 boost::format(
 1958                "Expected the padding tensor to be of dimensions [%1%, 2] not [%1%, %2%] on Node %3% %4%.")
1959 % rank
1960 % second
1961 % nodeName
1962 % CHECK_LOCATION().AsString()));
1963 }
1964 return rank;
1965}
1966
1967TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
1968 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
1969{
1970 unsigned int numDims = inputTensorInfo.GetNumDimensions();
1971 std::vector<unsigned int> outDims;
1972 for (unsigned int i = 0; i < numDims; ++i)
1973 {
1974 unsigned int dimSize = inputTensorInfo.GetShape()[i];
1975 const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
1976 dimSize += dimPadding.first;
1977 dimSize += dimPadding.second;
1978 outDims.push_back(dimSize);
1979 }
1980 TensorInfo paddedTensorInfo = inputTensorInfo;
1981 unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
1982 paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
1983 return paddedTensorInfo;
1984}
1985
1986ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
1987 const tensorflow::GraphDef& graphDef)
1988{
1989 // input consists of:
1990 // input[0] the tensor which will be padded
1991 // input[1] the tensor holding the padding values
1992 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1993 IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1994 TensorInfo inputTensorInfo = previousLayerOutputSlot.GetTensorInfo();
1995 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
1996 {
1997 throw ParseException(
1998 boost::str(
1999 boost::format(
2000 "ArmNN only supports Pad with constant padding. "
2001 "Input %1%. Node %2% %3%")
2002 % inputs[1].m_IndexedValue->GetNode().name()
2003 % nodeDef.name()
2004 % CHECK_LOCATION().AsString()));
2005
2006 }
2007 ParsedConstTfOperation<int32_t>* paddingTensorOp =
2008 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2009
2010 std::vector<int32_t> paddingTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002011 ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(paddingTensorData);
jimfly01f6ba7472018-12-04 10:09:52 +00002012 // paddings is an integer tensor with shape [n, 2], where n is the rank of tensor
2013 // and should match the rank of the input tensor that is being padded.
2014 // For each dimension D of input, paddings[D, 0] indicates how many values to add
2015 // before the contents of tensor in that dimension, and paddings[D, 1] indicates how
2016 // many values to add after the contents of tensor in that dimension
2017 // This needs to be translated into a padList for ACL
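    // Example: paddings = [[0, 0], [1, 1], [2, 2], [0, 0]] on an NHWC tensor adds one row of padding above
    // and below, two columns of padding left and right, and leaves the batch and channel dimensions unchanged.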
2018 std::vector<std::pair<unsigned int, unsigned int>> padList;
2019 unsigned int rank = CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
2020 for (unsigned int i = 0; i < rank; ++i)
2021 {
2022 std::pair<unsigned int, unsigned int> paddingForDim;
2023 for (unsigned int j = 0; j < 2; j++)
2024 {
2025 unsigned int index = (i * 2) + j;
2026 int paddingAmount = paddingTensorData[index];
2027 // make sure we can cast to an unsigned value
2028 if (paddingAmount < 0)
2029 {
2030 throw ParseException(
2031 boost::str(
2032 boost::format(
 2033                        "Negative amount %1% specified at [%2%, %3%] of padding tensor on Node %4% %5%.")
2034 % paddingAmount
2035 % i
2036 % j
2037 % nodeDef.name()
2038 % CHECK_LOCATION().AsString()));
2039 }
2040 if (j == 0)
2041 {
2042 paddingForDim.first = static_cast<unsigned int>(paddingAmount);
2043 }
2044 else
2045 {
2046 paddingForDim.second = static_cast<unsigned int>(paddingAmount);
2047 }
2048 }
2049 padList.push_back(paddingForDim);
2050 }
2051 PadDescriptor padDescriptor(padList);
2052 IConnectableLayer* layer = m_Network->AddPadLayer(padDescriptor, nodeDef.name().c_str());
2053 previousLayerOutputSlot.Connect(layer->GetInputSlot(0));
2054 // Use the padding to calculate the new output tensor shape
2055 TensorInfo outputTensorInfo = CalculatePaddedOutputTensorInfo(inputTensorInfo, padList);
2056 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2057 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2058}
2059
surmeh01bceff2f2018-03-29 16:29:27 +01002060ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
2061 const tensorflow::GraphDef& graphDef)
2062{
2063 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002064
telsoa01c577f2c2018-08-31 09:22:23 +01002065    // In TensorFlow, one of the inputs of the Concat node (usually the last one) is the axis for concatenation.
surmeh01bceff2f2018-03-29 16:29:27 +01002066 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
surmeh01bceff2f2018-03-29 16:29:27 +01002067
surmeh01bceff2f2018-03-29 16:29:27 +01002068 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2069
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002070 // Constant tensor index
2071 unsigned int index = GetConstInputIndex(inputs);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002072 // Get the axis tensor data
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002073 ParsedConstTfOperation<int32_t>* shapeNode =
2074 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
2075
surmeh01bceff2f2018-03-29 16:29:27 +01002076 std::vector<int32_t> axisTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002077 shapeNode->GetConstTensor(axisTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002078
telsoa01c577f2c2018-08-31 09:22:23 +01002079    // concatDim is the axis to concatenate along: 3 is the channel axis in NHWC, 1 in NCHW.
Matteo Martincighf9afc792018-12-06 12:03:17 +00002080 const unsigned int concatDim = static_cast<unsigned int>(axisTensorData[0]);
surmeh01bceff2f2018-03-29 16:29:27 +01002081
telsoa01c577f2c2018-08-31 09:22:23 +01002082 // Armnn supports concatenation along the channel dimension for data formats NHWC and NCHW.
Matteo Martincighf9afc792018-12-06 12:03:17 +00002083 if (concatDim == 0 || concatDim == 2)
surmeh01bceff2f2018-03-29 16:29:27 +01002084 {
telsoa01c577f2c2018-08-31 09:22:23 +01002085 throw ParseException(
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002086 boost::str(
2087 boost::format(
telsoa01c577f2c2018-08-31 09:22:23 +01002088 "Dimension %1% for concatenation is not supported by Armnn. "
2089 "Node %2% %3%")
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002090 % concatDim
2091 % nodeDef.name()
2092 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002093 }
2094
Matthew Jacksondba634f2019-08-15 15:14:18 +01002095 const unsigned int supportedNumDims = 4;
Matteo Martincighf9afc792018-12-06 12:03:17 +00002096 unsigned int numConcatViews = numInputs - 1;
Matthew Jacksondba634f2019-08-15 15:14:18 +01002097 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatViews), supportedNumDims);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002098 concatDescriptor.SetConcatAxis(concatDim);
Matthew Jacksondba634f2019-08-15 15:14:18 +01002099 TensorShape mergeDims(supportedNumDims);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002100 unsigned int mergeDim = 0;
2101 for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
surmeh01bceff2f2018-03-29 16:29:27 +01002102 {
telsoa01c577f2c2018-08-31 09:22:23 +01002103        // TODO: double check whether this is the correct slot to resolve here.
Matteo Martincighf9afc792018-12-06 12:03:17 +00002104 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002105 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2106
Matteo Martincighf9afc792018-12-06 12:03:17 +00002107 // Double check dimensions of the tensors
Matthew Jacksondba634f2019-08-15 15:14:18 +01002108 if (inputTensorInfo.GetNumDimensions() != supportedNumDims)
Matteo Martincighf9afc792018-12-06 12:03:17 +00002109 {
2110 throw armnn::ParseException(
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002111 boost::str(
2112 boost::format(
Matteo Martincighf9afc792018-12-06 12:03:17 +00002113 "The number of dimensions: %1% for input tensors of the "
2114 "concatenation op should be %2% %3%")
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002115 % inputTensorInfo.GetNumDimensions()
Matthew Jacksondba634f2019-08-15 15:14:18 +01002116 % supportedNumDims
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002117 % CHECK_LOCATION().AsString()));
Matteo Martincighf9afc792018-12-06 12:03:17 +00002118 }
2119
2120 // Copy the input tensor shape to mergeDimSizes and initialize the view origin coordinates for the current input
2121 mergeDims = inputTensorInfo.GetShape();
2122 unsigned int* viewOrigin = const_cast<unsigned int*>(concatDescriptor.GetViewOrigin(viewIndex));
Matthew Jacksondba634f2019-08-15 15:14:18 +01002123 std::fill(viewOrigin, viewOrigin + supportedNumDims, 0);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002124
2125 // Update the view origin coordinates and the merge dimension value
2126 concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
2127 mergeDim += mergeDims[concatDim];
surmeh01bceff2f2018-03-29 16:29:27 +01002128 }
2129
Matteo Martincighf9afc792018-12-06 12:03:17 +00002130 // Update the output shape
2131 mergeDims[concatDim] = mergeDim;
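    // Example: concatenating NHWC tensors of shape [1, 7, 7, 64] and [1, 7, 7, 32] along axis 3 gives [1, 7, 7, 96].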
Jim Flynn906f9462019-05-10 13:55:21 +01002132 armnn::IConnectableLayer *layer = m_Network->AddConcatLayer(concatDescriptor, nodeDef.name().c_str());
surmeh01bceff2f2018-03-29 16:29:27 +01002133
Matteo Martincighf9afc792018-12-06 12:03:17 +00002134 layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(mergeDims, DataType::Float32));
surmeh01bceff2f2018-03-29 16:29:27 +01002135
Matteo Martincighf9afc792018-12-06 12:03:17 +00002136 for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
surmeh01bceff2f2018-03-29 16:29:27 +01002137 {
Matteo Martincighf9afc792018-12-06 12:03:17 +00002138 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2139 inputSlot.Connect(layer->GetInputSlot(viewIndex));
surmeh01bceff2f2018-03-29 16:29:27 +01002140 }
2141
2142 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2143}
2144
2145ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
2146 const tensorflow::GraphDef& graphDef)
2147{
telsoa01c577f2c2018-08-31 09:22:23 +01002148 // Note: the Shape layer is handled in a special way, because:
 2149    // 1. ARMNN doesn't support the int32 tensors that Shape outputs.
 2150    // 2. ARMNN works with statically shaped tensors which are known at parse time.
surmeh01bceff2f2018-03-29 16:29:27 +01002151    // 3. Because of 1. and 2. we treat the output of Shape as a temporary const int32
telsoa01c577f2c2018-08-31 09:22:23 +01002152 // tensor which may be used as an input to other ops, most likely a Reshape.
surmeh01bceff2f2018-03-29 16:29:27 +01002153
2154 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
2155 if (tfDataType != tensorflow::DT_INT32)
2156 {
telsoa01c577f2c2018-08-31 09:22:23 +01002157 throw ParseException(
2158 boost::str(
2159 boost::format(
2160 "Armnn only supports DT_INT32 as out_type. Got %1% for Node %2% %3%")
2161 % tensorflow::DataType_Name(tfDataType)
2162 % nodeDef.name()
2163 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002164 }
2165
2166 const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2167 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2168 const TensorInfo& prevLayerTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2169 unsigned int prevLayerDimensions = prevLayerTensorInfo.GetNumDimensions();
2170
2171 std::vector<int32_t> shapeTensorData;
2172 shapeTensorData.reserve(prevLayerDimensions);
2173
2174 for (unsigned int i=0; i<prevLayerDimensions; ++i)
2175 {
2176 shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.GetShape()[i]));
2177 }
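    // Example: for an input of shape [1, 224, 224, 3], Shape is folded into a constant int32 tensor
    // holding the values { 1, 224, 224, 3 }.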
2178
2179 TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
2180
2181 return std::make_unique<ParsedConstTfOperation<int32_t>>(this,
2182 nodeDef,
2183 &shapeTensorData[0],
2184 shapeTensorInfo);
2185}
2186
2187ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
2188 const tensorflow::GraphDef& graphDef)
2189{
2190 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2191 ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
2192
2193 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2194 {
telsoa01c577f2c2018-08-31 09:22:23 +01002195 throw ParseException(
2196 boost::str(
2197 boost::format(
2198 "ArmNN only supports Reshape layers with constant shapes. "
2199 "Input %1% Node %2% %3%")
2200 % inputs[1].m_IndexedValue->GetNode().name()
2201 % nodeDef.name()
2202 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002203 }
2204 ParsedConstTfOperation<int32_t>* shapeNode =
2205 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2206
2207 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
2208 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2209
2210 std::vector<int32_t> shapeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002211 ConstTensor shapeTensor = shapeNode->GetConstTensor(shapeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002212 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
2213
2214 TensorShape targetShape = outputTensorInfo.GetShape();
2215 ReshapeDescriptor reshapeDesc;
2216 reshapeDesc.m_TargetShape = targetShape;
2217
2218 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2219 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2220 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2221
2222 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2223}
2224
2225ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
2226 const tensorflow::GraphDef& graphDef)
2227{
2228 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2229
2230 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2231 {
telsoa01c577f2c2018-08-31 09:22:23 +01002232 throw ParseException(
2233 boost::str(
2234 boost::format(
2235 "ArmNN only supports ResizeBilinear layers with constant sizes. "
2236 "Input %1%. Node %2% %3%")
2237 % inputs[1].m_IndexedValue->GetNode().name()
2238 % nodeDef.name()
2239 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002240 }
2241 ParsedConstTfOperation<int32_t>* sizeNode =
2242 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2243
telsoa01c577f2c2018-08-31 09:22:23 +01002244 // Checks the align_corners attribute is not set.
surmeh01bceff2f2018-03-29 16:29:27 +01002245 if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
2246 {
telsoa01c577f2c2018-08-31 09:22:23 +01002247 throw ParseException(
2248 boost::str(
2249 boost::format(
2250 "ArmNN only supports ResizeBilinear layers with align_corners set to false. "
2251 "Node %1% %2%")
2252 % nodeDef.name()
2253 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002254 }
2255
telsoa01c577f2c2018-08-31 09:22:23 +01002256 // Data for the parsed tensor args (size) must be stored locally.
surmeh01bceff2f2018-03-29 16:29:27 +01002257 std::vector<int32_t> sizeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002258 ConstTensor sizeTensor = sizeNode->GetConstTensor(sizeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002259
telsoa01c577f2c2018-08-31 09:22:23 +01002260 // The descriptor only has target height and width attributes, which we get from the size tensor.
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002261 ResizeDescriptor desc;
2262 desc.m_Method = armnn::ResizeMethod::Bilinear;
surmeh01bceff2f2018-03-29 16:29:27 +01002263 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002264 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2265 desc.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002266
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002267 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, nodeDef.name().c_str());
surmeh01bceff2f2018-03-29 16:29:27 +01002268
2269 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2270 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
telsoa01c577f2c2018-08-31 09:22:23 +01002271    // The input shape is always in NHWC format and is used as-is (the layer is configured with an NHWC data layout);
 2272    // take the batch and channels from it to make up the ArmNN output shape together with the target size.
surmeh01bceff2f2018-03-29 16:29:27 +01002273 unsigned int outBatch = inputTensorInfo.GetShape()[0];
2274 unsigned int outChannels = inputTensorInfo.GetShape()[3];
2275 unsigned int outHeight = desc.m_TargetHeight;
2276 unsigned int outWidth = desc.m_TargetWidth;
jimfly018a121502018-12-06 16:19:52 +00002277 TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
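    // Example: an input of shape [1, 16, 16, 3] resized with size = [32, 32] gives an output of shape [1, 32, 32, 3].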
telsoa01c577f2c2018-08-31 09:22:23 +01002278 // The output DataType is always Float32, regardless of the input DataType.
surmeh01bceff2f2018-03-29 16:29:27 +01002279 const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
2280 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2281
jimfly018a121502018-12-06 16:19:52 +00002282 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01002283
2284 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2285}
2286
2287TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
2288{
2289 BOOST_ASSERT(nodeDef.op() == "Squeeze");
2290 tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
2291
2292 DataType type;
2293 if (tfDataType == tensorflow::DT_FLOAT)
2294 {
2295 type = DataType::Float32;
2296 }
2297 else if (tfDataType == tensorflow::DT_INT32)
2298 {
2299 type = DataType::Signed32;
2300 }
2301 else
2302 {
telsoa01c577f2c2018-08-31 09:22:23 +01002303 throw ParseException(
2304 boost::str(
2305 boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
2306 % tensorflow::DataType_Name(tfDataType)
2307 % nodeDef.name()
2308 % CHECK_LOCATION().AsString()));
2309 }
 2310
2312 if (inputTensorInfo.GetNumDimensions() > 4)
2313 {
2314 throw ParseException(
2315 boost::str(
2316 boost::format(
2317 "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
2318 % inputTensorInfo.GetNumDimensions()
2319 % nodeDef.name()
2320 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002321 }
2322
2323 std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
telsoa01c577f2c2018-08-31 09:22:23 +01002324 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2325
surmeh01bceff2f2018-03-29 16:29:27 +01002326 if (squeezeDims.empty())
2327 {
telsoa01c577f2c2018-08-31 09:22:23 +01002328 squeezeDims.assign(dimensionSequence,
2329 dimensionSequence+inputTensorInfo.GetNumDimensions());
surmeh01bceff2f2018-03-29 16:29:27 +01002330 }
2331
2332 std::vector<uint32_t> outputDims;
2333 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2334 {
telsoa01c577f2c2018-08-31 09:22:23 +01002335 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2336 auto currentDimension = inputTensorInfo.GetShape()[i];
2337 if (skipSqueeze || currentDimension != 1)
surmeh01bceff2f2018-03-29 16:29:27 +01002338 {
telsoa01c577f2c2018-08-31 09:22:23 +01002339 outputDims.push_back(currentDimension);
surmeh01bceff2f2018-03-29 16:29:27 +01002340 }
2341 }
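    // Example: an input of shape [1, 7, 7, 1] with squeeze_dims = [0] gives [7, 7, 1];
    // with no squeeze_dims attribute every dimension of size 1 is removed, giving [7, 7].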
2342
2343 if (outputDims.size() > 4)
2344 {
telsoa01c577f2c2018-08-31 09:22:23 +01002345 throw ParseException(
2346 boost::str(
2347 boost::format(
2348 "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
2349 % outputDims.size()
2350 % nodeDef.name()
2351 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002352 }
2353
telsoa01c577f2c2018-08-31 09:22:23 +01002354 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2355 outputDims.data());
2356
2357 TensorInfo outTensorInfo = inputTensorInfo;
2358 outTensorInfo.SetShape(outShape);
2359 outTensorInfo.SetDataType(type);
surmeh01bceff2f2018-03-29 16:29:27 +01002360
2361 return outTensorInfo;
2362}
2363
2364ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2365{
2366 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2367
2368 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2369 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2370
2371 TensorInfo outputInfo;
2372 outputInfo = OutputShapeOfSqueeze(nodeDef, inputTensorInfo);
2373
2374 ReshapeDescriptor reshapeDesc;
2375 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2376 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2377 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2378 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2379
2380 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2381}
2382
2383ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2384{
2385 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2386
2387 NormalizationDescriptor normalizationDescriptor;
2388 normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2389 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
2390 normalizationDescriptor.m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef, "alpha");
2391 normalizationDescriptor.m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef, "beta");
2392 normalizationDescriptor.m_K = ReadMandatoryNodeFloatAttribute(nodeDef, "bias");
2393 normalizationDescriptor.m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef, "depth_radius");
ruoyan018174f362018-12-04 18:24:08 +00002394 normalizationDescriptor.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002395
2396 // The window size must be an odd value. For a window size of (2 * n + 1), TensorFlow defines depth_radius = n.
2397 normalizationDescriptor.m_NormSize = normalizationDescriptor.m_NormSize * 2 + 1;
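    // Example: depth_radius = 2 in TensorFlow corresponds to an ArmNN normalization window size of 5.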
2398
2399 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002400 IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor,
2401 nodeDef.name().c_str());
ruoyan018174f362018-12-04 18:24:08 +00002402 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2403 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
surmeh01bceff2f2018-03-29 16:29:27 +01002404
2405 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2406}
2407
 2408/// A ParsedTfOperation for a MatMul node.
telsoa01c577f2c2018-08-31 09:22:23 +01002409/// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because
2410/// MatMul nodes are often used for the first part of a biased FullyConnected (MatMul followed
2411/// by Add) and in these cases armnn doesn't need a separate layer for the MatMul.
2412///
surmeh01bceff2f2018-03-29 16:29:27 +01002413class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
2414{
2415public:
2416 ParsedMatMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
2417 : DeferredSingleLayerParsedTfOperation(parser, node)
2418 {
2419 }
2420
2421 void CreateLayerDeferred() override
2422 {
2423 BOOST_ASSERT(m_Layer == nullptr);
2424 m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
2425 }
2426};
2427
2428ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2429{
telsoa01c577f2c2018-08-31 09:22:23 +01002430 // Defers the creation of the layer (see ParsedMatMulTfOperation).
surmeh01bceff2f2018-03-29 16:29:27 +01002431 return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
2432}
2433
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002434ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2435{
2436 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2437 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2438 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2439
2440 if (inputs.size() != 2)
2441 {
2442 throw ParseException(
 2443            boost::str(boost::format("Mean expects two inputs. Got %1% for Node %2% %3%")
2444 % inputs.size()
2445 % nodeDef.name()
2446 % CHECK_LOCATION().AsString()));
2447 }
2448
2449 bool keepDims = ReadMandatoryNodeBoolAttribute(nodeDef, "keep_dims");
2450
2451 ParsedConstTfOperation<int32_t>* axisNode =
2452 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2453
2454 const TensorInfo& axisTensorInfo = axisNode->GetTensorInfo();
2455
2456 ConstTensor axisTensor(axisTensorInfo, axisNode->GetStorage());
2457 const int* axisData = static_cast<const int*>(axisTensor.GetMemoryArea());
2458
2459 TensorInfo outputTensorInfo;
2460 MeanDescriptor meanDescriptor;
2461 meanDescriptor.m_KeepDims = keepDims;
2462
 2463    // Negative axis values are supported; they are converted into the
 2464    // corresponding positive ones.
2465 // Duplicate values are also removed.
2466 std::vector<int> rawAxisVector(axisData, axisData + axisTensorInfo.GetNumElements());
2467 std::set<unsigned int> positiveAxisSet;
2468 int rank = static_cast<int>(inputTensorInfo.GetNumDimensions());
2469
2470 std::transform(rawAxisVector.begin(), rawAxisVector.end(),
2471 std::inserter(positiveAxisSet, positiveAxisSet.begin()),
2472 [rank](int i) -> unsigned int { return static_cast<unsigned int>((i + rank) % rank); });
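    // Example: for a rank 4 input, axis values [-1, 3] normalize to the single positive axis {3}.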
2473
2474 CalculateReducedOutputTensoInfo(inputTensorInfo, axisTensorInfo, positiveAxisSet, keepDims, outputTensorInfo);
2475
2476 if (inputTensorInfo.GetNumDimensions() > positiveAxisSet.size())
2477 {
2478 meanDescriptor.m_Axis.assign(positiveAxisSet.begin(), positiveAxisSet.end());
2479 }
2480
2481 IConnectableLayer* layer = m_Network->AddMeanLayer(meanDescriptor, nodeDef.name().c_str());
2482 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2483 inputSlot.Connect(layer->GetInputSlot(0));
2484
2485 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2486}
2487
telsoa01c577f2c2018-08-31 09:22:23 +01002488/// A ParsedTfOperation for a Mul node.
2489/// Creation of the armnn Mul layer is deferred until it is actually needed, because Mul nodes
2490/// are also used for the first part of a leaky relu activation function (Mul followed by Maximum)
2491/// and in these cases armnn doesn't need a separate layer for the Mul.
2492///
2493class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
2494{
2495public:
2496 ParsedMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
2497 : DeferredSingleLayerParsedTfOperation(parser, node)
2498 {
2499 }
2500
2501 void CreateLayerDeferred() override
2502 {
2503 BOOST_ASSERT(m_Layer == nullptr);
2504 m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
2505 }
2506};
2507
surmeh01bceff2f2018-03-29 16:29:27 +01002508ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2509{
2510 boost::ignore_unused(graphDef);
2511
telsoa01c577f2c2018-08-31 09:22:23 +01002512 return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002513}
2514
2515ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
2516 const tensorflow::GraphDef& graphDef)
2517{
2518 boost::ignore_unused(graphDef);
2519
2520 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
2521
2522 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
2523
2524 auto it = m_InputShapes.find(nodeDef.name());
2525 if (it == m_InputShapes.end())
2526 {
telsoa01c577f2c2018-08-31 09:22:23 +01002527 throw ParseException(
2528 boost::str(
2529 boost::format(
2530 "Missing input shape for Placeholder '%1%' %2%")
2531 % nodeDef.name()
2532 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002533 }
2534 TensorInfo tensorInfo(it->second, DataType::Float32);
2535
2536 IConnectableLayer* const layer = m_Network->AddInputLayer(layerId, nodeDef.name().c_str());
2537
2538 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2539
2540 TrackInputBinding(layer, layerId, tensorInfo);
2541
2542 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2543}
2544
saoste01bbd40612018-08-28 15:41:51 +01002545ParsedTfOperationPtr TfParser::ParseRealDiv(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2546{
2547 boost::ignore_unused(graphDef);
2548 return AddRealDivLayer(nodeDef);
2549}
2550
surmeh01bceff2f2018-03-29 16:29:27 +01002551ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
2552 const tensorflow::GraphDef& graphDef)
2553{
2554 boost::ignore_unused(graphDef);
2555
2556 ActivationDescriptor activationDesc;
2557 activationDesc.m_Function = ActivationFunction::ReLu;
2558 return AddActivationLayer(nodeDef, activationDesc);
2559}
2560
2561ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
2562 const tensorflow::GraphDef& graphDef)
2563{
2564 boost::ignore_unused(graphDef);
2565
    ActivationDescriptor activationDesc;
    activationDesc.m_Function = ActivationFunction::BoundedReLu;
    activationDesc.m_A = 6.0f;
    activationDesc.m_B = 0.0f;

    return AddActivationLayer(nodeDef, activationDesc);
}

ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
                                            const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);

    ActivationDescriptor activationDesc;
    activationDesc.m_Function = ActivationFunction::Sigmoid;

    return AddActivationLayer(nodeDef, activationDesc);
}

ParsedTfOperationPtr TfParser::ParseRsqrt(const tensorflow::NodeDef &nodeDef,
                                          const tensorflow::GraphDef &graphDef)
{
    boost::ignore_unused(graphDef);

    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);

    IConnectableLayer* const layer = m_Network->AddRsqrtLayer(nodeDef.name().c_str());

    IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}

ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
                                            const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);

    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);

    SoftmaxDescriptor softmaxDescriptor;
    IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(softmaxDescriptor, nodeDef.name().c_str());

    IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    prevLayerSlot.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(prevLayerSlot.GetTensorInfo());

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}

ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef,
                                          const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);

    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
    unsigned int numInputs = static_cast<unsigned int>(nodes.size());
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);

    // Index of the constant (axis) tensor among the inputs.
    unsigned int index = GetConstInputIndex(inputs);
    // Gets the axis tensor data.
    ParsedConstTfOperation<int32_t>* shapeNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);

    std::vector<int32_t> axisTensorData;
    shapeNode->GetConstTensor(axisTensorData);

    // The split dimension must be the channel dimension: 3 for NHWC, 1 for NCHW.
    const unsigned int splitDim = static_cast<unsigned int>(axisTensorData[0]);

    // ArmNN supports splitting along the channel dimension only, for both NHWC and NCHW data formats.
    if (splitDim == 0 || splitDim == 2)
    {
        throw armnn::ParseException(
            boost::str(
                boost::format(
                    "Dimension %1% for split is not supported by ArmNN. "
                    "Node %2% %3%")
                    % splitDim
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    // ArmNN only supports splitter outputs of equal shape, so only the scalar num_split attribute is
    // supported: every output gets the same extent along the split dimension.
    uint32_t num_split = ReadMandatoryNodeUint32Attribute(nodeDef, "num_split");

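    // TF Split carries the split axis as a constant input and num_split as an attribute; the data
    // tensor is whichever input is not the constant located above, hence index (1 - index).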
    IOutputSlot& inputSlot = inputs[1 - index].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1 - index].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    const unsigned int supportedNumDims = 4;
    auto inputDimSize = inputTensorInfo.GetNumDimensions();

    if (inputDimSize != supportedNumDims)
    {
        throw armnn::ParseException(
            boost::str(
                boost::format(
                    "The number of dimensions: %1% for input tensors of the "
                    "split op should be %2% %3%")
                    % inputTensorInfo.GetNumDimensions()
                    % supportedNumDims
                    % CHECK_LOCATION().AsString()));
    }

    std::vector<unsigned int> splitterDimSizes(inputDimSize);

    // Add current input shape to splitterDimSizes
    for (unsigned int i = 0; i < inputDimSize; ++i)
    {
        splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
    }

    if (splitterDimSizes[splitDim] % num_split != 0)
    {
        throw ParseException("Number of splits must evenly divide the dimension");
    }
    splitterDimSizes[splitDim] /= num_split;

    SplitterDescriptor splitDesc(num_split);
    for (unsigned int g = 0; g < num_split; ++g)
    {
        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
        {
            splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
        }
        splitDesc.SetViewOriginCoord(g, splitDim, splitterDimSizes[splitDim] * g);
    }
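    // At this point each view g has the full input shape except along splitDim, where it covers
    // 1/num_split of the original extent, with its origin offset by g views. For example, an NCHW
    // input of shape {1, 18, 4, 4} with splitDim = 1 and num_split = 3 produces three views of
    // shape {1, 6, 4, 4} whose channel origins are 0, 6 and 12.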

    IConnectableLayer *layer = m_Network->AddSplitterLayer(splitDesc, nodeDef.name().c_str());

    inputSlot.Connect(layer->GetInputSlot(0));

    TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
                                       splitterDimSizes.data());

    for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
    {
        layer->GetOutputSlot(i).SetTensorInfo(armnn::TensorInfo(outShape, inputTensorInfo.GetDataType()));
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}

ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
                                             const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);

    ActivationDescriptor activationDesc;
    activationDesc.m_Function = ActivationFunction::SoftReLu;

    return AddActivationLayer(nodeDef, activationDesc);
}

ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);

    ActivationDescriptor activationDesc;
    activationDesc.m_Function = ActivationFunction::TanH;
    activationDesc.m_A = 1.0f;
    activationDesc.m_B = 1.0f;

    return AddActivationLayer(nodeDef, activationDesc);
}

ParsedTfOperationPtr TfParser::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
                                                  ActivationDescriptor& activationDesc)
{
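    // Common helper used by ParseRelu, ParseRelu6, ParseSigmoid, ParseSoftplus and ParseTanh: it
    // wires up a single-input activation layer and forwards the input's tensor info to the output.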
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);

    IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, nodeDef.name().c_str());

    IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}

ParsedTfOperationPtr TfParser::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
                                            const tensorflow::GraphDef& graphDef)
{
    return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
}

ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
                                            const tensorflow::GraphDef& graphDef)
{
    return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
}

ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
                                              const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    if (inputs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "2D Pooling expects one input. Got %1% for Node %2% %3%")
                    % inputs.size()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
    std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef, "ksize"); // size of pool windows

    Pooling2dDescriptor pooling2dDescriptor;
    pooling2dDescriptor.m_PoolType = pooltype;
    pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
    pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
    pooling2dDescriptor.m_DataLayout = dataLayout;
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

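    // DataLayoutIndexed exposes the positions of the H, W and C dimensions for the chosen layout
    // (NHWC: H=1, W=2, C=3; NCHW: C=1, H=2, W=3), so the TF "strides" and "ksize" lists can be
    // indexed the same way for either data_format.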
    pooling2dDescriptor.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    pooling2dDescriptor.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
    pooling2dDescriptor.m_PoolWidth = ksize[dataLayoutIndexed.GetWidthIndex()];
    pooling2dDescriptor.m_PoolHeight = ksize[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    if (paddingString == "SAME")
    {
        padding = true;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(pooling2dDescriptor.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                 static_cast<float>(pooling2dDescriptor.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        outputHeight = static_cast<uint32_t>(ceil(
            static_cast<float>(inputHeight - pooling2dDescriptor.m_PoolHeight + 1) /
            static_cast<float>(pooling2dDescriptor.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(
            static_cast<float>(inputWidth - pooling2dDescriptor.m_PoolWidth + 1) /
            static_cast<float>(pooling2dDescriptor.m_StrideX)));
    }
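    // These are the standard TensorFlow output-size formulas:
    //   SAME : out = ceil(in / stride)
    //   VALID: out = ceil((in - pool + 1) / stride)
    // e.g. with in = 7, pool = 3, stride = 2: SAME gives ceil(7/2) = 4, VALID gives ceil(5/2) = 3.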

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      inputTensorInfo.GetShape()[3] },
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      inputTensorInfo.GetShape()[1],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

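    // CalcPadding fills in the left/right and top/bottom pad values needed to reach the output
    // sizes computed above; the 'padding' flag is true only for SAME, so VALID should end up with
    // zero padding.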
    CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX,
                pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
    CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY,
                pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
    if (layer == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Failed to add pooling2d layer for %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}

ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);

    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    if (isBiasAdd)
    {
        // BiasAdd takes the bias as a 1D tensor. Add a reshape layer that expands it to a 4D tensor
        // with the bias data placed in the correct dimension, so it broadcasts in the addition.
        if (input1Info.GetNumDimensions() != 1)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Unsupported bias for BiasAdd. It should be a 1D vector. "
                        "Got %1% dimensions for input %2%. Node %3% %4%")
                        % input1Info.GetNumDimensions()
                        % inputs[1].m_IndexedValue->GetNode().name()
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }

        const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");

        CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
        input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
    }
    else
    {
        if (input0Info.GetNumDimensions() == 1)
        {
            const bool isNHWC = true;
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
        }

        if (input1Info.GetNumDimensions() == 1)
        {
            const bool isNHWC = true;
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
        }
    }
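    // After the reshapes above, any 1D operand has been expanded to the rank of the other input,
    // so the addition can broadcast it element-wise.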

    IConnectableLayer* const layer = m_Network->AddAdditionLayer(nodeDef.name().c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    if (input0Info.GetNumDimensions() == input1Info.GetNumDimensions())
    {
        const TensorShape& input0Shape = input0Info.GetShape();
        const TensorShape& input1Shape = input1Info.GetShape();

        std::vector<unsigned int> outputShape;
        outputShape.reserve(input0Shape.GetNumDimensions());
        TensorInfo outputInfo(input0Info);

        for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
        {
            outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
        }

        outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));

        layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    }
    else if (input0Info.GetNumDimensions() == 1 && isBiasAdd == false)
    {
        layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
    }
    else
    {
        layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}

ParsedTfOperationPtr TfParser::AddRealDivLayer(const tensorflow::NodeDef& nodeDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IConnectableLayer* const layer = m_Network->AddDivisionLayer(nodeDef.name().c_str());
    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);

    auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
    auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();

    if (input0NumDims < input1NumDims)
    {
        const bool isNHWC = true;
        input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
    }
    if (input1NumDims < input0NumDims)
    {
        const bool isNHWC = true;
        input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
    }

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    if (input0NumDims < input1NumDims)
    {
        layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
    }
    else
    {
        layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
    }
    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}

ParsedTfOperationPtr TfParser::AddMaximumLayer(const tensorflow::NodeDef& nodeDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);

    auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
    auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();

    if (input0NumDims < input1NumDims)
    {
        const bool isNHWC = true;
        input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
    }
    if (input1NumDims < input0NumDims)
    {
        const bool isNHWC = true;
        input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
    }

    IConnectableLayer* const layer = m_Network->AddMaximumLayer(nodeDef.name().c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    TensorInfo outputInfo = input0Slot->GetTensorInfo();
    std::vector<unsigned int> outputShape;

    const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
    const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();

    for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
    {
        outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
    }

    outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}

IConnectableLayer* TfParser::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);

    auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
    auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();

    if (input0NumDims < input1NumDims)
    {
        const bool isNHWC = true;
        input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
    }
    if (input1NumDims < input0NumDims)
    {
        const bool isNHWC = true;
        input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
    }

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    if (input0NumDims < input1NumDims)
    {
        layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
    }
    else
    {
        layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
    }
    return layer;
}

IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
    const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
{
    // Finds bias const (if applicable).
    ParsedConstTfOperation<float>* biasNode = nullptr;
    if (addNodeDef != nullptr)
    {
        std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
        // Finds our inputs.
        if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
        {
            biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
        }
        else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
        {
            biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
        }
        else
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "ArmNN only supports fully connected layers with constant bias. "
                        "Inputs %1% and %2%. AddNode %3%. MatMulNode %4% %5%")
                        % addInputs[0].m_IndexedValue->GetNode().name()
                        % addInputs[1].m_IndexedValue->GetNode().name()
                        % addNodeDef->name()
                        % matMulNodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
    }

    // Finds matmul inputs.
    ParsedConstTfOperation<float>* weightNode = nullptr;
    ParsedTfOperation* inputNode = nullptr;
    unsigned int inputIdx = 0;
    std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
    if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
    {
        weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
        inputNode = mulInputs[1].m_IndexedValue;
        inputIdx = mulInputs[1].m_Index;
    }
    else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
    {
        weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
        inputNode = mulInputs[0].m_IndexedValue;
        inputIdx = mulInputs[0].m_Index;
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports fully connected layers with constant weights. "
                    "Inputs %1% and %2%. MatMulNode %3% %4%")
                    % mulInputs[0].m_IndexedValue->GetNode().name()
                    % mulInputs[1].m_IndexedValue->GetNode().name()
                    % matMulNodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    std::vector<float> weightTensorData;
    // Handles weight.
    ConstTensor weights = weightNode->GetConstTensor(weightTensorData);

    FullyConnectedDescriptor desc;
    desc.m_BiasEnabled = addNodeDef != nullptr;

    IConnectableLayer* layer = nullptr;
    Optional<ConstTensor> optionalBiases;
    std::vector<float> biasTensorData;
    // Makes the layer.
    if (addNodeDef != nullptr)
    {
        ConstTensor biases = biasNode->GetConstTensor(biasTensorData);

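        // For TF MatMul the constant weights are laid out as [inputSize, outputSize], so the second
        // weight dimension gives the layer's output width and must match the bias length.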
        if (weights.GetShape()[1] != biases.GetShape()[0])
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Shape of matmul weights and bias do not match. "
                        "AddNode %1%. MatMulNode %2% %3%")
                        % addNodeDef->name()
                        % matMulNodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }

        optionalBiases = Optional<ConstTensor>(biases);
    }
    layer = m_Network->AddFullyConnectedLayer(desc, weights, optionalBiases, armnnLayerName);

    BOOST_ASSERT(layer != nullptr);

    inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
    unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];

    // Handles output.
    TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    return layer;
}

void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    // Gets the type of the node (assume float).
    tensorflow::DataType type = tensorflow::DT_FLOAT;
    if (nodeDef.attr().count("T") != 0)
    {
        auto attr = nodeDef.attr().at("T");
        type = attr.type();
    }
    else if (nodeDef.attr().count("dtype") != 0)
    {
        auto attr = nodeDef.attr().at("dtype");
        type = attr.type();
    }

    if ((type != tensorflow::DT_FLOAT && type != tensorflow::DT_INT32) && nodeDef.op() != "Const")
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Currently only FLOAT and INT32 are supported for tensorflow nodes (apart from Const). "
                    "Got %1% for Node %2% %3%")
                    % tensorflow::DataType_Name(type)
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    const std::string& operation = nodeDef.op();
    auto itControlInput = std::find(m_ControlInputs.begin(), m_ControlInputs.end(), operation);
    if (itControlInput != m_ControlInputs.end())
    {
        // Control-input operations from the TensorFlow graph are accepted but not added to the ArmNN graph.
        return;
    }
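    // ms_OperationNameToParsingFunctions maps TF op names (e.g. "Relu", "Softmax", "Split") to the
    // member parse functions above; each returns a ParsedTfOperation that later nodes resolve their
    // inputs against via ResolveArmnnOutputSlot.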
    auto it = ms_OperationNameToParsingFunctions.find(operation);
    if (it != ms_OperationNameToParsingFunctions.end())
    {
        auto func = it->second;
        ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
        ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();

        // Stores the parsed operation so that dependent layers can connect to it.
        auto it = m_ParsedTfOperations.find(nodeDef.name());
        if (it != m_ParsedTfOperations.end())
        {
            throw ParseException(boost::str(boost::format("Name %1% used by more than one node") % nodeDef.name()));
        }
        m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);

        // If this node was requested as an output from the network, then adds an ArmNN output layer.
        if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
            m_RequestedOutputs.end())
        {
            auto outId = ParseOutputId(nodeDef.name());
            const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
            IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);

            TensorInfo tensorInfo = prevSlot.GetTensorInfo();

            IConnectableLayer* outputLayer = m_Network->AddOutputLayer(layerId, nodeDef.name().c_str());

            prevSlot.Connect(outputLayer->GetInputSlot(0));

            TrackOutputBinding(outputLayer, layerId, tensorInfo);
        }
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Unsupported operation %1% in tensorflow::GraphDef %2%")
                    % operation
                    % CHECK_LOCATION().AsString()));
    }
}

void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
{
    // Adds all nodes to our map.
    m_NodesByName.clear();
    m_NetworkInputsBindingInfo.clear();
    m_NetworkOutputsBindingInfo.clear();

    for (int i = 0; i < graphDef.node_size(); ++i)
    {
        const tensorflow::NodeDef& node = graphDef.node(i);
        m_NodesByName[node.name()] = &node;
    }

    // Checks that the input nodes the user has requested exist.
    for (const auto& pair : m_InputShapes)
    {
        const std::string& requestedInputName = pair.first;
        auto nodeIt = m_NodesByName.find(requestedInputName);
        if (nodeIt == m_NodesByName.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Couldn't find requested input node '%1%' in graph %2%")
                        % requestedInputName
                        % CHECK_LOCATION().AsString()));
        }
    }

    // Finds the output nodes the user requested.
    std::vector<const tensorflow::NodeDef*> targetNodes;
    for (const std::string& requestedOutputName : m_RequestedOutputs)
    {
        auto nodeIt = m_NodesByName.find(requestedOutputName);
        if (nodeIt == m_NodesByName.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Couldn't find requested output node '%1%' in graph %2%")
                        % requestedOutputName
                        % CHECK_LOCATION().AsString()));
        }
        targetNodes.push_back(nodeIt->second);
    }

    // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
    std::vector<const tensorflow::NodeDef*> sortedNodes;
    if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
        targetNodes,
        [this](const tensorflow::NodeDef* node)
        {
            auto outputs = GetTfInputNodes(*node);
            std::vector<const tensorflow::NodeDef*> nodesOnly;
            for (const auto & o : outputs) {
                nodesOnly.push_back(o.m_IndexedValue);
            }
            return nodesOnly;
        },
        sortedNodes))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Cycle detected in graph %1%")
                    % CHECK_LOCATION().AsString()));
    }

    // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
    for (const auto& it : sortedNodes)
    {
        const tensorflow::NodeDef& currentNode = *it;
        LoadNodeDef(currentNode, graphDef);
    }
}

INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
                                                const std::map<std::string, TensorShape>& inputShapes,
                                                const std::vector<std::string>& requestedOutputs)
{
    FILE* fd = fopen(graphFile, "r");

    if (fd == nullptr)
    {
        throw FileNotFoundException(
            boost::str(
                boost::format(
                    "Graph file %1% failed to open %2%")
                    % graphFile
                    % CHECK_LOCATION().AsString()));
    }

    // Parses the file into a message.
    tensorflow::GraphDef graphDef;
    auto input = new google::protobuf::io::FileInputStream(fileno(fd));
    bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
    delete input;
    fclose(fd);

    if (!success)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Failed to parse graph file %1%")
                    % CHECK_LOCATION().AsString()));
    }

    return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
}

INetworkPtr TfParser::CreateNetworkFromString(const char* protoText,
                                              const std::map<std::string, TensorShape>& inputShapes,
                                              const std::vector<std::string>& requestedOutputs)
{
    // Parses the string into a message.
    tensorflow::GraphDef graphDef;
    bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);

    if (!success)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Failed to parse graph string %1%")
                    % CHECK_LOCATION().AsString()));
    }

    return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
}

INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
                                                  const std::map<std::string, TensorShape>& inputShapes,
                                                  const std::vector<std::string>& requestedOutputs)
{
    FILE* fd = fopen(graphFile, "rb");

    if (fd == nullptr)
    {
        throw FileNotFoundException(
            boost::str(
                boost::format(
                    "Graph file %1% failed to open %2%")
                    % graphFile
                    % CHECK_LOCATION().AsString()));
    }

    // Parses the file into a message.
    tensorflow::GraphDef graphDef;

    google::protobuf::io::FileInputStream inStream(fileno(fd));
    google::protobuf::io::CodedInputStream codedStream(&inStream);
    codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
    bool success = graphDef.ParseFromCodedStream(&codedStream);
    fclose(fd);

    if (!success)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Failed to parse protobuf file %1% %2%")
                    % graphFile
                    % CHECK_LOCATION().AsString()));
    }

    return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
}
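
// Typical use of the parse entry points above via the public ITfParser interface (a sketch only;
// "graph.pb", "input" and "output" are placeholder names for an actual frozen graph and its node
// names, and the input shape is illustrative):
//
//     armnnTfParser::ITfParserPtr parser = armnnTfParser::ITfParser::Create();
//     armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile(
//         "graph.pb",
//         { { "input", armnn::TensorShape({ 1, 224, 224, 3 }) } },
//         { "output" });
//     auto inputBinding  = parser->GetNetworkInputBindingInfo("input");
//     auto outputBinding = parser->GetNetworkOutputBindingInfo("output");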

INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
                                                const std::map<std::string, TensorShape>& inputShapes,
                                                const std::vector<std::string>& requestedOutputs)
{
    m_Network = INetwork::Create();

    m_InputShapes = inputShapes;
    if (requestedOutputs.size() == 0)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "requestedOutputs must have at least one entry %1%")
                    % CHECK_LOCATION().AsString()));
    }
    m_RequestedOutputs = requestedOutputs;

    try
    {
        LoadGraphDef(graphDef);
    }
    catch (const ParseException& e)
    {
        Cleanup();
        throw e;
    }

    Cleanup();

    return std::move(m_Network);
}

void TfParser::Cleanup()
{
    // Cleanup, in case we reuse this parser.
    m_InputShapes.clear();
    m_RequestedOutputs.clear();
    m_NodesByName.clear();
    m_ParsedTfOperations.clear();
}

BindingPointInfo TfParser::GetNetworkInputBindingInfo(const std::string& name) const
{
    return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
}

BindingPointInfo TfParser::GetNetworkOutputBindingInfo(const std::string& name) const
{
    return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
}

std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(const std::string& layerName,
    const char* bindingPointDesc,
    const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
{
    auto it = nameToBindingInfo.find(layerName);
    if (it == nameToBindingInfo.end())
    {
        throw InvalidArgumentException(
            boost::str(
                boost::format(
                    "Unknown %1% '%2%' %3%")
                    % bindingPointDesc
                    % layerName
                    % CHECK_LOCATION().AsString()));
    }
    return it->second;
}

void TfParser::TrackInputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
{
    return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
}

void TfParser::TrackOutputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
{
    return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
}

void TfParser::TrackBindingPoint(IConnectableLayer* layer,
    LayerBindingId id,
    const TensorInfo& tensorInfo,
    const char* bindingPointDesc,
    std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
{
    const std::string layerName = layer->GetName();
    auto it = nameToBindingInfo.find(layerName);
    if (it == nameToBindingInfo.end())
    {
        nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Id %1% used by more than one %2% layer %3%")
                    % id
                    % bindingPointDesc
                    % CHECK_LOCATION().AsString()));
    }
}

} // namespace armnnTfParser