blob: 51423bf6a788550e1523a1e861ad754c3cd75bad [file] [log] [blame]
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
surmeh01bceff2f2018-03-29 16:29:27 +01006#include "TfParser.hpp"
7
surmeh01bceff2f2018-03-29 16:29:27 +01008#include <armnn/TypesUtils.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +01009#include <armnn/Descriptors.hpp>
10
11#include <GraphTopologicalSort.hpp>
Sadik Armagan479045b2018-10-01 11:51:37 +010012#include <ParserHelper.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010013#include <Permute.hpp>
Matteo Martincigh46315822018-11-28 16:22:36 +000014#include <DataLayoutIndexed.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010015
16#include <google/protobuf/io/zero_copy_stream_impl.h>
17#include <google/protobuf/text_format.h>
18
19#include "tensorflow/core/framework/graph.pb.h"
surmeh01bceff2f2018-03-29 16:29:27 +010020
surmeh01bceff2f2018-03-29 16:29:27 +010021#include <boost/format.hpp>
22#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010023#include <boost/format.hpp>
24#include <boost/numeric/conversion/cast.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010025#include <boost/polymorphic_cast.hpp>
26
surmeh01bceff2f2018-03-29 16:29:27 +010027#include <numeric>
surmeh01bceff2f2018-03-29 16:29:27 +010028
Matteo Martincigh46315822018-11-28 16:22:36 +000029using namespace armnnUtils;
surmeh01bceff2f2018-03-29 16:29:27 +010030using namespace armnn;
31
32namespace armnnTfParser
33{
34namespace
35{
36
37const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
38const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
39
surmeh01bceff2f2018-03-29 16:29:27 +010040
41template <typename Callable>
42void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
43 const std::string& attribName,
44 tensorflow::AttrValue::ValueCase expectedValueCase,
45 Callable callable)
46{
47 auto iter = nodeDef.attr().find(attribName);
48 if (iter != nodeDef.attr().end())
49 {
50 const auto& attrValue = iter->second;
51 if (attrValue.value_case() == expectedValueCase)
52 {
53 callable(attrValue);
54 }
55 else
56 {
telsoa01c577f2c2018-08-31 09:22:23 +010057 throw ParseException(
58 boost::str(
59 boost::format(
60 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
61 "but found %4% instead %5%")
62 % attribName
63 % nodeDef.name()
64 % static_cast<int>(expectedValueCase)
65 % static_cast<int>(attrValue.value_case())
66 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +010067 }
68 }
69 else
70 {
telsoa01c577f2c2018-08-31 09:22:23 +010071 throw ParseException(
72 boost::str(
73 boost::format(
74 "Could not find required attribute %1% in node %2% %3%")
75 % attribName
76 % nodeDef.name()
77 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +010078 }
79}
80
81template <typename Callable>
82void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
83 const std::string& attribName,
84 tensorflow::AttrValue::ValueCase expectedValueCase,
85 Callable callable)
86{
87 auto iter = nodeDef.attr().find(attribName);
88 if (iter != nodeDef.attr().end())
89 {
90 const auto& attrValue = iter->second;
91 if (attrValue.value_case() == expectedValueCase)
92 {
93 callable(attrValue);
94 }
95 else
96 {
telsoa01c577f2c2018-08-31 09:22:23 +010097 throw ParseException(
98 boost::str(
99 boost::format(
100 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
101 "but found %4% instead %5%")
102 % attribName
103 % nodeDef.name()
104 % static_cast<int>(expectedValueCase)
105 % static_cast<int>(attrValue.value_case())
106 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100107 }
108 }
109}
110
111float ReadMandatoryNodeFloatAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
112{
113 float attribValue = 0.0f;
114 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
115 [&attribValue](const tensorflow::AttrValue& attrValue)
116 {
117 attribValue = attrValue.f();
118 });
119 return attribValue;
120}
121
Conor Kennedyc2130a02018-12-05 11:05:54 +0000122int32_t ReadMandatoryNodeInt32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
123{
124 int32_t attribValue = 0u;
125 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
126 [&attribValue](const tensorflow::AttrValue& attrValue)
127 {
128 attribValue = static_cast<int32_t>(attrValue.i());
129 });
130 return attribValue;
131}
132
Ferran Balaguer51dd62f2019-01-11 19:29:18 +0000133bool ReadMandatoryNodeBoolAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
134{
135 bool attribValue = false;
136 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
137 [&attribValue](const tensorflow::AttrValue& attrValue)
138 {
139 attribValue = static_cast<bool>(attrValue.b());
140 });
141 return attribValue;
142}
143
surmeh01bceff2f2018-03-29 16:29:27 +0100144uint32_t ReadMandatoryNodeUint32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
145{
146 uint32_t attribValue = 0u;
147 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
148 [&attribValue](const tensorflow::AttrValue& attrValue)
149 {
150 attribValue = static_cast<uint32_t>(attrValue.i());
151 });
152 return attribValue;
153}
154
155std::string ReadMandatoryNodeStringAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
156{
157 std::string attribValue = "";
158 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
159 [&attribValue](const tensorflow::AttrValue& attrValue)
160 {
161 attribValue = attrValue.s();
162 });
163 return attribValue;
164}
165
166std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
167 const std::string& name)
168{
169 std::vector<uint32_t> attriList;
170 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
171 [&attriList](const tensorflow::AttrValue& attrValue)
172 {
173 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
174 {
175 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
176 }
177 });
178
179 return attriList;
180}
181
182std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
183 const std::string& name)
184{
185 std::vector<uint32_t> attriList;
186 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
187 [&attriList](const tensorflow::AttrValue& attrValue)
188 {
189 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
190 {
191 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
192 }
193 });
194
195 return attriList;
196}
197
Aron Virginas-Tar2e259272019-11-27 13:29:51 +0000198std::string ReadOptionalNodeStringAttribute(const tensorflow::NodeDef& nodeDef,
199 const std::string& name,
200 const std::string& defaultValue = "")
201{
202 std::string attribValue = defaultValue;
203 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
204 [&attribValue](const tensorflow::AttrValue& attrValue)
205 {
206 attribValue = attrValue.s();
207 });
208 return attribValue;
209}
210
surmeh01bceff2f2018-03-29 16:29:27 +0100211bool ReadOptionalNodeBoolAttribute(const tensorflow::NodeDef& nodeDef,
212 const std::string& name,
213 bool defaultValue = false)
214{
215 bool attribValue = defaultValue;
216 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
217 [&attribValue](const tensorflow::AttrValue& attrValue)
218 {
219 attribValue = attrValue.b();
220 });
221 return attribValue;
222}
223
224tensorflow::DataType ReadMandatoryNodeTypeAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
225{
226 tensorflow::DataType attribValue = tensorflow::DT_INVALID;
227 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
228 [&attribValue](const tensorflow::AttrValue& attrValue)
229 {
230 attribValue = attrValue.type();
231 });
232 return attribValue;
233}
234
235TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& targetDims)
236{
237 std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
238 const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);
239
240 if (stretchDim != targetDims.end())
241 {
242 if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
243 {
telsoa01c577f2c2018-08-31 09:22:23 +0100244 throw ParseException(
245 boost::str(
246 boost::format(
247 "At most one component of shape can be -1 %1%")
248 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100249 }
250
telsoa01c577f2c2018-08-31 09:22:23 +0100251 auto targetNumElements =
252 boost::numeric_cast<unsigned int>(
253 std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
surmeh01bceff2f2018-03-29 16:29:27 +0100254 auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
255 outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
256 }
257
258 TensorInfo reshapeInfo = input;
259 reshapeInfo.SetShape(TensorShape{ static_cast<unsigned int>(outDims.size()), outDims.data() });
260
261 return reshapeInfo;
262}
263
telsoa01c577f2c2018-08-31 09:22:23 +0100264// We need the input0Slot to guide the reshape for input1Slot.
saoste01bbd40612018-08-28 15:41:51 +0100265IOutputSlot* AddBroadcastReshapeLayer(IOutputSlot* input0Slot, IOutputSlot* input1Slot, bool isNHWC,
266 INetwork& m_Network, const tensorflow::NodeDef& nodeDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100267{
268 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
269 const TensorInfo inputTensorInfo = input0Slot->GetTensorInfo();
270 const unsigned int matchDim = inputTensorInfo.GetNumDimensions() - (isNHWC ? 1 : 3);
271 std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
272 std::fill_n(reshapedDimensions.begin(), inputTensorInfo.GetNumDimensions(), 1);
273 reshapedDimensions[matchDim] = input1Info.GetShape()[0];
274
275 armnn::TensorInfo reshapedInfo = input1Info;
276 reshapedInfo.SetShape(TensorShape{ inputTensorInfo.GetNumDimensions(), reshapedDimensions.data() });
277
278 const std::string reshapeLayerName = "reshape_for-" + nodeDef.name();
279 ReshapeDescriptor reshapeDesc;
280 reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
281 IConnectableLayer* const reshapeLayer = m_Network.AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());
282
283 input1Slot->Connect(reshapeLayer->GetInputSlot(0));
284 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
285
286 input1Slot = &reshapeLayer->GetOutputSlot(0);
287
288 return input1Slot;
289}
290
291OutputId ParseOutputId(const std::string & name)
292{
293 unsigned int outputNum = 0;
294 size_t colonPos = name.find_last_of(":");
295 if (colonPos != std::string::npos)
296 {
297 int n = std::stoi(name.substr(colonPos+1));
298 if (n<0 || n>100)
299 {
telsoa01c577f2c2018-08-31 09:22:23 +0100300 throw ParseException(
301 boost::str(
302 boost::format(
303 "Output tensor id is out of range for %1% %2%")
304 % name
305 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100306 }
307 outputNum = static_cast<unsigned int>(n);
308 }
309 return OutputId(name.substr(0,colonPos),outputNum);
310}
311
// Throws ParseException unless FORMAT is one of the two data layouts the parser supports.
#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \
    if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Unsupported data format %1% passed for %2% node %3%. " \
                    "Only NHWC and NCHW supported %4%") \
                    % FORMAT \
                    % NODE_TYPE \
                    % NODE_DEF.name() \
                    % CHECK_LOCATION().AsString())); \
    }
325
// Throws ParseException unless PADDING is one of the two padding schemes the parser supports.
#define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
    if(PADDING != "SAME" && PADDING != "VALID" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Only 'SAME' and 'VALID' padding supported. Got %1% for %2% %3%") \
                    % PADDING \
                    % NODE_DEF.name() \
                    % CHECK_LOCATION().AsString())); \
    } \
337
surmeh01bceff2f2018-03-29 16:29:27 +0100338} // namespace
339
340const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
341 { "Const", &TfParser::ParseConst },
342 { "Add", &TfParser::ParseAdd },
Ferran Balaguerfbdad032018-12-28 18:15:24 +0000343 { "AddN", &TfParser::ParseAddN },
surmeh01bceff2f2018-03-29 16:29:27 +0100344 { "BiasAdd", &TfParser::ParseBiasAdd },
345 { "Identity", &TfParser::ParseIdentity },
346 { "Conv2D", &TfParser::ParseConv2D },
347 { "DepthwiseConv2dNative", &TfParser::ParseDepthwiseConv2D },
Conor Kennedyc2130a02018-12-05 11:05:54 +0000348 { "ExpandDims", &TfParser::ParseExpandDims },
surmeh01bceff2f2018-03-29 16:29:27 +0100349 { "FusedBatchNorm", &TfParser::ParseFusedBatchNorm },
FrancisMurtagh94412af2019-01-24 10:53:39 +0000350 { "Gather", &TfParser::ParseGather},
jimfly01a06bf312018-12-18 16:24:51 +0000351 { "Greater", &TfParser::ParseGreater},
surmeh01bceff2f2018-03-29 16:29:27 +0100352 { "ConcatV2", &TfParser::ParseConcat },
353 { "LRN", &TfParser::ParseLrn },
354 { "MatMul", &TfParser::ParseMatMul },
Ferran Balaguer51dd62f2019-01-11 19:29:18 +0000355 { "Mean", &TfParser::ParseMean },
surmeh01bceff2f2018-03-29 16:29:27 +0100356 { "Mul", &TfParser::ParseMul },
357 { "Placeholder", &TfParser::ParsePlaceholder },
saoste01bbd40612018-08-28 15:41:51 +0100358 { "RealDiv", &TfParser::ParseRealDiv },
surmeh01bceff2f2018-03-29 16:29:27 +0100359 { "Relu", &TfParser::ParseRelu },
360 { "Relu6", &TfParser::ParseRelu6 },
361 { "Reshape", &TfParser::ParseReshape },
362 { "ResizeBilinear", &TfParser::ParseResizeBilinear },
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +0000363 { "Rsqrt", &TfParser::ParseRsqrt },
surmeh01bceff2f2018-03-29 16:29:27 +0100364 { "Shape", &TfParser::ParseShape },
365 { "Squeeze", &TfParser::ParseSqueeze },
366 { "Sigmoid", &TfParser::ParseSigmoid },
367 { "Softmax", &TfParser::ParseSoftmax },
368 { "Softplus", &TfParser::ParseSoftplus },
Sadik Armagan2ad6cb42018-12-27 11:23:44 +0000369 { "Split", &TfParser::ParseSplit },
surmeh01bceff2f2018-03-29 16:29:27 +0100370 { "Tanh", &TfParser::ParseTanh },
371 { "MaxPool", &TfParser::ParseMaxPool },
372 { "AvgPool", &TfParser::ParseAvgPool },
telsoa01c577f2c2018-08-31 09:22:23 +0100373 { "Maximum", &TfParser::ParseMaximum },
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +0000374 { "Minimum", &TfParser::ParseMinimum },
jimfly0184c70e62018-12-19 13:14:46 +0000375 { "Equal", &TfParser::ParseEqual },
jimfly01f6ba7472018-12-04 10:09:52 +0000376 { "Pad", &TfParser::ParsePad },
narpra016f37f832018-12-21 18:30:00 +0000377 { "Sub", &TfParser::ParseSub }
378};
379
380const std::list<std::string> TfParser::m_ControlInputs = {
381 "Assert"
surmeh01bceff2f2018-03-29 16:29:27 +0100382};
383
384ITfParser* ITfParser::CreateRaw()
385{
386 return new TfParser();
387}
388
389ITfParserPtr ITfParser::Create()
390{
391 return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
392}
393
394void ITfParser::Destroy(ITfParser* parser)
395{
396 delete parser;
397}
398
/// Computes the front/back padding for one spatial dimension.
/// With samePadding == false both paddings are zero (VALID padding); otherwise the
/// TensorFlow SAME rule is applied: pad just enough that the output covers
/// ceil(inputSize / stride) positions, splitting any excess with the extra element
/// going to the back.
inline void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
                                 uint32_t filterSize, bool samePadding,
                                 uint32_t* paddingFront, uint32_t* paddingBack) {
    *paddingFront = 0;
    *paddingBack  = 0;

    if (!samePadding) {
        return;
    }

    const uint32_t outputSize = (inputSize + stride - 1) / stride;
    const uint32_t needed     = (outputSize - 1) * stride + filterSize;
    if (needed > inputSize) {
        const uint32_t total = needed - inputSize;
        *paddingFront = total / 2;
        *paddingBack  = total - *paddingFront;
    }
}
414
415void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
416 bool samePadding)
417{
418 CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
419}
420
421/// An Abstract base class which represents a single tensorflow operation (node)
422/// that has been (potentially partially) converted to Armnn.
423/// It may not yet have been fully converted into actual Armnn layers.
424class ParsedTfOperation
425{
426public:
427 ParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
428 : m_Parser(parser)
429 , m_Node(node)
430 {
431 }
432
433 virtual ~ParsedTfOperation() {};
434
435 const tensorflow::NodeDef& GetNode() const { return m_Node; }
436
437 /// Gets the ArmNN IOutputSlot corresponding to the given output index of the Tensorflow operation.
438 /// This may result in the creation of Armnn layers if this was deferred (e.g. see ParsedConstTfOperation).
439 virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) = 0;
440
441 /// If this operation is an Identity then this will follow return the 'parent' operation (recursively).
442 virtual ParsedTfOperation* ResolveIdentityOperations()
443 {
444 return this;
445 }
446
447protected:
448 TfParser* m_Parser;
449 const tensorflow::NodeDef& m_Node;
450};
451
452/// An ParsedTfOperation where the Armnn equivalent is a single layer,
453/// with output slots that correspond directly to the Tf node outputs.
454class SingleLayerParsedTfOperation : public ParsedTfOperation
455{
456public:
457 SingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node, IConnectableLayer* layer)
458 : ParsedTfOperation(parser, node)
459 , m_Layer(layer)
460 {
461 }
462
463 IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
464 {
465 BOOST_ASSERT(m_Layer);
telsoa01c577f2c2018-08-31 09:22:23 +0100466 // Assumes one-to-one mapping between Tf and armnn output slots.
surmeh01bceff2f2018-03-29 16:29:27 +0100467 unsigned int armnnOutputSlotIdx = tfOutputIndex;
468 if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
469 {
470 throw ParseException(
telsoa01c577f2c2018-08-31 09:22:23 +0100471 boost::str(
472 boost::format(
473 "The requested output slot #%1% "
474 "for %2% does not exist %3%")
475 % armnnOutputSlotIdx
476 % m_Layer->GetName()
477 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100478 }
479 return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
480 }
481
482protected:
483 IConnectableLayer* m_Layer;
484};
485
telsoa01c577f2c2018-08-31 09:22:23 +0100486/// A SingleLayerParsedTfOperation for deferred layer creation.
surmeh01bceff2f2018-03-29 16:29:27 +0100487class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
488{
489public:
490 DeferredSingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
491 : SingleLayerParsedTfOperation(parser, node, nullptr)
492 {
493 }
494
495 IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
496 {
497 if (!m_Layer)
498 {
499 CreateLayerDeferred();
500 }
501 return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
502 }
503
504private:
505 virtual void CreateLayerDeferred() = 0;
506};
507
508
509TfParser::TfParser()
510 : m_Network(nullptr, nullptr)
511{
512}
513
514
515const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
516{
517 if (nodeDef->op() != "Identity")
518 {
519 return nodeDef;
520 }
521
522 if (nodeDef->input_size() != 1)
523 {
telsoa01c577f2c2018-08-31 09:22:23 +0100524 throw ParseException(
525 boost::str(
526 boost::format(
527 "Identity node should have a single input! %1% has %2% inputs %3%")
528 % nodeDef->name()
529 % nodeDef->input_size()
530 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100531 }
532
533 auto it = m_NodesByName.find(nodeDef->input(0));
534 if (it != m_NodesByName.end())
535 {
536 const tensorflow::NodeDef* inputNode = it->second;
537 return ResolveIdentityNode(inputNode);
538 }
539 else
540 {
telsoa01c577f2c2018-08-31 09:22:23 +0100541 throw ParseException(
542 boost::str(
543 boost::format(
544 "Cannot find what the Identity node %1% is linked to! %2%")
545 % nodeDef->name()
546 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100547 }
548}
549
550std::vector<OutputOfConstNodeDef>
551TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
552{
553 std::vector<OutputOfConstNodeDef> ret;
554
surmeh013537c2c2018-05-18 16:31:43 +0100555 if (nodeDef.op() == "Const")
556 {
557 // For some reason const node can have "Control Inputs". We ignore them for now.
558 return ret;
559 }
560
surmeh01bceff2f2018-03-29 16:29:27 +0100561 ret.reserve(boost::numeric_cast<size_t>(nodeDef.input_size()));
562 for (int j = 0; j < nodeDef.input_size(); ++j)
563 {
564 OutputId outputId = ParseOutputId(nodeDef.input(j));
surmeh013537c2c2018-05-18 16:31:43 +0100565
566 if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
567 {
narpra016f37f832018-12-21 18:30:00 +0000568 // We currently allow Control Input from TensorFlow graph but we ignore them from ArmNN graph.
569 continue;
surmeh013537c2c2018-05-18 16:31:43 +0100570 }
571
surmeh01bceff2f2018-03-29 16:29:27 +0100572 auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
573 if (inputIt == m_NodesByName.end())
574 {
575 throw ParseException(
telsoa01c577f2c2018-08-31 09:22:23 +0100576 boost::str(
577 boost::format(
578 "Can't find node '%1%', which is listed as an input of '%2%' %3%")
579 % nodeDef.input(j)
580 % nodeDef.name()
581 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100582 }
583 ret.push_back(OutputOfConstNodeDef(inputIt->second,outputId.m_Index));
584 }
585
586 return ret;
587}
588
589std::vector<OutputOfParsedTfOperation>
590TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
591 std::size_t expectedNumInputs)
592{
telsoa01c577f2c2018-08-31 09:22:23 +0100593 // Fetches the tensorflow nodes connected as inputs and validate the size.
surmeh01bceff2f2018-03-29 16:29:27 +0100594 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
595 const std::size_t numInputs = nodes.size();
596 if (numInputs != expectedNumInputs)
597 {
telsoa01c577f2c2018-08-31 09:22:23 +0100598 throw ParseException(
599 boost::str(
600 boost::format(
601 "Unexpected number of inputs for node %1%. Expected %2%, found %3% %4%")
602 % nodeDef.name()
603 % expectedNumInputs
604 % numInputs
605 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100606 }
telsoa01c577f2c2018-08-31 09:22:23 +0100607 // Fetches the corresponding ParsedTfOperation operations
surmeh01bceff2f2018-03-29 16:29:27 +0100608 std::vector<OutputOfParsedTfOperation> result;
609 for (auto&& node : nodes)
610 {
611 auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
612 if (it == m_ParsedTfOperations.end())
613 {
telsoa01c577f2c2018-08-31 09:22:23 +0100614 throw ParseException(
615 boost::str(
616 boost::format(
617 "Node with name '%1%' has not been parsed %2%")
618 % node.m_IndexedValue->name()
619 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100620 }
621 ParsedTfOperation* parsedOp = it->second.get();
622 // Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
623 parsedOp = parsedOp->ResolveIdentityOperations();
624 result.push_back(OutputOfParsedTfOperation(parsedOp,node.m_Index));
625 }
626 return result;
627}
628
Ferran Balaguerfbdad032018-12-28 18:15:24 +0000629IConnectableLayer* TfParser::CreateAdditionLayer(
630 const tensorflow::NodeDef& nodeDef,
631 IOutputSlot* input0Slot,
632 IOutputSlot* input1Slot,
633 const std::string& layerName)
634{
635 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
636 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
637
638 const unsigned int input0Dim = input0Info.GetNumDimensions();
639 const unsigned int input1Dim = input1Info.GetNumDimensions();
640 if (input0Dim != input1Dim)
641 {
642 // broadcasting where input0 and input1 have different number of dimensions
643 // is only supported for 1D and 4D tensors pair
644 if (input0Dim == 1 && input1Dim == 4)
645 {
646 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
647 }
648 else if (input0Dim == 4 && input1Dim == 1)
649 {
650 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
651 }
652 else
653 {
654 throw ParseException(
655 boost::str(
656 boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
657 % layerName
658 % nodeDef.name()
659 % CHECK_LOCATION().AsString()));
660 }
661 }
662 IConnectableLayer* const layer = m_Network->AddAdditionLayer(layerName.c_str());
663
664 input0Slot->Connect(layer->GetInputSlot(0));
665 input1Slot->Connect(layer->GetInputSlot(1));
666
667 // Ensure the output tensor has the correct dimensions even if a broadcast has been done
668 TensorInfo outputInfo = input0Slot->GetTensorInfo();
669 std::vector<unsigned int> outputShape;
670
671 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
672 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
673
674 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
675 {
676 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
677 }
678
679 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
680 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
681
682 return layer;
683}
684
685IConnectableLayer* TfParser::CreateAdditionLayer(
686 const tensorflow::NodeDef& nodeDef,
687 IConnectableLayer* layerOne,
688 IConnectableLayer* layerTwo,
689 unsigned int numberOfAddition,
690 unsigned long numberOfLayersToConnect,
691 bool isOdd)
692{
693 IOutputSlot* input0Slot = &layerOne->GetOutputSlot(0);
694 IOutputSlot* input1Slot = &layerTwo->GetOutputSlot(0);
695 std::string layerName(nodeDef.name());
696 if (isOdd || numberOfLayersToConnect != 2)
697 {
698 // we are not connecting the final layer
699 layerName.append("_addN_").append(std::to_string(numberOfAddition));
700 }
701 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
702}
703
704IConnectableLayer* TfParser::CreateAdditionLayer(
705 const tensorflow::NodeDef& nodeDef,
706 const OutputOfParsedTfOperation& opOne,
707 const OutputOfParsedTfOperation& opTwo,
708 unsigned int numberOfAddition)
709{
710 IOutputSlot* input0Slot = &opOne.m_IndexedValue->ResolveArmnnOutputSlot(opOne.m_Index);
711 IOutputSlot* input1Slot = &opTwo.m_IndexedValue->ResolveArmnnOutputSlot(opTwo.m_Index);
712 std::string layerName(nodeDef.name());
713 layerName.append("_addN_").append(std::to_string(numberOfAddition));
714 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
715}
716
717IConnectableLayer* TfParser::CreateAdditionLayer(
718 const tensorflow::NodeDef& nodeDef,
719 const OutputOfParsedTfOperation& op,
720 IConnectableLayer* layer)
721{
722 IOutputSlot* input0Slot = &op.m_IndexedValue->ResolveArmnnOutputSlot(op.m_Index);
723 IOutputSlot* input1Slot = &layer->GetOutputSlot(0);
724 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, nodeDef.name());
725}
726
727ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
728{
729 uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
730 if (numberOfInputs < 2)
731 {
732 // should never happen
733 throw ParseException(
734 boost::str(
735 boost::format(
736 "AddN Node with name '%1%' has less than two (%2) inputs %3%")
737 % nodeDef.name()
738 % std::to_string(numberOfInputs)
739 % CHECK_LOCATION().AsString()));
740 }
741 else if (numberOfInputs == 2)
742 {
743 //this is the same as a simple Add operation
744 return AddAdditionLayer(nodeDef, false);
745 }
746 else
747 {
748 // build a binary tree of Add layers and return the final Add as the return from the function
749 // if we have an odd number of inputs then the final Add will consist of a layer connecting to an
750 // OutputOfParsedTfOperation, otherwise it will be two layers being added together
751 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numberOfInputs);
752 unsigned int numberOfAdditions = 0;
753 std::vector<IConnectableLayer*> layers;
754 // NOTE: at this point we will have a minimum of three inputs
755 for (unsigned int i = 0; i < numberOfInputs; ++i)
756 {
757 // every time i is odd we have two inputs to process.
758 bool onSecondItem = i % 2;
759 if (onSecondItem)
760 {
761 ++numberOfAdditions;
762 IConnectableLayer* newLayer = CreateAdditionLayer(
763 nodeDef, inputs[ i - 1], inputs[i], numberOfAdditions);
764 layers.push_back(newLayer);
765 }
766 }
767
768 std::vector<IConnectableLayer*> layersToConnect(layers);
769 unsigned long numberOfLayersToConnect = layersToConnect.size();
770 bool isOdd = numberOfInputs % 2;
771
772 while (numberOfLayersToConnect > 1)
773 {
774 layers.clear();
775 for (unsigned long i = 0; i < numberOfLayersToConnect; ++i) {
776 bool onSecondItem = i % 2;
777 if (onSecondItem) {
778 ++numberOfAdditions;
779 IConnectableLayer* newLayer = CreateAdditionLayer(
780 nodeDef,
781 layersToConnect[i - 1],
782 layersToConnect[i],
783 numberOfAdditions,
784 numberOfLayersToConnect,
785 isOdd);
786 layers.push_back(newLayer);
787 }
788 }
789 //OK... need to go again... maybe
790 layersToConnect = layers;
791 numberOfLayersToConnect = layersToConnect.size();
792 }
793 IConnectableLayer* finalLayer = layersToConnect[0];
794 // if we had an odd number of inputs we need to connect the final layer to the
795 // last OutputOfParsedTfOperation in order to create the last Add layer we will
796 // be handing back.
797 if (isOdd)
798 {
799 // connect the final layer to the last op
800 finalLayer = CreateAdditionLayer(nodeDef, inputs[numberOfInputs - 1], finalLayer);
801 }
802 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, finalLayer);
803 }
804}
805
surmeh01bceff2f2018-03-29 16:29:27 +0100806ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
807{
808 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
809
telsoa01c577f2c2018-08-31 09:22:23 +0100810 // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
811 // together as FullyConnected.
surmeh01bceff2f2018-03-29 16:29:27 +0100812 if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
813 HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
814 {
815 IConnectableLayer* layer =
816 AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
817 &nodeDef,nodeDef.name().c_str());
818 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
819 }
820 else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
821 inputs[1].m_IndexedValue->GetNode().op() == "MatMul")
822 {
823 IConnectableLayer* layer =
824 AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
825 &nodeDef,nodeDef.name().c_str());
826 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
827 }
828 else
829 {
telsoa01c577f2c2018-08-31 09:22:23 +0100830 // Otherwise it's just a regular addition.
surmeh01bceff2f2018-03-29 16:29:27 +0100831 return AddAdditionLayer(nodeDef);
832 }
833}
834
835ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
836{
837 return AddAdditionLayer(nodeDef, true);
838}
839
840/// An ParsedTfOperation which forwards to another (used for Identity nodes).
841class ParsedIdentityTfOperation : public ParsedTfOperation
842{
843public:
844 ParsedIdentityTfOperation(TfParser* parser, const tensorflow::NodeDef& node, ParsedTfOperation* representative)
845 : ParsedTfOperation(parser, node)
846 , m_Representative(representative)
847 {
848 }
849
850 virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
851 {
852 BOOST_ASSERT(m_Representative);
853 return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
854 }
855
856 virtual ParsedTfOperation* ResolveIdentityOperations() override
857 {
858 return m_Representative->ResolveIdentityOperations();
859 }
860
861private:
862 ParsedTfOperation* m_Representative;
863};
864
865ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
866{
867 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
868 // Any requests for the output slots of this node should be forwarded to the node connected as input.
869 return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
870}
871
/// An ParsedTfOperation for a Const node.
/// Creation of the armnn ConstLayer is deferred until it is actually needed, because Const nodes are mostly used
/// for weight inputs to MatMul/Conv2D nodes and in these cases armnn doesn't need a ConstLayer.
template <typename T>
class ParsedConstTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    /// Copies tensorInfo.GetNumElements() values of element type T out of
    /// tensorData; the operation owns its own copy of the data from then on.
    ParsedConstTfOperation(TfParser* parser, const tensorflow::NodeDef& node,
        const T* tensorData, const TensorInfo& tensorInfo)
        : DeferredSingleLayerParsedTfOperation(parser, node),
        m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
        m_TensorInfo(tensorInfo)
    {
        // The template element type must match the tensor's declared data type.
        BOOST_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
    }

    /// Creates the armnn ConstantLayer on first actual use (see class comment).
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
        m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
    }

    /// Copies the tensor data into outputTensorData (resized to fit) and
    /// returns a ConstTensor that points at that caller-owned storage.
    ConstTensor GetConstTensor(std::vector<T>& outputTensorData) const
    {
        outputTensorData.resize(m_TensorInfo.GetNumElements());

        memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.GetNumBytes());

        // Updates the result to point to the user provided storage.
        ConstTensor constTensor(m_TensorInfo, outputTensorData);
        return constTensor;
    }

    /// Read-only access to the internally held element data.
    const T* GetStorage() const
    {
        return m_Storage.data();
    }

    /// Shape and data type of the constant tensor.
    const TensorInfo& GetTensorInfo() const
    {
        return m_TensorInfo;
    }

private:
    ///< Manages the lifetime of the tensor data.
    std::vector<T> m_Storage;
    ///< Describes the layout of the tensor and points to the data in m_Storage.
    TensorInfo m_TensorInfo;
};
922
telsoa01c577f2c2018-08-31 09:22:23 +0100923DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
924 const tensorflow::NodeDef& nodeDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100925{
926 switch (tfDataType)
927 {
928 case tensorflow::DT_FLOAT:
929 return DataType::Float32;
930 break;
931 case tensorflow::DT_INT32:
932 return DataType::Signed32;
933 break;
934 default:
telsoa01c577f2c2018-08-31 09:22:23 +0100935 throw ParseException(
936 boost::str(
937 boost::format(
938 "Unknown DataType %1% for node %2% %3%")
939 % tensorflow::DataType_Name(tfDataType)
940 % nodeDef.name()
941 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100942 }
943}
944
945struct ParseTfTensorValueList
946{
947 template<typename DataType>
948 static void Parse(
949 const tensorflow::TensorProto& tfTensor,
950 unsigned int dstElements,
951 std::vector<int8_t>& outputData);
952
953 template <typename DataType>
954 static void ReadData(const void* srcData, unsigned int numSrcElements,
955 std::vector<int8_t>& dstData, unsigned int numDstElements)
956 {
telsoa01c577f2c2018-08-31 09:22:23 +0100957 // If there are no entries in the list, perform no action.
surmeh01bceff2f2018-03-29 16:29:27 +0100958 if (numSrcElements == 0)
959 {
960 return;
961 }
962
telsoa01c577f2c2018-08-31 09:22:23 +0100963 // If no size was provided, use the length of the value list.
surmeh01bceff2f2018-03-29 16:29:27 +0100964 if (numDstElements == 0)
965 {
966 numDstElements = numSrcElements;
967 }
968
telsoa01c577f2c2018-08-31 09:22:23 +0100969 // Allocates memory.
surmeh01bceff2f2018-03-29 16:29:27 +0100970 dstData.resize(std::max(numSrcElements, numDstElements) * sizeof(DataType));
971
972 const DataType* srcTensor = reinterpret_cast<const DataType*>(srcData);
973 DataType* dstTensor = reinterpret_cast<DataType*>(dstData.data());
974
telsoa01c577f2c2018-08-31 09:22:23 +0100975 // Copies the value list entries into the destination.
surmeh01bceff2f2018-03-29 16:29:27 +0100976 std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);
977
978 if (numDstElements > numSrcElements)
979 {
telsoa01c577f2c2018-08-31 09:22:23 +0100980 // Uses the last element in the list to fill the remaining entries.
surmeh01bceff2f2018-03-29 16:29:27 +0100981 std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
982 }
983 }
984
985};
986
987template <>
988void ParseTfTensorValueList::Parse<float>(const tensorflow::TensorProto& tfTensor,
989 unsigned int dstElements, std::vector<int8_t>& outputData)
990{
991 ReadData<float>(tfTensor.float_val().data(), static_cast<unsigned int>(tfTensor.float_val_size()),
992 outputData, dstElements);
993}
994
995template <>
996void ParseTfTensorValueList::Parse<int32_t>(const tensorflow::TensorProto& tfTensor,
997 unsigned int dstElements, std::vector<int8_t>& outputData)
998{
999 ReadData<int32_t>(tfTensor.int_val().data(), static_cast<unsigned int>(tfTensor.int_val_size()),
1000 outputData, dstElements);
1001}
1002
/// Factory that builds a heap-allocated OperatorType<DataType>; used with
/// InvokeParseFunction to pick the element type DataType at runtime.
template <template<typename> class OperatorType, typename T = int8_t>
struct MakeTfOperation
{
    /// Constructs OperatorType<DataType>(parser, node, args...).
    template<typename DataType, class... Args>
    inline static std::unique_ptr<OperatorType<DataType>> Parse(TfParser* parser, const tensorflow::NodeDef& node,
        Args&&... args)
    {
        return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
    }
};
1013
/// Specialization for ParsedConstTfOperation: the raw byte buffer is
/// reinterpreted as the concrete element type before construction.
template <>
struct MakeTfOperation<ParsedConstTfOperation>
{
    template<typename DataType, class... Args>
    inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(TfParser* parser,
        const tensorflow::NodeDef& node, const std::vector<int8_t>& tensorData, const TensorInfo& tensorInfo)
    {
        return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
            reinterpret_cast<const DataType*>(tensorData.data()), tensorInfo);
    }
};
1025
1026template <class FuncType>
1027struct InvokeParseFunction
1028{
1029 template<class ResType, class... Args>
1030 inline static ResType Result(DataType dataType, Args&&... args)
1031 {
1032 if (dataType == DataType::Float32)
1033 {
1034 return FuncType::template Parse<float>(std::forward<Args>(args)...);
1035 }
1036 else if (dataType == DataType::Signed32)
1037 {
1038 return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1039 }
1040
1041 return ResType();
1042 }
1043
1044 template<class... Args>
1045 inline static void Result(DataType dataType, Args&&... args)
1046 {
1047 if (dataType == DataType::Float32)
1048 {
1049 FuncType::template Parse<float>(std::forward<Args>(args)...);
1050 }
1051 else if (dataType == DataType::Signed32)
1052 {
1053 FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1054 }
1055 }
1056};
1057
/// Parses a Const node into a deferred ParsedConstTfOperation<T>, with T
/// chosen at runtime from the node's "dtype" attribute (float or int32 only).
ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    BOOST_ASSERT(nodeDef.op() == "Const");

    // A Const node must carry its payload in the "value" attribute.
    if (nodeDef.attr().count("value") == 0)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Value not found for Const node - %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
    const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
    const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "dtype");

    const auto GetDimensionSize = [](auto& d) { return d.size(); };

    // Collect the dimensions declared in the proto's shape (may be empty).
    std::vector<unsigned int> dimensionSizes;
    std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
        std::back_inserter(dimensionSizes), GetDimensionSize);

    // Calculates number of elements (stays 0 when no shape was declared).
    const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
    unsigned int numElements = 0U;

    if (!dimensionSizes.empty())
    {
        numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
                                      1U, std::multiplies<unsigned int>());
    }

    // Raw bytes of the tensor payload, whichever attribute supplies it.
    std::vector<int8_t> tensorData;

    // Get tensor data from the list of values attribute.
    if (tfTensor.tensor_content().empty())
    {
        InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);

        // If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
        // tensor of the provided number of elements.
        if (numElements == 0)
        {
            const unsigned int tfNumElements =
                static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
            dimensionSizes.push_back(tfNumElements);
        }
    }
    // Gets tensor data from tensor content attribute.
    else
    {
        tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());

        // Checks if a tensor shape is defined for the tensor content.
        if (numElements == 0)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "No tensor shape found for Const node - %1% %2%")
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
    }

    // Const node requires at least a list of values or a content attribute.
    if (tensorData.empty())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "No tensor data found for Const node - %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
                                dimensionSizes.data(),
                                dataType);

    // If we have a list of values, then the length of the list must be
    // less than or equal to the number of elements implied by the shape argument.
    if (tensorData.size() > tensorInfo.GetNumBytes())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Number of elements (%1%) should be less than or equal "
                    "to the number of elements implied by the shape argument (%2%) for Const node - %3% %4%")
                    % (tensorData.size() / GetDataTypeSize(dataType))
                    % tensorInfo.GetNumElements()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    // Layer creation is deferred: the const may only ever be consumed as weights.
    return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
        dataType, this, nodeDef, tensorData, tensorInfo);
}
1158
1159template<typename Type>
1160bool TfParser::HasParsedConstTensor(const std::string & nodeName) const
1161{
1162 auto it = m_ParsedTfOperations.find(nodeName);
jimfly01f6ba7472018-12-04 10:09:52 +00001163 if (it == m_ParsedTfOperations.end())
surmeh01bceff2f2018-03-29 16:29:27 +01001164 {
1165 return false;
1166 }
jimfly01f6ba7472018-12-04 10:09:52 +00001167 return dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) != nullptr;
1168}
1169
/// Returns true when parsedTfOpPtr is a parsed constant whose elements are of the requested Type.
template<typename Type>
bool TfParser::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const
{
    return dynamic_cast<ParsedConstTfOperation<Type>*>(parsedTfOpPtr) != nullptr;
}
1175
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00001176unsigned int TfParser::GetConstInputIndex(const std::vector<OutputOfParsedTfOperation>& inputs)
1177{
1178 for (unsigned int i = 0; i < inputs.size(); i++)
1179 {
1180 if (HasParsedConstTensor<int32_t>(inputs[i].m_IndexedValue->GetNode().name()))
1181 {
1182 return i;
1183 }
1184 }
1185 throw ParseException(
1186 boost::str(
1187 boost::format(
1188 "ArmNN only supports operators with constant axis. %1%")
1189 % CHECK_LOCATION().AsString()));
1190
1191}
1192
/// Parses a TensorFlow Conv2D node into an armnn Convolution2dLayer.
/// Weights must come from an already-parsed float Const node; no bias is fused
/// here (BiasAdd is handled separately). Supports NHWC and NCHW data formats.
ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    // Input 1 is the filter and must be a constant to be swizzled at parse time.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Convolution layers with constant weights for %1%, input %2% %3%")
                    % nodeDef.name()
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    // Read the dilations, if present - only [1,1,1,1] (the default) is supported.
    std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
    if (!dilations.empty())
    {
        for (auto dilation : dilations)
        {
            if (dilation != 1u)
            {
                throw ParseException(
                    boost::str(
                        boost::format(
                            "ArmNN only supports Convolution layers with dilations [1,1,1,1] for %1% %2%")
                            % nodeDef.name()
                            % CHECK_LOCATION().AsString()));
            }
        }
    }

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    // The TF strides attribute is ordered according to the node's data format.
    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // Tensorflow weights are [H, W, In, Out].
    // ArmNN weights have to be [Out, H, W, In] when the data layout is NHWC,
    // and [Out, In, H, W] when the data layout is NCHW.
    PermutationVector permutationVector =
        dataLayout == DataLayout::NHWC ?
            std::initializer_list<unsigned int>{ 1, 2, 3, 0 } : // NHWC: [H, W, In, Out] -> [Out, H, W, In]
            std::initializer_list<unsigned int>{ 2, 3, 1, 0 };  // NCHW: [H, W, In, Out] -> [Out, In, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    uint32_t weightHeight = weightTensor.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t weightWidth = weightTensor.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    // SAME keeps ceil(input/stride) spatial dims; VALID only uses fully covered windows.
    if (paddingString == "SAME")
    {
        padding = true;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                 static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                 static_cast<float>(desc.m_StrideX)));
    }

    // Output channel count comes from the filter's Out dimension (index 0 after swizzling).
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      weightTensor.GetShape()[0] },
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
        default:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      weightTensor.GetShape()[0],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc,
                                                                weightTensor,
                                                                EmptyOptional(),
                                                                nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1333
/// Parses a TensorFlow DepthwiseConv2dNative node into an armnn
/// DepthwiseConvolution2dLayer. Weights must come from an already-parsed float
/// Const node; no bias is fused here. Supports NHWC and NCHW data formats.
ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
                                                    const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    // Input 1 is the filter and must be a constant to be swizzled at parse time.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Depthwise Convolution layer with constant weights. "
                    "Non const input found %1% for node %2% %3%")
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    // The TF strides attribute is ordered according to the node's data format.
    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // Tensorflow weights come in the format [H, W, I, M].
    // ArmNN weights have to be [M, I, H, W].
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    // After swizzling the layout is [M, I, H, W], so H and W sit at indices 2 and 3.
    uint32_t weightHeight = weightTensor.GetShape()[2];
    uint32_t weightWidth = weightTensor.GetShape()[3];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    // SAME keeps ceil(input/stride) spatial dims; VALID only uses fully covered windows.
    if (paddingString == "SAME")
    {
        padding = true;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                 static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                 static_cast<float>(desc.m_StrideX)));
    }

    // Output channel count is channel multiplier (M) times input channels (I).
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      weightTensor.GetShape()[0] * weightTensor.GetShape()[1]},
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
        default:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                                         weightTensor,
                                                                         EmptyOptional(),
                                                                         nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1454
Conor Kennedyc2130a02018-12-05 11:05:54 +00001455TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
1456{
1457 BOOST_ASSERT(nodeDef.op() == "ExpandDims");
1458
1459 if (inputTensorInfo.GetNumDimensions() > 4) {
1460 throw ParseException(
1461 boost::str(
1462 boost::format(
1463 "Unsupported number of dimensions: %1% for input shape for ExpandDims %2% %3%")
1464 % inputTensorInfo.GetNumDimensions()
1465 % nodeDef.name()
1466 % CHECK_LOCATION().AsString()));
1467 }
1468
1469 std::int32_t expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");
1470
1471 std::int32_t inputDimSize = boost::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
1472 std::vector<uint32_t> outputDims;
1473
1474 // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
1475 if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1476 {
1477 // add current input shape to outputDims
1478 for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
1479 auto currentDimension = inputTensorInfo.GetShape()[i];
1480 outputDims.push_back(currentDimension);
1481 }
1482
1483 // insert a dimension of 1 at index 'expandDim' of inputs shape
1484 if (expandDim >= 0)
1485 {
1486 auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1487 outputDims.insert(getPosition, 1);
1488 }
1489
1490 // if negative number for 'expandDim' then count backwards from the last element
1491 // and insert 1 dimension at index 'expandDim'
1492 if (expandDim < 0)
1493 {
Matteo Martincighd7cceeb2018-12-06 09:06:29 +00001494 int outputDimSize = boost::numeric_cast<int>(outputDims.size() + 1);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001495 auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1496 outputDims.insert(getPosition, 1);
1497 }
1498 }
1499 else
1500 {
1501 throw InvalidArgumentException(
1502 boost::str(
1503 boost::format(
1504 "Cannot expand dimension %1% in input tensor with %2% dimension %3%")
1505 % expandDim
1506 % inputDimSize
1507 % CHECK_LOCATION().AsString()));
1508 }
1509
1510 if (outputDims.size() > 4)
1511 {
1512 throw ParseException(
1513 boost::str(
1514 boost::format(
1515 "Unsupported number of dimensions: %1% for output shape for ExpandDims %2% %3%")
1516 % outputDims.size()
1517 % nodeDef.name()
1518 % CHECK_LOCATION().AsString()));
1519 }
1520
1521 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1522 outputDims.data());
1523
1524 TensorInfo outTensorInfo = inputTensorInfo;
1525 outTensorInfo.SetShape(outShape);
1526
1527 return outTensorInfo;
1528}
1529
1530ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1531{
1532 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1533
1534 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1535 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1536
1537 TensorInfo outputInfo;
1538 outputInfo = OutputShapeOfExpandDims(nodeDef, inputTensorInfo);
1539
1540 ReshapeDescriptor reshapeDesc;
1541 reshapeDesc.m_TargetShape = outputInfo.GetShape();
1542 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1543 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1544 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1545
1546 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1547}
1548
ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
                                                   const tensorflow::GraphDef& graphDef)
{
    // Parses a TensorFlow FusedBatchNorm node into an ArmNN BatchNormalization layer.
    // Expected inputs: [0] data, [1] scale, [2] offset, [3] mean, [4] variance.
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);

    // Scale, offset, mean and variance must all be compile-time constants, because
    // ArmNN bakes them into the layer as ConstTensors at parse time.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant scale. "
                    "Input %1%. Node %2% %3%")
                % inputs[1].m_IndexedValue->GetNode().name()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* scaleNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant offset. "
                    "Input %1%. Node %2% %3%")
                % inputs[2].m_IndexedValue->GetNode().name()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* offsetNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);

    if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant mean. "
                    "Input %1%. Node %2% %3%")
                % inputs[3].m_IndexedValue->GetNode().name()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* meanNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);

    if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports FusedBatchNormalization layers with constant variance. "
                    "Input %1%. Node %2% %3%")
                % inputs[4].m_IndexedValue->GetNode().name()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* varianceNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);

    // TensorFlow defaults data_format to NHWC when the attribute is absent.
    const std::string dataFormat = ReadOptionalNodeStringAttribute(nodeDef, "data_format", "NHWC");
    CHECK_DATA_FORMAT(nodeDef, dataFormat, "FusedBatchNorm");

    // The descriptor only has the epsilon attribute.
    BatchNormalizationDescriptor desc;
    desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
    desc.m_DataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
    // locally until the layer is added. Each vector backs the ConstTensor that
    // references it, so they must stay alive until AddBatchNormalizationLayer returns.
    std::vector<float> scaleTensorData;
    ConstTensor scaleTensor = scaleNode->GetConstTensor(scaleTensorData);

    std::vector<float> offsetTensorData;
    ConstTensor offsetTensor = offsetNode->GetConstTensor(offsetTensorData);

    std::vector<float> meanTensorData;
    ConstTensor meanTensor = meanNode->GetConstTensor(meanTensorData);

    std::vector<float> varianceTensorData;
    ConstTensor varianceTensor = varianceNode->GetConstTensor(varianceTensorData);

    IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
                                                                     meanTensor,
                                                                     varianceTensor,
                                                                     offsetTensor,
                                                                     scaleTensor,
                                                                     nodeDef.name().c_str());

    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);

    // Batch normalization is element-wise, so the output tensor info equals the input's.
    layer->GetOutputSlot(0).SetTensorInfo(inputSlot.GetTensorInfo());
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1646
// Checks whether a "Mul" node together with the other Maximum operand forms a
// LeakyRelu pattern: max(mul(alpha, x), x). On success fills in 'desc' with a
// LeakyReLu activation (m_A = alpha) and sets *outputOfLeakyRelu to the output
// slot of the 'x' operand; returns false (leaving the out-params untouched
// except possibly desc) when the pattern does not match.
bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
                                           size_t alphaLayerIndex,
                                           const OutputOfParsedTfOperation& otherOp,
                                           armnn::IOutputSlot** outputOfLeakyRelu,
                                           armnn::ActivationDescriptor & desc)
{
    const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();

    // Verifying all these assumptions hold:
    //
    // 1, the mulNodeDef is an elementwise multiplication node "Mul"
    // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
    // 3, the inputLayerIndex selects a layer which has the same name as otherNodeDef
    //

    if (mulNodeDef.op() == "Mul")
    {
        // The non-alpha input of the Mul must be the same tensor as the other Maximum operand.
        size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
        std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);

        // Sanity checks: alphaLayerIndex/otherLayerIndex must be a permutation of {0, 1}.
        BOOST_ASSERT(inputs.size() == 2);
        BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
        BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
        BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));

        if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
        {
            // Alpha must be a (scalar) constant for the fusion to be valid.
            if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
            {
                ParsedConstTfOperation<float>* alpha =
                    boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(
                        inputs[alphaLayerIndex].m_IndexedValue);

                std::vector<float> const_data;
                ConstTensor const_tensor = alpha->GetConstTensor(const_data);

                // Only a single-element alpha maps onto the LeakyReLu activation's m_A.
                if (const_data.size() == 1)
                {
                    desc.m_Function = ActivationFunction::LeakyReLu;
                    desc.m_A = const_data[0];

                    *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
                    return true;
                }
            }
        }
    }
    return false;
}
1696
telsoa01c577f2c2018-08-31 09:22:23 +01001697ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
1698 const tensorflow::GraphDef& graphDef)
1699{
1700 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
Sadik Armagan975c09a2018-12-04 10:02:08 +00001701 if (inputs.size() != 2)
1702 {
1703 throw ParseException(
1704 boost::str(
1705 boost::format(
1706 "Maximum expects two inputs!. Got %1% for Node %2% %3%")
1707 % inputs.size()
1708 % nodeDef.name()
1709 % CHECK_LOCATION().AsString()));
1710 }
1711
telsoa01c577f2c2018-08-31 09:22:23 +01001712 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1713 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1714 IOutputSlot* outputOfLeakyRelu = nullptr;
1715
1716 ActivationDescriptor desc;
1717
Sadik Armagan975c09a2018-12-04 10:02:08 +00001718 // A max node may be part of a LeakyRelu, with one input as a multiplication with a scalar constant,
1719 // i.e. one of the four possible scenarios:
1720 // 1, max(mul(a, x), x)
1721 // 2, max(mul(x, a), x)
1722 // 3, max(x, mul(a, x))
1723 // 4, max(x, mul(x, a))
1724 // These are handled by an activation layer.
telsoa01c577f2c2018-08-31 09:22:23 +01001725
1726 if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1727 IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1728 IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1729 IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1730 {
1731 BOOST_ASSERT(outputOfLeakyRelu != nullptr);
1732
1733 IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
1734 outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
1735 layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
1736 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1737 }
1738 else
1739 {
Sadik Armagan975c09a2018-12-04 10:02:08 +00001740 // Anything else is just a maximum layer.
1741
1742 return AddMaximumLayer(nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001743 }
1744}
1745
// Resolves the two input slots of a binary element-wise op, inserting a
// broadcast Reshape when one operand is 1D and the other 4D. Throws for any
// other rank mismatch. Returns {input0Slot, input1Slot} (possibly replaced by
// the reshape layer's output slot).
std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> TfParser::ProcessElementwiseInputSlots(
    const tensorflow::NodeDef& nodeDef, const std::string& layerName)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
    const unsigned int input0Dim = input0Slot->GetTensorInfo().GetNumDimensions();
    const unsigned int input1Dim = input1Slot->GetTensorInfo().GetNumDimensions();

    if (input0Dim != input1Dim)
    {
        // broadcasting where input0 and input1 have different number of dimensions
        // is only supported for 1D and 4D tensors pair
        if (input0Dim == 1 && input1Dim == 4)
        {
            // The 'true' argument requests NHWC layout for the broadcast reshape.
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
        }
        else if (input0Dim == 4 && input1Dim == 1)
        {
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
        }
        else
        {
            throw ParseException(
                boost::str(
                    boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
                    % layerName
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }
    }
    return {input0Slot, input1Slot};
}
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001780
kevmay012b4d88e2019-01-24 14:05:09 +00001781ParsedTfOperationPtr TfParser::ProcessComparisonLayer(
1782 IOutputSlot* input0Slot,
1783 IOutputSlot* input1Slot,
1784 IConnectableLayer* const layer,
1785 const tensorflow::NodeDef& nodeDef)
1786{
1787 input0Slot->Connect(layer->GetInputSlot(0));
1788 input1Slot->Connect(layer->GetInputSlot(1));
1789
1790 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1791 outputInfo.SetDataType(DataType::Boolean);
1792 std::vector<unsigned int> outputShape;
1793
1794 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1795 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1796
1797 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1798 {
1799 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1800 }
1801
1802 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1803 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1804
1805 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1806}
1807
jimfly0184c70e62018-12-19 13:14:46 +00001808ParsedTfOperationPtr TfParser::ProcessElementwiseLayer(
1809 IOutputSlot* input0Slot,
1810 IOutputSlot* input1Slot,
1811 IConnectableLayer* const layer,
1812 const tensorflow::NodeDef& nodeDef)
1813{
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001814 input0Slot->Connect(layer->GetInputSlot(0));
1815 input1Slot->Connect(layer->GetInputSlot(1));
1816
1817 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1818 std::vector<unsigned int> outputShape;
1819
1820 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1821 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1822
1823 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1824 {
1825 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1826 }
1827
1828 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1829 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1830
1831 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1832}
1833
// Parses a TensorFlow Gather node into an ArmNN Gather layer.
// Inputs: [0] params (the tensor to gather from), [1] indices.
ParsedTfOperationPtr TfParser::ParseGather(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);

    // Infer shape of output tensor
    // The output shape is indices.shape ++ params.shape[1:], i.e. the gather
    // replaces params' leading dimension with the full indices shape
    // (gather along axis 0 only — NOTE(review): the TF 'axis' input/attribute
    // is not consulted here; confirm callers only produce axis-0 gathers).
    unsigned int paramsDim = params.GetTensorInfo().GetNumDimensions();
    unsigned int indicesDim = indices.GetTensorInfo().GetNumDimensions();
    unsigned int outputDim = paramsDim - 1 + indicesDim;

    std::vector<unsigned int> dimSizes;

    // First the full indices shape...
    for (unsigned int i = 0; i < indicesDim; ++i)
    {
        dimSizes.push_back(indices.GetTensorInfo().GetShape()[i]);
    }
    // ...then params' trailing dimensions (dimension 0 is consumed by the gather).
    for (unsigned int i = 1; i < paramsDim; ++i)
    {
        dimSizes.push_back(params.GetTensorInfo().GetShape()[i]);
    }

    const TensorShape& inferredShape = TensorShape(outputDim, dimSizes.data());

    // The output keeps params' data type.
    const TensorInfo inferredOutputInfo(inferredShape, params.GetTensorInfo().GetDataType());

    IConnectableLayer* const layer = m_Network->AddGatherLayer(nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(inferredOutputInfo);

    params.Connect(layer->GetInputSlot(0));
    indices.Connect(layer->GetInputSlot(1));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1869
jimfly01a06bf312018-12-18 16:24:51 +00001870ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
1871 const tensorflow::GraphDef& graphDef)
1872{
1873 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
1874 IOutputSlot* input0Slot = inputLayers.first;
1875 IOutputSlot* input1Slot = inputLayers.second;
1876
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001877 ComparisonDescriptor descriptor(ComparisonOperation::Greater);
1878 IConnectableLayer* const layer = m_Network->AddComparisonLayer(descriptor, nodeDef.name().c_str());
jimfly01a06bf312018-12-18 16:24:51 +00001879
kevmay012b4d88e2019-01-24 14:05:09 +00001880 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
jimfly01a06bf312018-12-18 16:24:51 +00001881}
1882
jimfly0184c70e62018-12-19 13:14:46 +00001883ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
1884 const tensorflow::GraphDef& graphDef)
1885{
1886 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
1887 IOutputSlot* input0Slot = inputLayers.first;
1888 IOutputSlot* input1Slot = inputLayers.second;
1889
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001890 ComparisonDescriptor descriptor(ComparisonOperation::Equal);
1891 IConnectableLayer* const layer = m_Network->AddComparisonLayer(descriptor, nodeDef.name().c_str());
jimfly0184c70e62018-12-19 13:14:46 +00001892
kevmay012b4d88e2019-01-24 14:05:09 +00001893 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
jimfly0184c70e62018-12-19 13:14:46 +00001894}
1895
1896ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
1897 const tensorflow::GraphDef& graphDef)
1898{
1899 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
1900 IOutputSlot* input0Slot = inputLayers.first;
1901 IOutputSlot* input1Slot = inputLayers.second;
1902
1903 IConnectableLayer* const layer = m_Network->AddMinimumLayer(nodeDef.name().c_str());
1904
1905 return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
1906}
1907
// Parses a TensorFlow Sub node into an ArmNN Subtraction layer, broadcasting
// a 1D operand against the other operand's shape where necessary.
ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);

    // NOTE: these references observe the *original* (pre-reshape) infos; the
    // rank checks below deliberately use them even after the slots are swapped
    // for the broadcast reshape outputs.
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    // A 1D operand is reshaped so it can broadcast against the other input.
    if (input0Info.GetNumDimensions() == 1)
    {
        const bool isNHWC = true;
        input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
    }

    if (input1Info.GetNumDimensions() == 1)
    {
        const bool isNHWC = true;
        input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
    }

    IConnectableLayer* const layer = m_Network->AddSubtractionLayer(nodeDef.name().c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // The output shape follows the higher-rank operand: if input0 was the 1D
    // one, the result takes input1's info, otherwise input0's.
    if (input0Info.GetNumDimensions() == 1)
    {
        layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
    }
    else
    {
        layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1946
jimfly01f6ba7472018-12-04 10:09:52 +00001947unsigned int CheckPaddingTensor(const ConstTensor& paddingTensor,
1948 const TensorInfo& inputTensorInfo,
1949 const std::string& nodeName)
1950{
1951 unsigned int rank = paddingTensor.GetShape()[0];
1952 unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
1953 if (rank != expectedRank)
1954 {
1955 throw ParseException(
1956 boost::str(
1957 boost::format(
1958 "Expected the padding tensor to be of rank %1 not %2 on Node %3 %4.")
1959 % expectedRank
1960 % rank
1961 % nodeName
1962 % CHECK_LOCATION().AsString()));
1963 }
1964 unsigned int second = paddingTensor.GetShape()[1];
1965 if (second != 2)
1966 {
1967 throw ParseException(
1968 boost::str(
1969 boost::format(
1970 "Expected the padding tensor to be of dimensions [%1, 2] not [%1, %2] on Node %3 %4.")
1971 % rank
1972 % second
1973 % nodeName
1974 % CHECK_LOCATION().AsString()));
1975 }
1976 return rank;
1977}
1978
1979TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
1980 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
1981{
1982 unsigned int numDims = inputTensorInfo.GetNumDimensions();
1983 std::vector<unsigned int> outDims;
1984 for (unsigned int i = 0; i < numDims; ++i)
1985 {
1986 unsigned int dimSize = inputTensorInfo.GetShape()[i];
1987 const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
1988 dimSize += dimPadding.first;
1989 dimSize += dimPadding.second;
1990 outDims.push_back(dimSize);
1991 }
1992 TensorInfo paddedTensorInfo = inputTensorInfo;
1993 unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
1994 paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
1995 return paddedTensorInfo;
1996}
1997
1998ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
1999 const tensorflow::GraphDef& graphDef)
2000{
2001 // input consists of:
2002 // input[0] the tensor which will be padded
2003 // input[1] the tensor holding the padding values
2004 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2005 IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2006 TensorInfo inputTensorInfo = previousLayerOutputSlot.GetTensorInfo();
2007 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
2008 {
2009 throw ParseException(
2010 boost::str(
2011 boost::format(
2012 "ArmNN only supports Pad with constant padding. "
2013 "Input %1%. Node %2% %3%")
2014 % inputs[1].m_IndexedValue->GetNode().name()
2015 % nodeDef.name()
2016 % CHECK_LOCATION().AsString()));
2017
2018 }
2019 ParsedConstTfOperation<int32_t>* paddingTensorOp =
2020 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2021
2022 std::vector<int32_t> paddingTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002023 ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(paddingTensorData);
jimfly01f6ba7472018-12-04 10:09:52 +00002024 // paddings is an integer tensor with shape [n, 2], where n is the rank of tensor
2025 // and should match the rank of the input tensor that is being padded.
2026 // For each dimension D of input, paddings[D, 0] indicates how many values to add
2027 // before the contents of tensor in that dimension, and paddings[D, 1] indicates how
2028 // many values to add after the contents of tensor in that dimension
2029 // This needs to be translated into a padList for ACL
2030 std::vector<std::pair<unsigned int, unsigned int>> padList;
2031 unsigned int rank = CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
2032 for (unsigned int i = 0; i < rank; ++i)
2033 {
2034 std::pair<unsigned int, unsigned int> paddingForDim;
2035 for (unsigned int j = 0; j < 2; j++)
2036 {
2037 unsigned int index = (i * 2) + j;
2038 int paddingAmount = paddingTensorData[index];
2039 // make sure we can cast to an unsigned value
2040 if (paddingAmount < 0)
2041 {
2042 throw ParseException(
2043 boost::str(
2044 boost::format(
2045 "Negative amount %1 specified at [%2, %3] of padding tensor on Node %4 %5.")
2046 % paddingAmount
2047 % i
2048 % j
2049 % nodeDef.name()
2050 % CHECK_LOCATION().AsString()));
2051 }
2052 if (j == 0)
2053 {
2054 paddingForDim.first = static_cast<unsigned int>(paddingAmount);
2055 }
2056 else
2057 {
2058 paddingForDim.second = static_cast<unsigned int>(paddingAmount);
2059 }
2060 }
2061 padList.push_back(paddingForDim);
2062 }
2063 PadDescriptor padDescriptor(padList);
2064 IConnectableLayer* layer = m_Network->AddPadLayer(padDescriptor, nodeDef.name().c_str());
2065 previousLayerOutputSlot.Connect(layer->GetInputSlot(0));
2066 // Use the padding to calculate the new output tensor shape
2067 TensorInfo outputTensorInfo = CalculatePaddedOutputTensorInfo(inputTensorInfo, padList);
2068 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2069 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2070}
2071
// Parses a TensorFlow ConcatV2 node into an ArmNN Concat layer.
// The axis is supplied as a constant int32 input; all data inputs must be 4D.
ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);

    // In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
    unsigned int numInputs = static_cast<unsigned int>(nodes.size());

    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);

    // Constant tensor index
    unsigned int index = GetConstInputIndex(inputs);
    // Get the axis tensor data
    ParsedConstTfOperation<int32_t>* shapeNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);

    std::vector<int32_t> axisTensorData;
    shapeNode->GetConstTensor(axisTensorData);

    // This concatDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
    const unsigned int concatDim = static_cast<unsigned int>(axisTensorData[0]);

    // Armnn supports concatenation along the channel dimension for data formats NHWC and NCHW.
    if (concatDim == 0 || concatDim == 2)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Dimension %1% for concatenation is not supported by Armnn. "
                    "Node %2% %3%")
                % concatDim
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    // Only 4D input tensors are supported; one view per data input (the axis
    // input is excluded, hence numInputs - 1 views).
    const unsigned int supportedNumDims = 4;
    unsigned int numConcatViews = numInputs - 1;
    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatViews), supportedNumDims);
    concatDescriptor.SetConcatAxis(concatDim);
    TensorShape mergeDims(supportedNumDims);
    unsigned int mergeDim = 0;
    for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
    {
        // Need to double check whether it should be
        IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
        TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

        // Double check dimensions of the tensors
        if (inputTensorInfo.GetNumDimensions() != supportedNumDims)
        {
            throw armnn::ParseException(
                boost::str(
                    boost::format(
                        "The number of dimensions: %1% for input tensors of the "
                        "concatenation op should be %2% %3%")
                    % inputTensorInfo.GetNumDimensions()
                    % supportedNumDims
                    % CHECK_LOCATION().AsString()));
        }

        // Copy the input tensor shape to mergeDimSizes and initialize the view origin coordinates for the current input
        mergeDims = inputTensorInfo.GetShape();
        unsigned int* viewOrigin = const_cast<unsigned int*>(concatDescriptor.GetViewOrigin(viewIndex));
        std::fill(viewOrigin, viewOrigin + supportedNumDims, 0);

        // Update the view origin coordinates and the merge dimension value:
        // each view starts where the previous one ended along the concat axis.
        concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
        mergeDim += mergeDims[concatDim];
    }

    // Update the output shape: the concat axis is the sum of the input extents.
    mergeDims[concatDim] = mergeDim;
    armnn::IConnectableLayer *layer = m_Network->AddConcatLayer(concatDescriptor, nodeDef.name().c_str());

    layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(mergeDims, DataType::Float32));

    // Connect every data input to its view slot.
    for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
    {
        IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
        inputSlot.Connect(layer->GetInputSlot(viewIndex));
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2156
2157ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
2158 const tensorflow::GraphDef& graphDef)
2159{
telsoa01c577f2c2018-08-31 09:22:23 +01002160 // Note: the Shape layer is handled in a special way, because:
2161 // 1. ARMNN doesn't support int32 tensors which it outputs.
2162 // 2. ARMNN works with statically shaped tensors which are known at parse time.
surmeh01bceff2f2018-03-29 16:29:27 +01002163 // 3. because of 1. and 2. we treat the output of Shape as a temporary const int32
telsoa01c577f2c2018-08-31 09:22:23 +01002164 // tensor which may be used as an input to other ops, most likely a Reshape.
surmeh01bceff2f2018-03-29 16:29:27 +01002165
2166 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
2167 if (tfDataType != tensorflow::DT_INT32)
2168 {
telsoa01c577f2c2018-08-31 09:22:23 +01002169 throw ParseException(
2170 boost::str(
2171 boost::format(
2172 "Armnn only supports DT_INT32 as out_type. Got %1% for Node %2% %3%")
2173 % tensorflow::DataType_Name(tfDataType)
2174 % nodeDef.name()
2175 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002176 }
2177
2178 const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2179 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2180 const TensorInfo& prevLayerTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2181 unsigned int prevLayerDimensions = prevLayerTensorInfo.GetNumDimensions();
2182
2183 std::vector<int32_t> shapeTensorData;
2184 shapeTensorData.reserve(prevLayerDimensions);
2185
2186 for (unsigned int i=0; i<prevLayerDimensions; ++i)
2187 {
2188 shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.GetShape()[i]));
2189 }
2190
2191 TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
2192
2193 return std::make_unique<ParsedConstTfOperation<int32_t>>(this,
2194 nodeDef,
2195 &shapeTensorData[0],
2196 shapeTensorInfo);
2197}
2198
2199ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
2200 const tensorflow::GraphDef& graphDef)
2201{
2202 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2203 ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
2204
2205 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2206 {
telsoa01c577f2c2018-08-31 09:22:23 +01002207 throw ParseException(
2208 boost::str(
2209 boost::format(
2210 "ArmNN only supports Reshape layers with constant shapes. "
2211 "Input %1% Node %2% %3%")
2212 % inputs[1].m_IndexedValue->GetNode().name()
2213 % nodeDef.name()
2214 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002215 }
2216 ParsedConstTfOperation<int32_t>* shapeNode =
2217 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2218
2219 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
2220 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2221
2222 std::vector<int32_t> shapeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002223 ConstTensor shapeTensor = shapeNode->GetConstTensor(shapeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002224 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
2225
2226 TensorShape targetShape = outputTensorInfo.GetShape();
2227 ReshapeDescriptor reshapeDesc;
2228 reshapeDesc.m_TargetShape = targetShape;
2229
2230 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2231 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2232 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2233
2234 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2235}
2236
2237ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
2238 const tensorflow::GraphDef& graphDef)
2239{
2240 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2241
2242 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2243 {
telsoa01c577f2c2018-08-31 09:22:23 +01002244 throw ParseException(
2245 boost::str(
2246 boost::format(
2247 "ArmNN only supports ResizeBilinear layers with constant sizes. "
2248 "Input %1%. Node %2% %3%")
2249 % inputs[1].m_IndexedValue->GetNode().name()
2250 % nodeDef.name()
2251 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002252 }
2253 ParsedConstTfOperation<int32_t>* sizeNode =
2254 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2255
telsoa01c577f2c2018-08-31 09:22:23 +01002256 // Checks the align_corners attribute is not set.
surmeh01bceff2f2018-03-29 16:29:27 +01002257 if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
2258 {
telsoa01c577f2c2018-08-31 09:22:23 +01002259 throw ParseException(
2260 boost::str(
2261 boost::format(
2262 "ArmNN only supports ResizeBilinear layers with align_corners set to false. "
2263 "Node %1% %2%")
2264 % nodeDef.name()
2265 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002266 }
2267
telsoa01c577f2c2018-08-31 09:22:23 +01002268 // Data for the parsed tensor args (size) must be stored locally.
surmeh01bceff2f2018-03-29 16:29:27 +01002269 std::vector<int32_t> sizeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002270 ConstTensor sizeTensor = sizeNode->GetConstTensor(sizeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002271
telsoa01c577f2c2018-08-31 09:22:23 +01002272 // The descriptor only has target height and width attributes, which we get from the size tensor.
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002273 ResizeDescriptor desc;
2274 desc.m_Method = armnn::ResizeMethod::Bilinear;
surmeh01bceff2f2018-03-29 16:29:27 +01002275 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002276 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2277 desc.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002278
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002279 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, nodeDef.name().c_str());
surmeh01bceff2f2018-03-29 16:29:27 +01002280
2281 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2282 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
telsoa01c577f2c2018-08-31 09:22:23 +01002283 // The input shape is always in BHWC format, this will be swizzled below; for now,
2284 // get the batch and channels to make up the ArmNN output shape with the target size.
surmeh01bceff2f2018-03-29 16:29:27 +01002285 unsigned int outBatch = inputTensorInfo.GetShape()[0];
2286 unsigned int outChannels = inputTensorInfo.GetShape()[3];
2287 unsigned int outHeight = desc.m_TargetHeight;
2288 unsigned int outWidth = desc.m_TargetWidth;
jimfly018a121502018-12-06 16:19:52 +00002289 TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
telsoa01c577f2c2018-08-31 09:22:23 +01002290 // The output DataType is always Float32, regardless of the input DataType.
surmeh01bceff2f2018-03-29 16:29:27 +01002291 const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
2292 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2293
jimfly018a121502018-12-06 16:19:52 +00002294 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01002295
2296 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2297}
2298
2299TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
2300{
2301 BOOST_ASSERT(nodeDef.op() == "Squeeze");
2302 tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
2303
2304 DataType type;
2305 if (tfDataType == tensorflow::DT_FLOAT)
2306 {
2307 type = DataType::Float32;
2308 }
2309 else if (tfDataType == tensorflow::DT_INT32)
2310 {
2311 type = DataType::Signed32;
2312 }
2313 else
2314 {
telsoa01c577f2c2018-08-31 09:22:23 +01002315 throw ParseException(
2316 boost::str(
2317 boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
2318 % tensorflow::DataType_Name(tfDataType)
2319 % nodeDef.name()
2320 % CHECK_LOCATION().AsString()));
2321 }
2322
2323
2324 if (inputTensorInfo.GetNumDimensions() > 4)
2325 {
2326 throw ParseException(
2327 boost::str(
2328 boost::format(
2329 "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
2330 % inputTensorInfo.GetNumDimensions()
2331 % nodeDef.name()
2332 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002333 }
2334
2335 std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
telsoa01c577f2c2018-08-31 09:22:23 +01002336 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2337
surmeh01bceff2f2018-03-29 16:29:27 +01002338 if (squeezeDims.empty())
2339 {
telsoa01c577f2c2018-08-31 09:22:23 +01002340 squeezeDims.assign(dimensionSequence,
2341 dimensionSequence+inputTensorInfo.GetNumDimensions());
surmeh01bceff2f2018-03-29 16:29:27 +01002342 }
2343
2344 std::vector<uint32_t> outputDims;
2345 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2346 {
telsoa01c577f2c2018-08-31 09:22:23 +01002347 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2348 auto currentDimension = inputTensorInfo.GetShape()[i];
2349 if (skipSqueeze || currentDimension != 1)
surmeh01bceff2f2018-03-29 16:29:27 +01002350 {
telsoa01c577f2c2018-08-31 09:22:23 +01002351 outputDims.push_back(currentDimension);
surmeh01bceff2f2018-03-29 16:29:27 +01002352 }
2353 }
2354
2355 if (outputDims.size() > 4)
2356 {
telsoa01c577f2c2018-08-31 09:22:23 +01002357 throw ParseException(
2358 boost::str(
2359 boost::format(
2360 "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
2361 % outputDims.size()
2362 % nodeDef.name()
2363 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002364 }
2365
telsoa01c577f2c2018-08-31 09:22:23 +01002366 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2367 outputDims.data());
2368
2369 TensorInfo outTensorInfo = inputTensorInfo;
2370 outTensorInfo.SetShape(outShape);
2371 outTensorInfo.SetDataType(type);
surmeh01bceff2f2018-03-29 16:29:27 +01002372
2373 return outTensorInfo;
2374}
2375
2376ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2377{
2378 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2379
2380 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2381 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2382
2383 TensorInfo outputInfo;
2384 outputInfo = OutputShapeOfSqueeze(nodeDef, inputTensorInfo);
2385
2386 ReshapeDescriptor reshapeDesc;
2387 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2388 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2389 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2390 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2391
2392 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2393}
2394
2395ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2396{
2397 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2398
2399 NormalizationDescriptor normalizationDescriptor;
2400 normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2401 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
2402 normalizationDescriptor.m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef, "alpha");
2403 normalizationDescriptor.m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef, "beta");
2404 normalizationDescriptor.m_K = ReadMandatoryNodeFloatAttribute(nodeDef, "bias");
2405 normalizationDescriptor.m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef, "depth_radius");
ruoyan018174f362018-12-04 18:24:08 +00002406 normalizationDescriptor.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002407
2408 // The window size must be an odd value. For a window size of (2 * n + 1), TensorFlow defines depth_radius = n.
2409 normalizationDescriptor.m_NormSize = normalizationDescriptor.m_NormSize * 2 + 1;
2410
2411 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002412 IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor,
2413 nodeDef.name().c_str());
ruoyan018174f362018-12-04 18:24:08 +00002414 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2415 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
surmeh01bceff2f2018-03-29 16:29:27 +01002416
2417 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2418}
2419
2420/// An ParsedTfOperation for a MatMul node.
telsoa01c577f2c2018-08-31 09:22:23 +01002421/// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because
2422/// MatMul nodes are often used for the first part of a biased FullyConnected (MatMul followed
2423/// by Add) and in these cases armnn doesn't need a separate layer for the MatMul.
2424///
surmeh01bceff2f2018-03-29 16:29:27 +01002425class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
2426{
2427public:
2428 ParsedMatMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
2429 : DeferredSingleLayerParsedTfOperation(parser, node)
2430 {
2431 }
2432
2433 void CreateLayerDeferred() override
2434 {
2435 BOOST_ASSERT(m_Layer == nullptr);
2436 m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
2437 }
2438};
2439
2440ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2441{
telsoa01c577f2c2018-08-31 09:22:23 +01002442 // Defers the creation of the layer (see ParsedMatMulTfOperation).
surmeh01bceff2f2018-03-29 16:29:27 +01002443 return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
2444}
2445
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002446ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2447{
2448 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2449 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2450 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2451
2452 if (inputs.size() != 2)
2453 {
2454 throw ParseException(
2455 boost::str(boost::format("Mean expects two inputs!. Got %1% for Node %2% %3%")
2456 % inputs.size()
2457 % nodeDef.name()
2458 % CHECK_LOCATION().AsString()));
2459 }
2460
2461 bool keepDims = ReadMandatoryNodeBoolAttribute(nodeDef, "keep_dims");
2462
2463 ParsedConstTfOperation<int32_t>* axisNode =
2464 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2465
2466 const TensorInfo& axisTensorInfo = axisNode->GetTensorInfo();
2467
2468 ConstTensor axisTensor(axisTensorInfo, axisNode->GetStorage());
2469 const int* axisData = static_cast<const int*>(axisTensor.GetMemoryArea());
2470
2471 TensorInfo outputTensorInfo;
2472 MeanDescriptor meanDescriptor;
2473 meanDescriptor.m_KeepDims = keepDims;
2474
2475 // Negative axis values are supported so that the process requires
2476 // to convert them into the corresponding positive ones.
2477 // Duplicate values are also removed.
2478 std::vector<int> rawAxisVector(axisData, axisData + axisTensorInfo.GetNumElements());
2479 std::set<unsigned int> positiveAxisSet;
2480 int rank = static_cast<int>(inputTensorInfo.GetNumDimensions());
2481
2482 std::transform(rawAxisVector.begin(), rawAxisVector.end(),
2483 std::inserter(positiveAxisSet, positiveAxisSet.begin()),
2484 [rank](int i) -> unsigned int { return static_cast<unsigned int>((i + rank) % rank); });
2485
2486 CalculateReducedOutputTensoInfo(inputTensorInfo, axisTensorInfo, positiveAxisSet, keepDims, outputTensorInfo);
2487
2488 if (inputTensorInfo.GetNumDimensions() > positiveAxisSet.size())
2489 {
2490 meanDescriptor.m_Axis.assign(positiveAxisSet.begin(), positiveAxisSet.end());
2491 }
2492
2493 IConnectableLayer* layer = m_Network->AddMeanLayer(meanDescriptor, nodeDef.name().c_str());
2494 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2495 inputSlot.Connect(layer->GetInputSlot(0));
2496
2497 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2498}
2499
telsoa01c577f2c2018-08-31 09:22:23 +01002500/// An ParsedTfOperation for a Mul node.
2501/// Creation of the armnn Mul layer is deferred until it is actually needed, because Mul nodes
2502/// are also used for the first part of a leaky relu activation function (Mul followed by Maximum)
2503/// and in these cases armnn doesn't need a separate layer for the Mul.
2504///
2505class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
2506{
2507public:
2508 ParsedMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
2509 : DeferredSingleLayerParsedTfOperation(parser, node)
2510 {
2511 }
2512
2513 void CreateLayerDeferred() override
2514 {
2515 BOOST_ASSERT(m_Layer == nullptr);
2516 m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
2517 }
2518};
2519
surmeh01bceff2f2018-03-29 16:29:27 +01002520ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2521{
2522 boost::ignore_unused(graphDef);
2523
telsoa01c577f2c2018-08-31 09:22:23 +01002524 return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002525}
2526
2527ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
2528 const tensorflow::GraphDef& graphDef)
2529{
2530 boost::ignore_unused(graphDef);
2531
2532 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
2533
2534 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
2535
2536 auto it = m_InputShapes.find(nodeDef.name());
2537 if (it == m_InputShapes.end())
2538 {
telsoa01c577f2c2018-08-31 09:22:23 +01002539 throw ParseException(
2540 boost::str(
2541 boost::format(
2542 "Missing input shape for Placeholder '%1%' %2%")
2543 % nodeDef.name()
2544 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002545 }
2546 TensorInfo tensorInfo(it->second, DataType::Float32);
2547
2548 IConnectableLayer* const layer = m_Network->AddInputLayer(layerId, nodeDef.name().c_str());
2549
2550 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2551
2552 TrackInputBinding(layer, layerId, tensorInfo);
2553
2554 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2555}
2556
saoste01bbd40612018-08-28 15:41:51 +01002557ParsedTfOperationPtr TfParser::ParseRealDiv(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2558{
2559 boost::ignore_unused(graphDef);
2560 return AddRealDivLayer(nodeDef);
2561}
2562
surmeh01bceff2f2018-03-29 16:29:27 +01002563ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
2564 const tensorflow::GraphDef& graphDef)
2565{
2566 boost::ignore_unused(graphDef);
2567
2568 ActivationDescriptor activationDesc;
2569 activationDesc.m_Function = ActivationFunction::ReLu;
2570 return AddActivationLayer(nodeDef, activationDesc);
2571}
2572
2573ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
2574 const tensorflow::GraphDef& graphDef)
2575{
2576 boost::ignore_unused(graphDef);
2577
2578 ActivationDescriptor activationDesc;
2579 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2580 activationDesc.m_A = 6.0f;
2581 activationDesc.m_B = 0.0f;
2582
2583 return AddActivationLayer(nodeDef, activationDesc);
2584}
2585
2586ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
2587 const tensorflow::GraphDef& graphDef)
2588{
2589 boost::ignore_unused(graphDef);
2590
2591 ActivationDescriptor activationDesc;
2592 activationDesc.m_Function = ActivationFunction::Sigmoid;
2593
2594 return AddActivationLayer(nodeDef, activationDesc);
2595}
2596
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +00002597ParsedTfOperationPtr TfParser::ParseRsqrt(const tensorflow::NodeDef &nodeDef,
2598 const tensorflow::GraphDef &graphDef)
2599{
2600 boost::ignore_unused(graphDef);
2601
2602 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2603
2604 IConnectableLayer* const layer = m_Network->AddRsqrtLayer(nodeDef.name().c_str());
2605
2606 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2607 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2608 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2609
2610 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2611}
2612
surmeh01bceff2f2018-03-29 16:29:27 +01002613ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
2614 const tensorflow::GraphDef& graphDef)
2615{
2616 boost::ignore_unused(graphDef);
2617
2618 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2619
2620 SoftmaxDescriptor softmaxDescriptor;
2621 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(softmaxDescriptor, nodeDef.name().c_str());
2622
2623 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2624 prevLayerSlot.Connect(layer->GetInputSlot(0));
2625 layer->GetOutputSlot(0).SetTensorInfo(prevLayerSlot.GetTensorInfo());
2626
2627 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2628}
2629
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002630ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef,
2631 const tensorflow::GraphDef& graphDef)
2632{
2633 boost::ignore_unused(graphDef);
2634
2635 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2636 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2637 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2638
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002639 // Constant tensor index
2640 unsigned int index = GetConstInputIndex(inputs);
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002641 // Get the axis tensor data
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002642 ParsedConstTfOperation<int32_t>* shapeNode =
2643 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
2644
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002645 std::vector<int32_t> axisTensorData;
2646 shapeNode->GetConstTensor(axisTensorData);
2647
2648 // This splitDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
2649 const unsigned int splitDim = static_cast<unsigned int>(axisTensorData[0]);
2650
2651 // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
2652 if (splitDim == 0 || splitDim == 2)
2653 {
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002654 throw armnn::ParseException(
2655 boost::str(
2656 boost::format(
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002657 "Dimension %1% for split is not supported by Armnn. "
2658 "Node %2% %3%")
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002659 % splitDim
2660 % nodeDef.name()
2661 % CHECK_LOCATION().AsString()));
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002662 }
2663
Saoirse Stewart315258e2019-02-28 11:32:41 +00002664 // As Armnn only supports splitter outputs of the same shape, therefore num_split will be limited to an integer.
2665 uint32_t num_split = ReadMandatoryNodeUint32Attribute(nodeDef, "num_split");
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002666
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002667 IOutputSlot& inputSlot = inputs[1 - index].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1 - index].m_Index);
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002668 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2669
Matthew Jacksondba634f2019-08-15 15:14:18 +01002670 const unsigned int supportedNumDims = 4;
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002671 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2672
Matthew Jacksondba634f2019-08-15 15:14:18 +01002673 if (inputDimSize != supportedNumDims)
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002674 {
2675 throw armnn::ParseException(
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002676 boost::str(
2677 boost::format(
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002678 "The number of dimensions: %1% for input tensors of the "
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002679 "split op should be %2% %3%")
2680 % inputTensorInfo.GetNumDimensions()
Matthew Jacksondba634f2019-08-15 15:14:18 +01002681 % supportedNumDims
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002682 % CHECK_LOCATION().AsString()));
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002683 }
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002684
2685 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2686
2687 // Add current input shape to splitterDimSizes
2688 for (unsigned int i = 0; i < inputDimSize; ++i)
2689 {
2690 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2691 }
2692
2693 if (splitterDimSizes[splitDim] % num_split != 0)
2694 {
2695 throw ParseException("Number of splits must evenly divide the dimension");
2696 }
2697 splitterDimSizes[splitDim] /= num_split;
2698
2699 SplitterDescriptor splitDesc(num_split);
2700 for (unsigned int g = 0; g < num_split; ++g)
2701 {
2702 // Set the size of the views.
2703 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2704 {
2705 splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
2706 }
2707 splitDesc.SetViewOriginCoord(g, splitDim, splitterDimSizes[splitDim] * g);
2708 }
2709
2710 IConnectableLayer *layer = m_Network->AddSplitterLayer(splitDesc, nodeDef.name().c_str());
2711
2712 inputSlot.Connect(layer->GetInputSlot(0));
2713
2714 TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
2715 splitterDimSizes.data());
2716
2717 for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
2718 {
2719 layer->GetOutputSlot(i).SetTensorInfo(armnn::TensorInfo(outShape, inputTensorInfo.GetDataType()));
2720 }
2721
2722 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2723}
2724
surmeh01bceff2f2018-03-29 16:29:27 +01002725ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
2726 const tensorflow::GraphDef& graphDef)
2727{
2728 boost::ignore_unused(graphDef);
2729
2730 ActivationDescriptor activationDesc;
2731 activationDesc.m_Function = ActivationFunction::SoftReLu;
2732
2733 return AddActivationLayer(nodeDef, activationDesc);
2734}
2735
2736ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2737{
2738 boost::ignore_unused(graphDef);
2739
2740 ActivationDescriptor activationDesc;
2741 activationDesc.m_Function = ActivationFunction::TanH;
2742 activationDesc.m_A = 1.0f;
2743 activationDesc.m_B = 1.0f;
2744
2745 return AddActivationLayer(nodeDef, activationDesc);
2746}
2747
2748ParsedTfOperationPtr TfParser::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
2749 ActivationDescriptor& activationDesc)
2750{
2751 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2752
2753 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, nodeDef.name().c_str());
2754
2755 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2756 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2757 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2758 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2759}
2760
2761ParsedTfOperationPtr TfParser::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
2762 const tensorflow::GraphDef& graphDef)
2763{
2764 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
2765}
2766
2767ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
2768 const tensorflow::GraphDef& graphDef)
2769{
2770 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
2771}
2772
2773ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
2774 const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
2775{
2776 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2777 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2778 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2779
2780 if (inputs.size() != 1)
2781 {
telsoa01c577f2c2018-08-31 09:22:23 +01002782 throw ParseException(
2783 boost::str(
2784 boost::format(
2785 "2D Pooling expects one input!. Got %1% for Node %2% %3%")
2786 % inputs.size()
2787 % nodeDef.name()
2788 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002789 }
2790
2791 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
2792 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
2793 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
2794 std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef, "ksize"); // size of pool windows
2795
2796 Pooling2dDescriptor pooling2dDescriptor;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002797 pooling2dDescriptor.m_PoolType = pooltype;
2798 pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
surmeh01bceff2f2018-03-29 16:29:27 +01002799 pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;
2800
telsoa01c577f2c2018-08-31 09:22:23 +01002801 CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
FrancisMurtaghf005e312018-12-06 15:26:04 +00002802 DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
2803 pooling2dDescriptor.m_DataLayout = dataLayout;
2804 DataLayoutIndexed dataLayoutIndexed(dataLayout);
telsoa01c577f2c2018-08-31 09:22:23 +01002805
FrancisMurtaghf005e312018-12-06 15:26:04 +00002806 pooling2dDescriptor.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
2807 pooling2dDescriptor.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
2808 pooling2dDescriptor.m_PoolWidth = ksize[dataLayoutIndexed.GetWidthIndex()];
2809 pooling2dDescriptor.m_PoolHeight = ksize[dataLayoutIndexed.GetHeightIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01002810
FrancisMurtaghf005e312018-12-06 15:26:04 +00002811 uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
2812 uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01002813
2814 bool padding = false;
2815 TensorInfo outputInfo;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002816 unsigned int outputHeight = 0;
2817 unsigned int outputWidth = 0;
telsoa01c577f2c2018-08-31 09:22:23 +01002818
2819 CHECK_PADDING_TYPE(nodeDef, paddingString);
2820
surmeh01bceff2f2018-03-29 16:29:27 +01002821 if (paddingString == "SAME")
2822 {
2823 padding = true;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002824
2825 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
2826 static_cast<float>(pooling2dDescriptor.m_StrideY)));
2827 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
2828 static_cast<float>(pooling2dDescriptor.m_StrideX)));
surmeh01bceff2f2018-03-29 16:29:27 +01002829 }
2830 else if (paddingString == "VALID")
2831 {
2832 padding = false;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002833
2834 outputHeight = static_cast<uint32_t>(ceil(
2835 static_cast<float>(inputHeight - pooling2dDescriptor.m_PoolHeight + 1) /
2836 static_cast<float>(pooling2dDescriptor.m_StrideY)));
2837 outputWidth = static_cast<uint32_t>(ceil(
2838 static_cast<float>(inputWidth - pooling2dDescriptor.m_PoolWidth + 1) /
2839 static_cast<float>(pooling2dDescriptor.m_StrideX)));
2840 }
2841
2842 switch (dataLayout)
2843 {
2844 case DataLayout::NHWC:
2845 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
2846 outputHeight,
2847 outputWidth,
2848 inputTensorInfo.GetShape()[3] },
2849 DataType::Float32);
2850 break;
2851 case DataLayout::NCHW:
2852 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
2853 inputTensorInfo.GetShape()[1],
2854 outputHeight,
2855 outputWidth },
2856 DataType::Float32);
2857 break;
surmeh01bceff2f2018-03-29 16:29:27 +01002858 }
surmeh01bceff2f2018-03-29 16:29:27 +01002859
2860 CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX,
FrancisMurtaghf005e312018-12-06 15:26:04 +00002861 pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01002862 CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY,
FrancisMurtaghf005e312018-12-06 15:26:04 +00002863 pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01002864
2865
2866 IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
2867 if (layer == nullptr)
2868 {
telsoa01c577f2c2018-08-31 09:22:23 +01002869 throw ParseException(
2870 boost::str(
2871 boost::format(
2872 "Failed to add pooling2d layer for %1% %2%")
2873 % nodeDef.name()
2874 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002875 }
2876
2877 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2878
FrancisMurtaghf005e312018-12-06 15:26:04 +00002879 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01002880
2881 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2882}
2883
// Creates an ArmNN Addition layer for a TensorFlow Add or BiasAdd node.
// For BiasAdd the bias (input 1) must be a 1D vector and is reshaped for
// broadcasting according to the node's data_format; for plain Add either
// 1D input is reshaped for broadcasting (NHWC assumed).
ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);

    // NOTE: these infos are captured *before* any broadcast reshape below,
    // so the dimension checks later in this function see the ORIGINAL input
    // shapes, while the slot pointers may be rebound to reshape-layer outputs.
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    if (isBiasAdd)
    {
        // BiasAdd takes bias as a 1D tensor. We need to add a reshape layer to create a 4D tensor
        // with the same data in the correct dimension for broadcast in addition.
        if(input1Info.GetNumDimensions() != 1)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Unsupported bias for BiasAdd. It should be a 1D vector. "
                        "Got %1% dimensions for input %2%. Node %3% %4%")
                    % input1Info.GetNumDimensions()
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }

        const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");

        CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
        // Reshapes the bias so it broadcasts against the data input.
        input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
    }
    else
    {
        // Plain Add: whichever input is 1D gets reshaped to broadcast
        // against the other (NHWC layout assumed).
        if (input0Info.GetNumDimensions() == 1)
        {
            const bool isNHWC = true;
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
        }

        if (input1Info.GetNumDimensions() == 1)
        {
            const bool isNHWC = true;
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
        }
    }

    IConnectableLayer* const layer = m_Network->AddAdditionLayer(nodeDef.name().c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    if (input0Info.GetNumDimensions() == input1Info.GetNumDimensions())
    {
        // Equal-rank inputs: the broadcast output shape is the
        // per-dimension maximum of the two original input shapes.
        const TensorShape& input0Shape = input0Info.GetShape();
        const TensorShape& input1Shape = input1Info.GetShape();

        std::vector<unsigned int> outputShape;
        outputShape.reserve(input0Shape.GetNumDimensions());
        TensorInfo outputInfo(input0Info);

        for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
        {
            outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
        }

        outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));

        layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    }
    else if (input0Info.GetNumDimensions() == 1 && isBiasAdd == false)
    {
        // Input 0 was the 1D (reshaped) one: output takes input 1's info.
        layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
    }
    else
    {
        layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2965
saoste01bbd40612018-08-28 15:41:51 +01002966ParsedTfOperationPtr TfParser::AddRealDivLayer(const tensorflow::NodeDef& nodeDef)
2967{
2968 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2969
2970 IConnectableLayer* const layer = m_Network->AddDivisionLayer(nodeDef.name().c_str());
2971 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2972 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2973
2974 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2975 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2976
2977
2978 if (input0NumDims < input1NumDims)
2979 {
2980 const bool isNHWC = true;
2981 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
2982 }
2983 if (input1NumDims < input0NumDims)
2984 {
2985 const bool isNHWC = true;
2986 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
2987 }
2988
2989 input0Slot->Connect(layer->GetInputSlot(0));
2990 input1Slot->Connect(layer->GetInputSlot(1));
2991
2992 if (input0NumDims < input1NumDims)
2993 {
2994 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2995 }
2996 else
2997 {
2998 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2999
3000 }
3001 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3002}
3003
Sadik Armagan975c09a2018-12-04 10:02:08 +00003004ParsedTfOperationPtr TfParser::AddMaximumLayer(const tensorflow::NodeDef& nodeDef)
3005{
3006 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3007
3008 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3009 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3010
3011 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3012 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3013
3014 if (input0NumDims < input1NumDims)
3015 {
3016 const bool isNHWC = true;
3017 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3018 }
3019 if (input1NumDims < input0NumDims)
3020 {
3021 const bool isNHWC = true;
3022 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3023 }
3024
3025 IConnectableLayer* const layer = m_Network->AddMaximumLayer(nodeDef.name().c_str());
3026
3027 input0Slot->Connect(layer->GetInputSlot(0));
3028 input1Slot->Connect(layer->GetInputSlot(1));
3029
3030 TensorInfo outputInfo = input0Slot->GetTensorInfo();
3031 std::vector<unsigned int> outputShape;
3032
3033 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
3034 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
3035
3036 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
3037 {
3038 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3039 }
3040
3041 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
3042 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3043
3044 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3045}
3046
telsoa01c577f2c2018-08-31 09:22:23 +01003047IConnectableLayer* TfParser::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
3048{
3049 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3050
3051 IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
3052 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3053 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3054
3055 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3056 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3057
3058 if (input0NumDims < input1NumDims)
3059 {
3060 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003061 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01003062 }
3063 if (input1NumDims < input0NumDims)
3064 {
3065 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003066 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01003067 }
3068
3069 input0Slot->Connect(layer->GetInputSlot(0));
3070 input1Slot->Connect(layer->GetInputSlot(1));
3071
3072 if (input0NumDims < input1NumDims)
3073 {
3074 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3075 }
3076 else
3077 {
3078 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3079 }
3080 return layer;
3081}
3082
// Creates an ArmNN FullyConnected layer from a TensorFlow MatMul node,
// optionally fused with a following Add/BiasAdd node supplying a constant
// bias. Both the weights and (when present) the bias must be constant
// tensors; otherwise a ParseException is thrown.
IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
    const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
{
    // Finds bias const (if applicable).
    ParsedConstTfOperation<float>* biasNode = nullptr;
    if (addNodeDef != nullptr)
    {
        std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
        // Finds our inputs. The constant bias may be either operand of the Add node.
        if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
        {
            biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
        }
        else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
        {
            biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
        }
        else
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "ArmNN only supports fully connected layers with constant bias. "
                        "Inputs %1% and %2%. AddNode %3%. MatMulNode %4% %5%")
                    % addInputs[0].m_IndexedValue->GetNode().name()
                    % addInputs[1].m_IndexedValue->GetNode().name()
                    % addNodeDef->name()
                    % matMulNodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }
    }

    // Finds matmul inputs. One operand must be the constant weights; the
    // other becomes the layer's data input.
    ParsedConstTfOperation<float>* weightNode = nullptr;
    ParsedTfOperation* inputNode = nullptr;
    unsigned int inputIdx = 0;
    std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
    if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
    {
        weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
        inputNode = mulInputs[1].m_IndexedValue;
        inputIdx = mulInputs[1].m_Index;
    }
    else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
    {
        weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
        inputNode = mulInputs[0].m_IndexedValue;
        inputIdx = mulInputs[0].m_Index;
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports fully connected layers with constant weights. "
                    "Inputs %1% and %2%. MatMulNode %3% %4%")
                % mulInputs[0].m_IndexedValue->GetNode().name()
                % mulInputs[1].m_IndexedValue->GetNode().name()
                % matMulNodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    std::vector<float> weightTensorData;
    // Handles weight. weightTensorData provides backing storage and must
    // outlive the ConstTensor view.
    ConstTensor weights = weightNode->GetConstTensor(weightTensorData);

    FullyConnectedDescriptor desc;
    desc.m_BiasEnabled = addNodeDef != nullptr;

    IConnectableLayer* layer = nullptr;
    Optional<ConstTensor> optionalBiases;
    std::vector<float> biasTensorData;
    // Makes the layer.
    if (addNodeDef != nullptr)
    {
        ConstTensor biases = biasNode->GetConstTensor(biasTensorData);

        // The bias length must match the weights' second (output) dimension.
        if (weights.GetShape()[1] != biases.GetShape()[0])
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Shape of matmul weights and bias do not match. "
                        "AddNode %1%. MatMulNode %2% %3%")
                    % addNodeDef->name()
                    % matMulNodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }

        optionalBiases = Optional<ConstTensor>(biases);
    }
    layer = m_Network->AddFullyConnectedLayer(desc, weights, optionalBiases, armnnLayerName);

    BOOST_ASSERT(layer != nullptr);

    inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
    unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];

    // Handles output. Output shape is [batches, weights output dimension];
    // only Float32 is produced here.
    TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    return layer;
}
3186
3187void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
3188{
telsoa01c577f2c2018-08-31 09:22:23 +01003189 // Gets the type of the node (assume float).
surmeh01bceff2f2018-03-29 16:29:27 +01003190 tensorflow::DataType type = tensorflow::DT_FLOAT;
3191 if (nodeDef.attr().count("T") != 0)
3192 {
3193 auto attr = nodeDef.attr().at("T");
3194 type = attr.type();
3195 }
3196 else if (nodeDef.attr().count("dtype") != 0)
3197 {
3198 auto attr = nodeDef.attr().at("dtype");
3199 type = attr.type();
3200 }
3201
Ferran Balaguerc602f292019-02-08 17:09:55 +00003202 if ((type != tensorflow::DT_FLOAT && type != tensorflow::DT_INT32) && nodeDef.op() != "Const")
surmeh01bceff2f2018-03-29 16:29:27 +01003203 {
telsoa01c577f2c2018-08-31 09:22:23 +01003204 throw ParseException(
3205 boost::str(
3206 boost::format(
Ferran Balaguerc602f292019-02-08 17:09:55 +00003207 "Currently only FLOAT and INT32 are supported for tensorflow nodes (apart from Const). "
telsoa01c577f2c2018-08-31 09:22:23 +01003208 "Got %1% for Node %2% %3%")
3209 % tensorflow::DataType_Name(type)
3210 % nodeDef.name()
3211 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003212 }
3213
3214 const std::string& operation = nodeDef.op();
narpra016f37f832018-12-21 18:30:00 +00003215 auto itControlInput = std::find(m_ControlInputs.begin(), m_ControlInputs.end(), operation);
3216 if (itControlInput != m_ControlInputs.end())
3217 {
3218 // We currently allow Control Input from TensorFlow graph but we ignore them from ArmNN graph.
3219 return;
3220 }
surmeh01bceff2f2018-03-29 16:29:27 +01003221 auto it = ms_OperationNameToParsingFunctions.find(operation);
3222 if (it != ms_OperationNameToParsingFunctions.end())
3223 {
3224 auto func = it->second;
3225 ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
3226 ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
3227
telsoa01c577f2c2018-08-31 09:22:23 +01003228 // Stores the parsed operation so that dependent layers can connect to it.
surmeh01bceff2f2018-03-29 16:29:27 +01003229 auto it = m_ParsedTfOperations.find(nodeDef.name());
3230 if (it != m_ParsedTfOperations.end())
3231 {
3232 throw ParseException(boost::str(boost::format("Name %1% used by more than one node") % nodeDef.name()));
3233 }
3234 m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
3235
telsoa01c577f2c2018-08-31 09:22:23 +01003236 // If this node was requested as an output from the network, then adds an ArmNN output layer.
surmeh01bceff2f2018-03-29 16:29:27 +01003237 if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
3238 m_RequestedOutputs.end())
3239 {
3240 auto outId = ParseOutputId(nodeDef.name());
3241 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
3242 IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
3243
3244 TensorInfo tensorInfo = prevSlot.GetTensorInfo();
3245
3246 IConnectableLayer* outputLayer = m_Network->AddOutputLayer(layerId, nodeDef.name().c_str());
3247
3248 prevSlot.Connect(outputLayer->GetInputSlot(0));
3249
3250 TrackOutputBinding(outputLayer, layerId, tensorInfo);
3251 }
3252 }
3253 else
3254 {
telsoa01c577f2c2018-08-31 09:22:23 +01003255 throw ParseException(
3256 boost::str(
3257 boost::format(
3258 "Unsupported operation %1% in tensorflow::GraphDef %2%")
3259 % operation
3260 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003261 }
3262}
3263
3264void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
3265{
telsoa01c577f2c2018-08-31 09:22:23 +01003266 // Adds all nodes to our map.
surmeh01bceff2f2018-03-29 16:29:27 +01003267 m_NodesByName.clear();
3268 m_NetworkInputsBindingInfo.clear();
3269 m_NetworkOutputsBindingInfo.clear();
3270
3271 for (int i = 0; i < graphDef.node_size(); ++i)
3272 {
3273 const tensorflow::NodeDef& node = graphDef.node(i);
3274 m_NodesByName[node.name()] = &node;
3275 }
3276
Francis Murtaghbb190a62019-04-04 11:16:29 +01003277 // Checks that the input nodes the user has requested exist.
3278 for (const auto& pair : m_InputShapes)
3279 {
3280 const std::string& requestedInputName = pair.first;
3281 auto nodeIt = m_NodesByName.find(requestedInputName);
3282 if (nodeIt == m_NodesByName.end())
3283 {
3284 throw ParseException(
3285 boost::str(
3286 boost::format(
3287 "Couldn't find requested input node '%1%' in graph %2%")
3288 % requestedInputName
3289 % CHECK_LOCATION().AsString()));
3290 }
3291 }
3292
telsoa01c577f2c2018-08-31 09:22:23 +01003293 // Finds the output nodes the user requested.
surmeh01bceff2f2018-03-29 16:29:27 +01003294 std::vector<const tensorflow::NodeDef*> targetNodes;
3295 for (const std::string& requestedOutputName : m_RequestedOutputs)
3296 {
3297 auto nodeIt = m_NodesByName.find(requestedOutputName);
3298 if (nodeIt == m_NodesByName.end())
3299 {
telsoa01c577f2c2018-08-31 09:22:23 +01003300 throw ParseException(
3301 boost::str(
3302 boost::format(
3303 "Couldn't find requested output node '%1%' in graph %2%")
3304 % requestedOutputName
3305 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003306 }
3307 targetNodes.push_back(nodeIt->second);
3308 }
3309
telsoa01c577f2c2018-08-31 09:22:23 +01003310 // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01003311 std::vector<const tensorflow::NodeDef*> sortedNodes;
3312 if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
3313 targetNodes,
3314 [this](const tensorflow::NodeDef* node)
3315 {
3316 auto outputs = GetTfInputNodes(*node);
3317 std::vector<const tensorflow::NodeDef*> nodesOnly;
3318 for (const auto & o : outputs) {
3319 nodesOnly.push_back(o.m_IndexedValue);
3320 }
3321 return nodesOnly;
3322 },
3323 sortedNodes))
3324 {
telsoa01c577f2c2018-08-31 09:22:23 +01003325 throw ParseException(
3326 boost::str(
3327 boost::format(
3328 "Cycle detected in graph %1%")
3329 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003330 }
3331
telsoa01c577f2c2018-08-31 09:22:23 +01003332 // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01003333 for (const auto& it : sortedNodes)
3334 {
3335 const tensorflow::NodeDef& currentNode = *it;
3336 LoadNodeDef(currentNode, graphDef);
3337 }
3338}
3339
3340INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
3341 const std::map<std::string, TensorShape>& inputShapes,
3342 const std::vector<std::string>& requestedOutputs)
3343{
3344 FILE* fd = fopen(graphFile, "r");
3345
3346 if (fd == nullptr)
3347 {
telsoa01c577f2c2018-08-31 09:22:23 +01003348 throw FileNotFoundException(
3349 boost::str(
3350 boost::format(
3351 "Graph file %1% failed to open %2%")
3352 % graphFile
3353 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003354 }
3355
telsoa01c577f2c2018-08-31 09:22:23 +01003356 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003357 tensorflow::GraphDef graphDef;
3358 auto input = new google::protobuf::io::FileInputStream(fileno(fd));
3359 bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
3360 delete input;
3361 fclose(fd);
3362
3363 if (!success)
3364 {
telsoa01c577f2c2018-08-31 09:22:23 +01003365 throw ParseException(
3366 boost::str(
3367 boost::format(
3368 "Failed to parse graph file %1%")
3369 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003370 }
3371
3372 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3373}
3374
3375INetworkPtr TfParser::CreateNetworkFromString(const char* protoText,
3376 const std::map<std::string, TensorShape>& inputShapes,
3377 const std::vector<std::string>& requestedOutputs)
3378{
telsoa01c577f2c2018-08-31 09:22:23 +01003379 // Parses the string into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003380 tensorflow::GraphDef graphDef;
3381 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
3382
3383 if (!success)
3384 {
telsoa01c577f2c2018-08-31 09:22:23 +01003385 throw ParseException(
3386 boost::str(
3387 boost::format(
3388 "Failed to parse graph file %1%")
3389 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003390 }
3391
3392 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3393}
3394
3395INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
3396 const std::map<std::string, TensorShape>& inputShapes,
3397 const std::vector<std::string>& requestedOutputs)
3398{
3399 FILE* fd = fopen(graphFile, "rb");
3400
3401 if (fd == nullptr)
3402 {
telsoa01c577f2c2018-08-31 09:22:23 +01003403 throw FileNotFoundException(
3404 boost::str(
3405 boost::format(
3406 "Graph file %1% failed to open %2%")
3407 % graphFile
3408 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003409 }
3410
telsoa01c577f2c2018-08-31 09:22:23 +01003411 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003412 tensorflow::GraphDef graphDef;
3413
3414 google::protobuf::io::FileInputStream inStream(fileno(fd));
3415 google::protobuf::io::CodedInputStream codedStream(&inStream);
3416 codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
3417 bool success = graphDef.ParseFromCodedStream(&codedStream);
3418 fclose(fd);
3419
3420 if (!success)
3421 {
telsoa01c577f2c2018-08-31 09:22:23 +01003422 throw ParseException(
3423 boost::str(
3424 boost::format(
3425 "Failed to parse protobuf file %1% %2%")
3426 % graphFile
3427 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003428 }
3429
3430 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3431}
3432
3433INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
3434 const std::map<std::string, TensorShape>& inputShapes,
3435 const std::vector<std::string>& requestedOutputs)
3436{
3437 m_Network = INetwork::Create();
3438
3439 m_InputShapes = inputShapes;
3440 if (requestedOutputs.size() == 0)
3441 {
telsoa01c577f2c2018-08-31 09:22:23 +01003442 throw ParseException(
3443 boost::str(
3444 boost::format(
3445 "requestedOutputs must have at least one entry %1%")
3446 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003447 }
3448 m_RequestedOutputs = requestedOutputs;
3449
3450 try
3451 {
3452 LoadGraphDef(graphDef);
3453 }
3454 catch (const ParseException& e)
3455 {
3456 Cleanup();
3457 throw e;
3458 }
3459
3460 Cleanup();
3461
3462 return std::move(m_Network);
3463}
3464
3465void TfParser::Cleanup()
3466{
telsoa01c577f2c2018-08-31 09:22:23 +01003467 // Cleanup, in case we reuse this parser.
surmeh01bceff2f2018-03-29 16:29:27 +01003468 m_InputShapes.clear();
3469 m_RequestedOutputs.clear();
3470 m_NodesByName.clear();
3471 m_ParsedTfOperations.clear();
3472}
3473
3474BindingPointInfo TfParser::GetNetworkInputBindingInfo(const std::string& name) const
3475{
3476 return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
3477}
3478
3479BindingPointInfo TfParser::GetNetworkOutputBindingInfo(const std::string& name) const
3480{
3481 return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
3482}
3483
3484std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(const std::string& layerName,
3485 const char* bindingPointDesc,
3486 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3487{
3488 auto it = nameToBindingInfo.find(layerName);
3489 if (it == nameToBindingInfo.end())
3490 {
telsoa01c577f2c2018-08-31 09:22:23 +01003491 throw InvalidArgumentException(
3492 boost::str(
3493 boost::format(
3494 "Unknown %1% '%2%' %3%")
3495 % bindingPointDesc
3496 % layerName
3497 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003498 }
3499 return it->second;
3500}
3501
3502void TfParser::TrackInputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
3503{
3504 return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
3505}
3506
3507void TfParser::TrackOutputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
3508{
3509 return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
3510}
3511
3512void TfParser::TrackBindingPoint(IConnectableLayer* layer,
3513 LayerBindingId id,
3514 const TensorInfo& tensorInfo,
3515 const char* bindingPointDesc,
3516 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3517{
3518 const std::string layerName = layer->GetName();
3519 auto it = nameToBindingInfo.find(layerName);
3520 if (it == nameToBindingInfo.end())
3521 {
3522 nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
3523 }
3524 else
3525 {
telsoa01c577f2c2018-08-31 09:22:23 +01003526 throw ParseException(
3527 boost::str(
3528 boost::format(
3529 "Id %1% used by more than one %2% layer %3%")
3530 % id
3531 % bindingPointDesc
3532 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003533 }
3534}
3535
3536} // namespace armnnTfParser