blob: 8e57e56917b836f38facea4d432514950c473983 [file] [log] [blame]
surmeh01bceff2f2018-03-29 16:29:27 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
surmeh01bceff2f2018-03-29 16:29:27 +01004//
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00005
surmeh01bceff2f2018-03-29 16:29:27 +01006#include "TfParser.hpp"
7
surmeh01bceff2f2018-03-29 16:29:27 +01008#include <armnn/TypesUtils.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +01009#include <armnn/Descriptors.hpp>
10
11#include <GraphTopologicalSort.hpp>
Sadik Armagan479045b2018-10-01 11:51:37 +010012#include <ParserHelper.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010013#include <Permute.hpp>
Matteo Martincigh46315822018-11-28 16:22:36 +000014#include <DataLayoutIndexed.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010015
16#include <google/protobuf/io/zero_copy_stream_impl.h>
17#include <google/protobuf/text_format.h>
18
19#include "tensorflow/core/framework/graph.pb.h"
surmeh01bceff2f2018-03-29 16:29:27 +010020
surmeh01bceff2f2018-03-29 16:29:27 +010021#include <boost/format.hpp>
22#include <boost/core/ignore_unused.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010023#include <boost/polymorphic_cast.hpp>
24
surmeh01bceff2f2018-03-29 16:29:27 +010025#include <numeric>
surmeh01bceff2f2018-03-29 16:29:27 +010026
Matteo Martincigh46315822018-11-28 16:22:36 +000027using namespace armnnUtils;
surmeh01bceff2f2018-03-29 16:29:27 +010028using namespace armnn;
29
30namespace armnnTfParser
31{
32namespace
33{
34
35const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
36const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
37
surmeh01bceff2f2018-03-29 16:29:27 +010038
/// Looks up a mandatory attribute on a TF node and, if it exists with the
/// expected value case, invokes @p callable with the AttrValue.
/// @throws ParseException if the attribute is missing, or present but holds a
///         different tensorflow::AttrValue::ValueCase than @p expectedValueCase.
template <typename Callable>
void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
                                    const std::string& attribName,
                                    tensorflow::AttrValue::ValueCase expectedValueCase,
                                    Callable callable)
{
    auto iter = nodeDef.attr().find(attribName);
    if (iter != nodeDef.attr().end())
    {
        const auto& attrValue = iter->second;
        if (attrValue.value_case() == expectedValueCase)
        {
            callable(attrValue);
        }
        else
        {
            // Attribute exists but has the wrong payload type.
            throw ParseException(
                boost::str(
                    boost::format(
                        "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
                        "but found %4% instead %5%")
                        % attribName
                        % nodeDef.name()
                        % static_cast<int>(expectedValueCase)
                        % static_cast<int>(attrValue.value_case())
                        % CHECK_LOCATION().AsString()));
        }
    }
    else
    {
        // Mandatory attribute is absent altogether.
        throw ParseException(
            boost::str(
                boost::format(
                    "Could not find required attribute %1% in node %2% %3%")
                    % attribName
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
}
78
/// Looks up an optional attribute on a TF node. If absent, this is a no-op.
/// If present with the expected value case, invokes @p callable with the AttrValue.
/// @throws ParseException only when the attribute exists but holds a different
///         tensorflow::AttrValue::ValueCase than @p expectedValueCase.
template <typename Callable>
void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
                                   const std::string& attribName,
                                   tensorflow::AttrValue::ValueCase expectedValueCase,
                                   Callable callable)
{
    auto iter = nodeDef.attr().find(attribName);
    if (iter != nodeDef.attr().end())
    {
        const auto& attrValue = iter->second;
        if (attrValue.value_case() == expectedValueCase)
        {
            callable(attrValue);
        }
        else
        {
            // Present-but-wrong-type is still an error, even for optional attributes.
            throw ParseException(
                boost::str(
                    boost::format(
                        "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
                        "but found %4% instead %5%")
                        % attribName
                        % nodeDef.name()
                        % static_cast<int>(expectedValueCase)
                        % static_cast<int>(attrValue.value_case())
                        % CHECK_LOCATION().AsString()));
        }
    }
}
108
109float ReadMandatoryNodeFloatAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
110{
111 float attribValue = 0.0f;
112 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
113 [&attribValue](const tensorflow::AttrValue& attrValue)
114 {
115 attribValue = attrValue.f();
116 });
117 return attribValue;
118}
119
Conor Kennedyc2130a02018-12-05 11:05:54 +0000120int32_t ReadMandatoryNodeInt32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
121{
122 int32_t attribValue = 0u;
123 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
124 [&attribValue](const tensorflow::AttrValue& attrValue)
125 {
126 attribValue = static_cast<int32_t>(attrValue.i());
127 });
128 return attribValue;
129}
130
Ferran Balaguer51dd62f2019-01-11 19:29:18 +0000131bool ReadMandatoryNodeBoolAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
132{
133 bool attribValue = false;
134 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
135 [&attribValue](const tensorflow::AttrValue& attrValue)
136 {
137 attribValue = static_cast<bool>(attrValue.b());
138 });
139 return attribValue;
140}
141
surmeh01bceff2f2018-03-29 16:29:27 +0100142uint32_t ReadMandatoryNodeUint32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
143{
144 uint32_t attribValue = 0u;
145 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
146 [&attribValue](const tensorflow::AttrValue& attrValue)
147 {
148 attribValue = static_cast<uint32_t>(attrValue.i());
149 });
150 return attribValue;
151}
152
153std::string ReadMandatoryNodeStringAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
154{
155 std::string attribValue = "";
156 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
157 [&attribValue](const tensorflow::AttrValue& attrValue)
158 {
159 attribValue = attrValue.s();
160 });
161 return attribValue;
162}
163
164std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
165 const std::string& name)
166{
167 std::vector<uint32_t> attriList;
168 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
169 [&attriList](const tensorflow::AttrValue& attrValue)
170 {
171 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
172 {
173 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
174 }
175 });
176
177 return attriList;
178}
179
180std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
181 const std::string& name)
182{
183 std::vector<uint32_t> attriList;
184 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
185 [&attriList](const tensorflow::AttrValue& attrValue)
186 {
187 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
188 {
189 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
190 }
191 });
192
193 return attriList;
194}
195
196bool ReadOptionalNodeBoolAttribute(const tensorflow::NodeDef& nodeDef,
197 const std::string& name,
198 bool defaultValue = false)
199{
200 bool attribValue = defaultValue;
201 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
202 [&attribValue](const tensorflow::AttrValue& attrValue)
203 {
204 attribValue = attrValue.b();
205 });
206 return attribValue;
207}
208
209tensorflow::DataType ReadMandatoryNodeTypeAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
210{
211 tensorflow::DataType attribValue = tensorflow::DT_INVALID;
212 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
213 [&attribValue](const tensorflow::AttrValue& attrValue)
214 {
215 attribValue = attrValue.type();
216 });
217 return attribValue;
218}
219
/// Builds the TensorInfo of the output of a Reshape whose target dims may
/// contain a single -1 "stretch" dimension (TF semantics): that dimension is
/// inferred so the total element count matches the input.
/// @throws ParseException if more than one dimension is -1.
TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& targetDims)
{
    std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
    const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);

    if (stretchDim != targetDims.end())
    {
        // At most one -1 is allowed; a second occurrence is an error.
        if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "At most one component of shape can be -1 %1%")
                        % CHECK_LOCATION().AsString()));
        }

        // Product of all target dims; seeding accumulate with -1 cancels out
        // the single -1 stretch entry, leaving the product of the fixed dims.
        auto targetNumElements =
            boost::numeric_cast<unsigned int>(
                std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
        auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
        // Infer the stretch dimension so total elements match the input.
        outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
    }

    TensorInfo reshapeInfo = input;
    reshapeInfo.SetShape(TensorShape{ static_cast<unsigned int>(outDims.size()), outDims.data() });

    return reshapeInfo;
}
248
telsoa01c577f2c2018-08-31 09:22:23 +0100249// We need the input0Slot to guide the reshape for input1Slot.
saoste01bbd40612018-08-28 15:41:51 +0100250IOutputSlot* AddBroadcastReshapeLayer(IOutputSlot* input0Slot, IOutputSlot* input1Slot, bool isNHWC,
251 INetwork& m_Network, const tensorflow::NodeDef& nodeDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100252{
253 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
254 const TensorInfo inputTensorInfo = input0Slot->GetTensorInfo();
255 const unsigned int matchDim = inputTensorInfo.GetNumDimensions() - (isNHWC ? 1 : 3);
256 std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
257 std::fill_n(reshapedDimensions.begin(), inputTensorInfo.GetNumDimensions(), 1);
258 reshapedDimensions[matchDim] = input1Info.GetShape()[0];
259
260 armnn::TensorInfo reshapedInfo = input1Info;
261 reshapedInfo.SetShape(TensorShape{ inputTensorInfo.GetNumDimensions(), reshapedDimensions.data() });
262
263 const std::string reshapeLayerName = "reshape_for-" + nodeDef.name();
264 ReshapeDescriptor reshapeDesc;
265 reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
266 IConnectableLayer* const reshapeLayer = m_Network.AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());
267
268 input1Slot->Connect(reshapeLayer->GetInputSlot(0));
269 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
270
271 input1Slot = &reshapeLayer->GetOutputSlot(0);
272
273 return input1Slot;
274}
275
276OutputId ParseOutputId(const std::string & name)
277{
278 unsigned int outputNum = 0;
279 size_t colonPos = name.find_last_of(":");
280 if (colonPos != std::string::npos)
281 {
282 int n = std::stoi(name.substr(colonPos+1));
283 if (n<0 || n>100)
284 {
telsoa01c577f2c2018-08-31 09:22:23 +0100285 throw ParseException(
286 boost::str(
287 boost::format(
288 "Output tensor id is out of range for %1% %2%")
289 % name
290 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100291 }
292 outputNum = static_cast<unsigned int>(n);
293 }
294 return OutputId(name.substr(0,colonPos),outputNum);
295}
296
telsoa01c577f2c2018-08-31 09:22:23 +0100297#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \
298 if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
299 { \
300 throw ParseException( \
301 boost::str( \
302 boost::format( \
303 "Unsupported data format %1% passed for %2% node %3%. " \
304 "Only NHWC and NCHW supported %4%") \
305 % FORMAT \
306 % NODE_TYPE \
307 % NODE_DEF.name() \
308 % CHECK_LOCATION().AsString())); \
309 }
310
// Validates a TF "padding" attribute: only SAME and VALID are supported.
// Throws ParseException (with node name and source location) otherwise.
#define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
    if(PADDING != "SAME" && PADDING != "VALID" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Only 'SAME' and 'VALID' padding supported. Got %1% for %2% %3%") \
                    % PADDING \
                    % NODE_DEF.name() \
                    % CHECK_LOCATION().AsString())); \
    } \
322
surmeh01bceff2f2018-03-29 16:29:27 +0100323} // namespace
324
/// Dispatch table mapping TF op names to the TfParser member function that
/// converts them into ArmNN layers. Ops not listed here are unsupported.
const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
    { "Const",                 &TfParser::ParseConst },
    { "Add",                   &TfParser::ParseAdd },
    { "AddN",                  &TfParser::ParseAddN },
    { "BiasAdd",               &TfParser::ParseBiasAdd },
    { "Identity",              &TfParser::ParseIdentity },
    { "Conv2D",                &TfParser::ParseConv2D },
    { "DepthwiseConv2dNative", &TfParser::ParseDepthwiseConv2D },
    { "ExpandDims",            &TfParser::ParseExpandDims },
    { "FusedBatchNorm",        &TfParser::ParseFusedBatchNorm },
    { "Gather",                &TfParser::ParseGather},
    { "Greater",               &TfParser::ParseGreater},
    { "ConcatV2",              &TfParser::ParseConcat },
    { "LRN",                   &TfParser::ParseLrn },
    { "MatMul",                &TfParser::ParseMatMul },
    { "Mean",                  &TfParser::ParseMean },
    { "Mul",                   &TfParser::ParseMul },
    { "Placeholder",           &TfParser::ParsePlaceholder },
    { "RealDiv",               &TfParser::ParseRealDiv },
    { "Relu",                  &TfParser::ParseRelu },
    { "Relu6",                 &TfParser::ParseRelu6 },
    { "Reshape",               &TfParser::ParseReshape },
    { "ResizeBilinear",        &TfParser::ParseResizeBilinear },
    { "Rsqrt",                 &TfParser::ParseRsqrt },
    { "Shape",                 &TfParser::ParseShape },
    { "Squeeze",               &TfParser::ParseSqueeze },
    { "Sigmoid",               &TfParser::ParseSigmoid },
    { "Softmax",               &TfParser::ParseSoftmax },
    { "Softplus",              &TfParser::ParseSoftplus },
    { "Split",                 &TfParser::ParseSplit },
    { "Tanh",                  &TfParser::ParseTanh },
    { "MaxPool",               &TfParser::ParseMaxPool },
    { "AvgPool",               &TfParser::ParseAvgPool },
    { "Maximum",               &TfParser::ParseMaximum },
    { "Minimum",               &TfParser::ParseMinimum },
    { "Equal",                 &TfParser::ParseEqual },
    { "Pad",                   &TfParser::ParsePad },
    { "Sub",                   &TfParser::ParseSub }
};
364
/// TF op types that only ever appear as control inputs; they are accepted in
/// the graph but not converted to ArmNN layers.
const std::list<std::string> TfParser::m_ControlInputs = {
    "Assert"
};
368
/// Factory: creates a parser instance owned by the caller (raw pointer).
ITfParser* ITfParser::CreateRaw()
{
    return new TfParser();
}
373
/// Factory: creates a parser wrapped in a smart pointer whose deleter is
/// ITfParser::Destroy, so destruction happens inside this library.
ITfParserPtr ITfParser::Create()
{
    return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
}
378
/// Deletes a parser created by CreateRaw()/Create().
void ITfParser::Destroy(ITfParser* parser)
{
    delete parser;
}
383
/// Computes TensorFlow-style padding amounts for one spatial dimension.
/// With samePadding == false (VALID), both outputs are zero. With SAME
/// padding, the total pad needed to produce ceil(inputSize / stride) outputs
/// is split front/back, the back taking the extra element when odd.
inline void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
                                 uint32_t filterSize, bool samePadding,
                                 uint32_t* paddingFront, uint32_t* paddingBack) {
    *paddingFront = 0;
    *paddingBack = 0;

    if (!samePadding) {
        return; // VALID padding: nothing to add.
    }

    const uint32_t outputSize = (inputSize + stride - 1) / stride; // ceil division
    const uint32_t paddedSize = (outputSize - 1) * stride + filterSize;
    if (paddedSize > inputSize) {
        const uint32_t totalPadding = paddedSize - inputSize;
        *paddingFront = totalPadding / 2;
        *paddingBack = totalPadding - *paddingFront;
    }
}
399
/// Convenience wrapper over CalculateSamePadding using reference out-params.
/// Note the argument order differs: (input, kernel, stride) here vs
/// (input, stride, filter) in the callee.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 bool samePadding)
{
    CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
}
405
/// An Abstract base class which represents a single tensorflow operation (node)
/// that has been (potentially partially) converted to Armnn.
/// It may not yet have been fully converted into actual Armnn layers.
class ParsedTfOperation
{
public:
    /// @param parser Owning parser; stored non-owning, must outlive this object.
    /// @param node   The TF node this operation wraps; held by reference.
    ParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
    : m_Parser(parser)
    , m_Node(node)
    {
    }

    virtual ~ParsedTfOperation() {};

    const tensorflow::NodeDef& GetNode() const { return m_Node; }

    /// Gets the ArmNN IOutputSlot corresponding to the given output index of the Tensorflow operation.
    /// This may result in the creation of Armnn layers if this was deferred (e.g. see ParsedConstTfOperation).
    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) = 0;

    /// If this operation is an Identity then this will follow return the 'parent' operation (recursively).
    /// Default implementation: a non-Identity operation resolves to itself.
    virtual ParsedTfOperation* ResolveIdentityOperations()
    {
        return this;
    }

protected:
    TfParser* m_Parser;                 // Non-owning back-pointer to the parser.
    const tensorflow::NodeDef& m_Node;  // The wrapped TF node (not copied).
};
436
/// An ParsedTfOperation where the Armnn equivalent is a single layer,
/// with output slots that correspond directly to the Tf node outputs.
class SingleLayerParsedTfOperation : public ParsedTfOperation
{
public:
    /// @param layer The already-created ArmNN layer backing this op; may be
    ///              nullptr for deferred creation (see the Deferred subclass).
    SingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node, IConnectableLayer* layer)
    : ParsedTfOperation(parser, node)
    , m_Layer(layer)
    {
    }

    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        BOOST_ASSERT(m_Layer);
        // Assumes one-to-one mapping between Tf and armnn output slots.
        unsigned int armnnOutputSlotIdx = tfOutputIndex;
        if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "The requested output slot #%1% "
                        "for %2% does not exist %3%")
                        % armnnOutputSlotIdx
                        % m_Layer->GetName()
                        % CHECK_LOCATION().AsString()));
        }
        return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
    }

protected:
    IConnectableLayer* m_Layer;  // Non-owning; the network owns its layers.
};
470
telsoa01c577f2c2018-08-31 09:22:23 +0100471/// A SingleLayerParsedTfOperation for deferred layer creation.
surmeh01bceff2f2018-03-29 16:29:27 +0100472class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
473{
474public:
475 DeferredSingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
476 : SingleLayerParsedTfOperation(parser, node, nullptr)
477 {
478 }
479
480 IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
481 {
482 if (!m_Layer)
483 {
484 CreateLayerDeferred();
485 }
486 return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
487 }
488
489private:
490 virtual void CreateLayerDeferred() = 0;
491};
492
493
/// Constructs a parser with no network yet; m_Network is populated when a
/// graph is actually loaded.
TfParser::TfParser()
    : m_Network(nullptr, nullptr)
{
}
498
499
/// Follows chains of Identity nodes to the first non-Identity ancestor.
/// Non-Identity nodes are returned unchanged.
/// @throws ParseException if an Identity node has != 1 input or its input
///         cannot be found in m_NodesByName.
/// NOTE(review): recursion assumes no Identity cycles in the graph — a cycle
/// would recurse forever; confirm the loader rejects cyclic graphs.
const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
{
    if (nodeDef->op() != "Identity")
    {
        return nodeDef;
    }

    if (nodeDef->input_size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Identity node should have a single input! %1% has %2% inputs %3%")
                    % nodeDef->name()
                    % nodeDef->input_size()
                    % CHECK_LOCATION().AsString()));
    }

    auto it = m_NodesByName.find(nodeDef->input(0));
    if (it != m_NodesByName.end())
    {
        const tensorflow::NodeDef* inputNode = it->second;
        // Keep following until a non-Identity node is reached.
        return ResolveIdentityNode(inputNode);
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Cannot find what the Identity node %1% is linked to! %2%")
                    % nodeDef->name()
                    % CHECK_LOCATION().AsString()));
    }
}
534
/// Resolves the input names of @p nodeDef to (NodeDef*, output-index) pairs.
/// Control inputs (names starting with '^') are skipped, and Const nodes are
/// treated as having no inputs at all.
/// @throws ParseException if a named input node is not in m_NodesByName.
std::vector<OutputOfConstNodeDef>
TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
{
    std::vector<OutputOfConstNodeDef> ret;

    if (nodeDef.op() == "Const")
    {
        // For some reason const node can have "Control Inputs". We ignore them for now.
        return ret;
    }

    ret.reserve(boost::numeric_cast<size_t>(nodeDef.input_size()));
    for (int j = 0; j < nodeDef.input_size(); ++j)
    {
        OutputId outputId = ParseOutputId(nodeDef.input(j));

        if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
        {
            // We currently allow Control Input from TensorFlow graph but we ignore them from ArmNN graph.
            continue;
        }

        auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
        if (inputIt == m_NodesByName.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Can't find node '%1%', which is listed as an input of '%2%' %3%")
                        % nodeDef.input(j)
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
        ret.push_back(OutputOfConstNodeDef(inputIt->second,outputId.m_Index));
    }

    return ret;
}
573
/// Fetches the already-parsed operations feeding @p nodeDef, validating that
/// exactly @p expectedNumInputs inputs are present. Identity operations are
/// transparently resolved to their non-Identity source.
/// @throws ParseException on input-count mismatch or if an input has not been
///         parsed yet (graph must be processed in topological order).
std::vector<OutputOfParsedTfOperation>
TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
                                            std::size_t expectedNumInputs)
{
    // Fetches the tensorflow nodes connected as inputs and validate the size.
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
    const std::size_t numInputs = nodes.size();
    if (numInputs != expectedNumInputs)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Unexpected number of inputs for node %1%. Expected %2%, found %3% %4%")
                    % nodeDef.name()
                    % expectedNumInputs
                    % numInputs
                    % CHECK_LOCATION().AsString()));
    }
    // Fetches the corresponding ParsedTfOperation operations
    std::vector<OutputOfParsedTfOperation> result;
    for (auto&& node : nodes)
    {
        auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
        if (it == m_ParsedTfOperations.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Node with name '%1%' has not been parsed %2%")
                        % node.m_IndexedValue->name()
                        % CHECK_LOCATION().AsString()));
        }
        ParsedTfOperation* parsedOp = it->second.get();
        // Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
        parsedOp = parsedOp->ResolveIdentityOperations();
        result.push_back(OutputOfParsedTfOperation(parsedOp,node.m_Index));
    }
    return result;
}
613
Ferran Balaguerfbdad032018-12-28 18:15:24 +0000614IConnectableLayer* TfParser::CreateAdditionLayer(
615 const tensorflow::NodeDef& nodeDef,
616 IOutputSlot* input0Slot,
617 IOutputSlot* input1Slot,
618 const std::string& layerName)
619{
620 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
621 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
622
623 const unsigned int input0Dim = input0Info.GetNumDimensions();
624 const unsigned int input1Dim = input1Info.GetNumDimensions();
625 if (input0Dim != input1Dim)
626 {
627 // broadcasting where input0 and input1 have different number of dimensions
628 // is only supported for 1D and 4D tensors pair
629 if (input0Dim == 1 && input1Dim == 4)
630 {
631 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
632 }
633 else if (input0Dim == 4 && input1Dim == 1)
634 {
635 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
636 }
637 else
638 {
639 throw ParseException(
640 boost::str(
641 boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
642 % layerName
643 % nodeDef.name()
644 % CHECK_LOCATION().AsString()));
645 }
646 }
647 IConnectableLayer* const layer = m_Network->AddAdditionLayer(layerName.c_str());
648
649 input0Slot->Connect(layer->GetInputSlot(0));
650 input1Slot->Connect(layer->GetInputSlot(1));
651
652 // Ensure the output tensor has the correct dimensions even if a broadcast has been done
653 TensorInfo outputInfo = input0Slot->GetTensorInfo();
654 std::vector<unsigned int> outputShape;
655
656 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
657 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
658
659 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
660 {
661 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
662 }
663
664 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
665 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
666
667 return layer;
668}
669
670IConnectableLayer* TfParser::CreateAdditionLayer(
671 const tensorflow::NodeDef& nodeDef,
672 IConnectableLayer* layerOne,
673 IConnectableLayer* layerTwo,
674 unsigned int numberOfAddition,
675 unsigned long numberOfLayersToConnect,
676 bool isOdd)
677{
678 IOutputSlot* input0Slot = &layerOne->GetOutputSlot(0);
679 IOutputSlot* input1Slot = &layerTwo->GetOutputSlot(0);
680 std::string layerName(nodeDef.name());
681 if (isOdd || numberOfLayersToConnect != 2)
682 {
683 // we are not connecting the final layer
684 layerName.append("_addN_").append(std::to_string(numberOfAddition));
685 }
686 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
687}
688
689IConnectableLayer* TfParser::CreateAdditionLayer(
690 const tensorflow::NodeDef& nodeDef,
691 const OutputOfParsedTfOperation& opOne,
692 const OutputOfParsedTfOperation& opTwo,
693 unsigned int numberOfAddition)
694{
695 IOutputSlot* input0Slot = &opOne.m_IndexedValue->ResolveArmnnOutputSlot(opOne.m_Index);
696 IOutputSlot* input1Slot = &opTwo.m_IndexedValue->ResolveArmnnOutputSlot(opTwo.m_Index);
697 std::string layerName(nodeDef.name());
698 layerName.append("_addN_").append(std::to_string(numberOfAddition));
699 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
700}
701
702IConnectableLayer* TfParser::CreateAdditionLayer(
703 const tensorflow::NodeDef& nodeDef,
704 const OutputOfParsedTfOperation& op,
705 IConnectableLayer* layer)
706{
707 IOutputSlot* input0Slot = &op.m_IndexedValue->ResolveArmnnOutputSlot(op.m_Index);
708 IOutputSlot* input1Slot = &layer->GetOutputSlot(0);
709 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, nodeDef.name());
710}
711
712ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
713{
714 uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
715 if (numberOfInputs < 2)
716 {
717 // should never happen
718 throw ParseException(
719 boost::str(
720 boost::format(
721 "AddN Node with name '%1%' has less than two (%2) inputs %3%")
722 % nodeDef.name()
723 % std::to_string(numberOfInputs)
724 % CHECK_LOCATION().AsString()));
725 }
726 else if (numberOfInputs == 2)
727 {
728 //this is the same as a simple Add operation
729 return AddAdditionLayer(nodeDef, false);
730 }
731 else
732 {
733 // build a binary tree of Add layers and return the final Add as the return from the function
734 // if we have an odd number of inputs then the final Add will consist of a layer connecting to an
735 // OutputOfParsedTfOperation, otherwise it will be two layers being added together
736 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numberOfInputs);
737 unsigned int numberOfAdditions = 0;
738 std::vector<IConnectableLayer*> layers;
739 // NOTE: at this point we will have a minimum of three inputs
740 for (unsigned int i = 0; i < numberOfInputs; ++i)
741 {
742 // every time i is odd we have two inputs to process.
743 bool onSecondItem = i % 2;
744 if (onSecondItem)
745 {
746 ++numberOfAdditions;
747 IConnectableLayer* newLayer = CreateAdditionLayer(
748 nodeDef, inputs[ i - 1], inputs[i], numberOfAdditions);
749 layers.push_back(newLayer);
750 }
751 }
752
753 std::vector<IConnectableLayer*> layersToConnect(layers);
754 unsigned long numberOfLayersToConnect = layersToConnect.size();
755 bool isOdd = numberOfInputs % 2;
756
757 while (numberOfLayersToConnect > 1)
758 {
759 layers.clear();
760 for (unsigned long i = 0; i < numberOfLayersToConnect; ++i) {
761 bool onSecondItem = i % 2;
762 if (onSecondItem) {
763 ++numberOfAdditions;
764 IConnectableLayer* newLayer = CreateAdditionLayer(
765 nodeDef,
766 layersToConnect[i - 1],
767 layersToConnect[i],
768 numberOfAdditions,
769 numberOfLayersToConnect,
770 isOdd);
771 layers.push_back(newLayer);
772 }
773 }
774 //OK... need to go again... maybe
775 layersToConnect = layers;
776 numberOfLayersToConnect = layersToConnect.size();
777 }
778 IConnectableLayer* finalLayer = layersToConnect[0];
779 // if we had an odd number of inputs we need to connect the final layer to the
780 // last OutputOfParsedTfOperation in order to create the last Add layer we will
781 // be handing back.
782 if (isOdd)
783 {
784 // connect the final layer to the last op
785 finalLayer = CreateAdditionLayer(nodeDef, inputs[numberOfInputs - 1], finalLayer);
786 }
787 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, finalLayer);
788 }
789}
790
surmeh01bceff2f2018-03-29 16:29:27 +0100791ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
792{
793 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
794
telsoa01c577f2c2018-08-31 09:22:23 +0100795 // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
796 // together as FullyConnected.
surmeh01bceff2f2018-03-29 16:29:27 +0100797 if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
798 HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
799 {
800 IConnectableLayer* layer =
801 AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
802 &nodeDef,nodeDef.name().c_str());
803 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
804 }
805 else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
806 inputs[1].m_IndexedValue->GetNode().op() == "MatMul")
807 {
808 IConnectableLayer* layer =
809 AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
810 &nodeDef,nodeDef.name().c_str());
811 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
812 }
813 else
814 {
telsoa01c577f2c2018-08-31 09:22:23 +0100815 // Otherwise it's just a regular addition.
surmeh01bceff2f2018-03-29 16:29:27 +0100816 return AddAdditionLayer(nodeDef);
817 }
818}
819
/// Parses a TF BiasAdd node as an Addition layer; the 'true' flag marks it as
/// a bias addition (second input treated as the bias).
ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    return AddAdditionLayer(nodeDef, true);
}
824
/// An ParsedTfOperation which forwards to another (used for Identity nodes).
/// Both slot resolution and identity resolution delegate to the represented
/// operation, so Identity nodes are transparent to downstream parsing.
class ParsedIdentityTfOperation : public ParsedTfOperation
{
public:
    /// @param representative The operation this Identity forwards to; non-owning.
    ParsedIdentityTfOperation(TfParser* parser, const tensorflow::NodeDef& node, ParsedTfOperation* representative)
        : ParsedTfOperation(parser, node)
        , m_Representative(representative)
    {
    }

    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        BOOST_ASSERT(m_Representative);
        return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
    }

    virtual ParsedTfOperation* ResolveIdentityOperations() override
    {
        // Recurse through chained Identity operations to the real source.
        return m_Representative->ResolveIdentityOperations();
    }

private:
    ParsedTfOperation* m_Representative;  // Non-owning; owned by the parser's op map.
};
849
/// Parses a TF Identity node into a forwarding operation; no ArmNN layer is
/// created, requests are routed to the single input operation.
ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
    // Any requests for the output slots of this node should be forwarded to the node connected as input.
    return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
}
856
/// An ParsedTfOperation for a Const node.
/// Creation of the armnn ConstLayer is deferred until it is actually needed, because Const nodes are mostly used
/// for weight inputs to MatMul/Conv2D nodes and in these cases armnn doesn't need a ConstLayer.
template <typename T>
class ParsedConstTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    /// Copies the tensor data into m_Storage so its lifetime is tied to this object.
    /// @param tensorData pointer to tensorInfo.GetNumElements() elements of type T.
    ParsedConstTfOperation(TfParser* parser, const tensorflow::NodeDef& node,
        const T* tensorData, const TensorInfo& tensorInfo)
        : DeferredSingleLayerParsedTfOperation(parser, node),
        m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
        m_TensorInfo(tensorInfo)
    {
        // The template parameter must match the element size of the tensor's data type.
        BOOST_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
    }

    /// Creates the armnn ConstantLayer for this Const node. Only invoked when
    /// some consumer actually needs the layer (hence "deferred").
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
        m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
    }

    /// Copies the tensor data into the caller-provided vector and returns a
    /// ConstTensor referencing that vector. The returned tensor is only valid
    /// while outputTensorData remains alive and unmodified.
    ConstTensor GetConstTensor(std::vector<T>& outputTensorData) const
    {
        outputTensorData.resize(m_TensorInfo.GetNumElements());

        memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.GetNumBytes());

        // Updates the result to point to the user provided storage.
        ConstTensor constTensor(m_TensorInfo, outputTensorData);
        return constTensor;
    }

    /// Read-only access to the internally-held tensor data.
    const T* GetStorage() const
    {
        return m_Storage.data();
    }

    /// Shape and data type of the constant tensor.
    const TensorInfo& GetTensorInfo() const
    {
        return m_TensorInfo;
    }

private:
    /// Manages the lifetime of the tensor data.
    std::vector<T> m_Storage;
    /// Describes the layout of the tensor and points to the data in m_Storage.
    TensorInfo m_TensorInfo;
};
907
telsoa01c577f2c2018-08-31 09:22:23 +0100908DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
909 const tensorflow::NodeDef& nodeDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100910{
911 switch (tfDataType)
912 {
913 case tensorflow::DT_FLOAT:
914 return DataType::Float32;
915 break;
916 case tensorflow::DT_INT32:
917 return DataType::Signed32;
918 break;
919 default:
telsoa01c577f2c2018-08-31 09:22:23 +0100920 throw ParseException(
921 boost::str(
922 boost::format(
923 "Unknown DataType %1% for node %2% %3%")
924 % tensorflow::DataType_Name(tfDataType)
925 % nodeDef.name()
926 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100927 }
928}
929
930struct ParseTfTensorValueList
931{
932 template<typename DataType>
933 static void Parse(
934 const tensorflow::TensorProto& tfTensor,
935 unsigned int dstElements,
936 std::vector<int8_t>& outputData);
937
938 template <typename DataType>
939 static void ReadData(const void* srcData, unsigned int numSrcElements,
940 std::vector<int8_t>& dstData, unsigned int numDstElements)
941 {
telsoa01c577f2c2018-08-31 09:22:23 +0100942 // If there are no entries in the list, perform no action.
surmeh01bceff2f2018-03-29 16:29:27 +0100943 if (numSrcElements == 0)
944 {
945 return;
946 }
947
telsoa01c577f2c2018-08-31 09:22:23 +0100948 // If no size was provided, use the length of the value list.
surmeh01bceff2f2018-03-29 16:29:27 +0100949 if (numDstElements == 0)
950 {
951 numDstElements = numSrcElements;
952 }
953
telsoa01c577f2c2018-08-31 09:22:23 +0100954 // Allocates memory.
surmeh01bceff2f2018-03-29 16:29:27 +0100955 dstData.resize(std::max(numSrcElements, numDstElements) * sizeof(DataType));
956
957 const DataType* srcTensor = reinterpret_cast<const DataType*>(srcData);
958 DataType* dstTensor = reinterpret_cast<DataType*>(dstData.data());
959
telsoa01c577f2c2018-08-31 09:22:23 +0100960 // Copies the value list entries into the destination.
surmeh01bceff2f2018-03-29 16:29:27 +0100961 std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);
962
963 if (numDstElements > numSrcElements)
964 {
telsoa01c577f2c2018-08-31 09:22:23 +0100965 // Uses the last element in the list to fill the remaining entries.
surmeh01bceff2f2018-03-29 16:29:27 +0100966 std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
967 }
968 }
969
970};
971
972template <>
973void ParseTfTensorValueList::Parse<float>(const tensorflow::TensorProto& tfTensor,
974 unsigned int dstElements, std::vector<int8_t>& outputData)
975{
976 ReadData<float>(tfTensor.float_val().data(), static_cast<unsigned int>(tfTensor.float_val_size()),
977 outputData, dstElements);
978}
979
980template <>
981void ParseTfTensorValueList::Parse<int32_t>(const tensorflow::TensorProto& tfTensor,
982 unsigned int dstElements, std::vector<int8_t>& outputData)
983{
984 ReadData<int32_t>(tfTensor.int_val().data(), static_cast<unsigned int>(tfTensor.int_val_size()),
985 outputData, dstElements);
986}
987
/// Factory helper that instantiates an OperatorType<DataType> parsed operation.
/// The second template parameter is defaulted and unused by this primary template.
template <template<typename> class OperatorType, typename T = int8_t>
struct MakeTfOperation
{
    /// Constructs the operation, forwarding any extra constructor arguments.
    template<typename DataType, class... Args>
    inline static std::unique_ptr<OperatorType<DataType>> Parse(TfParser* parser, const tensorflow::NodeDef& node,
        Args&&... args)
    {
        return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
    }
};
998
/// Specialization for ParsedConstTfOperation: reinterprets the raw byte buffer
/// as elements of the requested DataType before constructing the operation.
template <>
struct MakeTfOperation<ParsedConstTfOperation>
{
    template<typename DataType, class... Args>
    inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(TfParser* parser,
        const tensorflow::NodeDef& node, const std::vector<int8_t>& tensorData, const TensorInfo& tensorInfo)
    {
        return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
            reinterpret_cast<const DataType*>(tensorData.data()), tensorInfo);
    }
};
1010
1011template <class FuncType>
1012struct InvokeParseFunction
1013{
1014 template<class ResType, class... Args>
1015 inline static ResType Result(DataType dataType, Args&&... args)
1016 {
1017 if (dataType == DataType::Float32)
1018 {
1019 return FuncType::template Parse<float>(std::forward<Args>(args)...);
1020 }
1021 else if (dataType == DataType::Signed32)
1022 {
1023 return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1024 }
1025
1026 return ResType();
1027 }
1028
1029 template<class... Args>
1030 inline static void Result(DataType dataType, Args&&... args)
1031 {
1032 if (dataType == DataType::Float32)
1033 {
1034 FuncType::template Parse<float>(std::forward<Args>(args)...);
1035 }
1036 else if (dataType == DataType::Signed32)
1037 {
1038 FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1039 }
1040 }
1041};
1042
/// Parses a TensorFlow Const node into a deferred ParsedConstTfOperation.
/// The tensor data may arrive either as a typed value list (e.g. float_val /
/// int_val) or as raw bytes in tensor_content. A value list shorter than the
/// declared shape is padded by replicating its last element
/// (see ParseTfTensorValueList::ReadData).
ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    BOOST_ASSERT(nodeDef.op() == "Const");

    // A Const node must carry its payload in the "value" attribute.
    if (nodeDef.attr().count("value") == 0)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Value not found for Const node - %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
    const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
    const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "dtype");

    const auto GetDimensionSize = [](auto& d) { return d.size(); };

    // Collect the declared shape (may be empty if the proto has no shape).
    std::vector<unsigned int> dimensionSizes;
    std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
        std::back_inserter(dimensionSizes), GetDimensionSize);

    // Calculates number of elements.
    const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
    unsigned int numElements = 0U;

    if (!dimensionSizes.empty())
    {
        numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
                                      1U, std::multiplies<unsigned int>());
    }

    std::vector<int8_t> tensorData;

    // Get tensor data from the list of values attribute.
    if (tfTensor.tensor_content().empty())
    {
        InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);

        // If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
        // tensor of the provided number of elements.
        if (numElements == 0)
        {
            const unsigned int tfNumElements =
                static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
            dimensionSizes.push_back(tfNumElements);
        }
    }
    // Gets tensor data from tensor content attribute.
    else
    {
        tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());

        // Checks if a tensor shape is defined for the tensor content.
        if (numElements == 0)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "No tensor shape found for Const node - %1% %2%")
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
    }

    // Const node requires at least a list of values or a content attribute.
    if (tensorData.empty())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "No tensor data found for Const node - %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
                                dimensionSizes.data(),
                                dataType);

    // If we have a list of values, then the length of the list must be
    // less than or equal to the number of elements implied by the shape argument.
    if (tensorData.size() > tensorInfo.GetNumBytes())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Number of elements (%1%) should be less than or equal "
                    "to the number of elements implied by the shape argument (%2%) for Const node - %3% %4%")
                    % (tensorData.size() / GetDataTypeSize(dataType))
                    % tensorInfo.GetNumElements()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    // Dispatch on the element type to build the typed ParsedConstTfOperation.
    return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
        dataType, this, nodeDef, tensorData, tensorInfo);
}
1143
1144template<typename Type>
1145bool TfParser::HasParsedConstTensor(const std::string & nodeName) const
1146{
1147 auto it = m_ParsedTfOperations.find(nodeName);
jimfly01f6ba7472018-12-04 10:09:52 +00001148 if (it == m_ParsedTfOperations.end())
surmeh01bceff2f2018-03-29 16:29:27 +01001149 {
1150 return false;
1151 }
jimfly01f6ba7472018-12-04 10:09:52 +00001152 return dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) != nullptr;
1153}
1154
1155template<typename Type>
1156bool TfParser::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const
1157{
1158 return dynamic_cast<ParsedConstTfOperation<Type>*>(parsedTfOpPtr) != nullptr;
surmeh01bceff2f2018-03-29 16:29:27 +01001159}
1160
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00001161unsigned int TfParser::GetConstInputIndex(const std::vector<OutputOfParsedTfOperation>& inputs)
1162{
1163 for (unsigned int i = 0; i < inputs.size(); i++)
1164 {
1165 if (HasParsedConstTensor<int32_t>(inputs[i].m_IndexedValue->GetNode().name()))
1166 {
1167 return i;
1168 }
1169 }
1170 throw ParseException(
1171 boost::str(
1172 boost::format(
1173 "ArmNN only supports operators with constant axis. %1%")
1174 % CHECK_LOCATION().AsString()));
1175
1176}
1177
/// Parses a TensorFlow Conv2D node into an armnn Convolution2dLayer.
/// The weight input (input 1) must be a Const node. Supports NHWC and NCHW
/// data formats and SAME/VALID padding; only the default dilation [1,1,1,1]
/// is accepted. Bias is not handled here (a following BiasAdd is parsed
/// separately).
ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    // armnn needs the weights at network-construction time, so they must be const.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Convolution layers with constant weights for %1%, input %2% %3%")
                    % nodeDef.name()
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    // Read the dilations, if present - only [1,1,1,1] (the default) is supported.
    std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
    if (!dilations.empty())
    {
        for (auto dilation : dilations)
        {
            if (dilation != 1u)
            {
                throw ParseException(
                    boost::str(
                        boost::format(
                            "ArmNN only supports Convolution layers with dilations [1,1,1,1] for %1% %2%")
                            % nodeDef.name()
                            % CHECK_LOCATION().AsString()));
            }
        }
    }

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    // DataLayoutIndexed maps H/W/C positions for the chosen layout, so the same
    // code below works for both NHWC and NCHW.
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // Tensorflow weights are [H, W, In, Out].
    // ArmNN weights have to be [Out, H, W, In] when the data layout is NHWC,
    // and [Out, In, H, W] when the data layout is NCHW.
    PermutationVector permutationVector =
        dataLayout == DataLayout::NHWC ?
        std::initializer_list<unsigned int>{ 1, 2, 3, 0 } : // NHWC: [H, W, In, Out] -> [Out, H, W, In]
        std::initializer_list<unsigned int>{ 2, 3, 1, 0 };  // NCHW: [H, W, In, Out] -> [Out, In, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    uint32_t weightHeight = weightTensor.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t weightWidth = weightTensor.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    if (paddingString == "SAME")
    {
        padding = true;

        // SAME: output spatial size is the input size divided by the stride, rounded up.
        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                 static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        // VALID: no padding, so the filter window must fit entirely inside the input.
        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                 static_cast<float>(desc.m_StrideX)));
    }

    // Output channel count is the weights' Out dimension (index 0 after swizzling).
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      weightTensor.GetShape()[0] },
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
        default:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      weightTensor.GetShape()[0],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc, weightTensor, nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1315
/// Parses a TensorFlow DepthwiseConv2dNative node into an armnn
/// DepthwiseConvolution2dLayer. The weight input (input 1) must be a Const
/// node. Supports NHWC and NCHW data formats and SAME/VALID padding.
ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
                                                    const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    // armnn needs the weights at network-construction time, so they must be const.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Depthwise Convolution layer with constant weights. "
                    "Non const input found %1% for node %2% %3%")
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    // DataLayoutIndexed maps H/W/C positions for the chosen layout, so the same
    // code below works for both NHWC and NCHW.
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // Tensorflow weights come in the format [H, W, I, M].
    // ArmNN weights have to be [M, I, H, W].
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    // After swizzling to [M, I, H, W], the spatial dimensions sit at indices 2 and 3.
    uint32_t weightHeight = weightTensor.GetShape()[2];
    uint32_t weightWidth = weightTensor.GetShape()[3];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    if (paddingString == "SAME")
    {
        padding = true;

        // SAME: output spatial size is the input size divided by the stride, rounded up.
        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                 static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        // VALID: no padding, so the filter window must fit entirely inside the input.
        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                 static_cast<float>(desc.m_StrideX)));
    }

    // Depthwise output channel count is M * I, i.e. shape[0] * shape[1]
    // of the swizzled [M, I, H, W] weights.
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      weightTensor.GetShape()[0] * weightTensor.GetShape()[1]},
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
        default:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc, weightTensor, nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1433
Conor Kennedyc2130a02018-12-05 11:05:54 +00001434TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
1435{
1436 BOOST_ASSERT(nodeDef.op() == "ExpandDims");
1437
1438 if (inputTensorInfo.GetNumDimensions() > 4) {
1439 throw ParseException(
1440 boost::str(
1441 boost::format(
1442 "Unsupported number of dimensions: %1% for input shape for ExpandDims %2% %3%")
1443 % inputTensorInfo.GetNumDimensions()
1444 % nodeDef.name()
1445 % CHECK_LOCATION().AsString()));
1446 }
1447
1448 std::int32_t expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");
1449
1450 std::int32_t inputDimSize = boost::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
1451 std::vector<uint32_t> outputDims;
1452
1453 // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
1454 if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1455 {
1456 // add current input shape to outputDims
1457 for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
1458 auto currentDimension = inputTensorInfo.GetShape()[i];
1459 outputDims.push_back(currentDimension);
1460 }
1461
1462 // insert a dimension of 1 at index 'expandDim' of inputs shape
1463 if (expandDim >= 0)
1464 {
1465 auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1466 outputDims.insert(getPosition, 1);
1467 }
1468
1469 // if negative number for 'expandDim' then count backwards from the last element
1470 // and insert 1 dimension at index 'expandDim'
1471 if (expandDim < 0)
1472 {
Matteo Martincighd7cceeb2018-12-06 09:06:29 +00001473 int outputDimSize = boost::numeric_cast<int>(outputDims.size() + 1);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001474 auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1475 outputDims.insert(getPosition, 1);
1476 }
1477 }
1478 else
1479 {
1480 throw InvalidArgumentException(
1481 boost::str(
1482 boost::format(
1483 "Cannot expand dimension %1% in input tensor with %2% dimension %3%")
1484 % expandDim
1485 % inputDimSize
1486 % CHECK_LOCATION().AsString()));
1487 }
1488
1489 if (outputDims.size() > 4)
1490 {
1491 throw ParseException(
1492 boost::str(
1493 boost::format(
1494 "Unsupported number of dimensions: %1% for output shape for ExpandDims %2% %3%")
1495 % outputDims.size()
1496 % nodeDef.name()
1497 % CHECK_LOCATION().AsString()));
1498 }
1499
1500 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1501 outputDims.data());
1502
1503 TensorInfo outTensorInfo = inputTensorInfo;
1504 outTensorInfo.SetShape(outShape);
1505
1506 return outTensorInfo;
1507}
1508
1509ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1510{
1511 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1512
1513 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1514 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1515
1516 TensorInfo outputInfo;
1517 outputInfo = OutputShapeOfExpandDims(nodeDef, inputTensorInfo);
1518
1519 ReshapeDescriptor reshapeDesc;
1520 reshapeDesc.m_TargetShape = outputInfo.GetShape();
1521 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1522 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1523 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1524
1525 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1526}
1527
surmeh01bceff2f2018-03-29 16:29:27 +01001528ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
1529 const tensorflow::GraphDef& graphDef)
1530{
1531 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
1532
1533 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1534 {
telsoa01c577f2c2018-08-31 09:22:23 +01001535 throw ParseException(
1536 boost::str(
1537 boost::format(
1538 "ArmNN only supports FusedBatchNormalization layers with constant scale. "
1539 "Input %1%. Node %2% %3%")
1540 % inputs[1].m_IndexedValue->GetNode().name()
1541 % nodeDef.name()
1542 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001543 }
1544 ParsedConstTfOperation<float>* scaleNode =
1545 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1546
1547 if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
1548 {
telsoa01c577f2c2018-08-31 09:22:23 +01001549 throw ParseException(
1550 boost::str(
1551 boost::format(
1552 "ArmNN only supports FusedBatchNormalization layers with constant offset. "
1553 "Input %1%. Node %2% %3%")
1554 % inputs[2].m_IndexedValue->GetNode().name()
1555 % nodeDef.name()
1556 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001557 }
1558 ParsedConstTfOperation<float>* offsetNode =
1559 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);
1560
1561 if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
1562 {
telsoa01c577f2c2018-08-31 09:22:23 +01001563 throw ParseException(
1564 boost::str(
1565 boost::format(
1566 "ArmNN only supports FusedBatchNormalization layers with constant mean. "
1567 "Input %1%. Node %2% %3%")
1568 % inputs[3].m_IndexedValue->GetNode().name()
1569 % nodeDef.name()
1570 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001571 }
1572 ParsedConstTfOperation<float>* meanNode =
1573 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);
1574
1575 if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
1576 {
telsoa01c577f2c2018-08-31 09:22:23 +01001577 throw ParseException(
1578 boost::str(
1579 boost::format(
1580 "ArmNN only supports FusedBatchNormalization layers with constant variance. "
1581 "Input %1%. Node %2% %3%")
1582 % inputs[4].m_IndexedValue->GetNode().name()
1583 % nodeDef.name()
1584 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001585 }
1586 ParsedConstTfOperation<float>* varianceNode =
1587 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);
1588
Matteo Martincigh075c7502018-12-05 13:10:45 +00001589 const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
1590
1591 CHECK_DATA_FORMAT(nodeDef, dataFormat, "FusedBatchNorm");
1592
telsoa01c577f2c2018-08-31 09:22:23 +01001593 // The descriptor only has the epsilon attribute.
surmeh01bceff2f2018-03-29 16:29:27 +01001594 BatchNormalizationDescriptor desc;
1595 desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
Matteo Martincigh075c7502018-12-05 13:10:45 +00001596 desc.m_DataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
surmeh01bceff2f2018-03-29 16:29:27 +01001597
telsoa01c577f2c2018-08-31 09:22:23 +01001598 // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
1599 // locally until the layer is added.
surmeh01bceff2f2018-03-29 16:29:27 +01001600 std::vector<float> scaleTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001601 ConstTensor scaleTensor = scaleNode->GetConstTensor(scaleTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001602
1603 std::vector<float> offsetTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001604 ConstTensor offsetTensor = offsetNode->GetConstTensor(offsetTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001605
1606 std::vector<float> meanTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001607 ConstTensor meanTensor = meanNode->GetConstTensor(meanTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001608
1609 std::vector<float> varianceTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001610 ConstTensor varianceTensor = varianceNode->GetConstTensor(varianceTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001611
1612 IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
1613 meanTensor,
1614 varianceTensor,
1615 offsetTensor,
1616 scaleTensor,
1617 nodeDef.name().c_str());
1618
1619 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1620
Matteo Martincigh075c7502018-12-05 13:10:45 +00001621 layer->GetOutputSlot(0).SetTensorInfo(inputSlot.GetTensorInfo());
1622 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01001623
1624 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1625}
1626
// Checks whether a candidate "Mul" node together with the other input of a
// "Maximum" node forms the LeakyRelu pattern max(mul(alpha, x), x).
// On success fills in 'desc' with the LeakyReLu activation (m_A = alpha),
// stores the output slot of the non-mul operand in 'outputOfLeakyRelu',
// and returns true; otherwise returns false and leaves the outputs untouched.
bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
                                           size_t alphaLayerIndex,
                                           const OutputOfParsedTfOperation& otherOp,
                                           armnn::IOutputSlot** outputOfLeakyRelu,
                                           armnn::ActivationDescriptor & desc)
{
    const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();

    // Verifying all these assumptions hold:
    //
    // 1, the mulNodeDef is an elementwise multiplication node "Mul"
    // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
    // 3, the inputLayerIndex selects a layer which has the same name as otherNodeDef
    //

    if (mulNodeDef.op() == "Mul")
    {
        // The two Mul inputs are alpha and "the other" operand; their indices are complementary.
        size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
        std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);

        BOOST_ASSERT(inputs.size() == 2);
        BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
        BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
        BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));

        // The non-alpha Mul operand must be the same node as the Maximum's other input
        // (i.e. the Mul multiplies the very tensor being maxed against).
        if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
        {
            if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
            {
                ParsedConstTfOperation<float>* alpha =
                    boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(
                        inputs[alphaLayerIndex].m_IndexedValue);

                std::vector<float> const_data;
                ConstTensor const_tensor = alpha->GetConstTensor(const_data);

                // Alpha must be a scalar for the LeakyRelu fusion to apply.
                if (const_data.size() == 1)
                {
                    desc.m_Function = ActivationFunction::LeakyReLu;
                    desc.m_A = const_data[0];

                    *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
                    return true;
                }
            }
        }
    }
    return false;
}
1676
telsoa01c577f2c2018-08-31 09:22:23 +01001677ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
1678 const tensorflow::GraphDef& graphDef)
1679{
1680 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
Sadik Armagan975c09a2018-12-04 10:02:08 +00001681 if (inputs.size() != 2)
1682 {
1683 throw ParseException(
1684 boost::str(
1685 boost::format(
1686 "Maximum expects two inputs!. Got %1% for Node %2% %3%")
1687 % inputs.size()
1688 % nodeDef.name()
1689 % CHECK_LOCATION().AsString()));
1690 }
1691
telsoa01c577f2c2018-08-31 09:22:23 +01001692 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1693 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1694 IOutputSlot* outputOfLeakyRelu = nullptr;
1695
1696 ActivationDescriptor desc;
1697
Sadik Armagan975c09a2018-12-04 10:02:08 +00001698 // A max node may be part of a LeakyRelu, with one input as a multiplication with a scalar constant,
1699 // i.e. one of the four possible scenarios:
1700 // 1, max(mul(a, x), x)
1701 // 2, max(mul(x, a), x)
1702 // 3, max(x, mul(a, x))
1703 // 4, max(x, mul(x, a))
1704 // These are handled by an activation layer.
telsoa01c577f2c2018-08-31 09:22:23 +01001705
1706 if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1707 IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1708 IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1709 IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1710 {
1711 BOOST_ASSERT(outputOfLeakyRelu != nullptr);
1712
1713 IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
1714 outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
1715 layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
1716 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1717 }
1718 else
1719 {
Sadik Armagan975c09a2018-12-04 10:02:08 +00001720 // Anything else is just a maximum layer.
1721
1722 return AddMaximumLayer(nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001723 }
1724}
1725
jimfly0184c70e62018-12-19 13:14:46 +00001726std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> TfParser::ProcessElementwiseInputSlots(
1727 const tensorflow::NodeDef& nodeDef, const std::string& layerName)
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001728{
1729 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1730
1731 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1732 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1733 const unsigned int input0Dim = input0Slot->GetTensorInfo().GetNumDimensions();
1734 const unsigned int input1Dim = input1Slot->GetTensorInfo().GetNumDimensions();
1735
1736 if (input0Dim != input1Dim)
1737 {
1738 // broadcasting where input0 and input1 have different number of dimensions
1739 // is only supported for 1D and 4D tensors pair
1740 if (input0Dim == 1 && input1Dim == 4)
1741 {
1742 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
1743 }
1744 else if (input0Dim == 4 && input1Dim == 1)
1745 {
1746 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
1747 }
1748 else
1749 {
1750 throw ParseException(
jimfly0184c70e62018-12-19 13:14:46 +00001751 boost::str(
1752 boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
1753 % layerName
1754 % nodeDef.name()
1755 % CHECK_LOCATION().AsString()));
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001756 }
1757 }
jimfly0184c70e62018-12-19 13:14:46 +00001758 return {input0Slot, input1Slot};
1759}
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001760
kevmay012b4d88e2019-01-24 14:05:09 +00001761ParsedTfOperationPtr TfParser::ProcessComparisonLayer(
1762 IOutputSlot* input0Slot,
1763 IOutputSlot* input1Slot,
1764 IConnectableLayer* const layer,
1765 const tensorflow::NodeDef& nodeDef)
1766{
1767 input0Slot->Connect(layer->GetInputSlot(0));
1768 input1Slot->Connect(layer->GetInputSlot(1));
1769
1770 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1771 outputInfo.SetDataType(DataType::Boolean);
1772 std::vector<unsigned int> outputShape;
1773
1774 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1775 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1776
1777 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1778 {
1779 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1780 }
1781
1782 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1783 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1784
1785 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1786}
1787
jimfly0184c70e62018-12-19 13:14:46 +00001788ParsedTfOperationPtr TfParser::ProcessElementwiseLayer(
1789 IOutputSlot* input0Slot,
1790 IOutputSlot* input1Slot,
1791 IConnectableLayer* const layer,
1792 const tensorflow::NodeDef& nodeDef)
1793{
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001794 input0Slot->Connect(layer->GetInputSlot(0));
1795 input1Slot->Connect(layer->GetInputSlot(1));
1796
1797 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1798 std::vector<unsigned int> outputShape;
1799
1800 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1801 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1802
1803 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1804 {
1805 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1806 }
1807
1808 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1809 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1810
1811 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1812}
1813
FrancisMurtagh94412af2019-01-24 10:53:39 +00001814ParsedTfOperationPtr TfParser::ParseGather(const tensorflow::NodeDef& nodeDef,
1815 const tensorflow::GraphDef& graphDef)
1816{
1817 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1818 IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1819 IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1820
1821 // Infer shape of output tensor
1822 unsigned int paramsDim = params.GetTensorInfo().GetNumDimensions();
1823 unsigned int indicesDim = indices.GetTensorInfo().GetNumDimensions();
1824 unsigned int outputDim = paramsDim - 1 + indicesDim;
1825
1826 std::vector<unsigned int> dimSizes;
1827
1828 for (unsigned int i = 0; i < indicesDim; ++i)
1829 {
1830 dimSizes.push_back(indices.GetTensorInfo().GetShape()[i]);
1831 }
1832 for (unsigned int i = 1; i < paramsDim; ++i)
1833 {
1834 dimSizes.push_back(params.GetTensorInfo().GetShape()[i]);
1835 }
1836
1837 const TensorShape& inferredShape = TensorShape(outputDim, dimSizes.data());
1838
1839 const TensorInfo inferredOutputInfo(inferredShape, params.GetTensorInfo().GetDataType());
1840
1841 IConnectableLayer* const layer = m_Network->AddGatherLayer(nodeDef.name().c_str());
1842 layer->GetOutputSlot(0).SetTensorInfo(inferredOutputInfo);
1843
1844 params.Connect(layer->GetInputSlot(0));
1845 indices.Connect(layer->GetInputSlot(1));
1846
1847 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1848}
1849
jimfly01a06bf312018-12-18 16:24:51 +00001850ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
1851 const tensorflow::GraphDef& graphDef)
1852{
1853 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
1854 IOutputSlot* input0Slot = inputLayers.first;
1855 IOutputSlot* input1Slot = inputLayers.second;
1856
1857 IConnectableLayer* const layer = m_Network->AddGreaterLayer(nodeDef.name().c_str());
1858
kevmay012b4d88e2019-01-24 14:05:09 +00001859 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
jimfly01a06bf312018-12-18 16:24:51 +00001860}
1861
jimfly0184c70e62018-12-19 13:14:46 +00001862ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
1863 const tensorflow::GraphDef& graphDef)
1864{
1865 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
1866 IOutputSlot* input0Slot = inputLayers.first;
1867 IOutputSlot* input1Slot = inputLayers.second;
1868
1869 IConnectableLayer* const layer = m_Network->AddEqualLayer(nodeDef.name().c_str());
1870
kevmay012b4d88e2019-01-24 14:05:09 +00001871 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
jimfly0184c70e62018-12-19 13:14:46 +00001872}
1873
1874ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
1875 const tensorflow::GraphDef& graphDef)
1876{
1877 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
1878 IOutputSlot* input0Slot = inputLayers.first;
1879 IOutputSlot* input1Slot = inputLayers.second;
1880
1881 IConnectableLayer* const layer = m_Network->AddMinimumLayer(nodeDef.name().c_str());
1882
1883 return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
1884}
1885
// Parses a TF "Sub" node into an ArmNN Subtraction layer, reshaping a 1D
// operand for broadcasting when the other operand has more dimensions.
ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);

    // NOTE: these references are bound to the ORIGINAL slots' tensor infos;
    // they keep describing the pre-broadcast shapes even after the slot
    // pointers are redirected to reshape layers below.
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    // Reshape a 1D operand so it can be broadcast against the other input.
    if (input0Info.GetNumDimensions() == 1)
    {
        const bool isNHWC = true;
        input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
    }

    if (input1Info.GetNumDimensions() == 1)
    {
        const bool isNHWC = true;
        input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
    }

    IConnectableLayer* const layer = m_Network->AddSubtractionLayer(nodeDef.name().c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // The output takes the shape of the non-1D (non-broadcast) operand.
    if (input0Info.GetNumDimensions() == 1)
    {
        layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
    }
    else
    {
        layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1924
jimfly01f6ba7472018-12-04 10:09:52 +00001925unsigned int CheckPaddingTensor(const ConstTensor& paddingTensor,
1926 const TensorInfo& inputTensorInfo,
1927 const std::string& nodeName)
1928{
1929 unsigned int rank = paddingTensor.GetShape()[0];
1930 unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
1931 if (rank != expectedRank)
1932 {
1933 throw ParseException(
1934 boost::str(
1935 boost::format(
1936 "Expected the padding tensor to be of rank %1 not %2 on Node %3 %4.")
1937 % expectedRank
1938 % rank
1939 % nodeName
1940 % CHECK_LOCATION().AsString()));
1941 }
1942 unsigned int second = paddingTensor.GetShape()[1];
1943 if (second != 2)
1944 {
1945 throw ParseException(
1946 boost::str(
1947 boost::format(
1948 "Expected the padding tensor to be of dimensions [%1, 2] not [%1, %2] on Node %3 %4.")
1949 % rank
1950 % second
1951 % nodeName
1952 % CHECK_LOCATION().AsString()));
1953 }
1954 return rank;
1955}
1956
1957TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
1958 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
1959{
1960 unsigned int numDims = inputTensorInfo.GetNumDimensions();
1961 std::vector<unsigned int> outDims;
1962 for (unsigned int i = 0; i < numDims; ++i)
1963 {
1964 unsigned int dimSize = inputTensorInfo.GetShape()[i];
1965 const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
1966 dimSize += dimPadding.first;
1967 dimSize += dimPadding.second;
1968 outDims.push_back(dimSize);
1969 }
1970 TensorInfo paddedTensorInfo = inputTensorInfo;
1971 unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
1972 paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
1973 return paddedTensorInfo;
1974}
1975
1976ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
1977 const tensorflow::GraphDef& graphDef)
1978{
1979 // input consists of:
1980 // input[0] the tensor which will be padded
1981 // input[1] the tensor holding the padding values
1982 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1983 IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1984 TensorInfo inputTensorInfo = previousLayerOutputSlot.GetTensorInfo();
1985 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
1986 {
1987 throw ParseException(
1988 boost::str(
1989 boost::format(
1990 "ArmNN only supports Pad with constant padding. "
1991 "Input %1%. Node %2% %3%")
1992 % inputs[1].m_IndexedValue->GetNode().name()
1993 % nodeDef.name()
1994 % CHECK_LOCATION().AsString()));
1995
1996 }
1997 ParsedConstTfOperation<int32_t>* paddingTensorOp =
1998 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1999
2000 std::vector<int32_t> paddingTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002001 ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(paddingTensorData);
jimfly01f6ba7472018-12-04 10:09:52 +00002002 // paddings is an integer tensor with shape [n, 2], where n is the rank of tensor
2003 // and should match the rank of the input tensor that is being padded.
2004 // For each dimension D of input, paddings[D, 0] indicates how many values to add
2005 // before the contents of tensor in that dimension, and paddings[D, 1] indicates how
2006 // many values to add after the contents of tensor in that dimension
2007 // This needs to be translated into a padList for ACL
2008 std::vector<std::pair<unsigned int, unsigned int>> padList;
2009 unsigned int rank = CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
2010 for (unsigned int i = 0; i < rank; ++i)
2011 {
2012 std::pair<unsigned int, unsigned int> paddingForDim;
2013 for (unsigned int j = 0; j < 2; j++)
2014 {
2015 unsigned int index = (i * 2) + j;
2016 int paddingAmount = paddingTensorData[index];
2017 // make sure we can cast to an unsigned value
2018 if (paddingAmount < 0)
2019 {
2020 throw ParseException(
2021 boost::str(
2022 boost::format(
2023 "Negative amount %1 specified at [%2, %3] of padding tensor on Node %4 %5.")
2024 % paddingAmount
2025 % i
2026 % j
2027 % nodeDef.name()
2028 % CHECK_LOCATION().AsString()));
2029 }
2030 if (j == 0)
2031 {
2032 paddingForDim.first = static_cast<unsigned int>(paddingAmount);
2033 }
2034 else
2035 {
2036 paddingForDim.second = static_cast<unsigned int>(paddingAmount);
2037 }
2038 }
2039 padList.push_back(paddingForDim);
2040 }
2041 PadDescriptor padDescriptor(padList);
2042 IConnectableLayer* layer = m_Network->AddPadLayer(padDescriptor, nodeDef.name().c_str());
2043 previousLayerOutputSlot.Connect(layer->GetInputSlot(0));
2044 // Use the padding to calculate the new output tensor shape
2045 TensorInfo outputTensorInfo = CalculatePaddedOutputTensorInfo(inputTensorInfo, padList);
2046 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2047 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2048}
2049
// Parses a TF Concat node into an ArmNN Merger layer. One of the node's
// inputs is a constant scalar holding the concatenation axis; the rest are
// the tensors to concatenate.
ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);

    // In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
    unsigned int numInputs = static_cast<unsigned int>(nodes.size());

    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);

    // Constant tensor index (position of the axis input among the node's inputs).
    unsigned int index = GetConstInputIndex(inputs);
    // Get the axis tensor data
    ParsedConstTfOperation<int32_t>* shapeNode =
            boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);

    std::vector<int32_t> axisTensorData;
    shapeNode->GetConstTensor(axisTensorData);

    // This concatDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
    const unsigned int concatDim = static_cast<unsigned int>(axisTensorData[0]);

    // Armnn supports concatenation along the channel dimension for data formats NHWC and NCHW.
    // (Axes 0 and 2 are rejected; axis 1 = NCHW channels, axis 3 = NHWC channels.)
    if (concatDim == 0 || concatDim == 2)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Dimension %1% for concatenation is not supported by Armnn. "
                    "Node %2% %3%")
                % concatDim
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    // All inputs except the axis tensor are views to be merged.
    // NOTE(review): the loops below iterate inputs[0..numConcatViews-1] and
    // assume the axis input is the LAST one — verify this also holds for
    // Concat nodes where the axis is the first input.
    unsigned int numConcatViews = numInputs - 1;
    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatViews), MaxNumOfTensorDimensions);
    concatDescriptor.SetConcatAxis(concatDim);
    TensorShape mergeDims(MaxNumOfTensorDimensions);
    unsigned int mergeDim = 0;  // running offset along the concat axis
    for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
    {
        // Need to double check whether it should be
        IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
        TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

        // Double check dimensions of the tensors
        if (inputTensorInfo.GetNumDimensions() != MaxNumOfTensorDimensions)
        {
            throw armnn::ParseException(
                boost::str(
                    boost::format(
                        "The number of dimensions: %1% for input tensors of the "
                        "concatenation op should be %2% %3%")
                    % inputTensorInfo.GetNumDimensions()
                    % MaxNumOfTensorDimensions
                    % CHECK_LOCATION().AsString()));
        }

        // Copy the input tensor shape to mergeDimSizes and initialize the view origin coordinates for the current input
        mergeDims = inputTensorInfo.GetShape();
        unsigned int* viewOrigin = const_cast<unsigned int*>(concatDescriptor.GetViewOrigin(viewIndex));
        std::fill(viewOrigin, viewOrigin + MaxNumOfTensorDimensions, 0);

        // Update the view origin coordinates and the merge dimension value
        concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
        mergeDim += mergeDims[concatDim];
    }

    // Update the output shape: the concat axis is the sum of the view sizes.
    mergeDims[concatDim] = mergeDim;
    armnn::IConnectableLayer *layer = m_Network->AddMergerLayer(concatDescriptor, nodeDef.name().c_str());

    layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(mergeDims, DataType::Float32));

    for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
    {
        IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
        inputSlot.Connect(layer->GetInputSlot(viewIndex));
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2133
2134ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
2135 const tensorflow::GraphDef& graphDef)
2136{
telsoa01c577f2c2018-08-31 09:22:23 +01002137 // Note: the Shape layer is handled in a special way, because:
2138 // 1. ARMNN doesn't support int32 tensors which it outputs.
2139 // 2. ARMNN works with statically shaped tensors which are known at parse time.
surmeh01bceff2f2018-03-29 16:29:27 +01002140 // 3. because of 1. and 2. we treat the output of Shape as a temporary const int32
telsoa01c577f2c2018-08-31 09:22:23 +01002141 // tensor which may be used as an input to other ops, most likely a Reshape.
surmeh01bceff2f2018-03-29 16:29:27 +01002142
2143 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
2144 if (tfDataType != tensorflow::DT_INT32)
2145 {
telsoa01c577f2c2018-08-31 09:22:23 +01002146 throw ParseException(
2147 boost::str(
2148 boost::format(
2149 "Armnn only supports DT_INT32 as out_type. Got %1% for Node %2% %3%")
2150 % tensorflow::DataType_Name(tfDataType)
2151 % nodeDef.name()
2152 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002153 }
2154
2155 const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2156 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2157 const TensorInfo& prevLayerTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2158 unsigned int prevLayerDimensions = prevLayerTensorInfo.GetNumDimensions();
2159
2160 std::vector<int32_t> shapeTensorData;
2161 shapeTensorData.reserve(prevLayerDimensions);
2162
2163 for (unsigned int i=0; i<prevLayerDimensions; ++i)
2164 {
2165 shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.GetShape()[i]));
2166 }
2167
2168 TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
2169
2170 return std::make_unique<ParsedConstTfOperation<int32_t>>(this,
2171 nodeDef,
2172 &shapeTensorData[0],
2173 shapeTensorInfo);
2174}
2175
2176ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
2177 const tensorflow::GraphDef& graphDef)
2178{
2179 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2180 ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
2181
2182 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2183 {
telsoa01c577f2c2018-08-31 09:22:23 +01002184 throw ParseException(
2185 boost::str(
2186 boost::format(
2187 "ArmNN only supports Reshape layers with constant shapes. "
2188 "Input %1% Node %2% %3%")
2189 % inputs[1].m_IndexedValue->GetNode().name()
2190 % nodeDef.name()
2191 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002192 }
2193 ParsedConstTfOperation<int32_t>* shapeNode =
2194 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2195
2196 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
2197 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2198
2199 std::vector<int32_t> shapeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002200 ConstTensor shapeTensor = shapeNode->GetConstTensor(shapeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002201 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
2202
2203 TensorShape targetShape = outputTensorInfo.GetShape();
2204 ReshapeDescriptor reshapeDesc;
2205 reshapeDesc.m_TargetShape = targetShape;
2206
2207 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2208 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2209 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2210
2211 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2212}
2213
/// Parses a TF ResizeBilinear node into an ArmNN ResizeBilinear layer (NHWC, Float32 output).
/// Input 0 is the image tensor; input 1 must be a constant [height, width] size tensor.
ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
                                                   const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    // The target size must come from a constant node, otherwise we cannot build a static descriptor.
    if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports ResizeBilinear layers with constant sizes. "
                    "Input %1%. Node %2% %3%")
                % inputs[1].m_IndexedValue->GetNode().name()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<int32_t>* sizeNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);

    // Checks the align_corners attribute is not set.
    if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports ResizeBilinear layers with align_corners set to false. "
                    "Node %1% %2%")
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    // Data for the parsed tensor args (size) must be stored locally.
    std::vector<int32_t> sizeTensorData;
    ConstTensor sizeTensor = sizeNode->GetConstTensor(sizeTensorData);

    // The descriptor only has target height and width attributes, which we get from the size tensor.
    // NOTE(review): assumes the size tensor holds at least two elements ([height, width]) —
    // confirm upstream validation guarantees this.
    ResizeBilinearDescriptor desc;
    desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
    desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, nodeDef.name().c_str());

    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
    // The input shape is NHWC (layer runs with m_DataLayout == NHWC, no permute is inserted);
    // take the batch and channels from it and combine with the target size for the output shape.
    unsigned int outBatch = inputTensorInfo.GetShape()[0];
    unsigned int outChannels = inputTensorInfo.GetShape()[3];
    unsigned int outHeight = desc.m_TargetHeight;
    unsigned int outWidth = desc.m_TargetWidth;
    TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
    // The output DataType is always Float32, regardless of the input DataType.
    const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2274
2275TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
2276{
2277 BOOST_ASSERT(nodeDef.op() == "Squeeze");
2278 tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
2279
2280 DataType type;
2281 if (tfDataType == tensorflow::DT_FLOAT)
2282 {
2283 type = DataType::Float32;
2284 }
2285 else if (tfDataType == tensorflow::DT_INT32)
2286 {
2287 type = DataType::Signed32;
2288 }
2289 else
2290 {
telsoa01c577f2c2018-08-31 09:22:23 +01002291 throw ParseException(
2292 boost::str(
2293 boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
2294 % tensorflow::DataType_Name(tfDataType)
2295 % nodeDef.name()
2296 % CHECK_LOCATION().AsString()));
2297 }
2298
2299
2300 if (inputTensorInfo.GetNumDimensions() > 4)
2301 {
2302 throw ParseException(
2303 boost::str(
2304 boost::format(
2305 "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
2306 % inputTensorInfo.GetNumDimensions()
2307 % nodeDef.name()
2308 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002309 }
2310
2311 std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
telsoa01c577f2c2018-08-31 09:22:23 +01002312 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2313
surmeh01bceff2f2018-03-29 16:29:27 +01002314 if (squeezeDims.empty())
2315 {
telsoa01c577f2c2018-08-31 09:22:23 +01002316 squeezeDims.assign(dimensionSequence,
2317 dimensionSequence+inputTensorInfo.GetNumDimensions());
surmeh01bceff2f2018-03-29 16:29:27 +01002318 }
2319
2320 std::vector<uint32_t> outputDims;
2321 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2322 {
telsoa01c577f2c2018-08-31 09:22:23 +01002323 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2324 auto currentDimension = inputTensorInfo.GetShape()[i];
2325 if (skipSqueeze || currentDimension != 1)
surmeh01bceff2f2018-03-29 16:29:27 +01002326 {
telsoa01c577f2c2018-08-31 09:22:23 +01002327 outputDims.push_back(currentDimension);
surmeh01bceff2f2018-03-29 16:29:27 +01002328 }
2329 }
2330
2331 if (outputDims.size() > 4)
2332 {
telsoa01c577f2c2018-08-31 09:22:23 +01002333 throw ParseException(
2334 boost::str(
2335 boost::format(
2336 "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
2337 % outputDims.size()
2338 % nodeDef.name()
2339 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002340 }
2341
telsoa01c577f2c2018-08-31 09:22:23 +01002342 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2343 outputDims.data());
2344
2345 TensorInfo outTensorInfo = inputTensorInfo;
2346 outTensorInfo.SetShape(outShape);
2347 outTensorInfo.SetDataType(type);
surmeh01bceff2f2018-03-29 16:29:27 +01002348
2349 return outTensorInfo;
2350}
2351
2352ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2353{
2354 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2355
2356 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2357 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2358
2359 TensorInfo outputInfo;
2360 outputInfo = OutputShapeOfSqueeze(nodeDef, inputTensorInfo);
2361
2362 ReshapeDescriptor reshapeDesc;
2363 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2364 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2365 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2366 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2367
2368 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2369}
2370
2371ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2372{
2373 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2374
2375 NormalizationDescriptor normalizationDescriptor;
2376 normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2377 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
2378 normalizationDescriptor.m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef, "alpha");
2379 normalizationDescriptor.m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef, "beta");
2380 normalizationDescriptor.m_K = ReadMandatoryNodeFloatAttribute(nodeDef, "bias");
2381 normalizationDescriptor.m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef, "depth_radius");
ruoyan018174f362018-12-04 18:24:08 +00002382 normalizationDescriptor.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002383
2384 // The window size must be an odd value. For a window size of (2 * n + 1), TensorFlow defines depth_radius = n.
2385 normalizationDescriptor.m_NormSize = normalizationDescriptor.m_NormSize * 2 + 1;
2386
2387 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002388 IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor,
2389 nodeDef.name().c_str());
ruoyan018174f362018-12-04 18:24:08 +00002390 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2391 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
surmeh01bceff2f2018-03-29 16:29:27 +01002392
2393 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2394}
2395
/// An ParsedTfOperation for a MatMul node.
/// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because
/// MatMul nodes are often used for the first part of a biased FullyConnected (MatMul followed
/// by Add) and in these cases armnn doesn't need a separate layer for the MatMul.
///
class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    ParsedMatMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
        : DeferredSingleLayerParsedTfOperation(parser, node)
    {
    }

    /// Invoked when the MatMul output is needed on its own (not fused into a biased
    /// FullyConnected): creates a FullyConnected layer with no bias (nullptr).
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
    }
};
2415
2416ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2417{
telsoa01c577f2c2018-08-31 09:22:23 +01002418 // Defers the creation of the layer (see ParsedMatMulTfOperation).
surmeh01bceff2f2018-03-29 16:29:27 +01002419 return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
2420}
2421
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002422ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2423{
2424 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2425 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2426 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2427
2428 if (inputs.size() != 2)
2429 {
2430 throw ParseException(
2431 boost::str(boost::format("Mean expects two inputs!. Got %1% for Node %2% %3%")
2432 % inputs.size()
2433 % nodeDef.name()
2434 % CHECK_LOCATION().AsString()));
2435 }
2436
2437 bool keepDims = ReadMandatoryNodeBoolAttribute(nodeDef, "keep_dims");
2438
2439 ParsedConstTfOperation<int32_t>* axisNode =
2440 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2441
2442 const TensorInfo& axisTensorInfo = axisNode->GetTensorInfo();
2443
2444 ConstTensor axisTensor(axisTensorInfo, axisNode->GetStorage());
2445 const int* axisData = static_cast<const int*>(axisTensor.GetMemoryArea());
2446
2447 TensorInfo outputTensorInfo;
2448 MeanDescriptor meanDescriptor;
2449 meanDescriptor.m_KeepDims = keepDims;
2450
2451 // Negative axis values are supported so that the process requires
2452 // to convert them into the corresponding positive ones.
2453 // Duplicate values are also removed.
2454 std::vector<int> rawAxisVector(axisData, axisData + axisTensorInfo.GetNumElements());
2455 std::set<unsigned int> positiveAxisSet;
2456 int rank = static_cast<int>(inputTensorInfo.GetNumDimensions());
2457
2458 std::transform(rawAxisVector.begin(), rawAxisVector.end(),
2459 std::inserter(positiveAxisSet, positiveAxisSet.begin()),
2460 [rank](int i) -> unsigned int { return static_cast<unsigned int>((i + rank) % rank); });
2461
2462 CalculateReducedOutputTensoInfo(inputTensorInfo, axisTensorInfo, positiveAxisSet, keepDims, outputTensorInfo);
2463
2464 if (inputTensorInfo.GetNumDimensions() > positiveAxisSet.size())
2465 {
2466 meanDescriptor.m_Axis.assign(positiveAxisSet.begin(), positiveAxisSet.end());
2467 }
2468
2469 IConnectableLayer* layer = m_Network->AddMeanLayer(meanDescriptor, nodeDef.name().c_str());
2470 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2471 inputSlot.Connect(layer->GetInputSlot(0));
2472
2473 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2474}
2475
/// An ParsedTfOperation for a Mul node.
/// Creation of the armnn Mul layer is deferred until it is actually needed, because Mul nodes
/// are also used for the first part of a leaky relu activation function (Mul followed by Maximum)
/// and in these cases armnn doesn't need a separate layer for the Mul.
///
class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    ParsedMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
        : DeferredSingleLayerParsedTfOperation(parser, node)
    {
    }

    /// Invoked when the Mul output is needed on its own (not fused into a leaky relu):
    /// creates a standalone multiplication layer for this node.
    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
    }
};
2495
surmeh01bceff2f2018-03-29 16:29:27 +01002496ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2497{
2498 boost::ignore_unused(graphDef);
2499
telsoa01c577f2c2018-08-31 09:22:23 +01002500 return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002501}
2502
2503ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
2504 const tensorflow::GraphDef& graphDef)
2505{
2506 boost::ignore_unused(graphDef);
2507
2508 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
2509
2510 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
2511
2512 auto it = m_InputShapes.find(nodeDef.name());
2513 if (it == m_InputShapes.end())
2514 {
telsoa01c577f2c2018-08-31 09:22:23 +01002515 throw ParseException(
2516 boost::str(
2517 boost::format(
2518 "Missing input shape for Placeholder '%1%' %2%")
2519 % nodeDef.name()
2520 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002521 }
2522 TensorInfo tensorInfo(it->second, DataType::Float32);
2523
2524 IConnectableLayer* const layer = m_Network->AddInputLayer(layerId, nodeDef.name().c_str());
2525
2526 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2527
2528 TrackInputBinding(layer, layerId, tensorInfo);
2529
2530 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2531}
2532
saoste01bbd40612018-08-28 15:41:51 +01002533ParsedTfOperationPtr TfParser::ParseRealDiv(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2534{
2535 boost::ignore_unused(graphDef);
2536 return AddRealDivLayer(nodeDef);
2537}
2538
surmeh01bceff2f2018-03-29 16:29:27 +01002539ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
2540 const tensorflow::GraphDef& graphDef)
2541{
2542 boost::ignore_unused(graphDef);
2543
2544 ActivationDescriptor activationDesc;
2545 activationDesc.m_Function = ActivationFunction::ReLu;
2546 return AddActivationLayer(nodeDef, activationDesc);
2547}
2548
2549ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
2550 const tensorflow::GraphDef& graphDef)
2551{
2552 boost::ignore_unused(graphDef);
2553
2554 ActivationDescriptor activationDesc;
2555 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2556 activationDesc.m_A = 6.0f;
2557 activationDesc.m_B = 0.0f;
2558
2559 return AddActivationLayer(nodeDef, activationDesc);
2560}
2561
2562ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
2563 const tensorflow::GraphDef& graphDef)
2564{
2565 boost::ignore_unused(graphDef);
2566
2567 ActivationDescriptor activationDesc;
2568 activationDesc.m_Function = ActivationFunction::Sigmoid;
2569
2570 return AddActivationLayer(nodeDef, activationDesc);
2571}
2572
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +00002573ParsedTfOperationPtr TfParser::ParseRsqrt(const tensorflow::NodeDef &nodeDef,
2574 const tensorflow::GraphDef &graphDef)
2575{
2576 boost::ignore_unused(graphDef);
2577
2578 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2579
2580 IConnectableLayer* const layer = m_Network->AddRsqrtLayer(nodeDef.name().c_str());
2581
2582 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2583 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2584 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2585
2586 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2587}
2588
surmeh01bceff2f2018-03-29 16:29:27 +01002589ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
2590 const tensorflow::GraphDef& graphDef)
2591{
2592 boost::ignore_unused(graphDef);
2593
2594 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2595
2596 SoftmaxDescriptor softmaxDescriptor;
2597 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(softmaxDescriptor, nodeDef.name().c_str());
2598
2599 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2600 prevLayerSlot.Connect(layer->GetInputSlot(0));
2601 layer->GetOutputSlot(0).SetTensorInfo(prevLayerSlot.GetTensorInfo());
2602
2603 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2604}
2605
/// Parses a TF Split node into an ArmNN Splitter layer.
/// One input is a constant axis scalar, the other is the tensor to split; the
/// "num_split" attribute gives the number of equally-sized output views.
ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef,
                                          const tensorflow::GraphDef& graphDef)
{
    boost::ignore_unused(graphDef);

    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
    unsigned int numInputs = static_cast<unsigned int>(nodes.size());
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);

    // Constant tensor index
    unsigned int index = GetConstInputIndex(inputs);
    // Get the axis tensor data
    ParsedConstTfOperation<int32_t>* shapeNode =
            boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);

    std::vector<int32_t> axisTensorData;
    shapeNode->GetConstTensor(axisTensorData);

    // This splitDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
    // NOTE(review): a negative TF axis would wrap to a huge unsigned value in this cast —
    // negative axes appear unsupported here; confirm against callers/tests.
    const unsigned int splitDim = static_cast<unsigned int>(axisTensorData[0]);

    // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
    if (splitDim == 0 || splitDim == 2)
    {
        throw armnn::ParseException(
            boost::str(
                boost::format(
                    "Dimension %1% for split is not supported by Armnn. "
                    "Node %2% %3%")
                % splitDim
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    // As Armnn only supports splitter outputs of the same shape, therefore num_split will be limited to an integer.
    uint32_t num_split = ReadMandatoryNodeUint32Attribute(nodeDef, "num_split");

    // The non-constant input (at 1 - index) is the tensor being split.
    IOutputSlot& inputSlot = inputs[1 - index].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1 - index].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    auto inputDimSize = inputTensorInfo.GetNumDimensions();

    // Only tensors with the maximum supported rank (4D) can be split.
    if (inputDimSize != MaxNumOfTensorDimensions)
    {
        throw armnn::ParseException(
            boost::str(
                boost::format(
                    "The number of dimensions: %1% for input tensors of the "
                    "split op should be %2% %3%")
                % inputTensorInfo.GetNumDimensions()
                % MaxNumOfTensorDimensions
                % CHECK_LOCATION().AsString()));
    }

    std::vector<unsigned int> splitterDimSizes(inputDimSize);

    // Add current input shape to splitterDimSizes
    for (unsigned int i = 0; i < inputDimSize; ++i)
    {
        splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
    }

    if (splitterDimSizes[splitDim] % num_split != 0)
    {
        throw ParseException("Number of splits must evenly divide the dimension");
    }
    // Each output view gets an equal share of the split dimension.
    splitterDimSizes[splitDim] /= num_split;

    SplitterDescriptor splitDesc(num_split);
    for (unsigned int g = 0; g < num_split; ++g)
    {
        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
        {
            splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
        }
        // Views are laid out back-to-back along the split dimension.
        splitDesc.SetViewOriginCoord(g, splitDim, splitterDimSizes[splitDim] * g);
    }

    IConnectableLayer *layer = m_Network->AddSplitterLayer(splitDesc, nodeDef.name().c_str());

    inputSlot.Connect(layer->GetInputSlot(0));

    TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
                                       splitterDimSizes.data());

    // All outputs share the same reduced shape and keep the input's data type.
    for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
    {
        layer->GetOutputSlot(i).SetTensorInfo(armnn::TensorInfo(outShape, inputTensorInfo.GetDataType()));
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2699
surmeh01bceff2f2018-03-29 16:29:27 +01002700ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
2701 const tensorflow::GraphDef& graphDef)
2702{
2703 boost::ignore_unused(graphDef);
2704
2705 ActivationDescriptor activationDesc;
2706 activationDesc.m_Function = ActivationFunction::SoftReLu;
2707
2708 return AddActivationLayer(nodeDef, activationDesc);
2709}
2710
2711ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2712{
2713 boost::ignore_unused(graphDef);
2714
2715 ActivationDescriptor activationDesc;
2716 activationDesc.m_Function = ActivationFunction::TanH;
2717 activationDesc.m_A = 1.0f;
2718 activationDesc.m_B = 1.0f;
2719
2720 return AddActivationLayer(nodeDef, activationDesc);
2721}
2722
2723ParsedTfOperationPtr TfParser::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
2724 ActivationDescriptor& activationDesc)
2725{
2726 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2727
2728 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, nodeDef.name().c_str());
2729
2730 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2731 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2732 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2733 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2734}
2735
2736ParsedTfOperationPtr TfParser::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
2737 const tensorflow::GraphDef& graphDef)
2738{
2739 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
2740}
2741
2742ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
2743 const tensorflow::GraphDef& graphDef)
2744{
2745 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
2746}
2747
/// Shared implementation for MaxPool/AvgPool: builds an ArmNN Pooling2d layer from the
/// node's "padding", "data_format", "strides" and "ksize" attributes, computing the
/// output shape with TF's SAME/VALID rules.
ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
                                              const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    if (inputs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "2D Pooling expects one input!. Got %1% for Node %2% %3%")
                % inputs.size()
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
    std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef, "ksize"); // size of pool windows

    Pooling2dDescriptor pooling2dDescriptor;
    pooling2dDescriptor.m_PoolType = pooltype;
    pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
    pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
    pooling2dDescriptor.m_DataLayout = dataLayout;
    // DataLayoutIndexed maps H/W to the right positions for the chosen layout.
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    pooling2dDescriptor.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    pooling2dDescriptor.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
    pooling2dDescriptor.m_PoolWidth = ksize[dataLayoutIndexed.GetWidthIndex()];
    pooling2dDescriptor.m_PoolHeight = ksize[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    if (paddingString == "SAME")
    {
        // SAME: output spans the full input, ceil(input / stride).
        padding = true;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(pooling2dDescriptor.m_StrideY)));
        outputWidth  = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                  static_cast<float>(pooling2dDescriptor.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        // VALID: only fully-covered windows, ceil((input - pool + 1) / stride).
        // NOTE(review): if the pool window exceeded the input extent these unsigned
        // subtractions would wrap — assumed not to happen for well-formed graphs.
        padding = false;

        outputHeight = static_cast<uint32_t>(ceil(
                                                 static_cast<float>(inputHeight - pooling2dDescriptor.m_PoolHeight + 1) /
                                                 static_cast<float>(pooling2dDescriptor.m_StrideY)));
        outputWidth  = static_cast<uint32_t>(ceil(
                                                 static_cast<float>(inputWidth - pooling2dDescriptor.m_PoolWidth + 1) /
                                                 static_cast<float>(pooling2dDescriptor.m_StrideX)));
    }

    // Assemble the output shape in the same layout as the input.
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      inputTensorInfo.GetShape()[3] },
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      inputTensorInfo.GetShape()[1],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

    // Compute explicit per-edge padding for the descriptor ("padding" selects SAME vs VALID).
    CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX,
                pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
    CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY,
                pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);


    IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
    if (layer == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Failed to add pooling2d layer for %1% %2%")
                % nodeDef.name()
                % CHECK_LOCATION().AsString()));
    }

    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2858
/// Shared helper for Add and BiasAdd nodes: wires an ArmNN addition layer,
/// inserting broadcast reshapes for 1D operands where needed and choosing the
/// output tensor info from the (pre-reshape) input ranks.
ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);

    // NOTE: these references describe the ORIGINAL inputs; the slot pointers may be
    // redirected to reshape layers below, but the rank checks further down deliberately
    // use the original infos.
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    if (isBiasAdd)
    {
        // BiasAdd takes bias as a 1D tensor. We need to add a reshape layer to create a 4D tensor
        // with the same data in the correct dimension for broadcast in addition.
        if(input1Info.GetNumDimensions() != 1)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Unsupported bias for BiasAdd. It should be a 1D vector. "
                        "Got %1% dimensions for input %2%. Node %3% %4%")
                    % input1Info.GetNumDimensions()
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
        }

        const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");

        CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
        input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
    }
    else
    {
        // Plain Add: reshape whichever side is 1D so it broadcasts against the other input.
        if (input0Info.GetNumDimensions() == 1)
        {
            const bool isNHWC = true;
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
        }

        if (input1Info.GetNumDimensions() == 1)
        {
            const bool isNHWC = true;
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
        }
    }

    IConnectableLayer* const layer = m_Network->AddAdditionLayer(nodeDef.name().c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // Equal-rank inputs: output shape is the per-dimension max of the two input
    // shapes (broadcast result).
    if (input0Info.GetNumDimensions() == input1Info.GetNumDimensions())
    {
        const TensorShape& input0Shape = input0Info.GetShape();
        const TensorShape& input1Shape = input1Info.GetShape();

        std::vector<unsigned int> outputShape;
        outputShape.reserve(input0Shape.GetNumDimensions());
        TensorInfo outputInfo(input0Info);

        for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
        {
            outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
        }

        outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));

        layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    }
    else if (input0Info.GetNumDimensions() == 1 && isBiasAdd == false)
    {
        // input0 was the broadcast (1D) side, so the output matches input1.
        layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
    }
    else
    {
        layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2940
saoste01bbd40612018-08-28 15:41:51 +01002941ParsedTfOperationPtr TfParser::AddRealDivLayer(const tensorflow::NodeDef& nodeDef)
2942{
2943 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2944
2945 IConnectableLayer* const layer = m_Network->AddDivisionLayer(nodeDef.name().c_str());
2946 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2947 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2948
2949 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2950 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2951
2952
2953 if (input0NumDims < input1NumDims)
2954 {
2955 const bool isNHWC = true;
2956 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
2957 }
2958 if (input1NumDims < input0NumDims)
2959 {
2960 const bool isNHWC = true;
2961 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
2962 }
2963
2964 input0Slot->Connect(layer->GetInputSlot(0));
2965 input1Slot->Connect(layer->GetInputSlot(1));
2966
2967 if (input0NumDims < input1NumDims)
2968 {
2969 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2970 }
2971 else
2972 {
2973 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2974
2975 }
2976 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2977}
2978
Sadik Armagan975c09a2018-12-04 10:02:08 +00002979ParsedTfOperationPtr TfParser::AddMaximumLayer(const tensorflow::NodeDef& nodeDef)
2980{
2981 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2982
2983 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2984 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2985
2986 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2987 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2988
2989 if (input0NumDims < input1NumDims)
2990 {
2991 const bool isNHWC = true;
2992 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
2993 }
2994 if (input1NumDims < input0NumDims)
2995 {
2996 const bool isNHWC = true;
2997 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
2998 }
2999
3000 IConnectableLayer* const layer = m_Network->AddMaximumLayer(nodeDef.name().c_str());
3001
3002 input0Slot->Connect(layer->GetInputSlot(0));
3003 input1Slot->Connect(layer->GetInputSlot(1));
3004
3005 TensorInfo outputInfo = input0Slot->GetTensorInfo();
3006 std::vector<unsigned int> outputShape;
3007
3008 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
3009 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
3010
3011 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
3012 {
3013 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3014 }
3015
3016 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
3017 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3018
3019 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3020}
3021
telsoa01c577f2c2018-08-31 09:22:23 +01003022IConnectableLayer* TfParser::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
3023{
3024 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3025
3026 IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
3027 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3028 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3029
3030 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3031 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3032
3033 if (input0NumDims < input1NumDims)
3034 {
3035 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003036 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01003037 }
3038 if (input1NumDims < input0NumDims)
3039 {
3040 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003041 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01003042 }
3043
3044 input0Slot->Connect(layer->GetInputSlot(0));
3045 input1Slot->Connect(layer->GetInputSlot(1));
3046
3047 if (input0NumDims < input1NumDims)
3048 {
3049 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3050 }
3051 else
3052 {
3053 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3054 }
3055 return layer;
3056}
3057
surmeh01bceff2f2018-03-29 16:29:27 +01003058IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
3059 const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
3060{
telsoa01c577f2c2018-08-31 09:22:23 +01003061 // Finds bias const (if applicable).
surmeh01bceff2f2018-03-29 16:29:27 +01003062 ParsedConstTfOperation<float>* biasNode = nullptr;
3063 if (addNodeDef != nullptr)
3064 {
3065 std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
telsoa01c577f2c2018-08-31 09:22:23 +01003066 // Finds our inputs.
surmeh01bceff2f2018-03-29 16:29:27 +01003067 if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
3068 {
3069 biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
3070 }
3071 else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
3072 {
3073 biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
3074 }
3075 else
3076 {
telsoa01c577f2c2018-08-31 09:22:23 +01003077 throw ParseException(
3078 boost::str(
3079 boost::format(
3080 "ArmNN only supports fully connected layers with constant bias. "
3081 "Inputs %1% and %2%. AddNode %3%. MatMulNode %4% %5%")
3082 % addInputs[0].m_IndexedValue->GetNode().name()
3083 % addInputs[1].m_IndexedValue->GetNode().name()
3084 % addNodeDef->name()
3085 % matMulNodeDef.name()
3086 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003087 }
3088 }
3089
telsoa01c577f2c2018-08-31 09:22:23 +01003090 // Finds matmul inputs.
surmeh01bceff2f2018-03-29 16:29:27 +01003091 ParsedConstTfOperation<float>* weightNode = nullptr;
3092 ParsedTfOperation* inputNode = nullptr;
3093 unsigned int inputIdx = 0;
3094 std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
3095 if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
3096 {
3097 weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
3098 inputNode = mulInputs[1].m_IndexedValue;
3099 inputIdx = mulInputs[1].m_Index;
3100 }
3101 else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
3102 {
3103 weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
3104 inputNode = mulInputs[0].m_IndexedValue;
3105 inputIdx = mulInputs[0].m_Index;
3106 }
3107 else
3108 {
telsoa01c577f2c2018-08-31 09:22:23 +01003109 throw ParseException(
3110 boost::str(
3111 boost::format(
3112 "ArmNN only supports fully connected layers with constant weights. "
3113 "Inputs %1% and %2%. MatMulNode %3% %4%")
3114 % mulInputs[0].m_IndexedValue->GetNode().name()
3115 % mulInputs[1].m_IndexedValue->GetNode().name()
3116 % matMulNodeDef.name()
3117 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003118 }
3119
3120 std::vector<float> weightTensorData;
telsoa01c577f2c2018-08-31 09:22:23 +01003121 // Handles weight.
Matteo Martincigh482ca852018-12-12 09:20:55 +00003122 ConstTensor weights = weightNode->GetConstTensor(weightTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01003123
3124 FullyConnectedDescriptor desc;
3125 desc.m_BiasEnabled = addNodeDef != nullptr;
3126
3127 IConnectableLayer* layer = nullptr;
telsoa01c577f2c2018-08-31 09:22:23 +01003128 // Makes the layer.
surmeh01bceff2f2018-03-29 16:29:27 +01003129 if (addNodeDef != nullptr)
3130 {
3131 std::vector<float> biasTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00003132 ConstTensor biases = biasNode->GetConstTensor(biasTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01003133
3134 if (weights.GetShape()[1] != biases.GetShape()[0])
3135 {
telsoa01c577f2c2018-08-31 09:22:23 +01003136 throw ParseException(
3137 boost::str(
3138 boost::format(
3139 "Shape of matmul weights and bias do not match. "
3140 "AddNode %1%. MatMulNode %2% %3%")
3141 % addNodeDef->name()
3142 % matMulNodeDef.name()
3143 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003144 }
3145
3146 layer = m_Network->AddFullyConnectedLayer(desc, weights, biases, armnnLayerName);
3147 }
3148 else
3149 {
3150 layer = m_Network->AddFullyConnectedLayer(desc, weights, armnnLayerName);
3151 }
3152
3153 BOOST_ASSERT(layer != nullptr);
3154
3155 inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
3156 unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];
3157
telsoa01c577f2c2018-08-31 09:22:23 +01003158 // Handles output.
surmeh01bceff2f2018-03-29 16:29:27 +01003159 TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
3160 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3161 return layer;
3162}
3163
3164void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
3165{
telsoa01c577f2c2018-08-31 09:22:23 +01003166 // Gets the type of the node (assume float).
surmeh01bceff2f2018-03-29 16:29:27 +01003167 tensorflow::DataType type = tensorflow::DT_FLOAT;
3168 if (nodeDef.attr().count("T") != 0)
3169 {
3170 auto attr = nodeDef.attr().at("T");
3171 type = attr.type();
3172 }
3173 else if (nodeDef.attr().count("dtype") != 0)
3174 {
3175 auto attr = nodeDef.attr().at("dtype");
3176 type = attr.type();
3177 }
3178
Ferran Balaguerc602f292019-02-08 17:09:55 +00003179 if ((type != tensorflow::DT_FLOAT && type != tensorflow::DT_INT32) && nodeDef.op() != "Const")
surmeh01bceff2f2018-03-29 16:29:27 +01003180 {
telsoa01c577f2c2018-08-31 09:22:23 +01003181 throw ParseException(
3182 boost::str(
3183 boost::format(
Ferran Balaguerc602f292019-02-08 17:09:55 +00003184 "Currently only FLOAT and INT32 are supported for tensorflow nodes (apart from Const). "
telsoa01c577f2c2018-08-31 09:22:23 +01003185 "Got %1% for Node %2% %3%")
3186 % tensorflow::DataType_Name(type)
3187 % nodeDef.name()
3188 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003189 }
3190
3191 const std::string& operation = nodeDef.op();
narpra016f37f832018-12-21 18:30:00 +00003192 auto itControlInput = std::find(m_ControlInputs.begin(), m_ControlInputs.end(), operation);
3193 if (itControlInput != m_ControlInputs.end())
3194 {
3195 // We currently allow Control Input from TensorFlow graph but we ignore them from ArmNN graph.
3196 return;
3197 }
surmeh01bceff2f2018-03-29 16:29:27 +01003198 auto it = ms_OperationNameToParsingFunctions.find(operation);
3199 if (it != ms_OperationNameToParsingFunctions.end())
3200 {
3201 auto func = it->second;
3202 ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
3203 ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
3204
telsoa01c577f2c2018-08-31 09:22:23 +01003205 // Stores the parsed operation so that dependent layers can connect to it.
surmeh01bceff2f2018-03-29 16:29:27 +01003206 auto it = m_ParsedTfOperations.find(nodeDef.name());
3207 if (it != m_ParsedTfOperations.end())
3208 {
3209 throw ParseException(boost::str(boost::format("Name %1% used by more than one node") % nodeDef.name()));
3210 }
3211 m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
3212
telsoa01c577f2c2018-08-31 09:22:23 +01003213 // If this node was requested as an output from the network, then adds an ArmNN output layer.
surmeh01bceff2f2018-03-29 16:29:27 +01003214 if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
3215 m_RequestedOutputs.end())
3216 {
3217 auto outId = ParseOutputId(nodeDef.name());
3218 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
3219 IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
3220
3221 TensorInfo tensorInfo = prevSlot.GetTensorInfo();
3222
3223 IConnectableLayer* outputLayer = m_Network->AddOutputLayer(layerId, nodeDef.name().c_str());
3224
3225 prevSlot.Connect(outputLayer->GetInputSlot(0));
3226
3227 TrackOutputBinding(outputLayer, layerId, tensorInfo);
3228 }
3229 }
3230 else
3231 {
telsoa01c577f2c2018-08-31 09:22:23 +01003232 throw ParseException(
3233 boost::str(
3234 boost::format(
3235 "Unsupported operation %1% in tensorflow::GraphDef %2%")
3236 % operation
3237 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003238 }
3239}
3240
3241void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
3242{
telsoa01c577f2c2018-08-31 09:22:23 +01003243 // Adds all nodes to our map.
surmeh01bceff2f2018-03-29 16:29:27 +01003244 m_NodesByName.clear();
3245 m_NetworkInputsBindingInfo.clear();
3246 m_NetworkOutputsBindingInfo.clear();
3247
3248 for (int i = 0; i < graphDef.node_size(); ++i)
3249 {
3250 const tensorflow::NodeDef& node = graphDef.node(i);
3251 m_NodesByName[node.name()] = &node;
3252 }
3253
telsoa01c577f2c2018-08-31 09:22:23 +01003254 // Finds the output nodes the user requested.
surmeh01bceff2f2018-03-29 16:29:27 +01003255 std::vector<const tensorflow::NodeDef*> targetNodes;
3256 for (const std::string& requestedOutputName : m_RequestedOutputs)
3257 {
3258 auto nodeIt = m_NodesByName.find(requestedOutputName);
3259 if (nodeIt == m_NodesByName.end())
3260 {
telsoa01c577f2c2018-08-31 09:22:23 +01003261 throw ParseException(
3262 boost::str(
3263 boost::format(
3264 "Couldn't find requested output node '%1%' in graph %2%")
3265 % requestedOutputName
3266 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003267 }
3268 targetNodes.push_back(nodeIt->second);
3269 }
3270
telsoa01c577f2c2018-08-31 09:22:23 +01003271 // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01003272 std::vector<const tensorflow::NodeDef*> sortedNodes;
3273 if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
3274 targetNodes,
3275 [this](const tensorflow::NodeDef* node)
3276 {
3277 auto outputs = GetTfInputNodes(*node);
3278 std::vector<const tensorflow::NodeDef*> nodesOnly;
3279 for (const auto & o : outputs) {
3280 nodesOnly.push_back(o.m_IndexedValue);
3281 }
3282 return nodesOnly;
3283 },
3284 sortedNodes))
3285 {
telsoa01c577f2c2018-08-31 09:22:23 +01003286 throw ParseException(
3287 boost::str(
3288 boost::format(
3289 "Cycle detected in graph %1%")
3290 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003291 }
3292
telsoa01c577f2c2018-08-31 09:22:23 +01003293 // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01003294 for (const auto& it : sortedNodes)
3295 {
3296 const tensorflow::NodeDef& currentNode = *it;
3297 LoadNodeDef(currentNode, graphDef);
3298 }
3299}
3300
3301INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
3302 const std::map<std::string, TensorShape>& inputShapes,
3303 const std::vector<std::string>& requestedOutputs)
3304{
3305 FILE* fd = fopen(graphFile, "r");
3306
3307 if (fd == nullptr)
3308 {
telsoa01c577f2c2018-08-31 09:22:23 +01003309 throw FileNotFoundException(
3310 boost::str(
3311 boost::format(
3312 "Graph file %1% failed to open %2%")
3313 % graphFile
3314 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003315 }
3316
telsoa01c577f2c2018-08-31 09:22:23 +01003317 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003318 tensorflow::GraphDef graphDef;
3319 auto input = new google::protobuf::io::FileInputStream(fileno(fd));
3320 bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
3321 delete input;
3322 fclose(fd);
3323
3324 if (!success)
3325 {
telsoa01c577f2c2018-08-31 09:22:23 +01003326 throw ParseException(
3327 boost::str(
3328 boost::format(
3329 "Failed to parse graph file %1%")
3330 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003331 }
3332
3333 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3334}
3335
3336INetworkPtr TfParser::CreateNetworkFromString(const char* protoText,
3337 const std::map<std::string, TensorShape>& inputShapes,
3338 const std::vector<std::string>& requestedOutputs)
3339{
telsoa01c577f2c2018-08-31 09:22:23 +01003340 // Parses the string into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003341 tensorflow::GraphDef graphDef;
3342 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
3343
3344 if (!success)
3345 {
telsoa01c577f2c2018-08-31 09:22:23 +01003346 throw ParseException(
3347 boost::str(
3348 boost::format(
3349 "Failed to parse graph file %1%")
3350 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003351 }
3352
3353 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3354}
3355
3356INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
3357 const std::map<std::string, TensorShape>& inputShapes,
3358 const std::vector<std::string>& requestedOutputs)
3359{
3360 FILE* fd = fopen(graphFile, "rb");
3361
3362 if (fd == nullptr)
3363 {
telsoa01c577f2c2018-08-31 09:22:23 +01003364 throw FileNotFoundException(
3365 boost::str(
3366 boost::format(
3367 "Graph file %1% failed to open %2%")
3368 % graphFile
3369 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003370 }
3371
telsoa01c577f2c2018-08-31 09:22:23 +01003372 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003373 tensorflow::GraphDef graphDef;
3374
3375 google::protobuf::io::FileInputStream inStream(fileno(fd));
3376 google::protobuf::io::CodedInputStream codedStream(&inStream);
3377 codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
3378 bool success = graphDef.ParseFromCodedStream(&codedStream);
3379 fclose(fd);
3380
3381 if (!success)
3382 {
telsoa01c577f2c2018-08-31 09:22:23 +01003383 throw ParseException(
3384 boost::str(
3385 boost::format(
3386 "Failed to parse protobuf file %1% %2%")
3387 % graphFile
3388 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003389 }
3390
3391 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3392}
3393
3394INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
3395 const std::map<std::string, TensorShape>& inputShapes,
3396 const std::vector<std::string>& requestedOutputs)
3397{
3398 m_Network = INetwork::Create();
3399
3400 m_InputShapes = inputShapes;
3401 if (requestedOutputs.size() == 0)
3402 {
telsoa01c577f2c2018-08-31 09:22:23 +01003403 throw ParseException(
3404 boost::str(
3405 boost::format(
3406 "requestedOutputs must have at least one entry %1%")
3407 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003408 }
3409 m_RequestedOutputs = requestedOutputs;
3410
3411 try
3412 {
3413 LoadGraphDef(graphDef);
3414 }
3415 catch (const ParseException& e)
3416 {
3417 Cleanup();
3418 throw e;
3419 }
3420
3421 Cleanup();
3422
3423 return std::move(m_Network);
3424}
3425
3426void TfParser::Cleanup()
3427{
telsoa01c577f2c2018-08-31 09:22:23 +01003428 // Cleanup, in case we reuse this parser.
surmeh01bceff2f2018-03-29 16:29:27 +01003429 m_InputShapes.clear();
3430 m_RequestedOutputs.clear();
3431 m_NodesByName.clear();
3432 m_ParsedTfOperations.clear();
3433}
3434
// Returns the binding info (layer id and tensor info) for the named network
// input; throws InvalidArgumentException (via GetBindingInfo) if unknown.
BindingPointInfo TfParser::GetNetworkInputBindingInfo(const std::string& name) const
{
    return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
}
3439
// Returns the binding info (layer id and tensor info) for the named network
// output; throws InvalidArgumentException (via GetBindingInfo) if unknown.
BindingPointInfo TfParser::GetNetworkOutputBindingInfo(const std::string& name) const
{
    return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
}
3444
3445std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(const std::string& layerName,
3446 const char* bindingPointDesc,
3447 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3448{
3449 auto it = nameToBindingInfo.find(layerName);
3450 if (it == nameToBindingInfo.end())
3451 {
telsoa01c577f2c2018-08-31 09:22:23 +01003452 throw InvalidArgumentException(
3453 boost::str(
3454 boost::format(
3455 "Unknown %1% '%2%' %3%")
3456 % bindingPointDesc
3457 % layerName
3458 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003459 }
3460 return it->second;
3461}
3462
// Records the binding point for a network input layer; throws ParseException
// (via TrackBindingPoint) if the layer name was already registered.
void TfParser::TrackInputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
{
    return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
}
3467
// Records the binding point for a network output layer; throws ParseException
// (via TrackBindingPoint) if the layer name was already registered.
void TfParser::TrackOutputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
{
    return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
}
3472
3473void TfParser::TrackBindingPoint(IConnectableLayer* layer,
3474 LayerBindingId id,
3475 const TensorInfo& tensorInfo,
3476 const char* bindingPointDesc,
3477 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3478{
3479 const std::string layerName = layer->GetName();
3480 auto it = nameToBindingInfo.find(layerName);
3481 if (it == nameToBindingInfo.end())
3482 {
3483 nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
3484 }
3485 else
3486 {
telsoa01c577f2c2018-08-31 09:22:23 +01003487 throw ParseException(
3488 boost::str(
3489 boost::format(
3490 "Id %1% used by more than one %2% layer %3%")
3491 % id
3492 % bindingPointDesc
3493 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003494 }
3495}
3496
3497} // namespace armnnTfParser