//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "TfParser.hpp"

#include <armnn/INetwork.hpp>
#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/Descriptors.hpp>

#include <GraphTopologicalSort.hpp>
#include <ParserHelper.hpp>
#include <Permute.hpp>
#include <VerificationHelpers.hpp>
#include <DataLayoutIndexed.hpp>

#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/text_format.h>

#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"

#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/log/trivial.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/polymorphic_cast.hpp>

#include <memory>
#include <sstream>
#include <numeric>
#include <functional>

using namespace armnnUtils;
using namespace armnn;

namespace armnnTfParser
{
namespace
{

const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };


template <typename Callable>
void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
                                    const std::string& attribName,
                                    tensorflow::AttrValue::ValueCase expectedValueCase,
                                    Callable callable)
{
    auto iter = nodeDef.attr().find(attribName);
    if (iter != nodeDef.attr().end())
    {
        const auto& attrValue = iter->second;
        if (attrValue.value_case() == expectedValueCase)
        {
            callable(attrValue);
        }
        else
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
                        "but found %4% instead %5%")
                        % attribName
                        % nodeDef.name()
                        % static_cast<int>(expectedValueCase)
                        % static_cast<int>(attrValue.value_case())
                        % CHECK_LOCATION().AsString()));
        }
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Could not find required attribute %1% in node %2% %3%")
                    % attribName
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
}

template <typename Callable>
void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
                                   const std::string& attribName,
                                   tensorflow::AttrValue::ValueCase expectedValueCase,
                                   Callable callable)
{
    auto iter = nodeDef.attr().find(attribName);
    if (iter != nodeDef.attr().end())
    {
        const auto& attrValue = iter->second;
        if (attrValue.value_case() == expectedValueCase)
        {
            callable(attrValue);
        }
        else
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
                        "but found %4% instead %5%")
                        % attribName
                        % nodeDef.name()
                        % static_cast<int>(expectedValueCase)
                        % static_cast<int>(attrValue.value_case())
                        % CHECK_LOCATION().AsString()));
        }
    }
}

float ReadMandatoryNodeFloatAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    float attribValue = 0.0f;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
        [&attribValue](const tensorflow::AttrValue& attrValue)
        {
            attribValue = attrValue.f();
        });
    return attribValue;
}

int32_t ReadMandatoryNodeInt32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    int32_t attribValue = 0u;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
        [&attribValue](const tensorflow::AttrValue& attrValue)
        {
            attribValue = static_cast<int32_t>(attrValue.i());
        });
    return attribValue;
}

uint32_t ReadMandatoryNodeUint32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    uint32_t attribValue = 0u;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
        [&attribValue](const tensorflow::AttrValue& attrValue)
        {
            attribValue = static_cast<uint32_t>(attrValue.i());
        });
    return attribValue;
}

std::string ReadMandatoryNodeStringAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    std::string attribValue = "";
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
        [&attribValue](const tensorflow::AttrValue& attrValue)
        {
            attribValue = attrValue.s();
        });
    return attribValue;
}

std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
                                                           const std::string& name)
{
    std::vector<uint32_t> attriList;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
        [&attriList](const tensorflow::AttrValue& attrValue)
        {
            for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
            {
                attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
            }
        });

    return attriList;
}

std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
                                                          const std::string& name)
{
    std::vector<uint32_t> attriList;
    ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
        [&attriList](const tensorflow::AttrValue& attrValue)
        {
            for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
            {
                attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
            }
        });

    return attriList;
}

bool ReadOptionalNodeBoolAttribute(const tensorflow::NodeDef& nodeDef,
                                   const std::string& name,
                                   bool defaultValue = false)
{
    bool attribValue = defaultValue;
    ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
        [&attribValue](const tensorflow::AttrValue& attrValue)
        {
            attribValue = attrValue.b();
        });
    return attribValue;
}

tensorflow::DataType ReadMandatoryNodeTypeAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    tensorflow::DataType attribValue = tensorflow::DT_INVALID;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
        [&attribValue](const tensorflow::AttrValue& attrValue)
        {
            attribValue = attrValue.type();
        });
    return attribValue;
}

TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& targetDims)
{
    std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
    const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);

    if (stretchDim != targetDims.end())
    {
        if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "At most one component of shape can be -1 %1%")
                        % CHECK_LOCATION().AsString()));
        }

        auto targetNumElements =
            boost::numeric_cast<unsigned int>(
                std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
        auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
        outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
    }

    TensorInfo reshapeInfo = input;
    reshapeInfo.SetShape(TensorShape{ static_cast<unsigned int>(outDims.size()), outDims.data() });

    return reshapeInfo;
}
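// PrepareReshape example: for an input with 24 elements and targetDims = { -1, 4 },
// the accumulated product is (-1 * -1 * 4) = 4, so the -1 is stretched to 24 / 4 = 6
// and the resulting shape is { 6, 4 }.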

// We need the input0Slot to guide the reshape for input1Slot.
IOutputSlot* AddBroadcastReshapeLayer(IOutputSlot* input0Slot, IOutputSlot* input1Slot, bool isNHWC,
                                      INetwork& m_Network, const tensorflow::NodeDef& nodeDef)
{
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();
    const TensorInfo inputTensorInfo = input0Slot->GetTensorInfo();
    const unsigned int matchDim = inputTensorInfo.GetNumDimensions() - (isNHWC ? 1 : 3);
    std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
    std::fill_n(reshapedDimensions.begin(), inputTensorInfo.GetNumDimensions(), 1);
    reshapedDimensions[matchDim] = input1Info.GetShape()[0];

    armnn::TensorInfo reshapedInfo = input1Info;
    reshapedInfo.SetShape(TensorShape{ inputTensorInfo.GetNumDimensions(), reshapedDimensions.data() });

    const std::string reshapeLayerName = "reshape_for-" + nodeDef.name();
    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
    IConnectableLayer* const reshapeLayer = m_Network.AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());

    input1Slot->Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    input1Slot = &reshapeLayer->GetOutputSlot(0);

    return input1Slot;
}
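// AddBroadcastReshapeLayer example: with an NHWC input0 of shape { 1, 4, 4, 3 } and a
// 1-D input1 of shape { 3 }, matchDim selects the channel dimension, so input1 is
// reshaped to { 1, 1, 1, 3 } and can then be broadcast element-wise against input0.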

OutputId ParseOutputId(const std::string & name)
{
    unsigned int outputNum = 0;
    size_t colonPos = name.find_last_of(":");
    if (colonPos != std::string::npos)
    {
        int n = std::stoi(name.substr(colonPos+1));
        if (n<0 || n>100)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Output tensor id is out of range for %1% %2%")
                        % name
                        % CHECK_LOCATION().AsString()));
        }
        outputNum = static_cast<unsigned int>(n);
    }
    return OutputId(name.substr(0,colonPos),outputNum);
}
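// ParseOutputId example: "conv1/BiasAdd:1" yields OutputId("conv1/BiasAdd", 1), while an
// input name without a colon, such as "conv1/BiasAdd", defaults to output index 0.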

#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \
    if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Unsupported data format %1% passed for %2% node %3%. " \
                    "Only NHWC and NCHW supported %4%") \
                    % FORMAT \
                    % NODE_TYPE \
                    % NODE_DEF.name() \
                    % CHECK_LOCATION().AsString())); \
    }

#define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
    if(PADDING != "SAME" && PADDING != "VALID" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Only 'SAME' and 'VALID' padding supported. Got %1% for %2% %3%") \
                    % PADDING \
                    % NODE_DEF.name() \
                    % CHECK_LOCATION().AsString())); \
    } \

} // namespace

const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
    { "Const", &TfParser::ParseConst },
    { "Add", &TfParser::ParseAdd },
    { "AddN", &TfParser::ParseAddN },
    { "BiasAdd", &TfParser::ParseBiasAdd },
    { "Identity", &TfParser::ParseIdentity },
    { "Conv2D", &TfParser::ParseConv2D },
    { "DepthwiseConv2dNative", &TfParser::ParseDepthwiseConv2D },
    { "ExpandDims", &TfParser::ParseExpandDims },
    { "FusedBatchNorm", &TfParser::ParseFusedBatchNorm },
    { "Greater", &TfParser::ParseGreater },
    { "ConcatV2", &TfParser::ParseConcat },
    { "LRN", &TfParser::ParseLrn },
    { "MatMul", &TfParser::ParseMatMul },
    { "Mul", &TfParser::ParseMul },
    { "Placeholder", &TfParser::ParsePlaceholder },
    { "RealDiv", &TfParser::ParseRealDiv },
    { "Relu", &TfParser::ParseRelu },
    { "Relu6", &TfParser::ParseRelu6 },
    { "Reshape", &TfParser::ParseReshape },
    { "ResizeBilinear", &TfParser::ParseResizeBilinear },
    { "Shape", &TfParser::ParseShape },
    { "Squeeze", &TfParser::ParseSqueeze },
    { "Sigmoid", &TfParser::ParseSigmoid },
    { "Softmax", &TfParser::ParseSoftmax },
    { "Softplus", &TfParser::ParseSoftplus },
    { "Split", &TfParser::ParseSplit },
    { "Tanh", &TfParser::ParseTanh },
    { "MaxPool", &TfParser::ParseMaxPool },
    { "AvgPool", &TfParser::ParseAvgPool },
    { "Maximum", &TfParser::ParseMaximum },
    { "Minimum", &TfParser::ParseMinimum },
    { "Equal", &TfParser::ParseEqual },
    { "Pad", &TfParser::ParsePad },
    { "Sub", &TfParser::ParseSub }
};

const std::list<std::string> TfParser::m_ControlInputs = {
    "Assert"
};

ITfParser* ITfParser::CreateRaw()
{
    return new TfParser();
}

ITfParserPtr ITfParser::Create()
{
    return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
}

void ITfParser::Destroy(ITfParser* parser)
{
    delete parser;
}

inline void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
                                 uint32_t filterSize, bool samePadding,
                                 uint32_t* paddingFront, uint32_t* paddingBack) {
    *paddingFront = 0;
    *paddingBack = 0;

    if (samePadding) {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t temp = (outputSize - 1) * stride + filterSize;
        if (temp > inputSize) {
            *paddingFront = (temp - inputSize) / 2;
            *paddingBack = (temp - inputSize) - *paddingFront;
        }
    }
}

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 bool samePadding)
{
    CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
}
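// CalculateSamePadding example: for inputSize = 7, filterSize = 3, stride = 2 with SAME
// padding, the output size is ceil(7 / 2) = 4, so the padded extent is (4 - 1) * 2 + 3 = 9
// and the 2 extra elements are split as paddingFront = 1, paddingBack = 1.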

/// An abstract base class which represents a single TensorFlow operation (node)
/// that has been (potentially partially) converted to ArmNN.
/// It may not yet have been fully converted into actual ArmNN layers.
class ParsedTfOperation
{
public:
    ParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
    : m_Parser(parser)
    , m_Node(node)
    {
    }

    virtual ~ParsedTfOperation() {};

    const tensorflow::NodeDef& GetNode() const { return m_Node; }

    /// Gets the ArmNN IOutputSlot corresponding to the given output index of the TensorFlow operation.
    /// This may result in the creation of ArmNN layers if this was deferred (e.g. see ParsedConstTfOperation).
    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) = 0;

    /// If this operation is an Identity then this will return the 'parent' operation (recursively).
    virtual ParsedTfOperation* ResolveIdentityOperations()
    {
        return this;
    }

protected:
    TfParser* m_Parser;
    const tensorflow::NodeDef& m_Node;
};

/// A ParsedTfOperation where the ArmNN equivalent is a single layer,
/// with output slots that correspond directly to the Tf node outputs.
class SingleLayerParsedTfOperation : public ParsedTfOperation
{
public:
    SingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node, IConnectableLayer* layer)
    : ParsedTfOperation(parser, node)
    , m_Layer(layer)
    {
    }

    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        BOOST_ASSERT(m_Layer);
        // Assumes one-to-one mapping between Tf and ArmNN output slots.
        unsigned int armnnOutputSlotIdx = tfOutputIndex;
        if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "The requested output slot #%1% "
                        "for %2% does not exist %3%")
                        % armnnOutputSlotIdx
                        % m_Layer->GetName()
                        % CHECK_LOCATION().AsString()));
        }
        return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
    }

protected:
    IConnectableLayer* m_Layer;
};

/// A SingleLayerParsedTfOperation for deferred layer creation.
class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
{
public:
    DeferredSingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
    : SingleLayerParsedTfOperation(parser, node, nullptr)
    {
    }

    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        if (!m_Layer)
        {
            CreateLayerDeferred();
        }
        return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
    }

private:
    virtual void CreateLayerDeferred() = 0;
};


TfParser::TfParser()
    : m_Network(nullptr, nullptr)
{
}


const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
{
    if (nodeDef->op() != "Identity")
    {
        return nodeDef;
    }

    if (nodeDef->input_size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Identity node should have a single input! %1% has %2% inputs %3%")
                    % nodeDef->name()
                    % nodeDef->input_size()
                    % CHECK_LOCATION().AsString()));
    }

    auto it = m_NodesByName.find(nodeDef->input(0));
    if (it != m_NodesByName.end())
    {
        const tensorflow::NodeDef* inputNode = it->second;
        return ResolveIdentityNode(inputNode);
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Cannot find what the Identity node %1% is linked to! %2%")
                    % nodeDef->name()
                    % CHECK_LOCATION().AsString()));
    }
}

std::vector<OutputOfConstNodeDef>
TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
{
    std::vector<OutputOfConstNodeDef> ret;

    if (nodeDef.op() == "Const")
    {
        // For some reason a Const node can have "Control Inputs". We ignore them for now.
        return ret;
    }

    ret.reserve(boost::numeric_cast<size_t>(nodeDef.input_size()));
    for (int j = 0; j < nodeDef.input_size(); ++j)
    {
        OutputId outputId = ParseOutputId(nodeDef.input(j));

        if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
        {
            // We currently allow Control Inputs from the TensorFlow graph, but we ignore them in the ArmNN graph.
            continue;
        }

        auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
        if (inputIt == m_NodesByName.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Can't find node '%1%', which is listed as an input of '%2%' %3%")
                        % nodeDef.input(j)
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
        ret.push_back(OutputOfConstNodeDef(inputIt->second, outputId.m_Index));
    }

    return ret;
}

std::vector<OutputOfParsedTfOperation>
TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
                                            std::size_t expectedNumInputs)
{
    // Fetches the TensorFlow nodes connected as inputs and validates the size.
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
    const std::size_t numInputs = nodes.size();
    if (numInputs != expectedNumInputs)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Unexpected number of inputs for node %1%. Expected %2%, found %3% %4%")
                    % nodeDef.name()
                    % expectedNumInputs
                    % numInputs
                    % CHECK_LOCATION().AsString()));
    }
    // Fetches the corresponding ParsedTfOperation operations.
    std::vector<OutputOfParsedTfOperation> result;
    for (auto&& node : nodes)
    {
        auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
        if (it == m_ParsedTfOperations.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Node with name '%1%' has not been parsed %2%")
                        % node.m_IndexedValue->name()
                        % CHECK_LOCATION().AsString()));
        }
        ParsedTfOperation* parsedOp = it->second.get();
        // Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
        parsedOp = parsedOp->ResolveIdentityOperations();
        result.push_back(OutputOfParsedTfOperation(parsedOp, node.m_Index));
    }
    return result;
}
IConnectableLayer* TfParser::CreateAdditionLayer(
    const tensorflow::NodeDef& nodeDef,
    IOutputSlot* input0Slot,
    IOutputSlot* input1Slot,
    const std::string& layerName)
{
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    const unsigned int input0Dim = input0Info.GetNumDimensions();
    const unsigned int input1Dim = input1Info.GetNumDimensions();
    if (input0Dim != input1Dim)
    {
        // Broadcasting where input0 and input1 have a different number of dimensions
        // is only supported for a 1D and 4D tensor pair.
        if (input0Dim == 1 && input1Dim == 4)
        {
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
        }
        else if (input0Dim == 4 && input1Dim == 1)
        {
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
        }
        else
        {
            throw ParseException(
                boost::str(
                    boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
                        % layerName
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
    }
    IConnectableLayer* const layer = m_Network->AddAdditionLayer(layerName.c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // Ensure the output tensor has the correct dimensions even if a broadcast has been done.
    TensorInfo outputInfo = input0Slot->GetTensorInfo();
    std::vector<unsigned int> outputShape;

    const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
    const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();

    for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
    {
        outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
    }

    outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return layer;
}

IConnectableLayer* TfParser::CreateAdditionLayer(
    const tensorflow::NodeDef& nodeDef,
    IConnectableLayer* layerOne,
    IConnectableLayer* layerTwo,
    unsigned int numberOfAddition,
    unsigned long numberOfLayersToConnect,
    bool isOdd)
{
    IOutputSlot* input0Slot = &layerOne->GetOutputSlot(0);
    IOutputSlot* input1Slot = &layerTwo->GetOutputSlot(0);
    std::string layerName(nodeDef.name());
    if (isOdd || numberOfLayersToConnect != 2)
    {
        // We are not connecting the final layer.
        layerName.append("_addN_").append(std::to_string(numberOfAddition));
    }
    return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
}

IConnectableLayer* TfParser::CreateAdditionLayer(
    const tensorflow::NodeDef& nodeDef,
    const OutputOfParsedTfOperation& opOne,
    const OutputOfParsedTfOperation& opTwo,
    unsigned int numberOfAddition)
{
    IOutputSlot* input0Slot = &opOne.m_IndexedValue->ResolveArmnnOutputSlot(opOne.m_Index);
    IOutputSlot* input1Slot = &opTwo.m_IndexedValue->ResolveArmnnOutputSlot(opTwo.m_Index);
    std::string layerName(nodeDef.name());
    layerName.append("_addN_").append(std::to_string(numberOfAddition));
    return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
}

IConnectableLayer* TfParser::CreateAdditionLayer(
    const tensorflow::NodeDef& nodeDef,
    const OutputOfParsedTfOperation& op,
    IConnectableLayer* layer)
{
    IOutputSlot* input0Slot = &op.m_IndexedValue->ResolveArmnnOutputSlot(op.m_Index);
    IOutputSlot* input1Slot = &layer->GetOutputSlot(0);
    return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, nodeDef.name());
}

ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
    if (numberOfInputs < 2)
    {
        // Should never happen.
        throw ParseException(
            boost::str(
                boost::format(
                    "AddN Node with name '%1%' has less than two (%2%) inputs %3%")
                    % nodeDef.name()
                    % std::to_string(numberOfInputs)
                    % CHECK_LOCATION().AsString()));
    }
    else if (numberOfInputs == 2)
    {
        // This is the same as a simple Add operation.
        return AddAdditionLayer(nodeDef, false);
    }
    else
    {
        // Build a binary tree of Add layers and return the final Add as the return from the function.
        // If we have an odd number of inputs then the final Add will consist of a layer connecting to an
        // OutputOfParsedTfOperation, otherwise it will be two layers being added together.
        std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numberOfInputs);
        unsigned int numberOfAdditions = 0;
        std::vector<IConnectableLayer*> layers;
        // NOTE: at this point we will have a minimum of three inputs.
        for (unsigned int i = 0; i < numberOfInputs; ++i)
        {
            // Every time i is odd we have two inputs to process.
            bool onSecondItem = i % 2;
            if (onSecondItem)
            {
                ++numberOfAdditions;
                IConnectableLayer* newLayer = CreateAdditionLayer(
                    nodeDef, inputs[i - 1], inputs[i], numberOfAdditions);
                layers.push_back(newLayer);
            }
        }

        std::vector<IConnectableLayer*> layersToConnect(layers);
        unsigned long numberOfLayersToConnect = layersToConnect.size();
        bool isOdd = numberOfInputs % 2;

        while (numberOfLayersToConnect > 1)
        {
            layers.clear();
            for (unsigned long i = 0; i < numberOfLayersToConnect; ++i)
            {
                bool onSecondItem = i % 2;
                if (onSecondItem)
                {
                    ++numberOfAdditions;
                    IConnectableLayer* newLayer = CreateAdditionLayer(
                        nodeDef,
                        layersToConnect[i - 1],
                        layersToConnect[i],
                        numberOfAdditions,
                        numberOfLayersToConnect,
                        isOdd);
                    layers.push_back(newLayer);
                }
            }
            // OK... need to go again... maybe.
            layersToConnect = layers;
            numberOfLayersToConnect = layersToConnect.size();
        }
        IConnectableLayer* finalLayer = layersToConnect[0];
        // If we had an odd number of inputs we need to connect the final layer to the
        // last OutputOfParsedTfOperation in order to create the last Add layer we will
        // be handing back.
        if (isOdd)
        {
            // Connect the final layer to the last op.
            finalLayer = CreateAdditionLayer(nodeDef, inputs[numberOfInputs - 1], finalLayer);
        }
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, finalLayer);
    }
}
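// ParseAddN example: for AddN with N = 5 inputs a, b, c, d, e, the first loop creates
// add_1 = a + b and add_2 = c + d, the while loop then creates add_3 = add_1 + add_2,
// and because the input count is odd the final layer returned computes add_3 + e.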

ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
    // together as FullyConnected.
    if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
        HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        IConnectableLayer* layer =
            AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
                                   &nodeDef, nodeDef.name().c_str());
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
    }
    else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
             inputs[1].m_IndexedValue->GetNode().op() == "MatMul")
    {
        IConnectableLayer* layer =
            AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
                                   &nodeDef, nodeDef.name().c_str());
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
    }
    else
    {
        // Otherwise it's just a regular addition.
        return AddAdditionLayer(nodeDef);
    }
}

ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    return AddAdditionLayer(nodeDef, true);
}

/// A ParsedTfOperation which forwards to another (used for Identity nodes).
class ParsedIdentityTfOperation : public ParsedTfOperation
{
public:
    ParsedIdentityTfOperation(TfParser* parser, const tensorflow::NodeDef& node, ParsedTfOperation* representative)
        : ParsedTfOperation(parser, node)
        , m_Representative(representative)
    {
    }

    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        BOOST_ASSERT(m_Representative);
        return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
    }

    virtual ParsedTfOperation* ResolveIdentityOperations() override
    {
        return m_Representative->ResolveIdentityOperations();
    }

private:
    ParsedTfOperation* m_Representative;
};

ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
    // Any requests for the output slots of this node should be forwarded to the node connected as input.
    return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
}

/// A ParsedTfOperation for a Const node.
/// Creation of the ArmNN ConstLayer is deferred until it is actually needed, because Const nodes are mostly used
/// for weight inputs to MatMul/Conv2D nodes and in these cases ArmNN doesn't need a ConstLayer.
template <typename T>
class ParsedConstTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    ParsedConstTfOperation(TfParser* parser, const tensorflow::NodeDef& node,
                           const T* tensorData, const TensorInfo& tensorInfo)
        : DeferredSingleLayerParsedTfOperation(parser, node),
          m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
          m_TensorInfo(tensorInfo)
    {
        BOOST_ASSERT(tensorInfo.GetDataType() == GetDataType<T>());
    }

    void CreateLayerDeferred() override
    {
        BOOST_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
        m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
    }

    ConstTensor GetConstTensor(std::vector<T>& outputTensorData) const
    {
        outputTensorData.resize(m_TensorInfo.GetNumElements());

        memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.GetNumBytes());

        // Updates the result to point to the user provided storage.
        ConstTensor constTensor(m_TensorInfo, outputTensorData);
        return constTensor;
    }

    const T* GetStorage() const
    {
        return m_Storage.data();
    }

    const TensorInfo& GetTensorInfo() const
    {
        return m_TensorInfo;
    }

private:
    /// Manages the lifetime of the tensor data.
    std::vector<T> m_Storage;
    /// Describes the layout of the tensor and points to the data in m_Storage.
    TensorInfo m_TensorInfo;
};

DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
                                 const tensorflow::NodeDef& nodeDef)
{
    switch (tfDataType)
    {
    case tensorflow::DT_FLOAT:
        return DataType::Float32;
        break;
    case tensorflow::DT_INT32:
        return DataType::Signed32;
        break;
    default:
        throw ParseException(
            boost::str(
                boost::format(
                    "Unknown DataType %1% for node %2% %3%")
                    % tensorflow::DataType_Name(tfDataType)
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
}

struct ParseTfTensorValueList
{
    template<typename DataType>
    static void Parse(
        const tensorflow::TensorProto& tfTensor,
        unsigned int dstElements,
        std::vector<int8_t>& outputData);

    template <typename DataType>
    static void ReadData(const void* srcData, unsigned int numSrcElements,
                         std::vector<int8_t>& dstData, unsigned int numDstElements)
    {
        // If there are no entries in the list, perform no action.
        if (numSrcElements == 0)
        {
            return;
        }

        // If no size was provided, use the length of the value list.
        if (numDstElements == 0)
        {
            numDstElements = numSrcElements;
        }

        // Allocates memory.
        dstData.resize(std::max(numSrcElements, numDstElements) * sizeof(DataType));

        const DataType* srcTensor = reinterpret_cast<const DataType*>(srcData);
        DataType* dstTensor = reinterpret_cast<DataType*>(dstData.data());

        // Copies the value list entries into the destination.
        std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);

        if (numDstElements > numSrcElements)
        {
            // Uses the last element in the list to fill the remaining entries.
            std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
        }
    }

};

template <>
void ParseTfTensorValueList::Parse<float>(const tensorflow::TensorProto& tfTensor,
                                          unsigned int dstElements, std::vector<int8_t>& outputData)
{
    ReadData<float>(tfTensor.float_val().data(), static_cast<unsigned int>(tfTensor.float_val_size()),
                    outputData, dstElements);
}

template <>
void ParseTfTensorValueList::Parse<int32_t>(const tensorflow::TensorProto& tfTensor,
                                            unsigned int dstElements, std::vector<int8_t>& outputData)
{
    ReadData<int32_t>(tfTensor.int_val().data(), static_cast<unsigned int>(tfTensor.int_val_size()),
                      outputData, dstElements);
}

template <template<typename> class OperatorType, typename T = int8_t>
struct MakeTfOperation
{
    template<typename DataType, class... Args>
    inline static std::unique_ptr<OperatorType<DataType>> Parse(TfParser* parser, const tensorflow::NodeDef& node,
                                                                Args&&... args)
    {
        return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
    }
};

template <>
struct MakeTfOperation<ParsedConstTfOperation>
{
    template<typename DataType, class... Args>
    inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(TfParser* parser,
        const tensorflow::NodeDef& node, const std::vector<int8_t>& tensorData, const TensorInfo& tensorInfo)
    {
        return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
            reinterpret_cast<const DataType*>(tensorData.data()), tensorInfo);
    }
};

template <class FuncType>
struct InvokeParseFunction
{
    template<class ResType, class... Args>
    inline static ResType Result(DataType dataType, Args&&... args)
    {
        if (dataType == DataType::Float32)
        {
            return FuncType::template Parse<float>(std::forward<Args>(args)...);
        }
        else if (dataType == DataType::Signed32)
        {
            return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
        }

        return ResType();
    }

    template<class... Args>
    inline static void Result(DataType dataType, Args&&... args)
    {
        if (dataType == DataType::Float32)
        {
            FuncType::template Parse<float>(std::forward<Args>(args)...);
        }
        else if (dataType == DataType::Signed32)
        {
            FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
        }
    }
};
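// InvokeParseFunction usage: ParseConst (below) dispatches on the tensor's data type like this:
//     InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
//         dataType, this, nodeDef, tensorData, tensorInfo);
// which ends up calling MakeTfOperation<...>::Parse<float> or Parse<int32_t> depending on dataType.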

ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    BOOST_ASSERT(nodeDef.op() == "Const");

    if (nodeDef.attr().count("value") == 0)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Value not found for Const node - %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
    const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
    const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "dtype");

    const auto GetDimensionSize = [](auto& d) { return d.size(); };

    std::vector<unsigned int> dimensionSizes;
    std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
                   std::back_inserter(dimensionSizes), GetDimensionSize);

    // Calculates number of elements.
    const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
    unsigned int numElements = 0U;

    if (!dimensionSizes.empty())
    {
        numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
                                      1U, std::multiplies<unsigned int>());
    }

    std::vector<int8_t> tensorData;

    // Get tensor data from the list of values attribute.
    if (tfTensor.tensor_content().empty())
    {
        InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);

        // If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
        // tensor of the provided number of elements.
        if (numElements == 0)
        {
            const unsigned int tfNumElements =
                static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
            dimensionSizes.push_back(tfNumElements);
        }
    }
    // Gets tensor data from tensor content attribute.
    else
    {
        tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());

        // Checks if a tensor shape is defined for the tensor content.
        if (numElements == 0)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "No tensor shape found for Const node - %1% %2%")
                        % nodeDef.name()
                        % CHECK_LOCATION().AsString()));
        }
    }

    // Const node requires at least a list of values or a content attribute.
    if (tensorData.empty())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "No tensor data found for Const node - %1% %2%")
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
                                dimensionSizes.data(),
                                dataType);

    // If we have a list of values, then the length of the list must be
    // less than or equal to the number of elements implied by the shape argument.
    if (tensorData.size() > tensorInfo.GetNumBytes())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Number of elements (%1%) should be less than or equal "
                    "to the number of elements implied by the shape argument (%2%) for Const node - %3% %4%")
                    % (tensorData.size() / GetDataTypeSize(dataType))
                    % tensorInfo.GetNumElements()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
        dataType, this, nodeDef, tensorData, tensorInfo);
}

template<typename Type>
bool TfParser::HasParsedConstTensor(const std::string & nodeName) const
{
    auto it = m_ParsedTfOperations.find(nodeName);
    if (it == m_ParsedTfOperations.end())
    {
        return false;
    }
    return dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) != nullptr;
}

template<typename Type>
bool TfParser::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const
{
    return dynamic_cast<ParsedConstTfOperation<Type>*>(parsedTfOpPtr) != nullptr;
}

ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Convolution layers with constant weights for %1%, input %2% %3%")
                    % nodeDef.name()
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    // Read the dilations, if present - only [1,1,1,1] (the default) is supported.
    std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
    if (!dilations.empty())
    {
        for (auto dilation : dilations)
        {
            if (dilation != 1u)
            {
                throw ParseException(
                    boost::str(
                        boost::format(
                            "ArmNN only supports Convolution layers with dilations [1,1,1,1] for %1% %2%")
                            % nodeDef.name()
                            % CHECK_LOCATION().AsString()));
            }
        }
    }

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth  = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // TensorFlow weights are [H, W, In, Out].
    // ArmNN weights have to be [Out, H, W, In] when the data layout is NHWC,
    // and [Out, In, H, W] when the data layout is NCHW.
    PermutationVector permutationVector =
        dataLayout == DataLayout::NHWC ?
            std::initializer_list<unsigned int>{ 1, 2, 3, 0 } : // NHWC: [H, W, In, Out] -> [Out, H, W, In]
            std::initializer_list<unsigned int>{ 2, 3, 1, 0 };  // NCHW: [H, W, In, Out] -> [Out, In, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    uint32_t weightHeight = weightTensor.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t weightWidth  = weightTensor.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    if (paddingString == "SAME")
    {
        padding = true;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                 static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                 static_cast<float>(desc.m_StrideX)));
    }

    switch (dataLayout)
    {
    case DataLayout::NHWC:
        outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                  outputHeight,
                                  outputWidth,
                                  weightTensor.GetShape()[0] },
                                DataType::Float32);
        break;
    case DataLayout::NCHW:
    default:
        outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                  weightTensor.GetShape()[0],
                                  outputHeight,
                                  outputWidth },
                                DataType::Float32);
        break;
    }

    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc, weightTensor, nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
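// Weight permutation example: a 3x3 Conv2D with 16 input channels and 32 output channels
// has TensorFlow weights of shape [3, 3, 16, 32] ([H, W, In, Out]); per the mapping above
// this becomes [32, 3, 3, 16] for NHWC or [32, 16, 3, 3] for NCHW before the layer is added.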

ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
                                                    const tensorflow::GraphDef& graphDef)
{
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "ArmNN only supports Depthwise Convolution layer with constant weights. "
                    "Non const input found %1% for node %2% %3%")
                    % inputs[1].m_IndexedValue->GetNode().name()
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }

    ParsedConstTfOperation<float>* weightNode =
        boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth  = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // TensorFlow weights come in the format [H, W, I, M].
    // ArmNN weights have to be [M, I, H, W].
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    uint32_t weightHeight = weightTensor.GetShape()[2];
    uint32_t weightWidth  = weightTensor.GetShape()[3];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    if (paddingString == "SAME")
    {
        padding = true;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                 static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                 static_cast<float>(desc.m_StrideX)));
    }

    switch (dataLayout)
    {
    case DataLayout::NHWC:
        outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                  outputHeight,
                                  outputWidth,
                                  weightTensor.GetShape()[0] * weightTensor.GetShape()[1]},
                                DataType::Float32);
        break;
    case DataLayout::NCHW:
    default:
        outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                  weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
                                  outputHeight,
                                  outputWidth },
                                DataType::Float32);
        break;
    }

    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc, weightTensor, nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}

Conor Kennedyc2130a02018-12-05 11:05:54 +00001416TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
1417{
1418 BOOST_ASSERT(nodeDef.op() == "ExpandDims");
1419
1420 if (inputTensorInfo.GetNumDimensions() > 4) {
1421 throw ParseException(
1422 boost::str(
1423 boost::format(
1424 "Unsupported number of dimensions: %1% for input shape for ExpandDims %2% %3%")
1425 % inputTensorInfo.GetNumDimensions()
1426 % nodeDef.name()
1427 % CHECK_LOCATION().AsString()));
1428 }
1429
1430 std::int32_t expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");
1431
1432 std::int32_t inputDimSize = boost::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
1433 std::vector<uint32_t> outputDims;
1434
 1435 // The expandDim operation requires: -1 - input.dims() <= dim <= input.dims()
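    // e.g. for an input of shape [2, 3], Tdim = 1 gives an output shape of [2, 1, 3]
    // and Tdim = -1 gives [2, 3, 1].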
1436 if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1437 {
1438 // add current input shape to outputDims
1439 for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
1440 auto currentDimension = inputTensorInfo.GetShape()[i];
1441 outputDims.push_back(currentDimension);
1442 }
1443
1444 // insert a dimension of 1 at index 'expandDim' of inputs shape
1445 if (expandDim >= 0)
1446 {
1447 auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1448 outputDims.insert(getPosition, 1);
1449 }
1450
1451 // if negative number for 'expandDim' then count backwards from the last element
1452 // and insert 1 dimension at index 'expandDim'
1453 if (expandDim < 0)
1454 {
Matteo Martincighd7cceeb2018-12-06 09:06:29 +00001455 int outputDimSize = boost::numeric_cast<int>(outputDims.size() + 1);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001456 auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1457 outputDims.insert(getPosition, 1);
1458 }
1459 }
1460 else
1461 {
1462 throw InvalidArgumentException(
1463 boost::str(
1464 boost::format(
1465 "Cannot expand dimension %1% in input tensor with %2% dimension %3%")
1466 % expandDim
1467 % inputDimSize
1468 % CHECK_LOCATION().AsString()));
1469 }
1470
1471 if (outputDims.size() > 4)
1472 {
1473 throw ParseException(
1474 boost::str(
1475 boost::format(
1476 "Unsupported number of dimensions: %1% for output shape for ExpandDims %2% %3%")
1477 % outputDims.size()
1478 % nodeDef.name()
1479 % CHECK_LOCATION().AsString()));
1480 }
1481
1482 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1483 outputDims.data());
1484
1485 TensorInfo outTensorInfo = inputTensorInfo;
1486 outTensorInfo.SetShape(outShape);
1487
1488 return outTensorInfo;
1489}
1490
1491ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1492{
1493 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1494
1495 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1496 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1497
1498 TensorInfo outputInfo;
1499 outputInfo = OutputShapeOfExpandDims(nodeDef, inputTensorInfo);
1500
1501 ReshapeDescriptor reshapeDesc;
1502 reshapeDesc.m_TargetShape = outputInfo.GetShape();
1503 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1504 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1505 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1506
1507 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1508}
1509
surmeh01bceff2f2018-03-29 16:29:27 +01001510ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
1511 const tensorflow::GraphDef& graphDef)
1512{
1513 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
1514
1515 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1516 {
telsoa01c577f2c2018-08-31 09:22:23 +01001517 throw ParseException(
1518 boost::str(
1519 boost::format(
1520 "ArmNN only supports FusedBatchNormalization layers with constant scale. "
1521 "Input %1%. Node %2% %3%")
1522 % inputs[1].m_IndexedValue->GetNode().name()
1523 % nodeDef.name()
1524 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001525 }
1526 ParsedConstTfOperation<float>* scaleNode =
1527 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1528
1529 if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
1530 {
telsoa01c577f2c2018-08-31 09:22:23 +01001531 throw ParseException(
1532 boost::str(
1533 boost::format(
1534 "ArmNN only supports FusedBatchNormalization layers with constant offset. "
1535 "Input %1%. Node %2% %3%")
1536 % inputs[2].m_IndexedValue->GetNode().name()
1537 % nodeDef.name()
1538 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001539 }
1540 ParsedConstTfOperation<float>* offsetNode =
1541 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);
1542
1543 if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
1544 {
telsoa01c577f2c2018-08-31 09:22:23 +01001545 throw ParseException(
1546 boost::str(
1547 boost::format(
1548 "ArmNN only supports FusedBatchNormalization layers with constant mean. "
1549 "Input %1%. Node %2% %3%")
1550 % inputs[3].m_IndexedValue->GetNode().name()
1551 % nodeDef.name()
1552 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001553 }
1554 ParsedConstTfOperation<float>* meanNode =
1555 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);
1556
1557 if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
1558 {
telsoa01c577f2c2018-08-31 09:22:23 +01001559 throw ParseException(
1560 boost::str(
1561 boost::format(
1562 "ArmNN only supports FusedBatchNormalization layers with constant variance. "
1563 "Input %1%. Node %2% %3%")
1564 % inputs[4].m_IndexedValue->GetNode().name()
1565 % nodeDef.name()
1566 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001567 }
1568 ParsedConstTfOperation<float>* varianceNode =
1569 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);
1570
Matteo Martincigh075c7502018-12-05 13:10:45 +00001571 const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
1572
1573 CHECK_DATA_FORMAT(nodeDef, dataFormat, "FusedBatchNorm");
1574
telsoa01c577f2c2018-08-31 09:22:23 +01001575 // The descriptor only has the epsilon attribute.
surmeh01bceff2f2018-03-29 16:29:27 +01001576 BatchNormalizationDescriptor desc;
1577 desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
Matteo Martincigh075c7502018-12-05 13:10:45 +00001578 desc.m_DataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
surmeh01bceff2f2018-03-29 16:29:27 +01001579
telsoa01c577f2c2018-08-31 09:22:23 +01001580 // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
1581 // locally until the layer is added.
surmeh01bceff2f2018-03-29 16:29:27 +01001582 std::vector<float> scaleTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001583 ConstTensor scaleTensor = scaleNode->GetConstTensor(scaleTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001584
1585 std::vector<float> offsetTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001586 ConstTensor offsetTensor = offsetNode->GetConstTensor(offsetTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001587
1588 std::vector<float> meanTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001589 ConstTensor meanTensor = meanNode->GetConstTensor(meanTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001590
1591 std::vector<float> varianceTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001592 ConstTensor varianceTensor = varianceNode->GetConstTensor(varianceTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001593
1594 IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
1595 meanTensor,
1596 varianceTensor,
1597 offsetTensor,
1598 scaleTensor,
1599 nodeDef.name().c_str());
1600
1601 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1602
Matteo Martincigh075c7502018-12-05 13:10:45 +00001603 layer->GetOutputSlot(0).SetTensorInfo(inputSlot.GetTensorInfo());
1604 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01001605
1606 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1607}
1608
telsoa01c577f2c2018-08-31 09:22:23 +01001609bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
1610 size_t alphaLayerIndex,
1611 const OutputOfParsedTfOperation& otherOp,
1612 armnn::IOutputSlot** outputOfLeakyRelu,
1613 armnn::ActivationDescriptor & desc)
1614{
1615 const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();
1616
1617 // Verifying all these assumptions hold:
1618 //
1619 // 1, the mulNodeDef is an elementwise multiplication node "Mul"
1620 // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
 1621 // 3, the other input of the "Mul" node is a layer which has the same name as otherNodeDef
1622 //
1623
1624 if (mulNodeDef.op() == "Mul")
1625 {
1626 size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
1627 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
1628
1629 BOOST_ASSERT(inputs.size() == 2);
1630 BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
1631 BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
1632 BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
1633
1634 if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
1635 {
1636 if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
1637 {
1638 ParsedConstTfOperation<float>* alpha =
1639 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(
1640 inputs[alphaLayerIndex].m_IndexedValue);
1641
1642 std::vector<float> const_data;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001643 ConstTensor const_tensor = alpha->GetConstTensor(const_data);
telsoa01c577f2c2018-08-31 09:22:23 +01001644
1645 if (const_data.size() == 1)
1646 {
1647 desc.m_Function = ActivationFunction::LeakyReLu;
1648 desc.m_A = const_data[0];
1649
1650 *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
1651 return true;
1652 }
1653 }
1654 }
1655 }
1656 return false;
1657}
1658
telsoa01c577f2c2018-08-31 09:22:23 +01001659ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
1660 const tensorflow::GraphDef& graphDef)
1661{
1662 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
Sadik Armagan975c09a2018-12-04 10:02:08 +00001663 if (inputs.size() != 2)
1664 {
1665 throw ParseException(
1666 boost::str(
1667 boost::format(
1668 "Maximum expects two inputs!. Got %1% for Node %2% %3%")
1669 % inputs.size()
1670 % nodeDef.name()
1671 % CHECK_LOCATION().AsString()));
1672 }
1673
telsoa01c577f2c2018-08-31 09:22:23 +01001674 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1675 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1676 IOutputSlot* outputOfLeakyRelu = nullptr;
1677
1678 ActivationDescriptor desc;
1679
Sadik Armagan975c09a2018-12-04 10:02:08 +00001680 // A max node may be part of a LeakyRelu, with one input as a multiplication with a scalar constant,
1681 // i.e. one of the four possible scenarios:
1682 // 1, max(mul(a, x), x)
1683 // 2, max(mul(x, a), x)
1684 // 3, max(x, mul(a, x))
1685 // 4, max(x, mul(x, a))
1686 // These are handled by an activation layer.
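    // e.g. the TensorFlow pattern max(0.01 * x, x) is folded into a single ArmNN
    // LeakyReLu activation with m_A = 0.01 rather than separate Mul and Maximum layers.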
telsoa01c577f2c2018-08-31 09:22:23 +01001687
1688 if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1689 IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1690 IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1691 IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1692 {
1693 BOOST_ASSERT(outputOfLeakyRelu != nullptr);
1694
1695 IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
1696 outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
1697 layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
1698 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1699 }
1700 else
1701 {
Sadik Armagan975c09a2018-12-04 10:02:08 +00001702 // Anything else is just a maximum layer.
1703
1704 return AddMaximumLayer(nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001705 }
1706}
1707
jimfly0184c70e62018-12-19 13:14:46 +00001708std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> TfParser::ProcessElementwiseInputSlots(
1709 const tensorflow::NodeDef& nodeDef, const std::string& layerName)
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001710{
1711 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1712
1713 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1714 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1715 const unsigned int input0Dim = input0Slot->GetTensorInfo().GetNumDimensions();
1716 const unsigned int input1Dim = input1Slot->GetTensorInfo().GetNumDimensions();
1717
1718 if (input0Dim != input1Dim)
1719 {
 1720 // Broadcasting where input0 and input1 have different numbers of dimensions
 1721 // is only supported for a pair of 1D and 4D tensors.
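        // e.g. a 1D tensor of shape [4] paired with a 4D tensor of shape [1, 2, 2, 4]
        // is reshaped by AddBroadcastReshapeLayer into a broadcast-compatible 4D shape
        // before the two are combined element-wise.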
1722 if (input0Dim == 1 && input1Dim == 4)
1723 {
1724 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
1725 }
1726 else if (input0Dim == 4 && input1Dim == 1)
1727 {
1728 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
1729 }
1730 else
1731 {
1732 throw ParseException(
jimfly0184c70e62018-12-19 13:14:46 +00001733 boost::str(
1734 boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
1735 % layerName
1736 % nodeDef.name()
1737 % CHECK_LOCATION().AsString()));
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001738 }
1739 }
jimfly0184c70e62018-12-19 13:14:46 +00001740 return {input0Slot, input1Slot};
1741}
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001742
jimfly0184c70e62018-12-19 13:14:46 +00001743ParsedTfOperationPtr TfParser::ProcessElementwiseLayer(
1744 IOutputSlot* input0Slot,
1745 IOutputSlot* input1Slot,
1746 IConnectableLayer* const layer,
1747 const tensorflow::NodeDef& nodeDef)
1748{
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001749 input0Slot->Connect(layer->GetInputSlot(0));
1750 input1Slot->Connect(layer->GetInputSlot(1));
1751
1752 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1753 std::vector<unsigned int> outputShape;
1754
1755 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1756 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1757
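    // The output takes the larger extent in each dimension,
    // e.g. shapes [1, 2, 2, 4] and [1, 1, 1, 4] give an output shape of [1, 2, 2, 4].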
1758 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1759 {
1760 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1761 }
1762
1763 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1764 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1765
1766 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1767}
1768
jimfly01a06bf312018-12-18 16:24:51 +00001769ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
1770 const tensorflow::GraphDef& graphDef)
1771{
1772 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
1773 IOutputSlot* input0Slot = inputLayers.first;
1774 IOutputSlot* input1Slot = inputLayers.second;
1775
1776 IConnectableLayer* const layer = m_Network->AddGreaterLayer(nodeDef.name().c_str());
1777
1778 return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
1779}
1780
jimfly0184c70e62018-12-19 13:14:46 +00001781ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
1782 const tensorflow::GraphDef& graphDef)
1783{
1784 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
1785 IOutputSlot* input0Slot = inputLayers.first;
1786 IOutputSlot* input1Slot = inputLayers.second;
1787
1788 IConnectableLayer* const layer = m_Network->AddEqualLayer(nodeDef.name().c_str());
1789
1790 return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
1791}
1792
1793ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
1794 const tensorflow::GraphDef& graphDef)
1795{
1796 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
1797 IOutputSlot* input0Slot = inputLayers.first;
1798 IOutputSlot* input1Slot = inputLayers.second;
1799
1800 IConnectableLayer* const layer = m_Network->AddMinimumLayer(nodeDef.name().c_str());
1801
1802 return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
1803}
1804
jimfly0123be07e2018-12-04 17:47:22 +00001805ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1806{
1807 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1808
1809 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1810 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1811
1812 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
1813 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
1814
1815 if (input0Info.GetNumDimensions() == 1)
1816 {
1817 const bool isNHWC = true;
1818 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
1819 }
1820
1821 if (input1Info.GetNumDimensions() == 1)
1822 {
1823 const bool isNHWC = true;
1824 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
1825 }
1826
1827 IConnectableLayer* const layer = m_Network->AddSubtractionLayer(nodeDef.name().c_str());
1828
1829 input0Slot->Connect(layer->GetInputSlot(0));
1830 input1Slot->Connect(layer->GetInputSlot(1));
1831
1832 if (input0Info.GetNumDimensions() == 1)
1833 {
1834 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
1835 }
1836 else
1837 {
1838 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
1839 }
1840
1841 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1842}
1843
jimfly01f6ba7472018-12-04 10:09:52 +00001844unsigned int CheckPaddingTensor(const ConstTensor& paddingTensor,
1845 const TensorInfo& inputTensorInfo,
1846 const std::string& nodeName)
1847{
1848 unsigned int rank = paddingTensor.GetShape()[0];
1849 unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
1850 if (rank != expectedRank)
1851 {
1852 throw ParseException(
1853 boost::str(
1854 boost::format(
1855 "Expected the padding tensor to be of rank %1 not %2 on Node %3 %4.")
1856 % expectedRank
1857 % rank
1858 % nodeName
1859 % CHECK_LOCATION().AsString()));
1860 }
1861 unsigned int second = paddingTensor.GetShape()[1];
1862 if (second != 2)
1863 {
1864 throw ParseException(
1865 boost::str(
1866 boost::format(
1867 "Expected the padding tensor to be of dimensions [%1, 2] not [%1, %2] on Node %3 %4.")
1868 % rank
1869 % second
1870 % nodeName
1871 % CHECK_LOCATION().AsString()));
1872 }
1873 return rank;
1874}
1875
1876TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
1877 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
1878{
1879 unsigned int numDims = inputTensorInfo.GetNumDimensions();
1880 std::vector<unsigned int> outDims;
1881 for (unsigned int i = 0; i < numDims; ++i)
1882 {
1883 unsigned int dimSize = inputTensorInfo.GetShape()[i];
1884 const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
1885 dimSize += dimPadding.first;
1886 dimSize += dimPadding.second;
1887 outDims.push_back(dimSize);
1888 }
1889 TensorInfo paddedTensorInfo = inputTensorInfo;
1890 unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
1891 paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
1892 return paddedTensorInfo;
1893}
1894
1895ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
1896 const tensorflow::GraphDef& graphDef)
1897{
1898 // input consists of:
1899 // input[0] the tensor which will be padded
1900 // input[1] the tensor holding the padding values
1901 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1902 IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1903 TensorInfo inputTensorInfo = previousLayerOutputSlot.GetTensorInfo();
1904 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
1905 {
1906 throw ParseException(
1907 boost::str(
1908 boost::format(
1909 "ArmNN only supports Pad with constant padding. "
1910 "Input %1%. Node %2% %3%")
1911 % inputs[1].m_IndexedValue->GetNode().name()
1912 % nodeDef.name()
1913 % CHECK_LOCATION().AsString()));
1914
1915 }
1916 ParsedConstTfOperation<int32_t>* paddingTensorOp =
1917 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1918
1919 std::vector<int32_t> paddingTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001920 ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(paddingTensorData);
jimfly01f6ba7472018-12-04 10:09:52 +00001921 // paddings is an integer tensor with shape [n, 2], where n is the rank of tensor
1922 // and should match the rank of the input tensor that is being padded.
1923 // For each dimension D of input, paddings[D, 0] indicates how many values to add
1924 // before the contents of tensor in that dimension, and paddings[D, 1] indicates how
1925 // many values to add after the contents of tensor in that dimension
1926 // This needs to be translated into a padList for ACL
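    // e.g. paddings of [[1, 1], [2, 2]] applied to an input of shape [3, 4]
    // produce a padList of {{1, 1}, {2, 2}} and an output of shape [5, 8].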
1927 std::vector<std::pair<unsigned int, unsigned int>> padList;
1928 unsigned int rank = CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
1929 for (unsigned int i = 0; i < rank; ++i)
1930 {
1931 std::pair<unsigned int, unsigned int> paddingForDim;
1932 for (unsigned int j = 0; j < 2; j++)
1933 {
1934 unsigned int index = (i * 2) + j;
1935 int paddingAmount = paddingTensorData[index];
1936 // make sure we can cast to an unsigned value
1937 if (paddingAmount < 0)
1938 {
1939 throw ParseException(
1940 boost::str(
1941 boost::format(
1942 "Negative amount %1 specified at [%2, %3] of padding tensor on Node %4 %5.")
1943 % paddingAmount
1944 % i
1945 % j
1946 % nodeDef.name()
1947 % CHECK_LOCATION().AsString()));
1948 }
1949 if (j == 0)
1950 {
1951 paddingForDim.first = static_cast<unsigned int>(paddingAmount);
1952 }
1953 else
1954 {
1955 paddingForDim.second = static_cast<unsigned int>(paddingAmount);
1956 }
1957 }
1958 padList.push_back(paddingForDim);
1959 }
1960 PadDescriptor padDescriptor(padList);
1961 IConnectableLayer* layer = m_Network->AddPadLayer(padDescriptor, nodeDef.name().c_str());
1962 previousLayerOutputSlot.Connect(layer->GetInputSlot(0));
1963 // Use the padding to calculate the new output tensor shape
1964 TensorInfo outputTensorInfo = CalculatePaddedOutputTensorInfo(inputTensorInfo, padList);
1965 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1966 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1967}
1968
surmeh01bceff2f2018-03-29 16:29:27 +01001969ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
1970 const tensorflow::GraphDef& graphDef)
1971{
1972 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
Matteo Martincighf9afc792018-12-06 12:03:17 +00001973
telsoa01c577f2c2018-08-31 09:22:23 +01001974 // In TensorFlow, the last input of the Concat node is the axis of concatenation.
surmeh01bceff2f2018-03-29 16:29:27 +01001975 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
surmeh01bceff2f2018-03-29 16:29:27 +01001976
surmeh01bceff2f2018-03-29 16:29:27 +01001977 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
1978
telsoa01c577f2c2018-08-31 09:22:23 +01001979 // The last input is the axis for concatenation.
surmeh01bceff2f2018-03-29 16:29:27 +01001980 if (!HasParsedConstTensor<int32_t>(inputs[numInputs - 1].m_IndexedValue->GetNode().name()))
1981 {
telsoa01c577f2c2018-08-31 09:22:23 +01001982 throw ParseException(
1983 boost::str(
1984 boost::format(
1985 "ArmNN only supports Concat with constant axis. "
1986 "Input %1%. Node %2% %3%")
1987 % inputs[numInputs - 1].m_IndexedValue->GetNode().name()
1988 % nodeDef.name()
1989 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001990 }
1991 ParsedConstTfOperation<int32_t>* shapeNode =
1992 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[numInputs - 1].m_IndexedValue);
1993
Matteo Martincighf9afc792018-12-06 12:03:17 +00001994 // Get the axis tensor data
surmeh01bceff2f2018-03-29 16:29:27 +01001995 std::vector<int32_t> axisTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001996 shapeNode->GetConstTensor(axisTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001997
telsoa01c577f2c2018-08-31 09:22:23 +01001998 // The concatenation axis reflects the data format: the channel dimension is 3 for NHWC and 1 for NCHW.
Matteo Martincighf9afc792018-12-06 12:03:17 +00001999 const unsigned int concatDim = static_cast<unsigned int>(axisTensorData[0]);
surmeh01bceff2f2018-03-29 16:29:27 +01002000
telsoa01c577f2c2018-08-31 09:22:23 +01002001 // Armnn supports concatenation along the channel dimension for data formats NHWC and NCHW.
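    // e.g. two NHWC inputs of shape [1, 2, 2, 3] and [1, 2, 2, 5] concatenated on axis 3
    // produce an output of shape [1, 2, 2, 8].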
Matteo Martincighf9afc792018-12-06 12:03:17 +00002002 if (concatDim == 0 || concatDim == 2)
surmeh01bceff2f2018-03-29 16:29:27 +01002003 {
telsoa01c577f2c2018-08-31 09:22:23 +01002004 throw ParseException(
2005 boost::str(
2006 boost::format(
2007 "Dimension %1% for concatenation is not supported by Armnn. "
2008 "Node %2% %3%")
Matteo Martincighf9afc792018-12-06 12:03:17 +00002009 % concatDim
telsoa01c577f2c2018-08-31 09:22:23 +01002010 % nodeDef.name()
2011 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002012 }
2013
Matteo Martincighf9afc792018-12-06 12:03:17 +00002014 unsigned int numConcatViews = numInputs - 1;
2015 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatViews), MaxNumOfTensorDimensions);
2016 concatDescriptor.SetConcatAxis(concatDim);
2017 TensorShape mergeDims(MaxNumOfTensorDimensions);
2018 unsigned int mergeDim = 0;
2019 for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
surmeh01bceff2f2018-03-29 16:29:27 +01002020 {
telsoa01c577f2c2018-08-31 09:22:23 +01002021 // TODO: double check whether this is the correct slot to resolve for each view.
Matteo Martincighf9afc792018-12-06 12:03:17 +00002022 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002023 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2024
Matteo Martincighf9afc792018-12-06 12:03:17 +00002025 // Double check dimensions of the tensors
2026 if (inputTensorInfo.GetNumDimensions() != MaxNumOfTensorDimensions)
2027 {
2028 throw armnn::ParseException(
2029 boost::str(
2030 boost::format(
2031 "The number of dimensions: %1% for input tensors of the "
2032 "concatenation op should be %2% %3%")
2033 % inputTensorInfo.GetNumDimensions()
2034 % MaxNumOfTensorDimensions
2035 % CHECK_LOCATION().AsString()));
2036 }
2037
2038 // Copy the input tensor shape to mergeDimSizes and initialize the view origin coordinates for the current input
2039 mergeDims = inputTensorInfo.GetShape();
2040 unsigned int* viewOrigin = const_cast<unsigned int*>(concatDescriptor.GetViewOrigin(viewIndex));
2041 std::fill(viewOrigin, viewOrigin + MaxNumOfTensorDimensions, 0);
2042
2043 // Update the view origin coordinates and the merge dimension value
2044 concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
2045 mergeDim += mergeDims[concatDim];
surmeh01bceff2f2018-03-29 16:29:27 +01002046 }
2047
Matteo Martincighf9afc792018-12-06 12:03:17 +00002048 // Update the output shape
2049 mergeDims[concatDim] = mergeDim;
surmeh01bceff2f2018-03-29 16:29:27 +01002050 armnn::IConnectableLayer *layer = m_Network->AddMergerLayer(concatDescriptor, nodeDef.name().c_str());
2051
Matteo Martincighf9afc792018-12-06 12:03:17 +00002052 layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(mergeDims, DataType::Float32));
surmeh01bceff2f2018-03-29 16:29:27 +01002053
Matteo Martincighf9afc792018-12-06 12:03:17 +00002054 for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
surmeh01bceff2f2018-03-29 16:29:27 +01002055 {
Matteo Martincighf9afc792018-12-06 12:03:17 +00002056 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2057 inputSlot.Connect(layer->GetInputSlot(viewIndex));
surmeh01bceff2f2018-03-29 16:29:27 +01002058 }
2059
2060 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2061}
2062
2063ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
2064 const tensorflow::GraphDef& graphDef)
2065{
telsoa01c577f2c2018-08-31 09:22:23 +01002066 // Note: the Shape layer is handled in a special way, because:
 2067 // 1. ARMNN doesn't support the int32 tensors that Shape outputs.
2068 // 2. ARMNN works with statically shaped tensors which are known at parse time.
surmeh01bceff2f2018-03-29 16:29:27 +01002069 // 3. Because of 1. and 2. we treat the output of Shape as a temporary const int32
telsoa01c577f2c2018-08-31 09:22:23 +01002070 // tensor which may be used as an input to other ops, most likely a Reshape.
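    // e.g. a Shape node whose input has shape [1, 224, 224, 3] is parsed into a
    // constant int32 tensor holding { 1, 224, 224, 3 }.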
surmeh01bceff2f2018-03-29 16:29:27 +01002071
2072 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
2073 if (tfDataType != tensorflow::DT_INT32)
2074 {
telsoa01c577f2c2018-08-31 09:22:23 +01002075 throw ParseException(
2076 boost::str(
2077 boost::format(
2078 "Armnn only supports DT_INT32 as out_type. Got %1% for Node %2% %3%")
2079 % tensorflow::DataType_Name(tfDataType)
2080 % nodeDef.name()
2081 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002082 }
2083
2084 const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2085 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2086 const TensorInfo& prevLayerTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2087 unsigned int prevLayerDimensions = prevLayerTensorInfo.GetNumDimensions();
2088
2089 std::vector<int32_t> shapeTensorData;
2090 shapeTensorData.reserve(prevLayerDimensions);
2091
2092 for (unsigned int i=0; i<prevLayerDimensions; ++i)
2093 {
2094 shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.GetShape()[i]));
2095 }
2096
2097 TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
2098
2099 return std::make_unique<ParsedConstTfOperation<int32_t>>(this,
2100 nodeDef,
2101 &shapeTensorData[0],
2102 shapeTensorInfo);
2103}
2104
2105ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
2106 const tensorflow::GraphDef& graphDef)
2107{
2108 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2109 ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
2110
2111 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2112 {
telsoa01c577f2c2018-08-31 09:22:23 +01002113 throw ParseException(
2114 boost::str(
2115 boost::format(
2116 "ArmNN only supports Reshape layers with constant shapes. "
2117 "Input %1% Node %2% %3%")
2118 % inputs[1].m_IndexedValue->GetNode().name()
2119 % nodeDef.name()
2120 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002121 }
2122 ParsedConstTfOperation<int32_t>* shapeNode =
2123 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2124
2125 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
2126 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2127
2128 std::vector<int32_t> shapeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002129 ConstTensor shapeTensor = shapeNode->GetConstTensor(shapeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002130 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
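    // e.g. a constant shape tensor holding { 1, 4 } turns a [2, 2] input into a [1, 4] output.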
2131
2132 TensorShape targetShape = outputTensorInfo.GetShape();
2133 ReshapeDescriptor reshapeDesc;
2134 reshapeDesc.m_TargetShape = targetShape;
2135
2136 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2137 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2138 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2139
2140 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2141}
2142
2143ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
2144 const tensorflow::GraphDef& graphDef)
2145{
2146 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2147
2148 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2149 {
telsoa01c577f2c2018-08-31 09:22:23 +01002150 throw ParseException(
2151 boost::str(
2152 boost::format(
2153 "ArmNN only supports ResizeBilinear layers with constant sizes. "
2154 "Input %1%. Node %2% %3%")
2155 % inputs[1].m_IndexedValue->GetNode().name()
2156 % nodeDef.name()
2157 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002158 }
2159 ParsedConstTfOperation<int32_t>* sizeNode =
2160 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2161
telsoa01c577f2c2018-08-31 09:22:23 +01002162 // Checks the align_corners attribute is not set.
surmeh01bceff2f2018-03-29 16:29:27 +01002163 if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
2164 {
telsoa01c577f2c2018-08-31 09:22:23 +01002165 throw ParseException(
2166 boost::str(
2167 boost::format(
2168 "ArmNN only supports ResizeBilinear layers with align_corners set to false. "
2169 "Node %1% %2%")
2170 % nodeDef.name()
2171 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002172 }
2173
telsoa01c577f2c2018-08-31 09:22:23 +01002174 // Data for the parsed tensor args (size) must be stored locally.
surmeh01bceff2f2018-03-29 16:29:27 +01002175 std::vector<int32_t> sizeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002176 ConstTensor sizeTensor = sizeNode->GetConstTensor(sizeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002177
telsoa01c577f2c2018-08-31 09:22:23 +01002178 // The descriptor only has target height and width attributes, which we get from the size tensor.
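    // e.g. a size tensor holding { 256, 512 } sets the target height to 256 and the target width to 512.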
surmeh01bceff2f2018-03-29 16:29:27 +01002179 ResizeBilinearDescriptor desc;
2180 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
2181 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
jimfly018a121502018-12-06 16:19:52 +00002182 desc.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002183
2184 IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, nodeDef.name().c_str());
2185
2186 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2187 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
telsoa01c577f2c2018-08-31 09:22:23 +01002188 // The input shape is always in NHWC format; take the batch and channels from it
 2189 // and combine them with the target size to make up the ArmNN NHWC output shape.
surmeh01bceff2f2018-03-29 16:29:27 +01002190 unsigned int outBatch = inputTensorInfo.GetShape()[0];
2191 unsigned int outChannels = inputTensorInfo.GetShape()[3];
2192 unsigned int outHeight = desc.m_TargetHeight;
2193 unsigned int outWidth = desc.m_TargetWidth;
jimfly018a121502018-12-06 16:19:52 +00002194 TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
telsoa01c577f2c2018-08-31 09:22:23 +01002195 // The output DataType is always Float32, regardless of the input DataType.
surmeh01bceff2f2018-03-29 16:29:27 +01002196 const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
2197 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2198
jimfly018a121502018-12-06 16:19:52 +00002199 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01002200
2201 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2202}
2203
2204TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
2205{
2206 BOOST_ASSERT(nodeDef.op() == "Squeeze");
2207 tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
2208
2209 DataType type;
2210 if (tfDataType == tensorflow::DT_FLOAT)
2211 {
2212 type = DataType::Float32;
2213 }
2214 else if (tfDataType == tensorflow::DT_INT32)
2215 {
2216 type = DataType::Signed32;
2217 }
2218 else
2219 {
telsoa01c577f2c2018-08-31 09:22:23 +01002220 throw ParseException(
2221 boost::str(
2222 boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
2223 % tensorflow::DataType_Name(tfDataType)
2224 % nodeDef.name()
2225 % CHECK_LOCATION().AsString()));
2226 }
2227
2228
2229 if (inputTensorInfo.GetNumDimensions() > 4)
2230 {
2231 throw ParseException(
2232 boost::str(
2233 boost::format(
2234 "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
2235 % inputTensorInfo.GetNumDimensions()
2236 % nodeDef.name()
2237 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002238 }
2239
2240 std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
telsoa01c577f2c2018-08-31 09:22:23 +01002241 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2242
surmeh01bceff2f2018-03-29 16:29:27 +01002243 if (squeezeDims.empty())
2244 {
telsoa01c577f2c2018-08-31 09:22:23 +01002245 squeezeDims.assign(dimensionSequence,
2246 dimensionSequence+inputTensorInfo.GetNumDimensions());
surmeh01bceff2f2018-03-29 16:29:27 +01002247 }
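    // e.g. squeezing a [1, 2, 1, 3] input with squeeze_dims = { 0 } gives [2, 1, 3];
    // with no squeeze_dims every dimension of size 1 is removed, giving [2, 3].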
2248
2249 std::vector<uint32_t> outputDims;
2250 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2251 {
telsoa01c577f2c2018-08-31 09:22:23 +01002252 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2253 auto currentDimension = inputTensorInfo.GetShape()[i];
2254 if (skipSqueeze || currentDimension != 1)
surmeh01bceff2f2018-03-29 16:29:27 +01002255 {
telsoa01c577f2c2018-08-31 09:22:23 +01002256 outputDims.push_back(currentDimension);
surmeh01bceff2f2018-03-29 16:29:27 +01002257 }
2258 }
2259
2260 if (outputDims.size() > 4)
2261 {
telsoa01c577f2c2018-08-31 09:22:23 +01002262 throw ParseException(
2263 boost::str(
2264 boost::format(
2265 "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
2266 % outputDims.size()
2267 % nodeDef.name()
2268 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002269 }
2270
telsoa01c577f2c2018-08-31 09:22:23 +01002271 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2272 outputDims.data());
2273
2274 TensorInfo outTensorInfo = inputTensorInfo;
2275 outTensorInfo.SetShape(outShape);
2276 outTensorInfo.SetDataType(type);
surmeh01bceff2f2018-03-29 16:29:27 +01002277
2278 return outTensorInfo;
2279}
2280
2281ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2282{
2283 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2284
2285 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2286 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2287
2288 TensorInfo outputInfo;
2289 outputInfo = OutputShapeOfSqueeze(nodeDef, inputTensorInfo);
2290
2291 ReshapeDescriptor reshapeDesc;
2292 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2293 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2294 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2295 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2296
2297 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2298}
2299
2300ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2301{
2302 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2303
2304 NormalizationDescriptor normalizationDescriptor;
2305 normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2306 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
2307 normalizationDescriptor.m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef, "alpha");
2308 normalizationDescriptor.m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef, "beta");
2309 normalizationDescriptor.m_K = ReadMandatoryNodeFloatAttribute(nodeDef, "bias");
2310 normalizationDescriptor.m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef, "depth_radius");
ruoyan018174f362018-12-04 18:24:08 +00002311 normalizationDescriptor.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002312
2313 // The window size must be an odd value. For a window size of (2 * n + 1), TensorFlow defines depth_radius = n.
2314 normalizationDescriptor.m_NormSize = normalizationDescriptor.m_NormSize * 2 + 1;
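    // e.g. depth_radius = 2 becomes an ArmNN normalization window of size 5.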
2315
2316 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002317 IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor,
2318 nodeDef.name().c_str());
ruoyan018174f362018-12-04 18:24:08 +00002319 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2320 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
surmeh01bceff2f2018-03-29 16:29:27 +01002321
2322 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2323}
2324
2325/// An ParsedTfOperation for a MatMul node.
telsoa01c577f2c2018-08-31 09:22:23 +01002326/// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because
2327/// MatMul nodes are often used for the first part of a biased FullyConnected (MatMul followed
2328/// by Add) and in these cases armnn doesn't need a separate layer for the MatMul.
2329///
surmeh01bceff2f2018-03-29 16:29:27 +01002330class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
2331{
2332public:
2333 ParsedMatMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
2334 : DeferredSingleLayerParsedTfOperation(parser, node)
2335 {
2336 }
2337
2338 void CreateLayerDeferred() override
2339 {
2340 BOOST_ASSERT(m_Layer == nullptr);
2341 m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
2342 }
2343};
2344
2345ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2346{
telsoa01c577f2c2018-08-31 09:22:23 +01002347 // Defers the creation of the layer (see ParsedMatMulTfOperation).
surmeh01bceff2f2018-03-29 16:29:27 +01002348 return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
2349}
2350
telsoa01c577f2c2018-08-31 09:22:23 +01002351/// An ParsedTfOperation for a Mul node.
2352/// Creation of the armnn Mul layer is deferred until it is actually needed, because Mul nodes
2353/// are also used for the first part of a leaky relu activation function (Mul followed by Maximum)
2354/// and in these cases armnn doesn't need a separate layer for the Mul.
2355///
2356class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
2357{
2358public:
2359 ParsedMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
2360 : DeferredSingleLayerParsedTfOperation(parser, node)
2361 {
2362 }
2363
2364 void CreateLayerDeferred() override
2365 {
2366 BOOST_ASSERT(m_Layer == nullptr);
2367 m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
2368 }
2369};
2370
surmeh01bceff2f2018-03-29 16:29:27 +01002371ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2372{
2373 boost::ignore_unused(graphDef);
2374
telsoa01c577f2c2018-08-31 09:22:23 +01002375 return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002376}
2377
2378ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
2379 const tensorflow::GraphDef& graphDef)
2380{
2381 boost::ignore_unused(graphDef);
2382
2383 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
2384
2385 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
2386
2387 auto it = m_InputShapes.find(nodeDef.name());
2388 if (it == m_InputShapes.end())
2389 {
telsoa01c577f2c2018-08-31 09:22:23 +01002390 throw ParseException(
2391 boost::str(
2392 boost::format(
2393 "Missing input shape for Placeholder '%1%' %2%")
2394 % nodeDef.name()
2395 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002396 }
2397 TensorInfo tensorInfo(it->second, DataType::Float32);
2398
2399 IConnectableLayer* const layer = m_Network->AddInputLayer(layerId, nodeDef.name().c_str());
2400
2401 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2402
2403 TrackInputBinding(layer, layerId, tensorInfo);
2404
2405 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2406}
2407
saoste01bbd40612018-08-28 15:41:51 +01002408ParsedTfOperationPtr TfParser::ParseRealDiv(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2409{
2410 boost::ignore_unused(graphDef);
2411 return AddRealDivLayer(nodeDef);
2412}
2413
surmeh01bceff2f2018-03-29 16:29:27 +01002414ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
2415 const tensorflow::GraphDef& graphDef)
2416{
2417 boost::ignore_unused(graphDef);
2418
2419 ActivationDescriptor activationDesc;
2420 activationDesc.m_Function = ActivationFunction::ReLu;
2421 return AddActivationLayer(nodeDef, activationDesc);
2422}
2423
2424ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
2425 const tensorflow::GraphDef& graphDef)
2426{
2427 boost::ignore_unused(graphDef);
2428
2429 ActivationDescriptor activationDesc;
2430 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2431 activationDesc.m_A = 6.0f;
2432 activationDesc.m_B = 0.0f;
2433
2434 return AddActivationLayer(nodeDef, activationDesc);
2435}
2436
2437ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
2438 const tensorflow::GraphDef& graphDef)
2439{
2440 boost::ignore_unused(graphDef);
2441
2442 ActivationDescriptor activationDesc;
2443 activationDesc.m_Function = ActivationFunction::Sigmoid;
2444
2445 return AddActivationLayer(nodeDef, activationDesc);
2446}
2447
2448ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
2449 const tensorflow::GraphDef& graphDef)
2450{
2451 boost::ignore_unused(graphDef);
2452
2453 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2454
2455 SoftmaxDescriptor softmaxDescriptor;
2456 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(softmaxDescriptor, nodeDef.name().c_str());
2457
2458 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2459 prevLayerSlot.Connect(layer->GetInputSlot(0));
2460 layer->GetOutputSlot(0).SetTensorInfo(prevLayerSlot.GetTensorInfo());
2461
2462 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2463}
2464
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002465ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef,
2466 const tensorflow::GraphDef& graphDef)
2467{
2468 boost::ignore_unused(graphDef);
2469
2470 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2471 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2472 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2473
 2474 // The last input is the axis for the split operation.
2475 if (!HasParsedConstTensor<int32_t>(inputs[numInputs - 1].m_IndexedValue->GetNode().name()))
2476 {
2477 throw ParseException(
2478 boost::str(
2479 boost::format(
2480 "ArmNN only supports split with constant axis. "
2481 "Input %1%. Node %2% %3%")
2482 % inputs[numInputs - 1].m_IndexedValue->GetNode().name()
2483 % nodeDef.name()
2484 % CHECK_LOCATION().AsString()));
2485 }
2486 ParsedConstTfOperation<int32_t>* shapeNode =
2487 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[numInputs - 1].m_IndexedValue);
2488
2489 // Get the axis tensor data
2490 std::vector<int32_t> axisTensorData;
2491 shapeNode->GetConstTensor(axisTensorData);
2492
 2493 // The split axis reflects the data format: the channel dimension is 3 for NHWC and 1 for NCHW.
2494 const unsigned int splitDim = static_cast<unsigned int>(axisTensorData[0]);
2495
2496 // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
2497 if (splitDim == 0 || splitDim == 2)
2498 {
2499 throw ParseException(
2500 boost::str(
2501 boost::format(
2502 "Dimension %1% for split is not supported by Armnn. "
2503 "Node %2% %3%")
2504 % splitDim
2505 % nodeDef.name()
2506 % CHECK_LOCATION().AsString()));
2507 }
2508
 2509 // As Armnn only supports splitter outputs of the same shape, num_or_size_splits is limited to a single integer.
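    // e.g. num_or_size_splits = 3 applied to a [1, 4, 4, 6] NHWC input on axis 3
    // yields three outputs of shape [1, 4, 4, 2].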
2510 uint32_t num_split = ReadMandatoryNodeUint32Attribute(nodeDef, "num_or_size_splits");
2511
2512 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2513 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2514
2515 if (inputTensorInfo.GetNumDimensions() != MaxNumOfTensorDimensions)
2516 {
2517 throw armnn::ParseException(
2518 boost::str(
2519 boost::format(
2520 "The number of dimensions: %1% for input tensors of the "
2521 "splitter op should be %2% %3%")
2522 % inputTensorInfo.GetNumDimensions()
2523 % MaxNumOfTensorDimensions
2524 % CHECK_LOCATION().AsString()));
2525 }
2526 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2527
2528 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2529
2530 // Add current input shape to splitterDimSizes
2531 for (unsigned int i = 0; i < inputDimSize; ++i)
2532 {
2533 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2534 }
2535
2536 if (splitterDimSizes[splitDim] % num_split != 0)
2537 {
2538 throw ParseException("Number of splits must evenly divide the dimension");
2539 }
2540 splitterDimSizes[splitDim] /= num_split;
2541
2542 SplitterDescriptor splitDesc(num_split);
2543 for (unsigned int g = 0; g < num_split; ++g)
2544 {
2545 // Set the size of the views.
2546 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2547 {
2548 splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
2549 }
2550 splitDesc.SetViewOriginCoord(g, splitDim, splitterDimSizes[splitDim] * g);
2551 }
2552
2553 IConnectableLayer *layer = m_Network->AddSplitterLayer(splitDesc, nodeDef.name().c_str());
2554
2555 inputSlot.Connect(layer->GetInputSlot(0));
2556
2557 TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
2558 splitterDimSizes.data());
2559
2560 for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
2561 {
2562 layer->GetOutputSlot(i).SetTensorInfo(armnn::TensorInfo(outShape, inputTensorInfo.GetDataType()));
2563 }
2564
2565 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2566}
2567
surmeh01bceff2f2018-03-29 16:29:27 +01002568ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
2569 const tensorflow::GraphDef& graphDef)
2570{
2571 boost::ignore_unused(graphDef);
2572
2573 ActivationDescriptor activationDesc;
2574 activationDesc.m_Function = ActivationFunction::SoftReLu;
2575
2576 return AddActivationLayer(nodeDef, activationDesc);
2577}
2578
2579ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2580{
2581 boost::ignore_unused(graphDef);
2582
2583 ActivationDescriptor activationDesc;
2584 activationDesc.m_Function = ActivationFunction::TanH;
2585 activationDesc.m_A = 1.0f;
2586 activationDesc.m_B = 1.0f;
2587
2588 return AddActivationLayer(nodeDef, activationDesc);
2589}
2590
2591ParsedTfOperationPtr TfParser::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
2592 ActivationDescriptor& activationDesc)
2593{
2594 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2595
2596 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, nodeDef.name().c_str());
2597
2598 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2599 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2600 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2601 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2602}
2603
2604ParsedTfOperationPtr TfParser::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
2605 const tensorflow::GraphDef& graphDef)
2606{
2607 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
2608}
2609
2610ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
2611 const tensorflow::GraphDef& graphDef)
2612{
2613 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
2614}
2615
2616ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
2617 const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
2618{
2619 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2620 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2621 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2622
2623 if (inputs.size() != 1)
2624 {
telsoa01c577f2c2018-08-31 09:22:23 +01002625 throw ParseException(
2626 boost::str(
2627 boost::format(
2628 "2D Pooling expects one input!. Got %1% for Node %2% %3%")
2629 % inputs.size()
2630 % nodeDef.name()
2631 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002632 }
2633
2634 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
2635 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
2636 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
2637 std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef, "ksize"); // size of pool windows
2638
2639 Pooling2dDescriptor pooling2dDescriptor;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002640 pooling2dDescriptor.m_PoolType = pooltype;
2641 pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
surmeh01bceff2f2018-03-29 16:29:27 +01002642 pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;
2643
telsoa01c577f2c2018-08-31 09:22:23 +01002644 CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
FrancisMurtaghf005e312018-12-06 15:26:04 +00002645 DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
2646 pooling2dDescriptor.m_DataLayout = dataLayout;
2647 DataLayoutIndexed dataLayoutIndexed(dataLayout);
telsoa01c577f2c2018-08-31 09:22:23 +01002648
FrancisMurtaghf005e312018-12-06 15:26:04 +00002649 pooling2dDescriptor.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
2650 pooling2dDescriptor.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
2651 pooling2dDescriptor.m_PoolWidth = ksize[dataLayoutIndexed.GetWidthIndex()];
2652 pooling2dDescriptor.m_PoolHeight = ksize[dataLayoutIndexed.GetHeightIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01002653
FrancisMurtaghf005e312018-12-06 15:26:04 +00002654 uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
2655 uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01002656
2657 bool padding = false;
2658 TensorInfo outputInfo;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002659 unsigned int outputHeight = 0;
2660 unsigned int outputWidth = 0;
telsoa01c577f2c2018-08-31 09:22:23 +01002661
2662 CHECK_PADDING_TYPE(nodeDef, paddingString);
2663
surmeh01bceff2f2018-03-29 16:29:27 +01002664 if (paddingString == "SAME")
2665 {
2666 padding = true;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002667
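        // SAME padding: the output extent is ceil(input / stride), matching TensorFlow's definition.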
2668 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
2669 static_cast<float>(pooling2dDescriptor.m_StrideY)));
2670 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
2671 static_cast<float>(pooling2dDescriptor.m_StrideX)));
surmeh01bceff2f2018-03-29 16:29:27 +01002672 }
2673 else if (paddingString == "VALID")
2674 {
2675 padding = false;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002676
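        // VALID padding: the output extent is ceil((input - window + 1) / stride), matching TensorFlow's definition.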
2677 outputHeight = static_cast<uint32_t>(ceil(
2678 static_cast<float>(inputHeight - pooling2dDescriptor.m_PoolHeight + 1) /
2679 static_cast<float>(pooling2dDescriptor.m_StrideY)));
2680 outputWidth = static_cast<uint32_t>(ceil(
2681 static_cast<float>(inputWidth - pooling2dDescriptor.m_PoolWidth + 1) /
2682 static_cast<float>(pooling2dDescriptor.m_StrideX)));
2683 }
2684
2685 switch (dataLayout)
2686 {
2687 case DataLayout::NHWC:
2688 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
2689 outputHeight,
2690 outputWidth,
2691 inputTensorInfo.GetShape()[3] },
2692 DataType::Float32);
2693 break;
2694 case DataLayout::NCHW:
2695 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
2696 inputTensorInfo.GetShape()[1],
2697 outputHeight,
2698 outputWidth },
2699 DataType::Float32);
2700 break;
surmeh01bceff2f2018-03-29 16:29:27 +01002701 }
surmeh01bceff2f2018-03-29 16:29:27 +01002702
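    // Derive the explicit per-edge padding values; 'padding' selects SAME (true) or VALID (false) behaviour.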
2703 CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX,
FrancisMurtaghf005e312018-12-06 15:26:04 +00002704 pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01002705 CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY,
FrancisMurtaghf005e312018-12-06 15:26:04 +00002706 pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01002707
2708
2709 IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
2710 if (layer == nullptr)
2711 {
telsoa01c577f2c2018-08-31 09:22:23 +01002712 throw ParseException(
2713 boost::str(
2714 boost::format(
2715 "Failed to add pooling2d layer for %1% %2%")
2716 % nodeDef.name()
2717 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002718 }
2719
2720 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2721
FrancisMurtaghf005e312018-12-06 15:26:04 +00002722 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01002723
2724 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2725}
2726
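// Adds an ArmNN Addition layer for both Add and BiasAdd nodes; isBiasAdd enables the BiasAdd-specific
// handling, which reshapes the 1D bias so it broadcasts against the other input.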
2727ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
2728{
2729 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2730
2731 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2732 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2733
2734 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
2735 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
2736
2737 if (isBiasAdd)
2738 {
2739        // BiasAdd takes the bias as a 1D tensor. Add a reshape layer to create a 4D tensor with the
2740        // same data, laid out so that it broadcasts correctly in the addition.
2741 if(input1Info.GetNumDimensions() != 1)
2742 {
telsoa01c577f2c2018-08-31 09:22:23 +01002743 throw ParseException(
2744 boost::str(
2745 boost::format(
2746 "Unsupported bias for BiasAdd. It should be a 1D vector. "
2747 "Got %1% dimensions for input %2%. Node %3% %4%")
2748 % input1Info.GetNumDimensions()
2749 % inputs[1].m_IndexedValue->GetNode().name()
2750 % nodeDef.name()
2751 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002752 }
2753
2754 const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
surmeh01bceff2f2018-03-29 16:29:27 +01002755
telsoa01c577f2c2018-08-31 09:22:23 +01002756 CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
saoste01bbd40612018-08-28 15:41:51 +01002757 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002758 }
2759 else
2760 {
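        // For a plain Add, reshape whichever input is 1D so that it broadcasts against the other input
        // (the data is assumed to be in NHWC order).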
2761 if (input0Info.GetNumDimensions() == 1)
2762 {
2763 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002764 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002765 }
2766
2767 if (input1Info.GetNumDimensions() == 1)
2768 {
2769 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002770 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002771 }
2772 }
2773
2774 IConnectableLayer* const layer = m_Network->AddAdditionLayer(nodeDef.name().c_str());
2775
2776 input0Slot->Connect(layer->GetInputSlot(0));
2777 input1Slot->Connect(layer->GetInputSlot(1));
2778
2779 if (input0Info.GetNumDimensions() == 1 && isBiasAdd == false)
2780 {
2781 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2782 }
2783 else
2784 {
2785 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2786 }
2787
2788 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2789}
2790
saoste01bbd40612018-08-28 15:41:51 +01002791ParsedTfOperationPtr TfParser::AddRealDivLayer(const tensorflow::NodeDef& nodeDef)
2792{
2793 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2794
2795 IConnectableLayer* const layer = m_Network->AddDivisionLayer(nodeDef.name().c_str());
2796 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2797 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2798
2799 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2800 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2801
2802
2803 if (input0NumDims < input1NumDims)
2804 {
2805 const bool isNHWC = true;
2806 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
2807 }
2808 if (input1NumDims < input0NumDims)
2809 {
2810 const bool isNHWC = true;
2811 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
2812 }
2813
2814 input0Slot->Connect(layer->GetInputSlot(0));
2815 input1Slot->Connect(layer->GetInputSlot(1));
2816
2817 if (input0NumDims < input1NumDims)
2818 {
2819 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2820 }
2821 else
2822 {
2823 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2824
2825 }
2826 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2827}
2828
Sadik Armagan975c09a2018-12-04 10:02:08 +00002829ParsedTfOperationPtr TfParser::AddMaximumLayer(const tensorflow::NodeDef& nodeDef)
2830{
2831 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2832
2833 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2834 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2835
2836 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2837 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2838
2839 if (input0NumDims < input1NumDims)
2840 {
2841 const bool isNHWC = true;
2842 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
2843 }
2844 if (input1NumDims < input0NumDims)
2845 {
2846 const bool isNHWC = true;
2847 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
2848 }
2849
2850 IConnectableLayer* const layer = m_Network->AddMaximumLayer(nodeDef.name().c_str());
2851
2852 input0Slot->Connect(layer->GetInputSlot(0));
2853 input1Slot->Connect(layer->GetInputSlot(1));
2854
2855 TensorInfo outputInfo = input0Slot->GetTensorInfo();
2856 std::vector<unsigned int> outputShape;
2857
2858 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
2859 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
2860
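    // After the broadcast reshapes above both inputs have the same rank, so the output shape is the
    // per-dimension maximum of the two input shapes.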
2861 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
2862 {
2863 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
2864 }
2865
2866 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
2867 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2868
2869 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2870}
2871
telsoa01c577f2c2018-08-31 09:22:23 +01002872IConnectableLayer* TfParser::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
2873{
2874 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2875
2876 IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
2877 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2878 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2879
2880 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2881 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2882
2883 if (input0NumDims < input1NumDims)
2884 {
2885 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002886 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01002887 }
2888 if (input1NumDims < input0NumDims)
2889 {
2890 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01002891 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01002892 }
2893
2894 input0Slot->Connect(layer->GetInputSlot(0));
2895 input1Slot->Connect(layer->GetInputSlot(1));
2896
2897 if (input0NumDims < input1NumDims)
2898 {
2899 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2900 }
2901 else
2902 {
2903 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2904 }
2905 return layer;
2906}
2907
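// Builds a FullyConnected layer from a MatMul node and an optional following Add node that supplies the bias.
// Both the weights and, when present, the bias must come from constant nodes.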
surmeh01bceff2f2018-03-29 16:29:27 +01002908IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
2909 const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
2910{
telsoa01c577f2c2018-08-31 09:22:23 +01002911 // Finds bias const (if applicable).
surmeh01bceff2f2018-03-29 16:29:27 +01002912 ParsedConstTfOperation<float>* biasNode = nullptr;
2913 if (addNodeDef != nullptr)
2914 {
2915 std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
telsoa01c577f2c2018-08-31 09:22:23 +01002916 // Finds our inputs.
surmeh01bceff2f2018-03-29 16:29:27 +01002917 if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
2918 {
2919 biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
2920 }
2921 else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
2922 {
2923 biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
2924 }
2925 else
2926 {
telsoa01c577f2c2018-08-31 09:22:23 +01002927 throw ParseException(
2928 boost::str(
2929 boost::format(
2930 "ArmNN only supports fully connected layers with constant bias. "
2931 "Inputs %1% and %2%. AddNode %3%. MatMulNode %4% %5%")
2932 % addInputs[0].m_IndexedValue->GetNode().name()
2933 % addInputs[1].m_IndexedValue->GetNode().name()
2934 % addNodeDef->name()
2935 % matMulNodeDef.name()
2936 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002937 }
2938 }
2939
telsoa01c577f2c2018-08-31 09:22:23 +01002940 // Finds matmul inputs.
surmeh01bceff2f2018-03-29 16:29:27 +01002941 ParsedConstTfOperation<float>* weightNode = nullptr;
2942 ParsedTfOperation* inputNode = nullptr;
2943 unsigned int inputIdx = 0;
2944 std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
2945 if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
2946 {
2947 weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
2948 inputNode = mulInputs[1].m_IndexedValue;
2949 inputIdx = mulInputs[1].m_Index;
2950 }
2951 else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
2952 {
2953 weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
2954 inputNode = mulInputs[0].m_IndexedValue;
2955 inputIdx = mulInputs[0].m_Index;
2956 }
2957 else
2958 {
telsoa01c577f2c2018-08-31 09:22:23 +01002959 throw ParseException(
2960 boost::str(
2961 boost::format(
2962 "ArmNN only supports fully connected layers with constant weights. "
2963 "Inputs %1% and %2%. MatMulNode %3% %4%")
2964 % mulInputs[0].m_IndexedValue->GetNode().name()
2965 % mulInputs[1].m_IndexedValue->GetNode().name()
2966 % matMulNodeDef.name()
2967 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002968 }
2969
2970 std::vector<float> weightTensorData;
telsoa01c577f2c2018-08-31 09:22:23 +01002971 // Handles weight.
Matteo Martincigh482ca852018-12-12 09:20:55 +00002972 ConstTensor weights = weightNode->GetConstTensor(weightTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002973
2974 FullyConnectedDescriptor desc;
2975 desc.m_BiasEnabled = addNodeDef != nullptr;
2976
2977 IConnectableLayer* layer = nullptr;
telsoa01c577f2c2018-08-31 09:22:23 +01002978 // Makes the layer.
surmeh01bceff2f2018-03-29 16:29:27 +01002979 if (addNodeDef != nullptr)
2980 {
2981 std::vector<float> biasTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002982 ConstTensor biases = biasNode->GetConstTensor(biasTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002983
2984 if (weights.GetShape()[1] != biases.GetShape()[0])
2985 {
telsoa01c577f2c2018-08-31 09:22:23 +01002986 throw ParseException(
2987 boost::str(
2988 boost::format(
2989                    "Shapes of matmul weights and bias do not match. "
2990 "AddNode %1%. MatMulNode %2% %3%")
2991 % addNodeDef->name()
2992 % matMulNodeDef.name()
2993 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002994 }
2995
2996 layer = m_Network->AddFullyConnectedLayer(desc, weights, biases, armnnLayerName);
2997 }
2998 else
2999 {
3000 layer = m_Network->AddFullyConnectedLayer(desc, weights, armnnLayerName);
3001 }
3002
3003 BOOST_ASSERT(layer != nullptr);
3004
3005 inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
3006 unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];
3007
telsoa01c577f2c2018-08-31 09:22:23 +01003008 // Handles output.
surmeh01bceff2f2018-03-29 16:29:27 +01003009 TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
3010 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3011 return layer;
3012}
3013
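// Parses a single node by dispatching to the handler registered for its op type, and adds an ArmNN output
// layer if the node was requested as a network output.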
3014void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
3015{
telsoa01c577f2c2018-08-31 09:22:23 +01003016 // Gets the type of the node (assume float).
surmeh01bceff2f2018-03-29 16:29:27 +01003017 tensorflow::DataType type = tensorflow::DT_FLOAT;
3018 if (nodeDef.attr().count("T") != 0)
3019 {
3020 auto attr = nodeDef.attr().at("T");
3021 type = attr.type();
3022 }
3023 else if (nodeDef.attr().count("dtype") != 0)
3024 {
3025 auto attr = nodeDef.attr().at("dtype");
3026 type = attr.type();
3027 }
3028
3029 if (type != tensorflow::DT_FLOAT && nodeDef.op() != "Const")
3030 {
telsoa01c577f2c2018-08-31 09:22:23 +01003031 throw ParseException(
3032 boost::str(
3033 boost::format(
3034 "Currently only FLOAT is supported for tensorflow nodes (apart from Const). "
3035 "Got %1% for Node %2% %3%")
3036 % tensorflow::DataType_Name(type)
3037 % nodeDef.name()
3038 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003039 }
3040
3041 const std::string& operation = nodeDef.op();
narpra016f37f832018-12-21 18:30:00 +00003042 auto itControlInput = std::find(m_ControlInputs.begin(), m_ControlInputs.end(), operation);
3043 if (itControlInput != m_ControlInputs.end())
3044 {
3045        // Control Inputs are allowed in the TensorFlow graph but are ignored when building the ArmNN graph.
3046 return;
3047 }
surmeh01bceff2f2018-03-29 16:29:27 +01003048 auto it = ms_OperationNameToParsingFunctions.find(operation);
3049 if (it != ms_OperationNameToParsingFunctions.end())
3050 {
3051 auto func = it->second;
3052 ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
3053 ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
3054
telsoa01c577f2c2018-08-31 09:22:23 +01003055 // Stores the parsed operation so that dependent layers can connect to it.
surmeh01bceff2f2018-03-29 16:29:27 +01003056 auto it = m_ParsedTfOperations.find(nodeDef.name());
3057 if (it != m_ParsedTfOperations.end())
3058 {
3059 throw ParseException(boost::str(boost::format("Name %1% used by more than one node") % nodeDef.name()));
3060 }
3061 m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
3062
telsoa01c577f2c2018-08-31 09:22:23 +01003063 // If this node was requested as an output from the network, then adds an ArmNN output layer.
surmeh01bceff2f2018-03-29 16:29:27 +01003064 if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
3065 m_RequestedOutputs.end())
3066 {
3067 auto outId = ParseOutputId(nodeDef.name());
3068 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
3069 IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
3070
3071 TensorInfo tensorInfo = prevSlot.GetTensorInfo();
3072
3073 IConnectableLayer* outputLayer = m_Network->AddOutputLayer(layerId, nodeDef.name().c_str());
3074
3075 prevSlot.Connect(outputLayer->GetInputSlot(0));
3076
3077 TrackOutputBinding(outputLayer, layerId, tensorInfo);
3078 }
3079 }
3080 else
3081 {
telsoa01c577f2c2018-08-31 09:22:23 +01003082 throw ParseException(
3083 boost::str(
3084 boost::format(
3085 "Unsupported operation %1% in tensorflow::GraphDef %2%")
3086 % operation
3087 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003088 }
3089}
3090
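// Registers every node by name, topologically sorts the sub-graph feeding the requested outputs and then
// parses each node in dependency order.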
3091void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
3092{
telsoa01c577f2c2018-08-31 09:22:23 +01003093 // Adds all nodes to our map.
surmeh01bceff2f2018-03-29 16:29:27 +01003094 m_NodesByName.clear();
3095 m_NetworkInputsBindingInfo.clear();
3096 m_NetworkOutputsBindingInfo.clear();
3097
3098 for (int i = 0; i < graphDef.node_size(); ++i)
3099 {
3100 const tensorflow::NodeDef& node = graphDef.node(i);
3101 m_NodesByName[node.name()] = &node;
3102 }
3103
telsoa01c577f2c2018-08-31 09:22:23 +01003104 // Finds the output nodes the user requested.
surmeh01bceff2f2018-03-29 16:29:27 +01003105 std::vector<const tensorflow::NodeDef*> targetNodes;
3106 for (const std::string& requestedOutputName : m_RequestedOutputs)
3107 {
3108 auto nodeIt = m_NodesByName.find(requestedOutputName);
3109 if (nodeIt == m_NodesByName.end())
3110 {
telsoa01c577f2c2018-08-31 09:22:23 +01003111 throw ParseException(
3112 boost::str(
3113 boost::format(
3114 "Couldn't find requested output node '%1%' in graph %2%")
3115 % requestedOutputName
3116 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003117 }
3118 targetNodes.push_back(nodeIt->second);
3119 }
3120
telsoa01c577f2c2018-08-31 09:22:23 +01003121 // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01003122 std::vector<const tensorflow::NodeDef*> sortedNodes;
3123 if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
3124 targetNodes,
3125 [this](const tensorflow::NodeDef* node)
3126 {
3127 auto outputs = GetTfInputNodes(*node);
3128 std::vector<const tensorflow::NodeDef*> nodesOnly;
3129 for (const auto & o : outputs) {
3130 nodesOnly.push_back(o.m_IndexedValue);
3131 }
3132 return nodesOnly;
3133 },
3134 sortedNodes))
3135 {
telsoa01c577f2c2018-08-31 09:22:23 +01003136 throw ParseException(
3137 boost::str(
3138 boost::format(
3139 "Cycle detected in graph %1%")
3140 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003141 }
3142
telsoa01c577f2c2018-08-31 09:22:23 +01003143 // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01003144 for (const auto& it : sortedNodes)
3145 {
3146 const tensorflow::NodeDef& currentNode = *it;
3147 LoadNodeDef(currentNode, graphDef);
3148 }
3149}
3150
3151INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
3152 const std::map<std::string, TensorShape>& inputShapes,
3153 const std::vector<std::string>& requestedOutputs)
3154{
3155 FILE* fd = fopen(graphFile, "r");
3156
3157 if (fd == nullptr)
3158 {
telsoa01c577f2c2018-08-31 09:22:23 +01003159 throw FileNotFoundException(
3160 boost::str(
3161 boost::format(
3162 "Graph file %1% failed to open %2%")
3163 % graphFile
3164 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003165 }
3166
telsoa01c577f2c2018-08-31 09:22:23 +01003167 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003168 tensorflow::GraphDef graphDef;
3169 auto input = new google::protobuf::io::FileInputStream(fileno(fd));
3170 bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
3171 delete input;
3172 fclose(fd);
3173
3174 if (!success)
3175 {
telsoa01c577f2c2018-08-31 09:22:23 +01003176 throw ParseException(
3177 boost::str(
3178 boost::format(
3179 "Failed to parse graph file %1%")
3180 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003181 }
3182
3183 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3184}
3185
3186INetworkPtr TfParser::CreateNetworkFromString(const char* protoText,
3187 const std::map<std::string, TensorShape>& inputShapes,
3188 const std::vector<std::string>& requestedOutputs)
3189{
telsoa01c577f2c2018-08-31 09:22:23 +01003190 // Parses the string into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003191 tensorflow::GraphDef graphDef;
3192 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
3193
3194 if (!success)
3195 {
telsoa01c577f2c2018-08-31 09:22:23 +01003196 throw ParseException(
3197 boost::str(
3198 boost::format(
3199                    "Failed to parse graph string %1%")
3200 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003201 }
3202
3203 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3204}
3205
3206INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
3207 const std::map<std::string, TensorShape>& inputShapes,
3208 const std::vector<std::string>& requestedOutputs)
3209{
3210 FILE* fd = fopen(graphFile, "rb");
3211
3212 if (fd == nullptr)
3213 {
telsoa01c577f2c2018-08-31 09:22:23 +01003214 throw FileNotFoundException(
3215 boost::str(
3216 boost::format(
3217 "Graph file %1% failed to open %2%")
3218 % graphFile
3219 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003220 }
3221
telsoa01c577f2c2018-08-31 09:22:23 +01003222 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003223 tensorflow::GraphDef graphDef;
3224
3225 google::protobuf::io::FileInputStream inStream(fileno(fd));
3226 google::protobuf::io::CodedInputStream codedStream(&inStream);
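    // Raise protobuf's default message size limit so that large graph files can be parsed.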
3227 codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
3228 bool success = graphDef.ParseFromCodedStream(&codedStream);
3229 fclose(fd);
3230
3231 if (!success)
3232 {
telsoa01c577f2c2018-08-31 09:22:23 +01003233 throw ParseException(
3234 boost::str(
3235 boost::format(
3236 "Failed to parse protobuf file %1% %2%")
3237 % graphFile
3238 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003239 }
3240
3241 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3242}
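
// Illustrative usage only (a minimal sketch, not part of this file): the file name, input/output names and
// input shape below are assumptions that depend on the particular frozen graph being loaded.
//
//     armnnTfParser::ITfParserPtr parser = armnnTfParser::ITfParser::Create();
//     armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile(
//         "model.pb",
//         { { "input", armnn::TensorShape({ 1, 224, 224, 3 }) } },
//         { "output" });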
3243
3244INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
3245 const std::map<std::string, TensorShape>& inputShapes,
3246 const std::vector<std::string>& requestedOutputs)
3247{
3248 m_Network = INetwork::Create();
3249
3250 m_InputShapes = inputShapes;
3251 if (requestedOutputs.size() == 0)
3252 {
telsoa01c577f2c2018-08-31 09:22:23 +01003253 throw ParseException(
3254 boost::str(
3255 boost::format(
3256 "requestedOutputs must have at least one entry %1%")
3257 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003258 }
3259 m_RequestedOutputs = requestedOutputs;
3260
3261 try
3262 {
3263 LoadGraphDef(graphDef);
3264 }
3265    catch (const ParseException&)
3266    {
3267        Cleanup();
3268        throw;
3269 }
3270
3271 Cleanup();
3272
3273 return std::move(m_Network);
3274}
3275
3276void TfParser::Cleanup()
3277{
telsoa01c577f2c2018-08-31 09:22:23 +01003278 // Cleanup, in case we reuse this parser.
surmeh01bceff2f2018-03-29 16:29:27 +01003279 m_InputShapes.clear();
3280 m_RequestedOutputs.clear();
3281 m_NodesByName.clear();
3282 m_ParsedTfOperations.clear();
3283}
3284
3285BindingPointInfo TfParser::GetNetworkInputBindingInfo(const std::string& name) const
3286{
3287 return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
3288}
3289
3290BindingPointInfo TfParser::GetNetworkOutputBindingInfo(const std::string& name) const
3291{
3292 return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
3293}
3294
3295std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(const std::string& layerName,
3296 const char* bindingPointDesc,
3297 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3298{
3299 auto it = nameToBindingInfo.find(layerName);
3300 if (it == nameToBindingInfo.end())
3301 {
telsoa01c577f2c2018-08-31 09:22:23 +01003302 throw InvalidArgumentException(
3303 boost::str(
3304 boost::format(
3305 "Unknown %1% '%2%' %3%")
3306 % bindingPointDesc
3307 % layerName
3308 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003309 }
3310 return it->second;
3311}
3312
3313void TfParser::TrackInputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
3314{
3315 return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
3316}
3317
3318void TfParser::TrackOutputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
3319{
3320 return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
3321}
3322
3323void TfParser::TrackBindingPoint(IConnectableLayer* layer,
3324 LayerBindingId id,
3325 const TensorInfo& tensorInfo,
3326 const char* bindingPointDesc,
3327 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3328{
3329 const std::string layerName = layer->GetName();
3330 auto it = nameToBindingInfo.find(layerName);
3331 if (it == nameToBindingInfo.end())
3332 {
3333 nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
3334 }
3335 else
3336 {
telsoa01c577f2c2018-08-31 09:22:23 +01003337 throw ParseException(
3338 boost::str(
3339 boost::format(
3340 "Id %1% used by more than one %2% layer %3%")
3341 % id
3342 % bindingPointDesc
3343 % CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003344 }
3345}
3346
3347} // namespace armnnTfParser