blob: 1e566fe943cdfe5cadc5888ef704d238b8bed357 [file] [log] [blame]
surmeh01bceff2f2018-03-29 16:29:27 +01001//
Teresa Charlin52664732020-06-29 16:27:03 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
surmeh01bceff2f2018-03-29 16:29:27 +01004//
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00005
surmeh01bceff2f2018-03-29 16:29:27 +01006#include "TfParser.hpp"
7
Matthew Sloyanac001ee2021-02-03 10:43:04 +00008#include "armnnTfParser/Version.hpp"
9
surmeh01bceff2f2018-03-29 16:29:27 +010010#include <armnn/TypesUtils.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010011#include <armnn/Descriptors.hpp>
12
Matteo Martincighe011d202019-11-28 11:35:47 +000013#include <armnnUtils/Permute.hpp>
14#include <armnnUtils/DataLayoutIndexed.hpp>
Mike Kelly08759e22020-03-02 11:41:31 +000015#include <armnnUtils/Transpose.hpp>
Jan Eilers8eb25602020-03-09 12:13:48 +000016#include <armnn/utility/IgnoreUnused.hpp>
Matthew Sloyan589e3e82020-09-11 16:17:48 +010017#include <armnn/utility/NumericCast.hpp>
Jan Eilersbb446e52020-04-02 13:56:54 +010018#include <armnn/utility/PolymorphicDowncast.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +000019
surmeh01bceff2f2018-03-29 16:29:27 +010020#include <GraphTopologicalSort.hpp>
Sadik Armagan479045b2018-10-01 11:51:37 +010021#include <ParserHelper.hpp>
surmeh01bceff2f2018-03-29 16:29:27 +010022
23#include <google/protobuf/io/zero_copy_stream_impl.h>
24#include <google/protobuf/text_format.h>
25
Derek Lambertibaa177f2019-12-10 22:00:43 +000026#include <tensorflow/core/framework/graph.pb.h>
surmeh01bceff2f2018-03-29 16:29:27 +010027
James Ward58dec6b2020-09-11 17:32:44 +010028#include <fmt/core.h>
Jan Eilersba3ef182020-09-25 08:36:44 +010029#include <fmt/format.h>
Matthew Sloyanac001ee2021-02-03 10:43:04 +000030#include <iostream>
surmeh01bceff2f2018-03-29 16:29:27 +010031#include <numeric>
surmeh01bceff2f2018-03-29 16:29:27 +010032
Matteo Martincigh46315822018-11-28 16:22:36 +000033using namespace armnnUtils;
surmeh01bceff2f2018-03-29 16:29:27 +010034using namespace armnn;
35
36namespace armnnTfParser
37{
Kevin May7d96b162021-02-03 17:38:41 +000038
/// Constructs the public facade; all parsing state lives in the private TfParserImpl (pImpl idiom).
ITfParser::ITfParser() : pTfParserImpl(new ITfParser::TfParserImpl()){}
40
/// Defaulted in the .cpp (not the header) so the pImpl member can be destroyed
/// where TfParserImpl is a complete type.
ITfParser::~ITfParser() = default;
42
/// Allocates a parser and transfers ownership of the raw pointer to the caller.
/// Callers must release it with ITfParser::Destroy (or use Create() instead).
ITfParser *ITfParser::CreateRaw()
{
    return new ITfParser();
}
47
/// Preferred factory: returns a smart pointer that pairs CreateRaw with Destroy
/// as its custom deleter, so the parser cannot be leaked.
ITfParserPtr ITfParser::Create()
{
    return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
}
52
/// Deletes a parser previously obtained from CreateRaw()/Create().
void ITfParser::Destroy(ITfParser *parser)
{
    delete parser;
}
57
/// Parses a text-format (pbtxt) TensorFlow GraphDef file into an ArmNN network.
/// @param graphFile        path of the protobuf text file.
/// @param inputShapes      shape overrides for the named graph inputs.
/// @param requestedOutputs names of the tensors the network should expose as outputs.
armnn::INetworkPtr ITfParser::CreateNetworkFromTextFile(const char* graphFile,
    const std::map<std::string, armnn::TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    // Pure forwarder to the pImpl.
    return pTfParserImpl->CreateNetworkFromTextFile(graphFile, inputShapes, requestedOutputs);
}
64
/// Parses a binary-serialized TensorFlow GraphDef file into an ArmNN network.
/// Parameters mirror CreateNetworkFromTextFile.
armnn::INetworkPtr ITfParser::CreateNetworkFromBinaryFile(const char* graphFile,
    const std::map<std::string, armnn::TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    // Pure forwarder to the pImpl.
    return pTfParserImpl->CreateNetworkFromBinaryFile(graphFile, inputShapes, requestedOutputs);
}
71
/// Parses an in-memory text-format GraphDef into an ArmNN network.
/// @param protoText NUL-terminated protobuf text of the graph.
armnn::INetworkPtr ITfParser::CreateNetworkFromString(const char* protoText,
    const std::map<std::string, armnn::TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    // Pure forwarder to the pImpl.
    return pTfParserImpl->CreateNetworkFromString(protoText, inputShapes, requestedOutputs);
}
78
/// Returns the binding info (binding id + tensor info) for the named graph input.
BindingPointInfo ITfParser::GetNetworkInputBindingInfo(const std::string& name) const
{
    return pTfParserImpl->GetNetworkInputBindingInfo(name);
}
83
/// Returns the binding info (binding id + tensor info) for the named graph output.
BindingPointInfo ITfParser::GetNetworkOutputBindingInfo(const std::string& name) const
{
    return pTfParserImpl->GetNetworkOutputBindingInfo(name);
}
surmeh01bceff2f2018-03-29 16:29:27 +010088namespace
89{
90
// Permutation vectors used when swapping tensors between TensorFlow's NHWC layout
// and ArmNN's NCHW layout.
// NOTE(review): the names describe the direction of use at the call sites; as
// written, { 0, 2, 3, 1 } maps NHWC -> NCHW — confirm against armnnUtils::Permute's
// mapping convention before relying on the names alone.
const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
93
surmeh01bceff2f2018-03-29 16:29:27 +010094
95template <typename Callable>
96void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
97 const std::string& attribName,
98 tensorflow::AttrValue::ValueCase expectedValueCase,
99 Callable callable)
100{
101 auto iter = nodeDef.attr().find(attribName);
102 if (iter != nodeDef.attr().end())
103 {
104 const auto& attrValue = iter->second;
105 if (attrValue.value_case() == expectedValueCase)
106 {
107 callable(attrValue);
108 }
109 else
110 {
telsoa01c577f2c2018-08-31 09:22:23 +0100111 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100112 fmt::format("Attribute {} of node {} expected to have {} as tensorflow::AttrValue::ValueCase, "
113 "but found {} instead {}",
114 attribName,
115 nodeDef.name(),
116 static_cast<int>(expectedValueCase),
117 static_cast<int>(attrValue.value_case()),
118 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100119 }
120 }
121 else
122 {
telsoa01c577f2c2018-08-31 09:22:23 +0100123 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100124 fmt::format("Could not find required attribute {} in node {} {}",
125 attribName,
126 nodeDef.name(),
127 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100128 }
129}
130
131template <typename Callable>
132void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
133 const std::string& attribName,
134 tensorflow::AttrValue::ValueCase expectedValueCase,
135 Callable callable)
136{
137 auto iter = nodeDef.attr().find(attribName);
138 if (iter != nodeDef.attr().end())
139 {
140 const auto& attrValue = iter->second;
141 if (attrValue.value_case() == expectedValueCase)
142 {
143 callable(attrValue);
144 }
145 else
146 {
telsoa01c577f2c2018-08-31 09:22:23 +0100147 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100148 fmt::format("Attribute {} of node {} expected to have {} as tensorflow::AttrValue::ValueCase, "
149 "but found {} instead {}",
150 attribName,
151 nodeDef.name(),
152 static_cast<int>(expectedValueCase),
153 static_cast<int>(attrValue.value_case()),
154 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100155 }
156 }
157}
158
159float ReadMandatoryNodeFloatAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
160{
161 float attribValue = 0.0f;
162 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
163 [&attribValue](const tensorflow::AttrValue& attrValue)
164 {
165 attribValue = attrValue.f();
166 });
167 return attribValue;
168}
169
Conor Kennedyc2130a02018-12-05 11:05:54 +0000170int32_t ReadMandatoryNodeInt32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
171{
172 int32_t attribValue = 0u;
173 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
174 [&attribValue](const tensorflow::AttrValue& attrValue)
175 {
176 attribValue = static_cast<int32_t>(attrValue.i());
177 });
178 return attribValue;
179}
180
Ferran Balaguer51dd62f2019-01-11 19:29:18 +0000181bool ReadMandatoryNodeBoolAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
182{
183 bool attribValue = false;
184 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
185 [&attribValue](const tensorflow::AttrValue& attrValue)
186 {
187 attribValue = static_cast<bool>(attrValue.b());
188 });
189 return attribValue;
190}
191
surmeh01bceff2f2018-03-29 16:29:27 +0100192uint32_t ReadMandatoryNodeUint32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
193{
194 uint32_t attribValue = 0u;
195 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
196 [&attribValue](const tensorflow::AttrValue& attrValue)
197 {
198 attribValue = static_cast<uint32_t>(attrValue.i());
199 });
200 return attribValue;
201}
202
203std::string ReadMandatoryNodeStringAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
204{
205 std::string attribValue = "";
206 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
207 [&attribValue](const tensorflow::AttrValue& attrValue)
208 {
209 attribValue = attrValue.s();
210 });
211 return attribValue;
212}
213
214std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
215 const std::string& name)
216{
217 std::vector<uint32_t> attriList;
218 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
219 [&attriList](const tensorflow::AttrValue& attrValue)
220 {
221 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
222 {
223 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
224 }
225 });
226
227 return attriList;
228}
229
230std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
231 const std::string& name)
232{
233 std::vector<uint32_t> attriList;
234 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
235 [&attriList](const tensorflow::AttrValue& attrValue)
236 {
237 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
238 {
239 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
240 }
241 });
242
243 return attriList;
244}
245
Aron Virginas-Tar2e259272019-11-27 13:29:51 +0000246std::string ReadOptionalNodeStringAttribute(const tensorflow::NodeDef& nodeDef,
247 const std::string& name,
248 const std::string& defaultValue = "")
249{
250 std::string attribValue = defaultValue;
251 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
252 [&attribValue](const tensorflow::AttrValue& attrValue)
253 {
254 attribValue = attrValue.s();
255 });
256 return attribValue;
257}
258
surmeh01bceff2f2018-03-29 16:29:27 +0100259bool ReadOptionalNodeBoolAttribute(const tensorflow::NodeDef& nodeDef,
260 const std::string& name,
261 bool defaultValue = false)
262{
263 bool attribValue = defaultValue;
264 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
265 [&attribValue](const tensorflow::AttrValue& attrValue)
266 {
267 attribValue = attrValue.b();
268 });
269 return attribValue;
270}
271
272tensorflow::DataType ReadMandatoryNodeTypeAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
273{
274 tensorflow::DataType attribValue = tensorflow::DT_INVALID;
275 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
276 [&attribValue](const tensorflow::AttrValue& attrValue)
277 {
278 attribValue = attrValue.type();
279 });
280 return attribValue;
281}
282
/// Computes the TensorInfo resulting from reshaping 'input' to 'targetDims',
/// following TensorFlow reshape semantics: a single -1 component is allowed and
/// is inferred from the input's element count and the remaining dimensions.
/// @throws ParseException if more than one component of targetDims is -1.
TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& targetDims)
{
    std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
    const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);

    if (stretchDim != targetDims.end())
    {
        // Only one dimension may be left for the parser to infer.
        if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
        {
            throw ParseException(
                fmt::format("At most one component of shape can be -1 {}",
                            CHECK_LOCATION().AsString()));
        }

        // The accumulate is seeded with -1 so the seed cancels the single -1
        // stretch component, leaving the (positive) product of the known dims.
        auto targetNumElements =
            armnn::numeric_cast<unsigned int>(
                std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
        auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
        // Infer the stretched dimension from the total element count.
        outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
    }

    TensorInfo reshapeInfo = input;
    reshapeInfo.SetShape(TensorShape{ static_cast<unsigned int>(outDims.size()), outDims.data() });

    return reshapeInfo;
}
309
// We need the input0Slot to guide the reshape for input1Slot.
// Inserts a Reshape layer so the tensor on input1Slot can be broadcast against
// input0Slot's tensor: every dimension is set to 1 except the channel dimension
// (last dim for NHWC, dim 1 for NCHW), which takes input1's first dimension.
// NOTE(review): only input1's dimension [0] is used, i.e. input1 is assumed to be
// effectively 1D — confirm with the callers (see CreateAdditionLayer).
// Returns the output slot of the newly inserted Reshape layer.
IOutputSlot* AddBroadcastReshapeLayer(IOutputSlot* input0Slot, IOutputSlot* input1Slot, bool isNHWC,
                                      INetwork& m_Network, const tensorflow::NodeDef& nodeDef)
{
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();
    const TensorInfo inputTensorInfo = input0Slot->GetTensorInfo();
    // Channel axis: last dimension when NHWC, (numDims - 3) i.e. dim 1 when NCHW.
    const unsigned int matchDim = inputTensorInfo.GetNumDimensions() - (isNHWC ? 1 : 3);
    std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
    std::fill_n(reshapedDimensions.begin(), inputTensorInfo.GetNumDimensions(), 1);
    reshapedDimensions[matchDim] = input1Info.GetShape()[0];

    armnn::TensorInfo reshapedInfo = input1Info;
    reshapedInfo.SetShape(TensorShape{ inputTensorInfo.GetNumDimensions(), reshapedDimensions.data() });

    const std::string reshapeLayerName = "reshape_for-" + nodeDef.name();
    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
    IConnectableLayer* const reshapeLayer = m_Network.AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());

    input1Slot->Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    input1Slot = &reshapeLayer->GetOutputSlot(0);

    return input1Slot;
}
336
337OutputId ParseOutputId(const std::string & name)
338{
339 unsigned int outputNum = 0;
340 size_t colonPos = name.find_last_of(":");
341 if (colonPos != std::string::npos)
342 {
343 int n = std::stoi(name.substr(colonPos+1));
344 if (n<0 || n>100)
345 {
telsoa01c577f2c2018-08-31 09:22:23 +0100346 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100347 fmt::format("Output tensor id is out of range for {} {}",
348 name,
349 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100350 }
351 outputNum = static_cast<unsigned int>(n);
352 }
353 return OutputId(name.substr(0,colonPos),outputNum);
354}
355
// Validates that FORMAT is one of the two supported data layouts and throws a
// ParseException naming the offending node otherwise. Used by the Parse* handlers.
#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \
    if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
    { \
        throw ParseException( \
            fmt::format("Unsupported data format {} passed for {} node {}. " \
                        "Only NHWC and NCHW supported {}", \
                        FORMAT, \
                        NODE_TYPE, \
                        NODE_DEF.name(), \
                        CHECK_LOCATION().AsString())); \
    }

// Validates that PADDING is one of TensorFlow's two padding schemes ("SAME" or
// "VALID") and throws a ParseException naming the offending node otherwise.
#define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
    if(PADDING != "SAME" && PADDING != "VALID" ) \
    { \
        throw ParseException( \
            fmt::format("Only 'SAME' and 'VALID' padding supported. Got {} for {} {}", \
                        PADDING, \
                        NODE_DEF.name(), \
                        CHECK_LOCATION().AsString())); \
    } \
surmeh01bceff2f2018-03-29 16:29:27 +0100378} // namespace
379
// Dispatch table: maps a TensorFlow op name to the TfParserImpl member function
// that converts that node into ArmNN layers. Ops not listed here are unsupported.
const std::map<std::string, ITfParser::TfParserImpl::OperationParsingFunction>
    ITfParser::TfParserImpl::ms_OperationNameToParsingFunctions = {
    { "Const", &TfParserImpl::ParseConst },
    { "Add", &TfParserImpl::ParseAdd },
    { "AddN", &TfParserImpl::ParseAddN },
    { "BiasAdd", &TfParserImpl::ParseBiasAdd },
    { "Identity", &TfParserImpl::ParseIdentity },
    { "Conv2D", &TfParserImpl::ParseConv2D },
    { "DepthwiseConv2dNative", &TfParserImpl::ParseDepthwiseConv2D },
    { "ExpandDims", &TfParserImpl::ParseExpandDims },
    { "FusedBatchNorm", &TfParserImpl::ParseFusedBatchNorm },
    { "Gather", &TfParserImpl::ParseGather},
    { "Greater", &TfParserImpl::ParseGreater},
    { "ConcatV2", &TfParserImpl::ParseConcat },
    { "LRN", &TfParserImpl::ParseLrn },
    { "MatMul", &TfParserImpl::ParseMatMul },
    { "Mean", &TfParserImpl::ParseMean },
    { "Mul", &TfParserImpl::ParseMul },
    { "Placeholder", &TfParserImpl::ParsePlaceholder },
    { "RealDiv", &TfParserImpl::ParseRealDiv },
    { "Relu", &TfParserImpl::ParseRelu },
    { "Relu6", &TfParserImpl::ParseRelu6 },
    { "Reshape", &TfParserImpl::ParseReshape },
    { "ResizeBilinear", &TfParserImpl::ParseResizeBilinear },
    { "Rsqrt", &TfParserImpl::ParseRsqrt },
    { "Shape", &TfParserImpl::ParseShape },
    { "Squeeze", &TfParserImpl::ParseSqueeze },
    { "Sigmoid", &TfParserImpl::ParseSigmoid },
    { "Softmax", &TfParserImpl::ParseSoftmax },
    { "Softplus", &TfParserImpl::ParseSoftplus },
    { "Split", &TfParserImpl::ParseSplit },
    { "StridedSlice", &TfParserImpl::ParseStridedSlice },
    { "Tanh", &TfParserImpl::ParseTanh },
    { "MaxPool", &TfParserImpl::ParseMaxPool },
    { "AvgPool", &TfParserImpl::ParseAvgPool },
    { "Maximum", &TfParserImpl::ParseMaximum },
    { "Minimum", &TfParserImpl::ParseMinimum },
    { "Equal", &TfParserImpl::ParseEqual },
    { "Pad", &TfParserImpl::ParsePad },
    { "Sub", &TfParserImpl::ParseSub },
    // "Pack" and "Stack" are aliases for the same TensorFlow operation.
    { "Pack" , &TfParserImpl::ParseStack },
    { "Stack", &TfParserImpl::ParseStack },
    { "Transpose", &TfParserImpl::ParseTranspose },
};

// Ops accepted as control inputs in the graph but never converted to ArmNN layers.
const std::list<std::string> ITfParser::TfParserImpl::m_ControlInputs = {
    "Assert"
};
428
/// Computes front/back padding for one spatial dimension following TensorFlow's
/// padding convention.
/// With samePadding == false (VALID), both paddings are zero. With SAME padding,
/// the output spans ceil(inputSize / stride) positions and any extra input
/// required by the (dilated) filter is split as evenly as possible, the back
/// edge receiving the odd element.
void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t dilation,
                 uint32_t& paddingFront,
                 uint32_t& paddingBack,
                 bool samePadding)
{
    paddingFront = 0;
    paddingBack = 0;
    if (!samePadding)
    {
        return; // VALID padding: nothing to add.
    }

    const uint32_t outputSize = (inputSize + stride - 1) / stride; // ceil division
    // Effective filter extent once dilation is applied.
    const uint32_t dilatedFilter = filterSize + (dilation - 1) * (filterSize - 1);
    const uint32_t requiredInput = (outputSize - 1) * stride + dilatedFilter;
    if (requiredInput > inputSize)
    {
        const uint32_t totalPadding = requiredInput - inputSize;
        paddingFront = totalPadding / 2;
        paddingBack = totalPadding - paddingFront;
    }
}
451
/// An abstract base class which represents a single tensorflow operation (node)
/// that has been (potentially partially) converted to Armnn.
/// It may not yet have been fully converted into actual Armnn layers.
class ParsedTfOperation
{
public:
    ParsedTfOperation(ITfParser::TfParserImpl* parser, const tensorflow::NodeDef& node)
    : m_Parser(parser)
    , m_Node(node)
    {
    }

    virtual ~ParsedTfOperation() {};

    /// The TensorFlow node this operation was created from (held by reference, not owned).
    const tensorflow::NodeDef& GetNode() const { return m_Node; }

    /// Gets the ArmNN IOutputSlot corresponding to the given output index of the Tensorflow operation.
    /// This may result in the creation of Armnn layers if this was deferred (e.g. see ParsedConstTfOperation).
    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) = 0;

    /// If this operation is an Identity then this will follow and return the 'parent' operation (recursively).
    /// The default implementation returns itself.
    virtual ParsedTfOperation* ResolveIdentityOperations()
    {
        return this;
    }

protected:
    ITfParser::TfParserImpl* m_Parser; // Non-owning back-pointer to the owning parser.
    const tensorflow::NodeDef& m_Node; // Reference into the parsed graph; must outlive this object.
};
482
/// A ParsedTfOperation where the Armnn equivalent is a single layer,
/// with output slots that correspond directly to the Tf node outputs.
class SingleLayerParsedTfOperation : public ParsedTfOperation
{
public:
    SingleLayerParsedTfOperation(ITfParser::TfParserImpl* parser,
                                 const tensorflow::NodeDef& node,
                                 IConnectableLayer* layer)
    : ParsedTfOperation(parser, node)
    , m_Layer(layer)
    {
    }

    /// Maps the TF output index one-to-one onto the layer's output slot index.
    /// @throws ParseException if the index exceeds the layer's slot count.
    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        ARMNN_ASSERT(m_Layer);
        // Assumes one-to-one mapping between Tf and armnn output slots.
        unsigned int armnnOutputSlotIdx = tfOutputIndex;
        if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
        {
            throw ParseException(
                fmt::format("The requested output slot #{} "
                            "for {} does not exist {}",
                            armnnOutputSlotIdx,
                            m_Layer->GetName(),
                            CHECK_LOCATION().AsString()));
        }
        return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
    }

protected:
    IConnectableLayer* m_Layer; // May be null in subclasses until deferred creation runs.
};
516
/// A SingleLayerParsedTfOperation for deferred layer creation.
/// m_Layer starts as null; the first call to ResolveArmnnOutputSlot triggers
/// CreateLayerDeferred() in the concrete subclass to build the actual layer.
class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
{
public:
    DeferredSingleLayerParsedTfOperation(ITfParser::TfParserImpl* parser, const tensorflow::NodeDef& node)
    : SingleLayerParsedTfOperation(parser, node, nullptr)
    {
    }

    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        // Lazily create the layer the first time one of its outputs is requested.
        if (!m_Layer)
        {
            CreateLayerDeferred();
        }
        return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
    }

private:
    /// Implemented by subclasses to build the ArmNN layer on demand; must set m_Layer.
    virtual void CreateLayerDeferred() = 0;
};
538
539
/// Constructs an implementation object with no network; m_Network is populated
/// later by one of the CreateNetworkFrom* entry points.
ITfParser::TfParserImpl::TfParserImpl()
    : m_Network(nullptr, nullptr)
{
}
544
545
Kevin May7d96b162021-02-03 17:38:41 +0000546const tensorflow::NodeDef* ITfParser::TfParserImpl::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100547{
548 if (nodeDef->op() != "Identity")
549 {
550 return nodeDef;
551 }
552
553 if (nodeDef->input_size() != 1)
554 {
telsoa01c577f2c2018-08-31 09:22:23 +0100555 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100556 fmt::format("Identity node should have a single input! {} has {} inputs {}",
557 nodeDef->name(),
558 nodeDef->input_size(),
559 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100560 }
561
562 auto it = m_NodesByName.find(nodeDef->input(0));
563 if (it != m_NodesByName.end())
564 {
565 const tensorflow::NodeDef* inputNode = it->second;
566 return ResolveIdentityNode(inputNode);
567 }
568 else
569 {
telsoa01c577f2c2018-08-31 09:22:23 +0100570 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100571 fmt::format("Cannot find what the Identity node {} is linked to! {}",
572 nodeDef->name(),
573 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100574 }
575}
576
/// Collects the nodes connected as inputs of 'nodeDef', each paired with the
/// output index on the producing node (parsed from "name:N" input strings).
/// Control inputs (names starting with '^') are skipped, and Const nodes report
/// no inputs at all.
/// @throws ParseException if a named input is not present in m_NodesByName.
std::vector<OutputOfConstNodeDef>
ITfParser::TfParserImpl::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
{
    std::vector<OutputOfConstNodeDef> ret;

    if (nodeDef.op() == "Const")
    {
        // For some reason const node can have "Control Inputs". We ignore them for now.
        return ret;
    }

    ret.reserve(armnn::numeric_cast<size_t>(nodeDef.input_size()));
    for (int j = 0; j < nodeDef.input_size(); ++j)
    {
        OutputId outputId = ParseOutputId(nodeDef.input(j));

        if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
        {
            // We currently allow Control Input from TensorFlow graph but we ignore them from ArmNN graph.
            continue;
        }

        auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
        if (inputIt == m_NodesByName.end())
        {
            throw ParseException(
                fmt::format("Can't find node '{}', which is listed as an input of '{}' {}",
                            nodeDef.input(j),
                            nodeDef.name(),
                            CHECK_LOCATION().AsString()));
        }
        ret.push_back(OutputOfConstNodeDef(inputIt->second,outputId.m_Index));
    }

    return ret;
}
613
/// Fetches the operations feeding 'nodeDef' as already-parsed ParsedTfOperations,
/// validating that the node has exactly 'expectedNumInputs' (non-control) inputs.
/// Identity operations are resolved transparently to their parent operation.
/// @throws ParseException on an input-count mismatch or a not-yet-parsed input.
std::vector<OutputOfParsedTfOperation>
ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
                                                           std::size_t expectedNumInputs)
{
    // Fetches the tensorflow nodes connected as inputs and validate the size.
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
    const std::size_t numInputs = nodes.size();
    if (numInputs != expectedNumInputs)
    {
        throw ParseException(
            fmt::format("Unexpected number of inputs for node {}. Expected {}, found {} {}",
                        nodeDef.name(),
                        expectedNumInputs,
                        numInputs,
                        CHECK_LOCATION().AsString()));
    }
    // Fetches the corresponding ParsedTfOperation operations
    std::vector<OutputOfParsedTfOperation> result;
    for (auto&& node : nodes)
    {
        auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
        if (it == m_ParsedTfOperations.end())
        {
            throw ParseException(
                fmt::format("Node with name '{}' has not been parsed {}",
                            node.m_IndexedValue->name(),
                            CHECK_LOCATION().AsString()));
        }
        ParsedTfOperation* parsedOp = it->second.get();
        // Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
        parsedOp = parsedOp->ResolveIdentityOperations();
        result.push_back(OutputOfParsedTfOperation(parsedOp,node.m_Index));
    }
    return result;
}
649
/// Creates an ArmNN Addition layer from two output slots, inserting a broadcast
/// reshape on one input when a 1D tensor is added to a 4D tensor (treated as NHWC).
/// The output shape is the element-wise max of the two (possibly reshaped) input
/// shapes; the rest of the output TensorInfo is copied from input0.
/// @throws ParseException for any other rank mismatch.
IConnectableLayer* ITfParser::TfParserImpl::CreateAdditionLayer(
    const tensorflow::NodeDef& nodeDef,
    IOutputSlot* input0Slot,
    IOutputSlot* input1Slot,
    const std::string& layerName)
{
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    const unsigned int input0Dim = input0Info.GetNumDimensions();
    const unsigned int input1Dim = input1Info.GetNumDimensions();
    if (input0Dim != input1Dim)
    {
        // broadcasting where input0 and input1 have different number of dimensions
        // is only supported for 1D and 4D tensors pair
        if (input0Dim == 1 && input1Dim == 4)
        {
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
        }
        else if (input0Dim == 4 && input1Dim == 1)
        {
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
        }
        else
        {
            throw ParseException(
                fmt::format("Unsupported broadcast configuration for {} operation {} {}",
                            layerName,
                            nodeDef.name(),
                            CHECK_LOCATION().AsString()));
        }
    }
    IConnectableLayer* const layer = m_Network->AddAdditionLayer(layerName.c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // Ensure the output tensor has the correct dimensions even if a broadcast has been done
    TensorInfo outputInfo = input0Slot->GetTensorInfo();
    std::vector<unsigned int> outputShape;

    // Note: the slots are re-queried here because a broadcast reshape above may
    // have replaced one of them.
    const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
    const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();

    for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
    {
        outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
    }

    outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return layer;
}
704
Kevin May7d96b162021-02-03 17:38:41 +0000705IConnectableLayer* ITfParser::TfParserImpl::CreateAdditionLayer(
Ferran Balaguerfbdad032018-12-28 18:15:24 +0000706 const tensorflow::NodeDef& nodeDef,
707 IConnectableLayer* layerOne,
708 IConnectableLayer* layerTwo,
709 unsigned int numberOfAddition,
710 unsigned long numberOfLayersToConnect,
711 bool isOdd)
712{
713 IOutputSlot* input0Slot = &layerOne->GetOutputSlot(0);
714 IOutputSlot* input1Slot = &layerTwo->GetOutputSlot(0);
715 std::string layerName(nodeDef.name());
716 if (isOdd || numberOfLayersToConnect != 2)
717 {
718 // we are not connecting the final layer
719 layerName.append("_addN_").append(std::to_string(numberOfAddition));
720 }
721 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
722}
723
Kevin May7d96b162021-02-03 17:38:41 +0000724IConnectableLayer* ITfParser::TfParserImpl::CreateAdditionLayer(
Ferran Balaguerfbdad032018-12-28 18:15:24 +0000725 const tensorflow::NodeDef& nodeDef,
726 const OutputOfParsedTfOperation& opOne,
727 const OutputOfParsedTfOperation& opTwo,
728 unsigned int numberOfAddition)
729{
730 IOutputSlot* input0Slot = &opOne.m_IndexedValue->ResolveArmnnOutputSlot(opOne.m_Index);
731 IOutputSlot* input1Slot = &opTwo.m_IndexedValue->ResolveArmnnOutputSlot(opTwo.m_Index);
732 std::string layerName(nodeDef.name());
733 layerName.append("_addN_").append(std::to_string(numberOfAddition));
734 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
735}
736
Kevin May7d96b162021-02-03 17:38:41 +0000737IConnectableLayer* ITfParser::TfParserImpl::CreateAdditionLayer(
Ferran Balaguerfbdad032018-12-28 18:15:24 +0000738 const tensorflow::NodeDef& nodeDef,
739 const OutputOfParsedTfOperation& op,
740 IConnectableLayer* layer)
741{
742 IOutputSlot* input0Slot = &op.m_IndexedValue->ResolveArmnnOutputSlot(op.m_Index);
743 IOutputSlot* input1Slot = &layer->GetOutputSlot(0);
744 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, nodeDef.name());
745}
746
/// Converts an AddN node (N-ary addition) into ArmNN layers. Two inputs reduce to
/// a plain Add; more than two are combined as a balanced binary tree of Addition
/// layers, pairing up the previous level's outputs until one layer remains.
/// @throws ParseException if the node's "N" attribute is less than two.
ParsedTfOperationPtr ITfParser::TfParserImpl::ParseAddN(const tensorflow::NodeDef& nodeDef,
                                                        const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    // "N" is the number of summands declared on the AddN node.
    uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
    if (numberOfInputs < 2)
    {
        // should never happen
        throw ParseException(
            fmt::format("AddN Node with name '{}' has less than two ({}) inputs {}",
                        nodeDef.name(),
                        std::to_string(numberOfInputs),
                        CHECK_LOCATION().AsString()));
    }
    else if (numberOfInputs == 2)
    {
        //this is the same as a simple Add operation
        return AddAdditionLayer(nodeDef, false);
    }
    else
    {
        // build a binary tree of Add layers and return the final Add as the return from the function
        // if we have an odd number of inputs then the final Add will consist of a layer connecting to an
        // OutputOfParsedTfOperation, otherwise it will be two layers being added together
        std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numberOfInputs);
        unsigned int numberOfAdditions = 0;
        std::vector<IConnectableLayer*> layers;
        // NOTE: at this point we will have a minimum of three inputs
        // First level: pair up adjacent inputs (an odd trailing input is handled last).
        for (unsigned int i = 0; i < numberOfInputs; ++i)
        {
            // every time i is odd we have two inputs to process.
            bool onSecondItem = i % 2;
            if (onSecondItem)
            {
                ++numberOfAdditions;
                IConnectableLayer* newLayer = CreateAdditionLayer(
                    nodeDef, inputs[ i - 1], inputs[i], numberOfAdditions);
                layers.push_back(newLayer);
            }
        }

        std::vector<IConnectableLayer*> layersToConnect(layers);
        unsigned long numberOfLayersToConnect = layersToConnect.size();
        bool isOdd = numberOfInputs % 2;

        // Subsequent levels: keep pairing the previous level's outputs until one remains.
        while (numberOfLayersToConnect > 1)
        {
            layers.clear();
            for (unsigned long i = 0; i < numberOfLayersToConnect; ++i) {
                bool onSecondItem = i % 2;
                if (onSecondItem) {
                    ++numberOfAdditions;
                    IConnectableLayer* newLayer = CreateAdditionLayer(
                        nodeDef,
                        layersToConnect[i - 1],
                        layersToConnect[i],
                        numberOfAdditions,
                        numberOfLayersToConnect,
                        isOdd);
                    layers.push_back(newLayer);
                }
            }
            //OK... need to go again... maybe
            layersToConnect = layers;
            numberOfLayersToConnect = layersToConnect.size();
        }
        IConnectableLayer* finalLayer = layersToConnect[0];
        // if we had an odd number of inputs we need to connect the final layer to the
        // last OutputOfParsedTfOperation in order to create the last Add layer we will
        // be handing back.
        if (isOdd)
        {
            // connect the final layer to the last op
            finalLayer = CreateAdditionLayer(nodeDef, inputs[numberOfInputs - 1], finalLayer);
        }
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, finalLayer);
    }
}
825
Kevin May7d96b162021-02-03 17:38:41 +0000826ParsedTfOperationPtr ITfParser::TfParserImpl::ParseAdd(const tensorflow::NodeDef& nodeDef,
827 const tensorflow::GraphDef& graphDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100828{
Jan Eilers8eb25602020-03-09 12:13:48 +0000829 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +0100830 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
831
telsoa01c577f2c2018-08-31 09:22:23 +0100832 // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
833 // together as FullyConnected.
surmeh01bceff2f2018-03-29 16:29:27 +0100834 if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
835 HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
836 {
837 IConnectableLayer* layer =
838 AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
839 &nodeDef,nodeDef.name().c_str());
840 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
841 }
842 else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
843 inputs[1].m_IndexedValue->GetNode().op() == "MatMul")
844 {
845 IConnectableLayer* layer =
846 AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
847 &nodeDef,nodeDef.name().c_str());
848 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
849 }
850 else
851 {
telsoa01c577f2c2018-08-31 09:22:23 +0100852 // Otherwise it's just a regular addition.
surmeh01bceff2f2018-03-29 16:29:27 +0100853 return AddAdditionLayer(nodeDef);
854 }
855}
856
Kevin May7d96b162021-02-03 17:38:41 +0000857ParsedTfOperationPtr ITfParser::TfParserImpl::ParseBiasAdd(const tensorflow::NodeDef& nodeDef,
858 const tensorflow::GraphDef& graphDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100859{
Jan Eilers8eb25602020-03-09 12:13:48 +0000860 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +0100861 return AddAdditionLayer(nodeDef, true);
862}
863
864/// An ParsedTfOperation which forwards to another (used for Identity nodes).
865class ParsedIdentityTfOperation : public ParsedTfOperation
866{
867public:
Kevin May7d96b162021-02-03 17:38:41 +0000868 ParsedIdentityTfOperation(ITfParser::TfParserImpl* parser,
869 const tensorflow::NodeDef& node,
870 ParsedTfOperation* representative)
surmeh01bceff2f2018-03-29 16:29:27 +0100871 : ParsedTfOperation(parser, node)
872 , m_Representative(representative)
873 {
874 }
875
876 virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
877 {
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100878 ARMNN_ASSERT(m_Representative);
surmeh01bceff2f2018-03-29 16:29:27 +0100879 return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
880 }
881
882 virtual ParsedTfOperation* ResolveIdentityOperations() override
883 {
884 return m_Representative->ResolveIdentityOperations();
885 }
886
887private:
888 ParsedTfOperation* m_Representative;
889};
890
Kevin May7d96b162021-02-03 17:38:41 +0000891ParsedTfOperationPtr ITfParser::TfParserImpl::ParseIdentity(const tensorflow::NodeDef& nodeDef,
892 const tensorflow::GraphDef& graphDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100893{
Jan Eilers8eb25602020-03-09 12:13:48 +0000894 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +0100895 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
896 // Any requests for the output slots of this node should be forwarded to the node connected as input.
897 return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
898}
899
900/// An ParsedTfOperation for a Const node.
901/// Creation of the armnn ConstLayer is deferred until it is actually needed, because Const nodes are mostly used
902/// for weight inputs to MatMul/Conv2D nodes and in these cases armnn doesn't need a ConstLayer.
903template <typename T>
904class ParsedConstTfOperation : public DeferredSingleLayerParsedTfOperation
905{
906public:
Kevin May7d96b162021-02-03 17:38:41 +0000907 ParsedConstTfOperation(ITfParser::TfParserImpl* parser, const tensorflow::NodeDef& node,
surmeh01bceff2f2018-03-29 16:29:27 +0100908 const T* tensorData, const TensorInfo& tensorInfo)
909 : DeferredSingleLayerParsedTfOperation(parser, node),
910 m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
911 m_TensorInfo(tensorInfo)
912 {
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100913 ARMNN_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
surmeh01bceff2f2018-03-29 16:29:27 +0100914 }
915
916 void CreateLayerDeferred() override
917 {
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100918 ARMNN_ASSERT(m_Layer == nullptr);
Kevin May7d96b162021-02-03 17:38:41 +0000919 m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage),
920 m_Node.name().c_str());
surmeh01bceff2f2018-03-29 16:29:27 +0100921 m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
922 }
923
Matteo Martincigh482ca852018-12-12 09:20:55 +0000924 ConstTensor GetConstTensor(std::vector<T>& outputTensorData) const
surmeh01bceff2f2018-03-29 16:29:27 +0100925 {
surmeh01bceff2f2018-03-29 16:29:27 +0100926 outputTensorData.resize(m_TensorInfo.GetNumElements());
927
Matteo Martincigh482ca852018-12-12 09:20:55 +0000928 memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.GetNumBytes());
929
telsoa01c577f2c2018-08-31 09:22:23 +0100930 // Updates the result to point to the user provided storage.
Matteo Martincigh482ca852018-12-12 09:20:55 +0000931 ConstTensor constTensor(m_TensorInfo, outputTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +0100932 return constTensor;
933 }
934
Matteo Martincigh46315822018-11-28 16:22:36 +0000935 const T* GetStorage() const
936 {
937 return m_Storage.data();
938 }
939
940 const TensorInfo& GetTensorInfo() const
941 {
942 return m_TensorInfo;
943 }
944
surmeh01bceff2f2018-03-29 16:29:27 +0100945private:
946 ///< Manages the lifetime of the tensor data.
947 std::vector<T> m_Storage;
948 ///< Describes the layout of the tensor and points to the data in m_Storage.
949 TensorInfo m_TensorInfo;
950};
951
telsoa01c577f2c2018-08-31 09:22:23 +0100952DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
953 const tensorflow::NodeDef& nodeDef)
surmeh01bceff2f2018-03-29 16:29:27 +0100954{
955 switch (tfDataType)
956 {
957 case tensorflow::DT_FLOAT:
958 return DataType::Float32;
959 break;
960 case tensorflow::DT_INT32:
961 return DataType::Signed32;
962 break;
963 default:
telsoa01c577f2c2018-08-31 09:22:23 +0100964 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100965 fmt::format("Unknown DataType {} for node {} {}",
966 tensorflow::DataType_Name(tfDataType),
967 nodeDef.name(),
968 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +0100969 }
970}
971
972struct ParseTfTensorValueList
973{
974 template<typename DataType>
975 static void Parse(
976 const tensorflow::TensorProto& tfTensor,
977 unsigned int dstElements,
978 std::vector<int8_t>& outputData);
979
980 template <typename DataType>
981 static void ReadData(const void* srcData, unsigned int numSrcElements,
982 std::vector<int8_t>& dstData, unsigned int numDstElements)
983 {
telsoa01c577f2c2018-08-31 09:22:23 +0100984 // If there are no entries in the list, perform no action.
surmeh01bceff2f2018-03-29 16:29:27 +0100985 if (numSrcElements == 0)
986 {
987 return;
988 }
989
telsoa01c577f2c2018-08-31 09:22:23 +0100990 // If no size was provided, use the length of the value list.
surmeh01bceff2f2018-03-29 16:29:27 +0100991 if (numDstElements == 0)
992 {
993 numDstElements = numSrcElements;
994 }
995
telsoa01c577f2c2018-08-31 09:22:23 +0100996 // Allocates memory.
surmeh01bceff2f2018-03-29 16:29:27 +0100997 dstData.resize(std::max(numSrcElements, numDstElements) * sizeof(DataType));
998
999 const DataType* srcTensor = reinterpret_cast<const DataType*>(srcData);
1000 DataType* dstTensor = reinterpret_cast<DataType*>(dstData.data());
1001
telsoa01c577f2c2018-08-31 09:22:23 +01001002 // Copies the value list entries into the destination.
surmeh01bceff2f2018-03-29 16:29:27 +01001003 std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);
1004
1005 if (numDstElements > numSrcElements)
1006 {
telsoa01c577f2c2018-08-31 09:22:23 +01001007 // Uses the last element in the list to fill the remaining entries.
surmeh01bceff2f2018-03-29 16:29:27 +01001008 std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
1009 }
1010 }
1011
1012};
1013
1014template <>
1015void ParseTfTensorValueList::Parse<float>(const tensorflow::TensorProto& tfTensor,
1016 unsigned int dstElements, std::vector<int8_t>& outputData)
1017{
1018 ReadData<float>(tfTensor.float_val().data(), static_cast<unsigned int>(tfTensor.float_val_size()),
1019 outputData, dstElements);
1020}
1021
1022template <>
1023void ParseTfTensorValueList::Parse<int32_t>(const tensorflow::TensorProto& tfTensor,
1024 unsigned int dstElements, std::vector<int8_t>& outputData)
1025{
1026 ReadData<int32_t>(tfTensor.int_val().data(), static_cast<unsigned int>(tfTensor.int_val_size()),
1027 outputData, dstElements);
1028}
1029
1030template <template<typename> class OperatorType, typename T = int8_t>
1031struct MakeTfOperation
1032{
1033 template<typename DataType, class... Args>
Kevin May7d96b162021-02-03 17:38:41 +00001034 inline static std::unique_ptr<OperatorType<DataType>> Parse(ITfParser::TfParserImpl* parser,
1035 const tensorflow::NodeDef& node,
1036 Args&&... args)
surmeh01bceff2f2018-03-29 16:29:27 +01001037 {
1038 return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
1039 }
1040};
1041
1042template <>
1043struct MakeTfOperation<ParsedConstTfOperation>
1044{
1045 template<typename DataType, class... Args>
Kevin May7d96b162021-02-03 17:38:41 +00001046 inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(ITfParser::TfParserImpl* parser,
surmeh01bceff2f2018-03-29 16:29:27 +01001047 const tensorflow::NodeDef& node, const std::vector<int8_t>& tensorData, const TensorInfo& tensorInfo)
1048 {
1049 return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
1050 reinterpret_cast<const DataType*>(tensorData.data()), tensorInfo);
1051 }
1052};
1053
1054template <class FuncType>
1055struct InvokeParseFunction
1056{
1057 template<class ResType, class... Args>
1058 inline static ResType Result(DataType dataType, Args&&... args)
1059 {
1060 if (dataType == DataType::Float32)
1061 {
1062 return FuncType::template Parse<float>(std::forward<Args>(args)...);
1063 }
1064 else if (dataType == DataType::Signed32)
1065 {
1066 return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1067 }
1068
1069 return ResType();
1070 }
1071
1072 template<class... Args>
1073 inline static void Result(DataType dataType, Args&&... args)
1074 {
1075 if (dataType == DataType::Float32)
1076 {
1077 FuncType::template Parse<float>(std::forward<Args>(args)...);
1078 }
1079 else if (dataType == DataType::Signed32)
1080 {
1081 FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1082 }
1083 }
1084};
1085
/// Parses a TensorFlow Const node into a deferred ParsedConstTfOperation of the
/// appropriate element type (float or int32).
/// Tensor data may come either from the proto's typed value list or from its raw
/// tensor_content bytes; the two paths have different shape requirements (below).
/// @throws ParseException if the "value" attribute, the shape, or the data is
///         missing or inconsistent.
ParsedTfOperationPtr ITfParser::TfParserImpl::ParseConst(const tensorflow::NodeDef& nodeDef,
                                                         const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    ARMNN_ASSERT(nodeDef.op() == "Const");

    if (nodeDef.attr().count("value") == 0)
    {
        throw ParseException(
            fmt::format("Value not found for Const node - {} {}",
                        nodeDef.name(),
                        CHECK_LOCATION().AsString()));
    }

    const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
    const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
    const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "dtype");

    // Extracts each dimension's extent from the proto shape.
    const auto GetDimensionSize = [](auto& d) { return d.size(); };

    std::vector<unsigned int> dimensionSizes;
    std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
                   std::back_inserter(dimensionSizes), GetDimensionSize);

    // Calculates number of elements.
    const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
    unsigned int numElements = 0U;

    if (!dimensionSizes.empty())
    {
        numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
                                      1U, std::multiplies<unsigned int>());
    }

    std::vector<int8_t> tensorData;

    // Get tensor data from the list of values attribute.
    if (tfTensor.tensor_content().empty())
    {
        InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);

        // If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
        // tensor of the provided number of elements.
        if (numElements == 0)
        {
            const unsigned int tfNumElements =
                static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
            dimensionSizes.push_back(tfNumElements);
        }
    }
    // Gets tensor data from tensor content attribute.
    else
    {
        tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());

        // Checks if a tensor shape is defined for the tensor content.
        // (Unlike the value-list path, raw content cannot imply its own shape.)
        if (numElements == 0)
        {
            throw ParseException(
                fmt::format("No tensor shape found for Const node - {} {}",
                            nodeDef.name(),
                            CHECK_LOCATION().AsString()));
        }
    }

    // Const node requires at least a list of values or a content attribute.
    if (tensorData.empty())
    {
        throw ParseException(
            fmt::format("No tensor data found for Const node - {} {}",
                        nodeDef.name(),
                        CHECK_LOCATION().AsString()));
    }

    const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
                                dimensionSizes.data(),
                                dataType);

    // If we have a list of values, then the length of the list must be
    // less than or equal to the number of elements implied by the shape argument.
    if (tensorData.size() > tensorInfo.GetNumBytes())
    {
        throw ParseException(
            fmt::format("Number of elements ({}) should be less than or equal "
                        "to the number of elements implied by the shape argument ({}) for Const node - {} {}",
                        (tensorData.size() / GetDataTypeSize(dataType)),
                        tensorInfo.GetNumElements(),
                        nodeDef.name(),
                        CHECK_LOCATION().AsString()));
    }

    // Dispatch on the element type to build the typed ParsedConstTfOperation.
    return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
        dataType, this, nodeDef, tensorData, tensorInfo);
}
1180
1181template<typename Type>
Kevin May7d96b162021-02-03 17:38:41 +00001182bool ITfParser::TfParserImpl::HasParsedConstTensor(const std::string & nodeName) const
surmeh01bceff2f2018-03-29 16:29:27 +01001183{
1184 auto it = m_ParsedTfOperations.find(nodeName);
jimfly01f6ba7472018-12-04 10:09:52 +00001185 if (it == m_ParsedTfOperations.end())
surmeh01bceff2f2018-03-29 16:29:27 +01001186 {
1187 return false;
1188 }
jimfly01f6ba7472018-12-04 10:09:52 +00001189 return dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) != nullptr;
1190}
1191
1192template<typename Type>
Kevin May7d96b162021-02-03 17:38:41 +00001193bool ITfParser::TfParserImpl::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const
jimfly01f6ba7472018-12-04 10:09:52 +00001194{
1195 return dynamic_cast<ParsedConstTfOperation<Type>*>(parsedTfOpPtr) != nullptr;
surmeh01bceff2f2018-03-29 16:29:27 +01001196}
1197
Kevin May7d96b162021-02-03 17:38:41 +00001198unsigned int ITfParser::TfParserImpl::GetConstInputIndex(const std::vector<OutputOfParsedTfOperation>& inputs)
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00001199{
1200 for (unsigned int i = 0; i < inputs.size(); i++)
1201 {
1202 if (HasParsedConstTensor<int32_t>(inputs[i].m_IndexedValue->GetNode().name()))
1203 {
1204 return i;
1205 }
1206 }
1207 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001208 fmt::format("ArmNN only supports operators with constant axis. {}",
1209 CHECK_LOCATION().AsString()));
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00001210
1211}
1212
/// Parses a TensorFlow Conv2D node into an armnn Convolution2d layer.
/// Weights must be constant; they are permuted from TF's [H, W, In, Out] filter
/// layout into armnn's expected layout before the layer is created.
/// Supports NHWC and NCHW data formats, SAME/VALID padding and optional dilations.
/// @throws ParseException if the weight input is not a parsed float constant.
ParsedTfOperationPtr ITfParser::TfParserImpl::ParseConv2D(const tensorflow::NodeDef& nodeDef,
    const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    // Input 1 must be the (constant, float) filter weights.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            fmt::format("ArmNN only supports Convolution layers with constant weights for {}, input {} {}",
                        nodeDef.name(),
                        inputs[1].m_IndexedValue->GetNode().name(),
                        CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* weightNode =
        PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");

    // Any value other than "NHWC" is treated as NCHW (CHECK_DATA_FORMAT limits the options).
    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    // Maps H/W positions within the strides/dilations lists for the chosen layout.
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    // Dilations are optional; the descriptor's defaults apply when absent.
    std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
    if (!dilations.empty())
    {
        desc.m_DilationX = dilations[dataLayoutIndexed.GetWidthIndex()];
        desc.m_DilationY = dilations[dataLayoutIndexed.GetHeightIndex()];
    }

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // Tensorflow weights are [H, W, In, Out].
    // ArmNN weights have to be [Out, H, W, In] when the data layout is NHWC,
    // and [Out, In, H, W] when the data layout is NCHW.
    PermutationVector permutationVector =
        dataLayout == DataLayout::NHWC ?
            std::initializer_list<unsigned int>{ 1, 2, 3, 0 } : // NHWC: [H, W, In, Out] -> [Out, H, W, In]
            std::initializer_list<unsigned int>{ 2, 3, 1, 0 };  // NCHW: [H, W, In, Out] -> [Out, In, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    uint32_t weightHeight = weightTensor.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t weightWidth = weightTensor.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    if (paddingString == "SAME")
    {
        padding = true;
    }
    else if (paddingString == "VALID")
    {
        padding = false;
    }

    // Derives explicit per-edge padding from the SAME/VALID flag.
    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, padding);

    // Calculate output height and width
    unsigned int dilatedFilterWidth = weightWidth + (desc.m_DilationX - 1) * (weightWidth - 1);
    unsigned int readWidth = (inputWidth + desc.m_PadLeft + desc.m_PadRight) - dilatedFilterWidth;
    outputWidth = 1 + (readWidth / desc.m_StrideX);

    unsigned int dilatedFilterHeight = weightHeight + (desc.m_DilationY - 1) * (weightHeight - 1);
    unsigned int readHeight = (inputHeight + desc.m_PadTop + desc.m_PadBottom) - dilatedFilterHeight;
    outputHeight = 1 + (readHeight / desc.m_StrideY);

    // Output channel count comes from the permuted weights' leading (Out) dimension.
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      weightTensor.GetShape()[0] },
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
        default:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      weightTensor.GetShape()[0],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

    IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc,
                                                                weightTensor,
                                                                EmptyOptional(), // no bias
                                                                nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1340
/// Parses a TensorFlow DepthwiseConv2dNative node into an armnn
/// DepthwiseConvolution2d layer. Weights must be constant; TF's [H, W, I, M]
/// filter layout is permuted to the [M, I, H, W] layout used here, and the
/// output channel count is I * M (channel multiplier applied per input channel).
/// @throws ParseException if the weight input is not a parsed float constant.
ParsedTfOperationPtr ITfParser::TfParserImpl::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
                                                                   const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    // Input 1 must be the (constant, float) filter weights.
    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            fmt::format("ArmNN only supports Depthwise Convolution layer with constant weights. "
                        "Non const input found {} for node {} {}",
                        inputs[1].m_IndexedValue->GetNode().name(),
                        nodeDef.name(),
                        CHECK_LOCATION().AsString()));
    }

    ParsedConstTfOperation<float>* weightNode =
        PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");

    // Any value other than "NHWC" is treated as NCHW (CHECK_DATA_FORMAT limits the options).
    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    // Maps H/W positions within the strides/dilations lists for the chosen layout.
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
    // Dilations are optional; the descriptor's defaults apply when absent.
    std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
    if (!dilations.empty())
    {
        desc.m_DilationX = dilations[dataLayoutIndexed.GetWidthIndex()];
        desc.m_DilationY = dilations[dataLayoutIndexed.GetHeightIndex()];
    }

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // Tensorflow weights come in the format [H, W, I, M].
    // ArmNN weights have to be [M, I, H, W].
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    // After permutation the swizzled weights are [M, I, H, W]: H is dim 2, W is dim 3.
    uint32_t weightHeight = weightTensor.GetShape()[2];
    uint32_t weightWidth = weightTensor.GetShape()[3];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    if (paddingString == "SAME")
    {
        padding = true;
    }
    else if (paddingString == "VALID")
    {
        padding = false;
    }

    // Derives explicit per-edge padding from the SAME/VALID flag.
    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, padding);

    // Calculate output height and width
    unsigned int dilatedFilterWidth = weightWidth + (desc.m_DilationX - 1) * (weightWidth - 1);
    unsigned int readWidth = (inputWidth + desc.m_PadLeft + desc.m_PadRight) - dilatedFilterWidth;
    outputWidth = 1 + (readWidth / desc.m_StrideX);

    unsigned int dilatedFilterHeight = weightHeight + (desc.m_DilationY - 1) * (weightHeight - 1);
    unsigned int readHeight = (inputHeight + desc.m_PadTop + desc.m_PadBottom) - dilatedFilterHeight;
    outputHeight = 1 + (readHeight / desc.m_StrideY);

    // Output channels = M * I (channel multiplier times input channels).
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      outputHeight,
                                      outputWidth,
                                      weightTensor.GetShape()[0] * weightTensor.GetShape()[1]},
                                    DataType::Float32);
            break;
        case DataLayout::NCHW:
        default:
            outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                      weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
                                      outputHeight,
                                      outputWidth },
                                    DataType::Float32);
            break;
    }

    IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                                         weightTensor,
                                                                         EmptyOptional(), // no bias
                                                                         nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
1465
Jan Eilers1f3b49b2020-09-08 08:57:40 +01001466TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef,
1467 TensorInfo inputTensorInfo,
1468 std::int32_t expandDim)
Conor Kennedyc2130a02018-12-05 11:05:54 +00001469{
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001470 ARMNN_ASSERT(nodeDef.op() == "ExpandDims");
Conor Kennedyc2130a02018-12-05 11:05:54 +00001471
1472 if (inputTensorInfo.GetNumDimensions() > 4) {
1473 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001474 fmt::format("Unsupported number of dimensions: {} for input shape for ExpandDims {} {}",
1475 inputTensorInfo.GetNumDimensions(),
1476 nodeDef.name(),
1477 CHECK_LOCATION().AsString()));
Conor Kennedyc2130a02018-12-05 11:05:54 +00001478 }
1479
Matthew Sloyan589e3e82020-09-11 16:17:48 +01001480 std::int32_t inputDimSize = armnn::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
Conor Kennedyc2130a02018-12-05 11:05:54 +00001481 std::vector<uint32_t> outputDims;
1482
1483 // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
1484 if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1485 {
1486 // add current input shape to outputDims
1487 for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
1488 auto currentDimension = inputTensorInfo.GetShape()[i];
1489 outputDims.push_back(currentDimension);
1490 }
1491
1492 // insert a dimension of 1 at index 'expandDim' of inputs shape
1493 if (expandDim >= 0)
1494 {
1495 auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1496 outputDims.insert(getPosition, 1);
1497 }
1498
1499 // if negative number for 'expandDim' then count backwards from the last element
1500 // and insert 1 dimension at index 'expandDim'
1501 if (expandDim < 0)
1502 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01001503 int outputDimSize = armnn::numeric_cast<int>(outputDims.size() + 1);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001504 auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1505 outputDims.insert(getPosition, 1);
1506 }
1507 }
1508 else
1509 {
1510 throw InvalidArgumentException(
James Ward58dec6b2020-09-11 17:32:44 +01001511 fmt::format("Cannot expand dimension {} in input tensor with {} dimension {}",
1512 expandDim,
1513 inputDimSize,
1514 CHECK_LOCATION().AsString()));
Conor Kennedyc2130a02018-12-05 11:05:54 +00001515 }
1516
1517 if (outputDims.size() > 4)
1518 {
1519 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001520 fmt::format("Unsupported number of dimensions: {} for output shape for ExpandDims {} {}",
1521 outputDims.size(),
1522 nodeDef.name(),
1523 CHECK_LOCATION().AsString()));
Conor Kennedyc2130a02018-12-05 11:05:54 +00001524 }
1525
1526 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1527 outputDims.data());
1528
1529 TensorInfo outTensorInfo = inputTensorInfo;
1530 outTensorInfo.SetShape(outShape);
1531
1532 return outTensorInfo;
1533}
1534
Kevin May7d96b162021-02-03 17:38:41 +00001535ParsedTfOperationPtr ITfParser::TfParserImpl::ParseExpandDims(const tensorflow::NodeDef& nodeDef,
1536 const tensorflow::GraphDef& graphDef)
Conor Kennedyc2130a02018-12-05 11:05:54 +00001537{
Jan Eilers8eb25602020-03-09 12:13:48 +00001538 IgnoreUnused(graphDef);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001539
Jan Eilers1f3b49b2020-09-08 08:57:40 +01001540 // Number of inputs can either
1541 // be 1 - that indicates that the axis parameter is passed as an attribute of the operation
1542 // or 2 - which means that the axis parameter is passed as a second input
1543 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
1544 const std::size_t numInputs = nodes.size();
1545 std::vector<OutputOfParsedTfOperation> inputs;
1546 std::int32_t expandDim; // axis or dim parameter. Describes which dimension to expand.
1547 if (numInputs == 1)
1548 {
1549 inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1550 expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");
1551 }
1552 else
1553 {
1554 inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1555
1556 // make sure data type is int32
1557 IOutputSlot& prevLayerOutputSlot = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1558 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1559
1560 if (inputTensorInfo.GetDataType()!=armnn::DataType::Signed32)
1561 {
1562 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001563 fmt::format("The axis parameter of ExpandDims operation given as second input is not of type int32."
1564 " Input {0} Node {1} {2}",
1565 inputs[1].m_IndexedValue->GetNode().name(),
1566 nodeDef.name(),
1567 CHECK_LOCATION().AsString()));
Jan Eilers1f3b49b2020-09-08 08:57:40 +01001568 }
1569
1570 // ensure the second input is a constant value
1571 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
1572 {
1573 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001574 fmt::format("ArmNN only supports ExpandDims layers with constant axis/dim parameter. "
1575 "Input {0} Node {1} {2}",
1576 inputs[1].m_IndexedValue->GetNode().name(),
1577 nodeDef.name(),
1578 CHECK_LOCATION().AsString()));
Jan Eilers1f3b49b2020-09-08 08:57:40 +01001579 }
1580
1581 // make sure the second input is scalar or contains only a single value
1582 // (we don't support expand dims for multiple axis but we don't care what shape the
1583 // given tensor has as long as there is only a single value in it
1584 // e.g. a tensor like this [[[1]]] is completely fine)
1585 if (inputTensorInfo.GetNumElements() != 1)
1586 {
1587 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001588 fmt::format("The axis parameter of ExpandDims operation given as second input is not "
1589 "allowed to hold more than one value. "
1590 "Input {0} Node {1} {2}",
1591 inputs[1].m_IndexedValue->GetNode().name(),
1592 nodeDef.name(),
1593 CHECK_LOCATION().AsString()));
Jan Eilers1f3b49b2020-09-08 08:57:40 +01001594 }
1595
1596 ParsedConstTfOperation<int32_t>* expandDimsNode =
1597 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1598
1599 memcpy(&expandDim, expandDimsNode->GetStorage(), sizeof(expandDim));
1600 }
1601
1602 // First input is the vector that should be expanded by another dimension
Conor Kennedyc2130a02018-12-05 11:05:54 +00001603 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1604 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1605
1606 TensorInfo outputInfo;
Jan Eilers1f3b49b2020-09-08 08:57:40 +01001607 outputInfo = OutputShapeOfExpandDims(nodeDef, inputTensorInfo, expandDim);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001608
1609 ReshapeDescriptor reshapeDesc;
1610 reshapeDesc.m_TargetShape = outputInfo.GetShape();
1611 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1612 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1613 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1614
1615 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1616}
1617
Kevin May7d96b162021-02-03 17:38:41 +00001618ParsedTfOperationPtr ITfParser::TfParserImpl::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
1619 const tensorflow::GraphDef& graphDef)
surmeh01bceff2f2018-03-29 16:29:27 +01001620{
Jan Eilers8eb25602020-03-09 12:13:48 +00001621 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01001622 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
1623
1624 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1625 {
telsoa01c577f2c2018-08-31 09:22:23 +01001626 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001627 fmt::format("ArmNN only supports FusedBatchNormalization layers with constant scale. "
1628 "Input {}. Node {} {}",
1629 inputs[1].m_IndexedValue->GetNode().name(),
1630 nodeDef.name(),
1631 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001632 }
1633 ParsedConstTfOperation<float>* scaleNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01001634 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01001635
1636 if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
1637 {
telsoa01c577f2c2018-08-31 09:22:23 +01001638 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001639 fmt::format("ArmNN only supports FusedBatchNormalization layers with constant offset. "
1640 "Input {}. Node {} {}",
1641 inputs[2].m_IndexedValue->GetNode().name(),
1642 nodeDef.name(),
1643 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001644 }
1645 ParsedConstTfOperation<float>* offsetNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01001646 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01001647
1648 if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
1649 {
telsoa01c577f2c2018-08-31 09:22:23 +01001650 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001651 fmt::format("ArmNN only supports FusedBatchNormalization layers with constant mean. "
1652 "Input {}. Node {} {}",
1653 inputs[3].m_IndexedValue->GetNode().name(),
1654 nodeDef.name(),
1655 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001656 }
1657 ParsedConstTfOperation<float>* meanNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01001658 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01001659
1660 if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
1661 {
telsoa01c577f2c2018-08-31 09:22:23 +01001662 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001663 fmt::format("ArmNN only supports FusedBatchNormalization layers with constant variance. "
1664 "Input {}. Node {} {}",
1665 inputs[4].m_IndexedValue->GetNode().name(),
1666 nodeDef.name(),
1667 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001668 }
1669 ParsedConstTfOperation<float>* varianceNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01001670 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01001671
Aron Virginas-Tar2e259272019-11-27 13:29:51 +00001672 const std::string dataFormat = ReadOptionalNodeStringAttribute(nodeDef, "data_format", "NHWC");
Matteo Martincigh075c7502018-12-05 13:10:45 +00001673 CHECK_DATA_FORMAT(nodeDef, dataFormat, "FusedBatchNorm");
1674
telsoa01c577f2c2018-08-31 09:22:23 +01001675 // The descriptor only has the epsilon attribute.
surmeh01bceff2f2018-03-29 16:29:27 +01001676 BatchNormalizationDescriptor desc;
1677 desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
Matteo Martincigh075c7502018-12-05 13:10:45 +00001678 desc.m_DataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
surmeh01bceff2f2018-03-29 16:29:27 +01001679
telsoa01c577f2c2018-08-31 09:22:23 +01001680 // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
1681 // locally until the layer is added.
surmeh01bceff2f2018-03-29 16:29:27 +01001682 std::vector<float> scaleTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001683 ConstTensor scaleTensor = scaleNode->GetConstTensor(scaleTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001684
1685 std::vector<float> offsetTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001686 ConstTensor offsetTensor = offsetNode->GetConstTensor(offsetTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001687
1688 std::vector<float> meanTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001689 ConstTensor meanTensor = meanNode->GetConstTensor(meanTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001690
1691 std::vector<float> varianceTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001692 ConstTensor varianceTensor = varianceNode->GetConstTensor(varianceTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001693
1694 IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
1695 meanTensor,
1696 varianceTensor,
1697 offsetTensor,
1698 scaleTensor,
1699 nodeDef.name().c_str());
1700
1701 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1702
Matteo Martincigh075c7502018-12-05 13:10:45 +00001703 layer->GetOutputSlot(0).SetTensorInfo(inputSlot.GetTensorInfo());
1704 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01001705
1706 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1707}
1708
Kevin May7d96b162021-02-03 17:38:41 +00001709bool ITfParser::TfParserImpl::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
1710 size_t alphaLayerIndex,
1711 const OutputOfParsedTfOperation& otherOp,
1712 armnn::IOutputSlot** outputOfLeakyRelu,
1713 armnn::ActivationDescriptor & desc)
telsoa01c577f2c2018-08-31 09:22:23 +01001714{
1715 const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();
1716
1717 // Verifying all these assumptions hold:
1718 //
1719 // 1, the mulNodeDef is an elementwise multiplication node "Mul"
1720 // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
1721 // 3, the inputLayerIndex selects a layer which has the same name as otherNodeDef
1722 //
1723
1724 if (mulNodeDef.op() == "Mul")
1725 {
1726 size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
1727 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
1728
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001729 ARMNN_ASSERT(inputs.size() == 2);
1730 ARMNN_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
1731 ARMNN_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
1732 ARMNN_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
telsoa01c577f2c2018-08-31 09:22:23 +01001733
1734 if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
1735 {
1736 if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
1737 {
1738 ParsedConstTfOperation<float>* alpha =
Jan Eilersbb446e52020-04-02 13:56:54 +01001739 PolymorphicDowncast<ParsedConstTfOperation<float> *>(
telsoa01c577f2c2018-08-31 09:22:23 +01001740 inputs[alphaLayerIndex].m_IndexedValue);
1741
1742 std::vector<float> const_data;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001743 ConstTensor const_tensor = alpha->GetConstTensor(const_data);
telsoa01c577f2c2018-08-31 09:22:23 +01001744
1745 if (const_data.size() == 1)
1746 {
1747 desc.m_Function = ActivationFunction::LeakyReLu;
1748 desc.m_A = const_data[0];
1749
1750 *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
1751 return true;
1752 }
1753 }
1754 }
1755 }
1756 return false;
1757}
1758
Kevin May7d96b162021-02-03 17:38:41 +00001759ParsedTfOperationPtr ITfParser::TfParserImpl::ParseMaximum(const tensorflow::NodeDef& nodeDef,
1760 const tensorflow::GraphDef& graphDef)
telsoa01c577f2c2018-08-31 09:22:23 +01001761{
Jan Eilers8eb25602020-03-09 12:13:48 +00001762 IgnoreUnused(graphDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001763 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
Sadik Armagan975c09a2018-12-04 10:02:08 +00001764 if (inputs.size() != 2)
1765 {
1766 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001767 fmt::format("Maximum expects two inputs!. Got {} for Node {} {}",
1768 inputs.size(),
1769 nodeDef.name(),
1770 CHECK_LOCATION().AsString()));
Sadik Armagan975c09a2018-12-04 10:02:08 +00001771 }
1772
telsoa01c577f2c2018-08-31 09:22:23 +01001773 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1774 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1775 IOutputSlot* outputOfLeakyRelu = nullptr;
1776
1777 ActivationDescriptor desc;
1778
Sadik Armagan975c09a2018-12-04 10:02:08 +00001779 // A max node may be part of a LeakyRelu, with one input as a multiplication with a scalar constant,
1780 // i.e. one of the four possible scenarios:
1781 // 1, max(mul(a, x), x)
1782 // 2, max(mul(x, a), x)
1783 // 3, max(x, mul(a, x))
1784 // 4, max(x, mul(x, a))
1785 // These are handled by an activation layer.
telsoa01c577f2c2018-08-31 09:22:23 +01001786
1787 if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1788 IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1789 IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1790 IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1791 {
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001792 ARMNN_ASSERT(outputOfLeakyRelu != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001793
1794 IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
1795 outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
1796 layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
1797 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1798 }
1799 else
1800 {
Sadik Armagan975c09a2018-12-04 10:02:08 +00001801 // Anything else is just a maximum layer.
1802
1803 return AddMaximumLayer(nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001804 }
1805}
1806
Kevin May7d96b162021-02-03 17:38:41 +00001807std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> ITfParser::TfParserImpl::ProcessElementwiseInputSlots(
jimfly0184c70e62018-12-19 13:14:46 +00001808 const tensorflow::NodeDef& nodeDef, const std::string& layerName)
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001809{
1810 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1811
1812 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1813 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1814 const unsigned int input0Dim = input0Slot->GetTensorInfo().GetNumDimensions();
1815 const unsigned int input1Dim = input1Slot->GetTensorInfo().GetNumDimensions();
1816
1817 if (input0Dim != input1Dim)
1818 {
1819 // broadcasting where input0 and input1 have different number of dimensions
1820 // is only supported for 1D and 4D tensors pair
1821 if (input0Dim == 1 && input1Dim == 4)
1822 {
1823 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
1824 }
1825 else if (input0Dim == 4 && input1Dim == 1)
1826 {
1827 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
1828 }
1829 else
1830 {
1831 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001832 fmt::format("Unsupported broadcast configuration for {} operation {} {}",
1833 layerName,
1834 nodeDef.name(),
1835 CHECK_LOCATION().AsString()));
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001836 }
1837 }
jimfly0184c70e62018-12-19 13:14:46 +00001838 return {input0Slot, input1Slot};
1839}
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001840
Kevin May7d96b162021-02-03 17:38:41 +00001841ParsedTfOperationPtr ITfParser::TfParserImpl::ProcessComparisonLayer(
kevmay012b4d88e2019-01-24 14:05:09 +00001842 IOutputSlot* input0Slot,
1843 IOutputSlot* input1Slot,
1844 IConnectableLayer* const layer,
1845 const tensorflow::NodeDef& nodeDef)
1846{
1847 input0Slot->Connect(layer->GetInputSlot(0));
1848 input1Slot->Connect(layer->GetInputSlot(1));
1849
1850 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1851 outputInfo.SetDataType(DataType::Boolean);
1852 std::vector<unsigned int> outputShape;
1853
1854 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1855 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1856
1857 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1858 {
1859 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1860 }
1861
1862 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1863 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1864
1865 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1866}
1867
Kevin May7d96b162021-02-03 17:38:41 +00001868ParsedTfOperationPtr ITfParser::TfParserImpl::ProcessElementwiseLayer(
jimfly0184c70e62018-12-19 13:14:46 +00001869 IOutputSlot* input0Slot,
1870 IOutputSlot* input1Slot,
1871 IConnectableLayer* const layer,
1872 const tensorflow::NodeDef& nodeDef)
1873{
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001874 input0Slot->Connect(layer->GetInputSlot(0));
1875 input1Slot->Connect(layer->GetInputSlot(1));
1876
1877 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1878 std::vector<unsigned int> outputShape;
1879
1880 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1881 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1882
1883 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1884 {
1885 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1886 }
1887
1888 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1889 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1890
1891 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1892}
1893
Kevin May7d96b162021-02-03 17:38:41 +00001894ParsedTfOperationPtr ITfParser::TfParserImpl::ParseGather(const tensorflow::NodeDef& nodeDef,
1895 const tensorflow::GraphDef& graphDef)
FrancisMurtagh94412af2019-01-24 10:53:39 +00001896{
Jan Eilers8eb25602020-03-09 12:13:48 +00001897 IgnoreUnused(graphDef);
FrancisMurtagh94412af2019-01-24 10:53:39 +00001898 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1899 IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1900 IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
Teresa Charlin52664732020-06-29 16:27:03 +01001901 GatherDescriptor descriptor;
1902 descriptor.m_Axis = ReadMandatoryNodeInt32Attribute(nodeDef, "axis");
FrancisMurtagh94412af2019-01-24 10:53:39 +00001903
1904 // Infer shape of output tensor
1905 unsigned int paramsDim = params.GetTensorInfo().GetNumDimensions();
1906 unsigned int indicesDim = indices.GetTensorInfo().GetNumDimensions();
1907 unsigned int outputDim = paramsDim - 1 + indicesDim;
1908
1909 std::vector<unsigned int> dimSizes;
1910
1911 for (unsigned int i = 0; i < indicesDim; ++i)
1912 {
1913 dimSizes.push_back(indices.GetTensorInfo().GetShape()[i]);
1914 }
1915 for (unsigned int i = 1; i < paramsDim; ++i)
1916 {
1917 dimSizes.push_back(params.GetTensorInfo().GetShape()[i]);
1918 }
1919
1920 const TensorShape& inferredShape = TensorShape(outputDim, dimSizes.data());
1921
1922 const TensorInfo inferredOutputInfo(inferredShape, params.GetTensorInfo().GetDataType());
1923
Teresa Charlin52664732020-06-29 16:27:03 +01001924 IConnectableLayer* const layer = m_Network->AddGatherLayer(descriptor, nodeDef.name().c_str());
FrancisMurtagh94412af2019-01-24 10:53:39 +00001925 layer->GetOutputSlot(0).SetTensorInfo(inferredOutputInfo);
1926
1927 params.Connect(layer->GetInputSlot(0));
1928 indices.Connect(layer->GetInputSlot(1));
1929
1930 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1931}
1932
Kevin May7d96b162021-02-03 17:38:41 +00001933ParsedTfOperationPtr ITfParser::TfParserImpl::ParseGreater(const tensorflow::NodeDef& nodeDef,
1934 const tensorflow::GraphDef& graphDef)
jimfly01a06bf312018-12-18 16:24:51 +00001935{
Jan Eilers8eb25602020-03-09 12:13:48 +00001936 IgnoreUnused(graphDef);
jimfly01a06bf312018-12-18 16:24:51 +00001937 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
1938 IOutputSlot* input0Slot = inputLayers.first;
1939 IOutputSlot* input1Slot = inputLayers.second;
1940
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001941 ComparisonDescriptor descriptor(ComparisonOperation::Greater);
1942 IConnectableLayer* const layer = m_Network->AddComparisonLayer(descriptor, nodeDef.name().c_str());
jimfly01a06bf312018-12-18 16:24:51 +00001943
kevmay012b4d88e2019-01-24 14:05:09 +00001944 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
jimfly01a06bf312018-12-18 16:24:51 +00001945}
1946
Kevin May7d96b162021-02-03 17:38:41 +00001947ParsedTfOperationPtr ITfParser::TfParserImpl::ParseEqual(const tensorflow::NodeDef& nodeDef,
1948 const tensorflow::GraphDef& graphDef)
jimfly0184c70e62018-12-19 13:14:46 +00001949{
Jan Eilers8eb25602020-03-09 12:13:48 +00001950 IgnoreUnused(graphDef);
jimfly0184c70e62018-12-19 13:14:46 +00001951 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
1952 IOutputSlot* input0Slot = inputLayers.first;
1953 IOutputSlot* input1Slot = inputLayers.second;
1954
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001955 ComparisonDescriptor descriptor(ComparisonOperation::Equal);
1956 IConnectableLayer* const layer = m_Network->AddComparisonLayer(descriptor, nodeDef.name().c_str());
jimfly0184c70e62018-12-19 13:14:46 +00001957
kevmay012b4d88e2019-01-24 14:05:09 +00001958 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
jimfly0184c70e62018-12-19 13:14:46 +00001959}
1960
Kevin May7d96b162021-02-03 17:38:41 +00001961ParsedTfOperationPtr ITfParser::TfParserImpl::ParseMinimum(const tensorflow::NodeDef& nodeDef,
1962 const tensorflow::GraphDef& graphDef)
jimfly0184c70e62018-12-19 13:14:46 +00001963{
Jan Eilers8eb25602020-03-09 12:13:48 +00001964 IgnoreUnused(graphDef);
jimfly0184c70e62018-12-19 13:14:46 +00001965 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
1966 IOutputSlot* input0Slot = inputLayers.first;
1967 IOutputSlot* input1Slot = inputLayers.second;
1968
1969 IConnectableLayer* const layer = m_Network->AddMinimumLayer(nodeDef.name().c_str());
1970
1971 return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
1972}
1973
Kevin May7d96b162021-02-03 17:38:41 +00001974ParsedTfOperationPtr ITfParser::TfParserImpl::ParseSub(const tensorflow::NodeDef& nodeDef,
1975 const tensorflow::GraphDef& graphDef)
jimfly0123be07e2018-12-04 17:47:22 +00001976{
Jan Eilers8eb25602020-03-09 12:13:48 +00001977 IgnoreUnused(graphDef);
jimfly0123be07e2018-12-04 17:47:22 +00001978 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1979
1980 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1981 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1982
1983 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
1984 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
1985
1986 if (input0Info.GetNumDimensions() == 1)
1987 {
1988 const bool isNHWC = true;
1989 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
1990 }
1991
1992 if (input1Info.GetNumDimensions() == 1)
1993 {
1994 const bool isNHWC = true;
1995 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
1996 }
1997
1998 IConnectableLayer* const layer = m_Network->AddSubtractionLayer(nodeDef.name().c_str());
1999
2000 input0Slot->Connect(layer->GetInputSlot(0));
2001 input1Slot->Connect(layer->GetInputSlot(1));
2002
2003 if (input0Info.GetNumDimensions() == 1)
2004 {
2005 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2006 }
2007 else
2008 {
2009 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2010 }
2011
2012 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2013}
2014
Kevin May7d96b162021-02-03 17:38:41 +00002015ParsedTfOperationPtr ITfParser::TfParserImpl::ParseStack(const tensorflow::NodeDef& nodeDef,
2016 const tensorflow::GraphDef& graphDef)
Sadik Armagan48d70932020-02-18 15:18:27 +00002017{
Jan Eilers8eb25602020-03-09 12:13:48 +00002018 IgnoreUnused(graphDef);
Sadik Armagan48d70932020-02-18 15:18:27 +00002019 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2020
2021 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2022 if (numInputs < 1)
2023 {
2024 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002025 fmt::format("Pack/Stack expects at least one input. Got {} for Node {} {}",
2026 numInputs,
2027 nodeDef.name(),
2028 CHECK_LOCATION().AsString()));
Sadik Armagan48d70932020-02-18 15:18:27 +00002029 }
2030
2031 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2032 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2033 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2034 const TensorInfo& inputTensorInfo = input0Slot->GetTensorInfo();
2035 auto numDimensions = inputTensorInfo.GetShape().GetNumDimensions();
2036
2037 // validate axis
2038 int32_t axis = ReadMandatoryNodeInt32Attribute(nodeDef, "axis");
2039 const int sNumDimensions = (static_cast<int>(numDimensions) + 1);
2040 if (!(axis < sNumDimensions && axis >= -sNumDimensions))
2041 {
2042 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002043 fmt::format("Axis index is not in range. Got {} for Node {} {}",
2044 axis,
2045 nodeDef.name(),
2046 CHECK_LOCATION().AsString()));
Sadik Armagan48d70932020-02-18 15:18:27 +00002047 }
2048
2049 if (axis < 0)
2050 {
2051 axis = static_cast<int32_t>(numDimensions) + axis + 1;
2052 }
2053
2054 StackDescriptor stackDescriptor;
2055 stackDescriptor.m_Axis = static_cast<uint32_t>(axis);
2056 stackDescriptor.m_NumInputs = static_cast<uint32_t>(numInputs);
2057 stackDescriptor.m_InputShape = inputTensorInfo.GetShape();
2058
2059 const unsigned int supportedNumDims = 4;
2060 for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
2061 {
2062 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2063 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2064
2065 // Double check dimensions of the tensors
2066 if (inputTensorInfo.GetNumDimensions() >= supportedNumDims)
2067 {
2068 throw armnn::ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002069 fmt::format("The number of dimensions: {} for input tensors of the "
2070 "Pack/Stack op. Number of dimensions should be less than {} {}",
2071 inputTensorInfo.GetNumDimensions(),
2072 supportedNumDims,
2073 CHECK_LOCATION().AsString()));
Sadik Armagan48d70932020-02-18 15:18:27 +00002074 }
2075 }
2076
2077 std::vector<unsigned int> outputDimensions;
2078 for (unsigned int i = 0; i < stackDescriptor.m_InputShape.GetNumDimensions(); ++i)
2079 {
2080 outputDimensions.push_back(stackDescriptor.m_InputShape[i]);
2081 }
2082 outputDimensions.insert(outputDimensions.begin() + axis, numInputs);
2083
2084 // add Stack Layer
2085 IConnectableLayer* const layer = m_Network->AddStackLayer(stackDescriptor, nodeDef.name().c_str());
2086
2087 for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
2088 {
2089 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2090 inputSlot.Connect(layer->GetInputSlot(viewIndex));
2091 }
2092
2093 layer->GetOutputSlot(0).SetTensorInfo(
2094 armnn::TensorInfo(static_cast<uint32_t>(outputDimensions.size()),
2095 outputDimensions.data(),
2096 inputTensorInfo.GetDataType()));
2097
2098 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2099}
2100
Kevin May7d96b162021-02-03 17:38:41 +00002101ParsedTfOperationPtr ITfParser::TfParserImpl::ParseTranspose(const tensorflow::NodeDef& nodeDef,
2102 const tensorflow::GraphDef& graphDef)
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002103{
Jan Eilers8eb25602020-03-09 12:13:48 +00002104 IgnoreUnused(graphDef);
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002105
2106 auto inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2107 const auto inputCount = inputs.size();
2108
2109 if (inputCount != 2)
2110 {
2111 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002112 fmt::format("The number of given input is {}. It should be two for Transpose op."
2113 "Node {} {}",
2114 inputCount,
2115 nodeDef.name(),
2116 CHECK_LOCATION().AsString()));
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002117 }
2118
2119 auto* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2120
2121 const auto constInput = inputs[GetConstInputIndex(inputs)];
2122 auto* permuteVectorInput =
Jan Eilersbb446e52020-04-02 13:56:54 +01002123 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(constInput.m_IndexedValue);
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002124 const auto& permuteVectorInfo = permuteVectorInput->GetTensorInfo();
2125
2126 std::vector<int32_t> permuteVectorData;
2127 permuteVectorInput->GetConstTensor(permuteVectorData);
2128
Mike Kelly08759e22020-03-02 11:41:31 +00002129 std::vector<unsigned int> armnnPermuteVectorData(permuteVectorData.begin(), permuteVectorData.end());
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002130
2131 const auto permutationVector = PermutationVector(armnnPermuteVectorData.data(), permuteVectorInfo.GetNumElements());
Mike Kelly08759e22020-03-02 11:41:31 +00002132 const auto desc = TransposeDescriptor(permutationVector);
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002133
Mike Kelly08759e22020-03-02 11:41:31 +00002134 auto* layer = m_Network->AddTransposeLayer(desc, nodeDef.name().c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002135 ARMNN_ASSERT(layer);
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002136
2137 input0Slot->Connect(layer->GetInputSlot(0));
2138
2139 const auto& input0Info = input0Slot->GetTensorInfo();
2140 armnn::TensorInfo outputInfo {input0Info};
Mike Kelly08759e22020-03-02 11:41:31 +00002141 outputInfo.SetShape(armnnUtils::TransposeTensorShape(input0Info.GetShape(), desc.m_DimMappings));
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002142 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2143
2144 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2145}
2146
jimfly01f6ba7472018-12-04 10:09:52 +00002147unsigned int CheckPaddingTensor(const ConstTensor& paddingTensor,
2148 const TensorInfo& inputTensorInfo,
2149 const std::string& nodeName)
2150{
2151 unsigned int rank = paddingTensor.GetShape()[0];
2152 unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
2153 if (rank != expectedRank)
2154 {
2155 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002156 fmt::format("Expected the padding tensor to be of rank {} not {} on Node {} {}.",
2157 expectedRank,
2158 rank,
2159 nodeName,
2160 CHECK_LOCATION().AsString()));
jimfly01f6ba7472018-12-04 10:09:52 +00002161 }
2162 unsigned int second = paddingTensor.GetShape()[1];
2163 if (second != 2)
2164 {
2165 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002166 fmt::format("Expected the padding tensor to be of dimensions "
2167 "[{1}, 2] not [{1}, {2}] on Node {3} {4}.",
2168 rank,
2169 second,
2170 nodeName,
2171 CHECK_LOCATION().AsString()));
jimfly01f6ba7472018-12-04 10:09:52 +00002172 }
2173 return rank;
2174}
2175
2176TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
2177 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
2178{
2179 unsigned int numDims = inputTensorInfo.GetNumDimensions();
2180 std::vector<unsigned int> outDims;
2181 for (unsigned int i = 0; i < numDims; ++i)
2182 {
2183 unsigned int dimSize = inputTensorInfo.GetShape()[i];
2184 const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
2185 dimSize += dimPadding.first;
2186 dimSize += dimPadding.second;
2187 outDims.push_back(dimSize);
2188 }
2189 TensorInfo paddedTensorInfo = inputTensorInfo;
2190 unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
2191 paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
2192 return paddedTensorInfo;
2193}
2194
/// Parses a TensorFlow Pad node into an ArmNN Pad layer.
/// Only constant, non-negative padding values are supported.
/// @throws ParseException if the padding input is not a parsed constant,
///         if the padding tensor's shape does not match the input rank,
///         or if any padding amount is negative.
ParsedTfOperationPtr ITfParser::TfParserImpl::ParsePad(const tensorflow::NodeDef& nodeDef,
                                                       const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    // input consists of:
    // input[0] the tensor which will be padded
    // input[1] the tensor holding the padding values
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = previousLayerOutputSlot.GetTensorInfo();
    // The padding amounts must be known at parse time: only constant input[1] is supported.
    if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
    {
        throw ParseException(
            fmt::format("ArmNN only supports Pad with constant padding. "
                        "Input {}. Node {} {}",
                        inputs[1].m_IndexedValue->GetNode().name(),
                        nodeDef.name(),
                        CHECK_LOCATION().AsString()));

    }
    ParsedConstTfOperation<int32_t>* paddingTensorOp =
        PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);

    std::vector<int32_t> paddingTensorData;
    ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(paddingTensorData);
    // paddings is an integer tensor with shape [n, 2], where n is the rank of tensor
    // and should match the rank of the input tensor that is being padded.
    // For each dimension D of input, paddings[D, 0] indicates how many values to add
    // before the contents of tensor in that dimension, and paddings[D, 1] indicates how
    // many values to add after the contents of tensor in that dimension
    // This needs to be translated into a padList for ACL
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    // CheckPaddingTensor throws if the padding tensor's shape is not [rank, 2].
    unsigned int rank = CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
    for (unsigned int i = 0; i < rank; ++i)
    {
        std::pair<unsigned int, unsigned int> paddingForDim;
        for (unsigned int j = 0; j < 2; j++)
        {
            // paddingTensorData is laid out row-major: [dim0-before, dim0-after, dim1-before, ...].
            unsigned int index = (i * 2) + j;
            int paddingAmount = paddingTensorData[index];
            // make sure we can cast to an unsigned value
            if (paddingAmount < 0)
            {
                throw ParseException(
                    fmt::format("Negative amount {} specified at [{}, {}] of padding tensor on Node {} {}.",
                                paddingAmount,
                                i,
                                j,
                                nodeDef.name(),
                                CHECK_LOCATION().AsString()));
            }
            // j == 0 is the "before" amount, j == 1 the "after" amount.
            if (j == 0)
            {
                paddingForDim.first = static_cast<unsigned int>(paddingAmount);
            }
            else
            {
                paddingForDim.second = static_cast<unsigned int>(paddingAmount);
            }
        }
        padList.push_back(paddingForDim);
    }
    PadDescriptor padDescriptor(padList);
    IConnectableLayer* layer = m_Network->AddPadLayer(padDescriptor, nodeDef.name().c_str());
    previousLayerOutputSlot.Connect(layer->GetInputSlot(0));
    // Use the padding to calculate the new output tensor shape
    TensorInfo outputTensorInfo = CalculatePaddedOutputTensorInfo(inputTensorInfo, padList);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2265
/// Parses a TensorFlow Concat/ConcatV2 node into an ArmNN Concat layer.
/// The concatenation axis must be supplied as a constant int32 input, all
/// data inputs must be 4-dimensional, and concatenation along dimensions
/// 0 and 2 is rejected.
ParsedTfOperationPtr ITfParser::TfParserImpl::ParseConcat(const tensorflow::NodeDef& nodeDef,
                                                          const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);

    // In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
    unsigned int numInputs = static_cast<unsigned int>(nodes.size());

    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);

    // Constant tensor index
    unsigned int index = GetConstInputIndex(inputs);
    // Get the axis tensor data
    ParsedConstTfOperation<int32_t>* shapeNode =
        PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);

    std::vector<int32_t> axisTensorData;
    shapeNode->GetConstTensor(axisTensorData);

    // This concatDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
    const unsigned int concatDim = static_cast<unsigned int>(axisTensorData[0]);

    // Armnn supports concatenation along the channel dimension for data formats NHWC and NCHW.
    if (concatDim == 0 || concatDim == 2)
    {
        throw ParseException(
            fmt::format("Dimension {} for concatenation is not supported by Armnn. "
                        "Node {} {}",
                        concatDim,
                        nodeDef.name(),
                        CHECK_LOCATION().AsString()));
    }

    const unsigned int supportedNumDims = 4;
    // NOTE(review): the loops below iterate inputs[0..numConcatViews-1] as data
    // tensors, which assumes the constant axis input is the LAST input (ConcatV2
    // layout). For v1 Concat the axis is the first input — verify with callers.
    unsigned int numConcatViews = numInputs - 1;
    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatViews), supportedNumDims);
    concatDescriptor.SetConcatAxis(concatDim);
    TensorShape mergeDims(supportedNumDims);
    unsigned int mergeDim = 0;   // running offset along the concat axis
    for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
    {
        // Resolve the ArmNN output slot feeding this view.
        IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
        TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

        // Double check dimensions of the tensors
        if (inputTensorInfo.GetNumDimensions() != supportedNumDims)
        {
            throw armnn::ParseException(
                fmt::format("The number of dimensions: {} for input tensors of the "
                            "concatenation op should be {} {}",
                            inputTensorInfo.GetNumDimensions(),
                            supportedNumDims,
                            CHECK_LOCATION().AsString()));
        }

        // Copy the input tensor shape to mergeDimSizes and initialize the view origin coordinates for the current input
        mergeDims = inputTensorInfo.GetShape();
        // GetViewOrigin returns const storage; the descriptor owns it, so the
        // const_cast is used here to zero-fill the origin in place.
        unsigned int* viewOrigin = const_cast<unsigned int*>(concatDescriptor.GetViewOrigin(viewIndex));
        std::fill(viewOrigin, viewOrigin + supportedNumDims, 0);

        // Update the view origin coordinates and the merge dimension value
        concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
        mergeDim += mergeDims[concatDim];
    }

    // Update the output shape
    mergeDims[concatDim] = mergeDim;
    armnn::IConnectableLayer *layer = m_Network->AddConcatLayer(concatDescriptor, nodeDef.name().c_str());

    // NOTE(review): the output is unconditionally tagged Float32 regardless of
    // the inputs' data type — confirm this is intentional.
    layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(mergeDims, DataType::Float32));

    // Wire every data input into its view of the concat layer.
    for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
    {
        IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
        inputSlot.Connect(layer->GetInputSlot(viewIndex));
    }

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
2347
Kevin May7d96b162021-02-03 17:38:41 +00002348ParsedTfOperationPtr ITfParser::TfParserImpl::ParseShape(const tensorflow::NodeDef& nodeDef,
surmeh01bceff2f2018-03-29 16:29:27 +01002349 const tensorflow::GraphDef& graphDef)
2350{
Jan Eilers8eb25602020-03-09 12:13:48 +00002351 IgnoreUnused(graphDef);
telsoa01c577f2c2018-08-31 09:22:23 +01002352 // Note: the Shape layer is handled in a special way, because:
2353 // 1. ARMNN doesn't support int32 tensors which it outputs.
2354 // 2. ARMNN works with statically shaped tensors which are known at parse time.
surmeh01bceff2f2018-03-29 16:29:27 +01002355 // 3. because of 1. and 2. we treat the output of Shape as a temporary const int32
telsoa01c577f2c2018-08-31 09:22:23 +01002356 // tensor which may be used as an input to other ops, most likely a Reshape.
surmeh01bceff2f2018-03-29 16:29:27 +01002357
2358 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
2359 if (tfDataType != tensorflow::DT_INT32)
2360 {
telsoa01c577f2c2018-08-31 09:22:23 +01002361 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002362 fmt::format("Armnn only supports DT_INT32 as out_type. Got {} for Node {} {}",
2363 tensorflow::DataType_Name(tfDataType),
2364 nodeDef.name(),
2365 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002366 }
2367
2368 const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2369 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2370 const TensorInfo& prevLayerTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2371 unsigned int prevLayerDimensions = prevLayerTensorInfo.GetNumDimensions();
2372
2373 std::vector<int32_t> shapeTensorData;
2374 shapeTensorData.reserve(prevLayerDimensions);
2375
2376 for (unsigned int i=0; i<prevLayerDimensions; ++i)
2377 {
2378 shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.GetShape()[i]));
2379 }
2380
2381 TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
2382
2383 return std::make_unique<ParsedConstTfOperation<int32_t>>(this,
2384 nodeDef,
2385 &shapeTensorData[0],
2386 shapeTensorInfo);
2387}
2388
Kevin May7d96b162021-02-03 17:38:41 +00002389ParsedTfOperationPtr ITfParser::TfParserImpl::ParseReshape(const tensorflow::NodeDef& nodeDef,
surmeh01bceff2f2018-03-29 16:29:27 +01002390 const tensorflow::GraphDef& graphDef)
2391{
Jan Eilers8eb25602020-03-09 12:13:48 +00002392 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002393 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2394 ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
2395
2396 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2397 {
telsoa01c577f2c2018-08-31 09:22:23 +01002398 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002399 fmt::format("ArmNN only supports Reshape layers with constant shapes. "
2400 "Input {} Node {} {}",
2401 inputs[1].m_IndexedValue->GetNode().name(),
2402 nodeDef.name(),
2403 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002404 }
2405 ParsedConstTfOperation<int32_t>* shapeNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002406 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01002407
2408 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
2409 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2410
2411 std::vector<int32_t> shapeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002412 ConstTensor shapeTensor = shapeNode->GetConstTensor(shapeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002413 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
2414
2415 TensorShape targetShape = outputTensorInfo.GetShape();
2416 ReshapeDescriptor reshapeDesc;
2417 reshapeDesc.m_TargetShape = targetShape;
2418
2419 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2420 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2421 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2422
2423 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2424}
2425
Kevin May7d96b162021-02-03 17:38:41 +00002426ParsedTfOperationPtr ITfParser::TfParserImpl::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
surmeh01bceff2f2018-03-29 16:29:27 +01002427 const tensorflow::GraphDef& graphDef)
2428{
Jan Eilers8eb25602020-03-09 12:13:48 +00002429 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002430 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2431
2432 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2433 {
telsoa01c577f2c2018-08-31 09:22:23 +01002434 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002435 fmt::format("ArmNN only supports ResizeBilinear layers with constant sizes. "
2436 "Input {}. Node {} {}",
2437 inputs[1].m_IndexedValue->GetNode().name(),
2438 nodeDef.name(),
2439 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002440 }
2441 ParsedConstTfOperation<int32_t>* sizeNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002442 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01002443
telsoa01c577f2c2018-08-31 09:22:23 +01002444 // Checks the align_corners attribute is not set.
surmeh01bceff2f2018-03-29 16:29:27 +01002445 if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
2446 {
telsoa01c577f2c2018-08-31 09:22:23 +01002447 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002448 fmt::format("ArmNN only supports ResizeBilinear layers with align_corners set to false. "
2449 "Node {} {}",
2450 nodeDef.name(),
2451 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002452 }
2453
telsoa01c577f2c2018-08-31 09:22:23 +01002454 // Data for the parsed tensor args (size) must be stored locally.
surmeh01bceff2f2018-03-29 16:29:27 +01002455 std::vector<int32_t> sizeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002456 ConstTensor sizeTensor = sizeNode->GetConstTensor(sizeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002457
telsoa01c577f2c2018-08-31 09:22:23 +01002458 // The descriptor only has target height and width attributes, which we get from the size tensor.
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002459 ResizeDescriptor desc;
2460 desc.m_Method = armnn::ResizeMethod::Bilinear;
surmeh01bceff2f2018-03-29 16:29:27 +01002461 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002462 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2463 desc.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002464
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002465 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, nodeDef.name().c_str());
surmeh01bceff2f2018-03-29 16:29:27 +01002466
2467 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2468 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
telsoa01c577f2c2018-08-31 09:22:23 +01002469 // The input shape is always in BHWC format, this will be swizzled below; for now,
2470 // get the batch and channels to make up the ArmNN output shape with the target size.
surmeh01bceff2f2018-03-29 16:29:27 +01002471 unsigned int outBatch = inputTensorInfo.GetShape()[0];
2472 unsigned int outChannels = inputTensorInfo.GetShape()[3];
2473 unsigned int outHeight = desc.m_TargetHeight;
2474 unsigned int outWidth = desc.m_TargetWidth;
jimfly018a121502018-12-06 16:19:52 +00002475 TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
telsoa01c577f2c2018-08-31 09:22:23 +01002476 // The output DataType is always Float32, regardless of the input DataType.
surmeh01bceff2f2018-03-29 16:29:27 +01002477 const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
2478 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2479
jimfly018a121502018-12-06 16:19:52 +00002480 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01002481
2482 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2483}
2484
2485TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
2486{
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002487 ARMNN_ASSERT(nodeDef.op() == "Squeeze");
surmeh01bceff2f2018-03-29 16:29:27 +01002488 tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
2489
2490 DataType type;
2491 if (tfDataType == tensorflow::DT_FLOAT)
2492 {
2493 type = DataType::Float32;
2494 }
2495 else if (tfDataType == tensorflow::DT_INT32)
2496 {
2497 type = DataType::Signed32;
2498 }
2499 else
2500 {
telsoa01c577f2c2018-08-31 09:22:23 +01002501 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002502 fmt::format("Unsupported DataType {} for Squeeze operation {} {}",
2503 tensorflow::DataType_Name(tfDataType),
2504 nodeDef.name(),
2505 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01002506 }
2507
2508
2509 if (inputTensorInfo.GetNumDimensions() > 4)
2510 {
2511 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002512 fmt::format("Unsupported number of dimensions: {} for input shape for Squeeze {} {}",
2513 inputTensorInfo.GetNumDimensions(),
2514 nodeDef.name(),
2515 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002516 }
2517
2518 std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
telsoa01c577f2c2018-08-31 09:22:23 +01002519 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2520
surmeh01bceff2f2018-03-29 16:29:27 +01002521 if (squeezeDims.empty())
2522 {
telsoa01c577f2c2018-08-31 09:22:23 +01002523 squeezeDims.assign(dimensionSequence,
2524 dimensionSequence+inputTensorInfo.GetNumDimensions());
surmeh01bceff2f2018-03-29 16:29:27 +01002525 }
2526
2527 std::vector<uint32_t> outputDims;
2528 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2529 {
telsoa01c577f2c2018-08-31 09:22:23 +01002530 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2531 auto currentDimension = inputTensorInfo.GetShape()[i];
2532 if (skipSqueeze || currentDimension != 1)
surmeh01bceff2f2018-03-29 16:29:27 +01002533 {
telsoa01c577f2c2018-08-31 09:22:23 +01002534 outputDims.push_back(currentDimension);
surmeh01bceff2f2018-03-29 16:29:27 +01002535 }
2536 }
2537
2538 if (outputDims.size() > 4)
2539 {
telsoa01c577f2c2018-08-31 09:22:23 +01002540 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002541 fmt::format("Unsupported number of dimensions: {} for output shape for Squeeze {} {}",
2542 outputDims.size(),
2543 nodeDef.name(),
2544 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002545 }
2546
telsoa01c577f2c2018-08-31 09:22:23 +01002547 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2548 outputDims.data());
2549
2550 TensorInfo outTensorInfo = inputTensorInfo;
2551 outTensorInfo.SetShape(outShape);
2552 outTensorInfo.SetDataType(type);
surmeh01bceff2f2018-03-29 16:29:27 +01002553
2554 return outTensorInfo;
2555}
2556
Kevin May7d96b162021-02-03 17:38:41 +00002557ParsedTfOperationPtr ITfParser::TfParserImpl::ParseSqueeze(const tensorflow::NodeDef& nodeDef,
2558 const tensorflow::GraphDef& graphDef)
surmeh01bceff2f2018-03-29 16:29:27 +01002559{
Jan Eilers8eb25602020-03-09 12:13:48 +00002560 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002561 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2562
2563 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2564 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2565
2566 TensorInfo outputInfo;
2567 outputInfo = OutputShapeOfSqueeze(nodeDef, inputTensorInfo);
2568
2569 ReshapeDescriptor reshapeDesc;
2570 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2571 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2572 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2573 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2574
2575 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2576}
2577
Kevin May7d96b162021-02-03 17:38:41 +00002578ParsedTfOperationPtr ITfParser::TfParserImpl::ParseLrn(const tensorflow::NodeDef& nodeDef,
2579 const tensorflow::GraphDef& graphDef)
surmeh01bceff2f2018-03-29 16:29:27 +01002580{
Jan Eilers8eb25602020-03-09 12:13:48 +00002581 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002582 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2583
2584 NormalizationDescriptor normalizationDescriptor;
2585 normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2586 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
2587 normalizationDescriptor.m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef, "alpha");
2588 normalizationDescriptor.m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef, "beta");
2589 normalizationDescriptor.m_K = ReadMandatoryNodeFloatAttribute(nodeDef, "bias");
2590 normalizationDescriptor.m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef, "depth_radius");
ruoyan018174f362018-12-04 18:24:08 +00002591 normalizationDescriptor.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002592
2593 // The window size must be an odd value. For a window size of (2 * n + 1), TensorFlow defines depth_radius = n.
2594 normalizationDescriptor.m_NormSize = normalizationDescriptor.m_NormSize * 2 + 1;
2595
2596 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002597 IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor,
2598 nodeDef.name().c_str());
ruoyan018174f362018-12-04 18:24:08 +00002599 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2600 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
surmeh01bceff2f2018-03-29 16:29:27 +01002601
2602 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2603}
2604
/// A ParsedTfOperation for a MatMul node.
/// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because
/// MatMul nodes are often used for the first part of a biased FullyConnected (MatMul followed
/// by Add) and in these cases armnn doesn't need a separate layer for the MatMul.
///
class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    /// @param parser the owning parser implementation
    /// @param node   the TensorFlow MatMul NodeDef this operation wraps
    ParsedMatMulTfOperation(ITfParser::TfParserImpl* parser, const tensorflow::NodeDef& node)
        : DeferredSingleLayerParsedTfOperation(parser, node)
    {
    }

    /// Invoked when the MatMul output is consumed directly (i.e. no fused
    /// bias-Add): materialises a FullyConnected layer with no bias tensor.
    void CreateLayerDeferred() override
    {
        ARMNN_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
    }
};
2624
Kevin May7d96b162021-02-03 17:38:41 +00002625ParsedTfOperationPtr ITfParser::TfParserImpl::ParseMatMul(const tensorflow::NodeDef& nodeDef,
2626 const tensorflow::GraphDef& graphDef)
surmeh01bceff2f2018-03-29 16:29:27 +01002627{
Jan Eilers8eb25602020-03-09 12:13:48 +00002628 IgnoreUnused(graphDef);
Derek Lambertibaa177f2019-12-10 22:00:43 +00002629
telsoa01c577f2c2018-08-31 09:22:23 +01002630 // Defers the creation of the layer (see ParsedMatMulTfOperation).
surmeh01bceff2f2018-03-29 16:29:27 +01002631 return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
2632}
2633
Kevin May7d96b162021-02-03 17:38:41 +00002634ParsedTfOperationPtr ITfParser::TfParserImpl::ParseMean(const tensorflow::NodeDef& nodeDef,
2635 const tensorflow::GraphDef& graphDef)
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002636{
Jan Eilers8eb25602020-03-09 12:13:48 +00002637 IgnoreUnused(graphDef);
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002638 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2639 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2640 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2641
2642 if (inputs.size() != 2)
2643 {
2644 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002645 fmt::format("Mean expects two inputs!. Got {} for Node {} {}",
2646 inputs.size(),
2647 nodeDef.name(),
2648 CHECK_LOCATION().AsString()));
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002649 }
2650
2651 bool keepDims = ReadMandatoryNodeBoolAttribute(nodeDef, "keep_dims");
2652
2653 ParsedConstTfOperation<int32_t>* axisNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002654 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002655
2656 const TensorInfo& axisTensorInfo = axisNode->GetTensorInfo();
2657
2658 ConstTensor axisTensor(axisTensorInfo, axisNode->GetStorage());
2659 const int* axisData = static_cast<const int*>(axisTensor.GetMemoryArea());
2660
2661 TensorInfo outputTensorInfo;
2662 MeanDescriptor meanDescriptor;
2663 meanDescriptor.m_KeepDims = keepDims;
2664
2665 // Negative axis values are supported so that the process requires
2666 // to convert them into the corresponding positive ones.
2667 // Duplicate values are also removed.
2668 std::vector<int> rawAxisVector(axisData, axisData + axisTensorInfo.GetNumElements());
2669 std::set<unsigned int> positiveAxisSet;
2670 int rank = static_cast<int>(inputTensorInfo.GetNumDimensions());
2671
2672 std::transform(rawAxisVector.begin(), rawAxisVector.end(),
2673 std::inserter(positiveAxisSet, positiveAxisSet.begin()),
2674 [rank](int i) -> unsigned int { return static_cast<unsigned int>((i + rank) % rank); });
2675
Derek Lambertibaa177f2019-12-10 22:00:43 +00002676 CalculateReducedOutputTensoInfo(inputTensorInfo, positiveAxisSet, keepDims, outputTensorInfo);
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002677
2678 if (inputTensorInfo.GetNumDimensions() > positiveAxisSet.size())
2679 {
2680 meanDescriptor.m_Axis.assign(positiveAxisSet.begin(), positiveAxisSet.end());
2681 }
2682
2683 IConnectableLayer* layer = m_Network->AddMeanLayer(meanDescriptor, nodeDef.name().c_str());
2684 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2685 inputSlot.Connect(layer->GetInputSlot(0));
2686
2687 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2688}
2689
telsoa01c577f2c2018-08-31 09:22:23 +01002690/// An ParsedTfOperation for a Mul node.
2691/// Creation of the armnn Mul layer is deferred until it is actually needed, because Mul nodes
2692/// are also used for the first part of a leaky relu activation function (Mul followed by Maximum)
2693/// and in these cases armnn doesn't need a separate layer for the Mul.
2694///
2695class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
2696{
2697public:
Kevin May7d96b162021-02-03 17:38:41 +00002698 ParsedMulTfOperation(ITfParser::TfParserImpl* parser, const tensorflow::NodeDef& node)
telsoa01c577f2c2018-08-31 09:22:23 +01002699 : DeferredSingleLayerParsedTfOperation(parser, node)
2700 {
2701 }
2702
2703 void CreateLayerDeferred() override
2704 {
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002705 ARMNN_ASSERT(m_Layer == nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01002706 m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
2707 }
2708};
2709
Kevin May7d96b162021-02-03 17:38:41 +00002710ParsedTfOperationPtr ITfParser::TfParserImpl::ParseMul(const tensorflow::NodeDef& nodeDef,
2711 const tensorflow::GraphDef& graphDef)
surmeh01bceff2f2018-03-29 16:29:27 +01002712{
Jan Eilers8eb25602020-03-09 12:13:48 +00002713 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002714
telsoa01c577f2c2018-08-31 09:22:23 +01002715 return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002716}
2717
Kevin May7d96b162021-02-03 17:38:41 +00002718ParsedTfOperationPtr ITfParser::TfParserImpl::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
surmeh01bceff2f2018-03-29 16:29:27 +01002719 const tensorflow::GraphDef& graphDef)
2720{
Jan Eilers8eb25602020-03-09 12:13:48 +00002721 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002722
2723 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
2724
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002725 const LayerBindingId layerId = armnn::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
surmeh01bceff2f2018-03-29 16:29:27 +01002726
2727 auto it = m_InputShapes.find(nodeDef.name());
2728 if (it == m_InputShapes.end())
2729 {
telsoa01c577f2c2018-08-31 09:22:23 +01002730 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002731 fmt::format("Missing input shape for Placeholder '{}' {}",
2732 nodeDef.name(),
2733 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002734 }
2735 TensorInfo tensorInfo(it->second, DataType::Float32);
2736
2737 IConnectableLayer* const layer = m_Network->AddInputLayer(layerId, nodeDef.name().c_str());
2738
2739 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2740
2741 TrackInputBinding(layer, layerId, tensorInfo);
2742
2743 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2744}
2745
Kevin May7d96b162021-02-03 17:38:41 +00002746ParsedTfOperationPtr ITfParser::TfParserImpl::ParseRealDiv(const tensorflow::NodeDef& nodeDef,
2747 const tensorflow::GraphDef& graphDef)
saoste01bbd40612018-08-28 15:41:51 +01002748{
Jan Eilers8eb25602020-03-09 12:13:48 +00002749 IgnoreUnused(graphDef);
saoste01bbd40612018-08-28 15:41:51 +01002750 return AddRealDivLayer(nodeDef);
2751}
2752
Kevin May7d96b162021-02-03 17:38:41 +00002753ParsedTfOperationPtr ITfParser::TfParserImpl::ParseRelu(const tensorflow::NodeDef& nodeDef,
surmeh01bceff2f2018-03-29 16:29:27 +01002754 const tensorflow::GraphDef& graphDef)
2755{
Jan Eilers8eb25602020-03-09 12:13:48 +00002756 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002757
2758 ActivationDescriptor activationDesc;
2759 activationDesc.m_Function = ActivationFunction::ReLu;
2760 return AddActivationLayer(nodeDef, activationDesc);
2761}
2762
Kevin May7d96b162021-02-03 17:38:41 +00002763ParsedTfOperationPtr ITfParser::TfParserImpl::ParseRelu6(const tensorflow::NodeDef& nodeDef,
surmeh01bceff2f2018-03-29 16:29:27 +01002764 const tensorflow::GraphDef& graphDef)
2765{
Jan Eilers8eb25602020-03-09 12:13:48 +00002766 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002767
2768 ActivationDescriptor activationDesc;
2769 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2770 activationDesc.m_A = 6.0f;
2771 activationDesc.m_B = 0.0f;
2772
2773 return AddActivationLayer(nodeDef, activationDesc);
2774}
2775
Kevin May7d96b162021-02-03 17:38:41 +00002776ParsedTfOperationPtr ITfParser::TfParserImpl::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
surmeh01bceff2f2018-03-29 16:29:27 +01002777 const tensorflow::GraphDef& graphDef)
2778{
Jan Eilers8eb25602020-03-09 12:13:48 +00002779 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002780
2781 ActivationDescriptor activationDesc;
2782 activationDesc.m_Function = ActivationFunction::Sigmoid;
2783
2784 return AddActivationLayer(nodeDef, activationDesc);
2785}
2786
Kevin May7d96b162021-02-03 17:38:41 +00002787ParsedTfOperationPtr ITfParser::TfParserImpl::ParseRsqrt(const tensorflow::NodeDef &nodeDef,
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +00002788 const tensorflow::GraphDef &graphDef)
2789{
Jan Eilers8eb25602020-03-09 12:13:48 +00002790 IgnoreUnused(graphDef);
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +00002791
2792 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2793
josh minor4a3c6102020-01-06 16:40:46 -06002794 ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
2795 IConnectableLayer* const layer = m_Network->AddElementwiseUnaryLayer(descriptor, nodeDef.name().c_str());
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +00002796
2797 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2798 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2799 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2800
2801 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2802}
2803
Kevin May7d96b162021-02-03 17:38:41 +00002804ParsedTfOperationPtr ITfParser::TfParserImpl::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
surmeh01bceff2f2018-03-29 16:29:27 +01002805 const tensorflow::GraphDef& graphDef)
2806{
Jan Eilers8eb25602020-03-09 12:13:48 +00002807 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002808
2809 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2810
2811 SoftmaxDescriptor softmaxDescriptor;
2812 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(softmaxDescriptor, nodeDef.name().c_str());
2813
2814 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2815 prevLayerSlot.Connect(layer->GetInputSlot(0));
2816 layer->GetOutputSlot(0).SetTensorInfo(prevLayerSlot.GetTensorInfo());
2817
2818 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2819}
2820
Kevin May7d96b162021-02-03 17:38:41 +00002821ParsedTfOperationPtr ITfParser::TfParserImpl::ParseSplit(const tensorflow::NodeDef& nodeDef,
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002822 const tensorflow::GraphDef& graphDef)
2823{
Jan Eilers8eb25602020-03-09 12:13:48 +00002824 IgnoreUnused(graphDef);
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002825
2826 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2827 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2828 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2829
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002830 // Constant tensor index
2831 unsigned int index = GetConstInputIndex(inputs);
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002832 // Get the axis tensor data
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002833 ParsedConstTfOperation<int32_t>* shapeNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002834 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002835
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002836 std::vector<int32_t> axisTensorData;
2837 shapeNode->GetConstTensor(axisTensorData);
2838
2839 // This splitDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
2840 const unsigned int splitDim = static_cast<unsigned int>(axisTensorData[0]);
2841
2842 // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
2843 if (splitDim == 0 || splitDim == 2)
2844 {
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002845 throw armnn::ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002846 fmt::format("Dimension {} for split is not supported by Armnn. "
2847 "Node {} {}",
2848 splitDim,
2849 nodeDef.name(),
2850 CHECK_LOCATION().AsString()));
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002851 }
2852
Saoirse Stewart315258e2019-02-28 11:32:41 +00002853 // As Armnn only supports splitter outputs of the same shape, therefore num_split will be limited to an integer.
2854 uint32_t num_split = ReadMandatoryNodeUint32Attribute(nodeDef, "num_split");
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002855
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002856 IOutputSlot& inputSlot = inputs[1 - index].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1 - index].m_Index);
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002857 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2858
Matthew Jacksondba634f2019-08-15 15:14:18 +01002859 const unsigned int supportedNumDims = 4;
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002860 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2861
Matthew Jacksondba634f2019-08-15 15:14:18 +01002862 if (inputDimSize != supportedNumDims)
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002863 {
2864 throw armnn::ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002865 fmt::format("The number of dimensions: {} for input tensors of the "
2866 "split op should be {} {}",
2867 inputTensorInfo.GetNumDimensions(),
2868 supportedNumDims,
2869 CHECK_LOCATION().AsString()));
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002870 }
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002871
2872 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2873
2874 // Add current input shape to splitterDimSizes
2875 for (unsigned int i = 0; i < inputDimSize; ++i)
2876 {
2877 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2878 }
2879
2880 if (splitterDimSizes[splitDim] % num_split != 0)
2881 {
2882 throw ParseException("Number of splits must evenly divide the dimension");
2883 }
2884 splitterDimSizes[splitDim] /= num_split;
2885
2886 SplitterDescriptor splitDesc(num_split);
2887 for (unsigned int g = 0; g < num_split; ++g)
2888 {
2889 // Set the size of the views.
2890 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2891 {
2892 splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
2893 }
2894 splitDesc.SetViewOriginCoord(g, splitDim, splitterDimSizes[splitDim] * g);
2895 }
2896
2897 IConnectableLayer *layer = m_Network->AddSplitterLayer(splitDesc, nodeDef.name().c_str());
2898
2899 inputSlot.Connect(layer->GetInputSlot(0));
2900
2901 TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
2902 splitterDimSizes.data());
2903
2904 for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
2905 {
2906 layer->GetOutputSlot(i).SetTensorInfo(armnn::TensorInfo(outShape, inputTensorInfo.GetDataType()));
2907 }
2908
2909 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2910}
2911
Kevin May7d96b162021-02-03 17:38:41 +00002912ParsedTfOperationPtr ITfParser::TfParserImpl::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
surmeh01bceff2f2018-03-29 16:29:27 +01002913 const tensorflow::GraphDef& graphDef)
2914{
Jan Eilers8eb25602020-03-09 12:13:48 +00002915 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002916
2917 ActivationDescriptor activationDesc;
2918 activationDesc.m_Function = ActivationFunction::SoftReLu;
2919
2920 return AddActivationLayer(nodeDef, activationDesc);
2921}
2922
Kevin May7d96b162021-02-03 17:38:41 +00002923ParsedTfOperationPtr ITfParser::TfParserImpl::ParseStridedSlice(const tensorflow::NodeDef& nodeDef,
2924 const tensorflow::GraphDef& graphDef)
Georgios Pinitas5e90aab2020-02-14 14:46:51 +00002925{
Jan Eilers8eb25602020-03-09 12:13:48 +00002926 IgnoreUnused(graphDef);
Georgios Pinitas5e90aab2020-02-14 14:46:51 +00002927
2928 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2929 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2930 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2931
2932 ParsedConstTfOperation<int32_t>* beginNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002933 PolymorphicDowncast<ParsedConstTfOperation<int32_t> *>(inputs[1].m_IndexedValue);
Georgios Pinitas5e90aab2020-02-14 14:46:51 +00002934 std::vector<int32_t> beginTensorData;
2935 beginNode->GetConstTensor(beginTensorData);
2936
2937 ParsedConstTfOperation<int32_t>* endNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002938 PolymorphicDowncast<ParsedConstTfOperation<int32_t> *>(inputs[2].m_IndexedValue);
Georgios Pinitas5e90aab2020-02-14 14:46:51 +00002939 std::vector<int32_t> endTensorData;
2940 endNode->GetConstTensor(endTensorData);
2941
2942 ParsedConstTfOperation<int32_t>* stridesNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002943 PolymorphicDowncast<ParsedConstTfOperation<int32_t> *>(inputs[3].m_IndexedValue);
Georgios Pinitas5e90aab2020-02-14 14:46:51 +00002944 std::vector<int32_t> stridesTensorData;
2945 stridesNode->GetConstTensor(stridesTensorData);
2946
2947 StridedSliceDescriptor desc;
2948 desc.m_Begin = beginTensorData;
2949 desc.m_End = endTensorData;
2950 desc.m_Stride = stridesTensorData;
2951 desc.m_BeginMask = ReadMandatoryNodeInt32Attribute(nodeDef, "begin_mask");
2952 desc.m_EndMask = ReadMandatoryNodeInt32Attribute(nodeDef, "end_mask");
2953 desc.m_EllipsisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "ellipsis_mask");
2954 desc.m_NewAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "new_axis_mask");
2955 desc.m_ShrinkAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "shrink_axis_mask");
2956 desc.m_DataLayout = armnn::DataLayout::NHWC;
2957 IConnectableLayer* const layer = m_Network->AddStridedSliceLayer(desc, nodeDef.name().c_str());
2958
2959 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2960 TensorInfo inputTensorInfo = prevLayerSlot.GetTensorInfo();
2961
2962 TensorInfo outputTensorInfo;
2963 CalculateStridedSliceOutputTensorInfo(inputTensorInfo, desc, outputTensorInfo);
2964
2965 prevLayerSlot.Connect(layer->GetInputSlot(0));
2966 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2967
2968 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2969}
2970
Kevin May7d96b162021-02-03 17:38:41 +00002971ParsedTfOperationPtr ITfParser::TfParserImpl::ParseTanh(const tensorflow::NodeDef& nodeDef,
2972 const tensorflow::GraphDef& graphDef)
surmeh01bceff2f2018-03-29 16:29:27 +01002973{
Jan Eilers8eb25602020-03-09 12:13:48 +00002974 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002975
2976 ActivationDescriptor activationDesc;
2977 activationDesc.m_Function = ActivationFunction::TanH;
2978 activationDesc.m_A = 1.0f;
2979 activationDesc.m_B = 1.0f;
2980
2981 return AddActivationLayer(nodeDef, activationDesc);
2982}
2983
Kevin May7d96b162021-02-03 17:38:41 +00002984ParsedTfOperationPtr ITfParser::TfParserImpl::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
surmeh01bceff2f2018-03-29 16:29:27 +01002985 ActivationDescriptor& activationDesc)
2986{
2987 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2988
2989 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, nodeDef.name().c_str());
2990
2991 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2992 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2993 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2994 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2995}
2996
Kevin May7d96b162021-02-03 17:38:41 +00002997ParsedTfOperationPtr ITfParser::TfParserImpl::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
surmeh01bceff2f2018-03-29 16:29:27 +01002998 const tensorflow::GraphDef& graphDef)
2999{
3000 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
3001}
3002
Kevin May7d96b162021-02-03 17:38:41 +00003003ParsedTfOperationPtr ITfParser::TfParserImpl::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
surmeh01bceff2f2018-03-29 16:29:27 +01003004 const tensorflow::GraphDef& graphDef)
3005{
3006 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
3007}
3008
Kevin May7d96b162021-02-03 17:38:41 +00003009ParsedTfOperationPtr ITfParser::TfParserImpl::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
surmeh01bceff2f2018-03-29 16:29:27 +01003010 const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
3011{
Jan Eilers8eb25602020-03-09 12:13:48 +00003012 IgnoreUnused(graphDef);
Derek Lambertibaa177f2019-12-10 22:00:43 +00003013
surmeh01bceff2f2018-03-29 16:29:27 +01003014 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
3015 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3016 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
3017
3018 if (inputs.size() != 1)
3019 {
telsoa01c577f2c2018-08-31 09:22:23 +01003020 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003021 fmt::format("2D Pooling expects one input!. Got {} for Node {} {}",
3022 inputs.size(),
3023 nodeDef.name(),
3024 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003025 }
3026
3027 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
3028 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
3029 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
3030 std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef, "ksize"); // size of pool windows
3031
3032 Pooling2dDescriptor pooling2dDescriptor;
FrancisMurtaghf005e312018-12-06 15:26:04 +00003033 pooling2dDescriptor.m_PoolType = pooltype;
3034 pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
surmeh01bceff2f2018-03-29 16:29:27 +01003035 pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;
3036
telsoa01c577f2c2018-08-31 09:22:23 +01003037 CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
FrancisMurtaghf005e312018-12-06 15:26:04 +00003038 DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
3039 pooling2dDescriptor.m_DataLayout = dataLayout;
3040 DataLayoutIndexed dataLayoutIndexed(dataLayout);
telsoa01c577f2c2018-08-31 09:22:23 +01003041
FrancisMurtaghf005e312018-12-06 15:26:04 +00003042 pooling2dDescriptor.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
3043 pooling2dDescriptor.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
3044 pooling2dDescriptor.m_PoolWidth = ksize[dataLayoutIndexed.GetWidthIndex()];
3045 pooling2dDescriptor.m_PoolHeight = ksize[dataLayoutIndexed.GetHeightIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01003046
FrancisMurtaghf005e312018-12-06 15:26:04 +00003047 uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
3048 uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01003049
3050 bool padding = false;
3051 TensorInfo outputInfo;
FrancisMurtaghf005e312018-12-06 15:26:04 +00003052 unsigned int outputHeight = 0;
3053 unsigned int outputWidth = 0;
telsoa01c577f2c2018-08-31 09:22:23 +01003054
3055 CHECK_PADDING_TYPE(nodeDef, paddingString);
3056
surmeh01bceff2f2018-03-29 16:29:27 +01003057 if (paddingString == "SAME")
3058 {
3059 padding = true;
FrancisMurtaghf005e312018-12-06 15:26:04 +00003060
3061 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
3062 static_cast<float>(pooling2dDescriptor.m_StrideY)));
3063 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
3064 static_cast<float>(pooling2dDescriptor.m_StrideX)));
surmeh01bceff2f2018-03-29 16:29:27 +01003065 }
3066 else if (paddingString == "VALID")
3067 {
3068 padding = false;
FrancisMurtaghf005e312018-12-06 15:26:04 +00003069
3070 outputHeight = static_cast<uint32_t>(ceil(
3071 static_cast<float>(inputHeight - pooling2dDescriptor.m_PoolHeight + 1) /
3072 static_cast<float>(pooling2dDescriptor.m_StrideY)));
3073 outputWidth = static_cast<uint32_t>(ceil(
3074 static_cast<float>(inputWidth - pooling2dDescriptor.m_PoolWidth + 1) /
3075 static_cast<float>(pooling2dDescriptor.m_StrideX)));
3076 }
3077
3078 switch (dataLayout)
3079 {
3080 case DataLayout::NHWC:
3081 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
3082 outputHeight,
3083 outputWidth,
3084 inputTensorInfo.GetShape()[3] },
3085 DataType::Float32);
3086 break;
3087 case DataLayout::NCHW:
3088 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
3089 inputTensorInfo.GetShape()[1],
3090 outputHeight,
3091 outputWidth },
3092 DataType::Float32);
3093 break;
surmeh01bceff2f2018-03-29 16:29:27 +01003094 }
surmeh01bceff2f2018-03-29 16:29:27 +01003095
Sadik Armagan60bb9d82021-01-11 15:15:01 +00003096 CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX, 1u,
FrancisMurtaghf005e312018-12-06 15:26:04 +00003097 pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
Sadik Armagan60bb9d82021-01-11 15:15:01 +00003098 CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY, 1u,
FrancisMurtaghf005e312018-12-06 15:26:04 +00003099 pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01003100
3101
3102 IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
3103 if (layer == nullptr)
3104 {
telsoa01c577f2c2018-08-31 09:22:23 +01003105 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003106 fmt::format("Failed to add pooling2d layer for {} {}",
3107 nodeDef.name(),
3108 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003109 }
3110
3111 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3112
FrancisMurtaghf005e312018-12-06 15:26:04 +00003113 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01003114
3115 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3116}
3117
Kevin May7d96b162021-02-03 17:38:41 +00003118ParsedTfOperationPtr ITfParser::TfParserImpl::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
surmeh01bceff2f2018-03-29 16:29:27 +01003119{
3120 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3121
3122 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3123 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3124
3125 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
3126 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
3127
3128 if (isBiasAdd)
3129 {
3130 // BiasAdd takes bias as a 1D tensor. We need to add a reshape layer to create a 4D tensor
3131 // with the same data in the correct dimension for broadcast in addition.
3132 if(input1Info.GetNumDimensions() != 1)
3133 {
telsoa01c577f2c2018-08-31 09:22:23 +01003134 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003135 fmt::format("Unsupported bias for BiasAdd. It should be a 1D vector. "
3136 "Got {} dimensions for input {}. Node {} {}",
3137 input1Info.GetNumDimensions(),
3138 inputs[1].m_IndexedValue->GetNode().name(),
3139 nodeDef.name(),
3140 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003141 }
3142
3143 const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
surmeh01bceff2f2018-03-29 16:29:27 +01003144
telsoa01c577f2c2018-08-31 09:22:23 +01003145 CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
saoste01bbd40612018-08-28 15:41:51 +01003146 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01003147 }
3148 else
3149 {
3150 if (input0Info.GetNumDimensions() == 1)
3151 {
3152 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003153 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01003154 }
3155
3156 if (input1Info.GetNumDimensions() == 1)
3157 {
3158 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003159 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01003160 }
3161 }
3162
3163 IConnectableLayer* const layer = m_Network->AddAdditionLayer(nodeDef.name().c_str());
3164
3165 input0Slot->Connect(layer->GetInputSlot(0));
3166 input1Slot->Connect(layer->GetInputSlot(1));
3167
Nattapat Chaimanowongfab64f02019-02-15 16:46:24 +00003168 if (input0Info.GetNumDimensions() == input1Info.GetNumDimensions())
3169 {
3170 const TensorShape& input0Shape = input0Info.GetShape();
3171 const TensorShape& input1Shape = input1Info.GetShape();
3172
3173 std::vector<unsigned int> outputShape;
3174 outputShape.reserve(input0Shape.GetNumDimensions());
3175 TensorInfo outputInfo(input0Info);
3176
3177 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
3178 {
3179 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3180 }
3181
3182 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
3183
3184 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3185 }
3186 else if (input0Info.GetNumDimensions() == 1 && isBiasAdd == false)
surmeh01bceff2f2018-03-29 16:29:27 +01003187 {
3188 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3189 }
3190 else
3191 {
3192 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3193 }
3194
3195 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3196}
3197
Kevin May7d96b162021-02-03 17:38:41 +00003198ParsedTfOperationPtr ITfParser::TfParserImpl::AddRealDivLayer(const tensorflow::NodeDef& nodeDef)
saoste01bbd40612018-08-28 15:41:51 +01003199{
3200 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3201
3202 IConnectableLayer* const layer = m_Network->AddDivisionLayer(nodeDef.name().c_str());
3203 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3204 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3205
3206 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3207 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3208
3209
3210 if (input0NumDims < input1NumDims)
3211 {
3212 const bool isNHWC = true;
3213 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3214 }
3215 if (input1NumDims < input0NumDims)
3216 {
3217 const bool isNHWC = true;
3218 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3219 }
3220
3221 input0Slot->Connect(layer->GetInputSlot(0));
3222 input1Slot->Connect(layer->GetInputSlot(1));
3223
3224 if (input0NumDims < input1NumDims)
3225 {
3226 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3227 }
3228 else
3229 {
3230 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3231
3232 }
3233 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3234}
3235
Kevin May7d96b162021-02-03 17:38:41 +00003236ParsedTfOperationPtr ITfParser::TfParserImpl::AddMaximumLayer(const tensorflow::NodeDef& nodeDef)
Sadik Armagan975c09a2018-12-04 10:02:08 +00003237{
3238 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3239
3240 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3241 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3242
3243 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3244 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3245
3246 if (input0NumDims < input1NumDims)
3247 {
3248 const bool isNHWC = true;
3249 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3250 }
3251 if (input1NumDims < input0NumDims)
3252 {
3253 const bool isNHWC = true;
3254 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3255 }
3256
3257 IConnectableLayer* const layer = m_Network->AddMaximumLayer(nodeDef.name().c_str());
3258
3259 input0Slot->Connect(layer->GetInputSlot(0));
3260 input1Slot->Connect(layer->GetInputSlot(1));
3261
3262 TensorInfo outputInfo = input0Slot->GetTensorInfo();
3263 std::vector<unsigned int> outputShape;
3264
3265 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
3266 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
3267
3268 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
3269 {
3270 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3271 }
3272
3273 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
3274 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3275
3276 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3277}
3278
Kevin May7d96b162021-02-03 17:38:41 +00003279IConnectableLayer* ITfParser::TfParserImpl::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
telsoa01c577f2c2018-08-31 09:22:23 +01003280{
3281 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3282
3283 IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
3284 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3285 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3286
3287 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3288 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3289
3290 if (input0NumDims < input1NumDims)
3291 {
3292 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003293 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01003294 }
3295 if (input1NumDims < input0NumDims)
3296 {
3297 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003298 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01003299 }
3300
3301 input0Slot->Connect(layer->GetInputSlot(0));
3302 input1Slot->Connect(layer->GetInputSlot(1));
3303
3304 if (input0NumDims < input1NumDims)
3305 {
3306 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3307 }
3308 else
3309 {
3310 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3311 }
3312 return layer;
3313}
3314
// Builds a FullyConnected layer from a TF MatMul node, optionally fused with a
// following BiasAdd/Add node (addNodeDef may be nullptr for no bias).
// Both the weights and (if present) the bias must be constant tensors.
// Returns the new layer with its output TensorInfo set to { batches, outputSize }.
IConnectableLayer* ITfParser::TfParserImpl::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
    const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
{
    // Finds bias const (if applicable).
    ParsedConstTfOperation<float>* biasNode = nullptr;
    if (addNodeDef != nullptr)
    {
        std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
        // Finds our inputs. The bias may be either operand of the Add node; the
        // other operand is the MatMul output.
        if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
        {
            biasNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
        }
        else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
        {
            biasNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
        }
        else
        {
            throw ParseException(
                fmt::format("ArmNN only supports fully connected layers with constant bias. "
                            "Inputs {} and {}. AddNode {}. MatMulNode {} {}",
                            addInputs[0].m_IndexedValue->GetNode().name(),
                            addInputs[1].m_IndexedValue->GetNode().name(),
                            addNodeDef->name(),
                            matMulNodeDef.name(),
                            CHECK_LOCATION().AsString()));
        }
    }

    // Finds matmul inputs: one operand must be the constant weights, the other
    // is the actual input activation.
    ParsedConstTfOperation<float>* weightNode = nullptr;
    ParsedTfOperation* inputNode = nullptr;
    unsigned int inputIdx = 0;
    std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
    if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
    {
        weightNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
        inputNode = mulInputs[1].m_IndexedValue;
        inputIdx = mulInputs[1].m_Index;
    }
    else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
    {
        weightNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
        inputNode = mulInputs[0].m_IndexedValue;
        inputIdx = mulInputs[0].m_Index;
    }
    else
    {
        throw ParseException(
            fmt::format("ArmNN only supports fully connected layers with constant weights. "
                        "Inputs {} and {}. MatMulNode {} {}",
                        mulInputs[0].m_IndexedValue->GetNode().name(),
                        mulInputs[1].m_IndexedValue->GetNode().name(),
                        matMulNodeDef.name(),
                        CHECK_LOCATION().AsString()));
    }

    std::vector<float> weightTensorData;
    // Handles weight. weightTensorData keeps the backing storage alive while
    // the ConstTensor (a non-owning view) is in use.
    ConstTensor weights = weightNode->GetConstTensor(weightTensorData);

    FullyConnectedDescriptor desc;
    desc.m_BiasEnabled = addNodeDef != nullptr;

    IConnectableLayer* layer = nullptr;
    Optional<ConstTensor> optionalBiases;
    std::vector<float> biasTensorData;
    // Makes the layer.
    if (addNodeDef != nullptr)
    {
        ConstTensor biases = biasNode->GetConstTensor(biasTensorData);

        // Weights are laid out [inputSize, outputSize]; the bias vector length
        // must match outputSize.
        if (weights.GetShape()[1] != biases.GetShape()[0])
        {
            throw ParseException(
                fmt::format("Shape of matmul weights and bias do not match. "
                            "AddNode {}. MatMulNode {} {}",
                            addNodeDef->name(),
                            matMulNodeDef.name(),
                            CHECK_LOCATION().AsString()));
        }

        optionalBiases = Optional<ConstTensor>(biases);
    }
    layer = m_Network->AddFullyConnectedLayer(desc, weights, optionalBiases, armnnLayerName);

    ARMNN_ASSERT(layer != nullptr);

    inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
    unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];

    // Handles output: shape is [batches, outputSize] taken from the input's
    // leading dimension and the weights' second dimension.
    TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    return layer;
}
3412
Kevin May7d96b162021-02-03 17:38:41 +00003413void ITfParser::TfParserImpl::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
surmeh01bceff2f2018-03-29 16:29:27 +01003414{
telsoa01c577f2c2018-08-31 09:22:23 +01003415 // Gets the type of the node (assume float).
surmeh01bceff2f2018-03-29 16:29:27 +01003416 tensorflow::DataType type = tensorflow::DT_FLOAT;
3417 if (nodeDef.attr().count("T") != 0)
3418 {
3419 auto attr = nodeDef.attr().at("T");
3420 type = attr.type();
3421 }
3422 else if (nodeDef.attr().count("dtype") != 0)
3423 {
3424 auto attr = nodeDef.attr().at("dtype");
3425 type = attr.type();
3426 }
3427
Ferran Balaguerc602f292019-02-08 17:09:55 +00003428 if ((type != tensorflow::DT_FLOAT && type != tensorflow::DT_INT32) && nodeDef.op() != "Const")
surmeh01bceff2f2018-03-29 16:29:27 +01003429 {
telsoa01c577f2c2018-08-31 09:22:23 +01003430 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003431 fmt::format("Currently only FLOAT and INT32 are supported for tensorflow nodes (apart from Const). "
3432 "Got {} for Node {} {}",
3433 tensorflow::DataType_Name(type),
3434 nodeDef.name(),
3435 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003436 }
3437
3438 const std::string& operation = nodeDef.op();
narpra016f37f832018-12-21 18:30:00 +00003439 auto itControlInput = std::find(m_ControlInputs.begin(), m_ControlInputs.end(), operation);
3440 if (itControlInput != m_ControlInputs.end())
3441 {
3442 // We currently allow Control Input from TensorFlow graph but we ignore them from ArmNN graph.
3443 return;
3444 }
surmeh01bceff2f2018-03-29 16:29:27 +01003445 auto it = ms_OperationNameToParsingFunctions.find(operation);
3446 if (it != ms_OperationNameToParsingFunctions.end())
3447 {
3448 auto func = it->second;
3449 ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
3450 ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
3451
telsoa01c577f2c2018-08-31 09:22:23 +01003452 // Stores the parsed operation so that dependent layers can connect to it.
surmeh01bceff2f2018-03-29 16:29:27 +01003453 auto it = m_ParsedTfOperations.find(nodeDef.name());
3454 if (it != m_ParsedTfOperations.end())
3455 {
James Ward58dec6b2020-09-11 17:32:44 +01003456 throw ParseException(fmt::format("Name {} used by more than one node", nodeDef.name()));
surmeh01bceff2f2018-03-29 16:29:27 +01003457 }
3458 m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
3459
telsoa01c577f2c2018-08-31 09:22:23 +01003460 // If this node was requested as an output from the network, then adds an ArmNN output layer.
surmeh01bceff2f2018-03-29 16:29:27 +01003461 if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
3462 m_RequestedOutputs.end())
3463 {
3464 auto outId = ParseOutputId(nodeDef.name());
Matthew Sloyan589e3e82020-09-11 16:17:48 +01003465 const LayerBindingId layerId = armnn::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
surmeh01bceff2f2018-03-29 16:29:27 +01003466 IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
3467
3468 TensorInfo tensorInfo = prevSlot.GetTensorInfo();
3469
3470 IConnectableLayer* outputLayer = m_Network->AddOutputLayer(layerId, nodeDef.name().c_str());
3471
3472 prevSlot.Connect(outputLayer->GetInputSlot(0));
3473
3474 TrackOutputBinding(outputLayer, layerId, tensorInfo);
3475 }
3476 }
3477 else
3478 {
telsoa01c577f2c2018-08-31 09:22:23 +01003479 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003480 fmt::format("Unsupported operation {} in tensorflow::GraphDef {}",
3481 operation,
3482 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003483 }
3484}
3485
// Parses the whole GraphDef: indexes all nodes by name, validates the
// user-requested inputs/outputs, topologically sorts the nodes reachable from
// the requested outputs, and then parses each node in dependency order.
void ITfParser::TfParserImpl::LoadGraphDef(const tensorflow::GraphDef& graphDef)
{
    // Adds all nodes to our map. The stored pointers alias graphDef, which the
    // caller keeps alive for the duration of this call.
    m_NodesByName.clear();
    m_NetworkInputsBindingInfo.clear();
    m_NetworkOutputsBindingInfo.clear();

    for (int i = 0; i < graphDef.node_size(); ++i)
    {
        const tensorflow::NodeDef& node = graphDef.node(i);
        m_NodesByName[node.name()] = &node;
    }

    // Checks that the input nodes the user has requested exist.
    for (const auto& pair : m_InputShapes)
    {
        const std::string& requestedInputName = pair.first;
        auto nodeIt = m_NodesByName.find(requestedInputName);
        if (nodeIt == m_NodesByName.end())
        {
            throw ParseException(
                fmt::format("Couldn't find requested input node '{}' in graph {}",
                            requestedInputName,
                            CHECK_LOCATION().AsString()));
        }
    }

    // Finds the output nodes the user requested.
    std::vector<const tensorflow::NodeDef*> targetNodes;
    for (const std::string& requestedOutputName : m_RequestedOutputs)
    {
        auto nodeIt = m_NodesByName.find(requestedOutputName);
        if (nodeIt == m_NodesByName.end())
        {
            throw ParseException(
                fmt::format("Couldn't find requested output node '{}' in graph {}",
                            requestedOutputName,
                            CHECK_LOCATION().AsString()));
        }
        targetNodes.push_back(nodeIt->second);
    }

    // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
    // The lambda supplies each node's dependencies (its TF input nodes); the
    // sort fails (returns false) if the graph contains a cycle.
    std::vector<const tensorflow::NodeDef*> sortedNodes;
    if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
        targetNodes,
        [this](const tensorflow::NodeDef* node)
        {
            auto outputs = GetTfInputNodes(*node);
            std::vector<const tensorflow::NodeDef*> nodesOnly;
            for (const auto & o : outputs) {
                nodesOnly.push_back(o.m_IndexedValue);
            }
            return nodesOnly;
        },
        sortedNodes))
    {
        throw ParseException(
            fmt::format("Cycle detected in graph {}",
                        CHECK_LOCATION().AsString()));
    }

    // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
    for (const auto& it : sortedNodes)
    {
        const tensorflow::NodeDef& currentNode = *it;
        LoadNodeDef(currentNode, graphDef);
    }
}
3555
Kevin May7d96b162021-02-03 17:38:41 +00003556INetworkPtr ITfParser::TfParserImpl::CreateNetworkFromTextFile(const char* graphFile,
surmeh01bceff2f2018-03-29 16:29:27 +01003557 const std::map<std::string, TensorShape>& inputShapes,
3558 const std::vector<std::string>& requestedOutputs)
3559{
3560 FILE* fd = fopen(graphFile, "r");
3561
3562 if (fd == nullptr)
3563 {
telsoa01c577f2c2018-08-31 09:22:23 +01003564 throw FileNotFoundException(
James Ward58dec6b2020-09-11 17:32:44 +01003565 fmt::format("Graph file {} failed to open {}",
3566 graphFile,
3567 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003568 }
3569
telsoa01c577f2c2018-08-31 09:22:23 +01003570 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003571 tensorflow::GraphDef graphDef;
3572 auto input = new google::protobuf::io::FileInputStream(fileno(fd));
3573 bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
3574 delete input;
3575 fclose(fd);
3576
3577 if (!success)
3578 {
telsoa01c577f2c2018-08-31 09:22:23 +01003579 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003580 fmt::format("Failed to parse graph file {}",
3581 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003582 }
3583
3584 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3585}
3586
Kevin May7d96b162021-02-03 17:38:41 +00003587INetworkPtr ITfParser::TfParserImpl::CreateNetworkFromString(const char* protoText,
surmeh01bceff2f2018-03-29 16:29:27 +01003588 const std::map<std::string, TensorShape>& inputShapes,
3589 const std::vector<std::string>& requestedOutputs)
3590{
telsoa01c577f2c2018-08-31 09:22:23 +01003591 // Parses the string into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003592 tensorflow::GraphDef graphDef;
3593 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
3594
3595 if (!success)
3596 {
telsoa01c577f2c2018-08-31 09:22:23 +01003597 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003598 fmt::format("Failed to parse graph file {}",
3599 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003600 }
3601
3602 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3603}
3604
// Creates an ArmNN network from a binary-serialized GraphDef file.
// Throws FileNotFoundException if the file cannot be opened and ParseException
// if it is not a valid binary GraphDef.
INetworkPtr ITfParser::TfParserImpl::CreateNetworkFromBinaryFile(const char* graphFile,
    const std::map<std::string, TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    FILE* fd = fopen(graphFile, "rb");

    if (fd == nullptr)
    {
        throw FileNotFoundException(
            fmt::format("Graph file {} failed to open {}",
                        graphFile,
                        CHECK_LOCATION().AsString()));
    }

    // Parses the file into a message.
    tensorflow::GraphDef graphDef;

    google::protobuf::io::FileInputStream inStream(fileno(fd));
    google::protobuf::io::CodedInputStream codedStream(&inStream);
    // Raise protobuf's default total-bytes limit so large model files parse.
    codedStream.SetTotalBytesLimit(INT_MAX);
    bool success = graphDef.ParseFromCodedStream(&codedStream);
    fclose(fd);

    if (!success)
    {
        throw ParseException(
            fmt::format("Failed to parse protobuf file {} {}",
                        graphFile,
                        CHECK_LOCATION().AsString()));
    }

    return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
}
3638
Kevin May7d96b162021-02-03 17:38:41 +00003639INetworkPtr ITfParser::TfParserImpl::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
surmeh01bceff2f2018-03-29 16:29:27 +01003640 const std::map<std::string, TensorShape>& inputShapes,
3641 const std::vector<std::string>& requestedOutputs)
3642{
3643 m_Network = INetwork::Create();
3644
3645 m_InputShapes = inputShapes;
3646 if (requestedOutputs.size() == 0)
3647 {
telsoa01c577f2c2018-08-31 09:22:23 +01003648 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003649 fmt::format("requestedOutputs must have at least one entry {}",
3650 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003651 }
3652 m_RequestedOutputs = requestedOutputs;
3653
3654 try
3655 {
3656 LoadGraphDef(graphDef);
3657 }
3658 catch (const ParseException& e)
3659 {
3660 Cleanup();
3661 throw e;
3662 }
3663
3664 Cleanup();
3665
3666 return std::move(m_Network);
3667}
3668
Kevin May7d96b162021-02-03 17:38:41 +00003669void ITfParser::TfParserImpl::Cleanup()
surmeh01bceff2f2018-03-29 16:29:27 +01003670{
telsoa01c577f2c2018-08-31 09:22:23 +01003671 // Cleanup, in case we reuse this parser.
surmeh01bceff2f2018-03-29 16:29:27 +01003672 m_InputShapes.clear();
3673 m_RequestedOutputs.clear();
3674 m_NodesByName.clear();
3675 m_ParsedTfOperations.clear();
3676}
3677
// Returns the (LayerBindingId, TensorInfo) pair registered for the named
// network input. Throws InvalidArgumentException for unknown names.
BindingPointInfo ITfParser::TfParserImpl::GetNetworkInputBindingInfo(const std::string& name) const
{
    return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
}
3682
// Returns the (LayerBindingId, TensorInfo) pair registered for the named
// network output. Throws InvalidArgumentException for unknown names.
BindingPointInfo ITfParser::TfParserImpl::GetNetworkOutputBindingInfo(const std::string& name) const
{
    return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
}
3687
Kevin May7d96b162021-02-03 17:38:41 +00003688std::pair<LayerBindingId, TensorInfo> ITfParser::TfParserImpl::GetBindingInfo(const std::string& layerName,
surmeh01bceff2f2018-03-29 16:29:27 +01003689 const char* bindingPointDesc,
3690 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3691{
3692 auto it = nameToBindingInfo.find(layerName);
3693 if (it == nameToBindingInfo.end())
3694 {
telsoa01c577f2c2018-08-31 09:22:23 +01003695 throw InvalidArgumentException(
James Ward58dec6b2020-09-11 17:32:44 +01003696 fmt::format("Unknown {} '{}' {}",
3697 bindingPointDesc,
3698 layerName,
3699 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003700 }
3701 return it->second;
3702}
3703
// Registers an input layer's binding id and tensor info under the layer's
// name. Throws ParseException if the name is already registered.
void ITfParser::TfParserImpl::TrackInputBinding(IConnectableLayer* layer,
    LayerBindingId id,
    const TensorInfo& tensorInfo)
{
    return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
}
3710
// Registers an output layer's binding id and tensor info under the layer's
// name. Throws ParseException if the name is already registered.
void ITfParser::TfParserImpl::TrackOutputBinding(IConnectableLayer* layer,
    LayerBindingId id,
    const TensorInfo& tensorInfo)
{
    return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
}
3717
Kevin May7d96b162021-02-03 17:38:41 +00003718void ITfParser::TfParserImpl::TrackBindingPoint(IConnectableLayer* layer,
surmeh01bceff2f2018-03-29 16:29:27 +01003719 LayerBindingId id,
3720 const TensorInfo& tensorInfo,
3721 const char* bindingPointDesc,
3722 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3723{
3724 const std::string layerName = layer->GetName();
3725 auto it = nameToBindingInfo.find(layerName);
3726 if (it == nameToBindingInfo.end())
3727 {
3728 nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
3729 }
3730 else
3731 {
telsoa01c577f2c2018-08-31 09:22:23 +01003732 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003733 fmt::format("Id {} used by more than one {} layer {}",
3734 id,
3735 bindingPointDesc,
3736 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003737 }
3738}
3739
// Returns the TF parser version string defined in armnnTfParser/Version.hpp.
const std::string ITfParser::TfParserImpl::GetVersion()
{
    return TF_PARSER_VERSION;
}
3744
surmeh01bceff2f2018-03-29 16:29:27 +01003745} // namespace armnnTfParser