//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "OnnxParser.hpp"

#include "armnnOnnxParser/Version.hpp"

#include <armnn/Descriptors.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <ParserHelper.hpp>
#include <VerificationHelpers.hpp>

#include <fmt/format.h>

#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>

#include <iostream>
#include <numeric>
#include <armnnUtils/Permute.hpp>

using namespace armnn;

namespace armnnOnnxParser
{

IOnnxParser::IOnnxParser() : pOnnxParserImpl(new OnnxParserImpl()) {}

IOnnxParser::~IOnnxParser() = default;

IOnnxParser* IOnnxParser::CreateRaw()
{
    return new IOnnxParser();
}

IOnnxParserPtr IOnnxParser::Create()
{
    return IOnnxParserPtr(CreateRaw(), &IOnnxParser::Destroy);
}

void IOnnxParser::Destroy(IOnnxParser* parser)
{
    delete parser;
}

armnn::INetworkPtr IOnnxParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    return pOnnxParserImpl->CreateNetworkFromBinaryFile(graphFile);
}

armnn::INetworkPtr IOnnxParser::CreateNetworkFromTextFile(const char* graphFile)
{
    return pOnnxParserImpl->CreateNetworkFromTextFile(graphFile);
}

armnn::INetworkPtr IOnnxParser::CreateNetworkFromString(const std::string& protoText)
{
    return pOnnxParserImpl->CreateNetworkFromString(protoText);
}

armnn::INetworkPtr IOnnxParser::CreateNetworkFromBinaryFile(
    const char* graphFile,
    const std::map<std::string, armnn::TensorShape>& inputShapes)
{
    return pOnnxParserImpl->CreateNetworkFromBinaryFile(graphFile, inputShapes);
}

armnn::INetworkPtr IOnnxParser::CreateNetworkFromTextFile(const char* graphFile,
                                                          const std::map<std::string, armnn::TensorShape>& inputShapes)
{
    return pOnnxParserImpl->CreateNetworkFromTextFile(graphFile, inputShapes);
}

armnn::INetworkPtr IOnnxParser::CreateNetworkFromString(const std::string& protoText,
                                                        const std::map<std::string, armnn::TensorShape>& inputShapes)
{
    return pOnnxParserImpl->CreateNetworkFromString(protoText, inputShapes);
}

BindingPointInfo IOnnxParser::GetNetworkInputBindingInfo(const std::string& name) const
{
    return pOnnxParserImpl->GetNetworkInputBindingInfo(name);
}

BindingPointInfo IOnnxParser::GetNetworkOutputBindingInfo(const std::string& name) const
{
    return pOnnxParserImpl->GetNetworkOutputBindingInfo(name);
}

namespace
{
void CheckValidDataType(std::initializer_list<onnx::TensorProto::DataType> validInputTypes,
                        const onnx::TensorProto::DataType actualValue,
                        const char* validExpr,
                        std::string nodeName,
                        std::string tensorName,
                        const armnn::CheckLocation& location)
{
    bool isValid = std::any_of(validInputTypes.begin(),
                               validInputTypes.end(),
                               [&actualValue](onnx::TensorProto::DataType x) { return x == actualValue; } );
    if (!isValid)
    {
        throw ParseException(
            fmt::format("Datatype {} is not valid for tensor '{}' of node '{}', not in {{{}}}. {}",
                        onnx::TensorProto::DataType_Name(actualValue),
                        tensorName,
                        nodeName,
                        validExpr,
                        location.AsString()));
    }
}

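// Helper macros: CHECK_VALID_DATATYPE forwards to CheckValidDataType above, passing the accepted
// ONNX data types both as values and as a printable string; STR_LIST builds the (string, type-list)
// pair that ValidateInputs/VALID_INPUTS consume further down.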
#define CHECK_VALID_DATATYPE(NODE, TENSOR, ACTUAL, ...) \
CheckValidDataType({__VA_ARGS__}, ACTUAL, #__VA_ARGS__, NODE, TENSOR, CHECK_LOCATION())

using StrTypeListPair = std::pair<const char*, std::initializer_list<onnx::TensorProto::DataType>>;
#define STR_LIST(...) StrTypeListPair(#__VA_ARGS__, {__VA_ARGS__})

template <typename Callable>
void ReadMandatoryNodeAttributeImpl(const onnx::NodeProto& node,
                                    const std::string& attribName,
                                    onnx::AttributeProto::AttributeType expectedType,
                                    Callable callable)
{
    auto attribs = node.attribute();
    int attriNum = 0;
    while (attriNum < node.attribute_size())
    {
        if (attribs.Get(attriNum).name() == attribName)
        {
            if (attribs.Get(attriNum).type() == expectedType)
            {
                callable(attribs.Get(attriNum));
            }
            else
            {
                throw ParseException(fmt::format("Attribute {} of node {} expected to have {} as "
                                                 "onnx::AttributeProto::AttributeType, but found {} instead {}",
                                                 attribName,
                                                 node.name(),
                                                 onnx::AttributeProto::AttributeType_Name(expectedType),
                                                 onnx::AttributeProto::AttributeType_Name(attribs.Get(attriNum).type()),
                                                 CHECK_LOCATION().AsString()));
            }
            break;
        }
        ++attriNum;
    }
    if (attriNum == node.attribute_size())
    {
        throw ParseException(fmt::format("Could not find required attribute {} in node {} {}",
                                         attribName, node.name(), CHECK_LOCATION().AsString()));
    }
}

template <typename Callable>
void ReadOptionalNodeAttributeImpl(const onnx::NodeProto& node,
                                   const std::string& attribName,
                                   onnx::AttributeProto::AttributeType expectedType,
                                   Callable callable)
{
    auto attribs = node.attribute();
    for (int attriNum = 0; attriNum < node.attribute_size(); ++attriNum)
    {
        if (attribs.Get(attriNum).name() == attribName)
        {
            if (attribs.Get(attriNum).type() == expectedType)
            {
                callable(attribs.Get(attriNum));
            }
            else
            {
                throw ParseException(
                    fmt::format("Attribute {} of node {} expected to have {} as onnx::AttributeProto::AttributeType, "
                                "but found {} instead {}",
                                attribName,
                                node.name(),
                                onnx::AttributeProto::AttributeType_Name(expectedType),
                                onnx::AttributeProto::AttributeType_Name(attribs.Get(attriNum).type()),
                                CHECK_LOCATION().AsString()));
            }
        }
    }
}

int ReadMandatoryNodeIntAttribute(const onnx::NodeProto& node,
                                  const std::string& name)
{
    int attribValue = 0;
    ReadMandatoryNodeAttributeImpl(node, name, onnx::AttributeProto::INT,
                                   [&attribValue](const onnx::AttributeProto& attrValue)
                                   {
                                       attribValue = CHECKED_INT32(attrValue.i());
                                   });
    return attribValue;
}

int64_t ReadOptionalNodeInt64Attribute(const onnx::NodeProto& node,
                                       const std::string& name,
                                       const int64_t defaultValue = 0)
{
    int64_t attribValue = defaultValue;
    ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::INT,
                                  [&attribValue](const onnx::AttributeProto& attrValue)
                                  {
                                      attribValue = attrValue.i();
                                  });
    return attribValue;
}

std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const onnx::NodeProto& node,
                                                           const std::string& name)
{
    std::vector<uint32_t> attriList;
    ReadMandatoryNodeAttributeImpl(node, name, onnx::AttributeProto::INTS,
                                   [&attriList](const onnx::AttributeProto& attrValue)
                                   {
                                       for (int attriNum = 0; attriNum < attrValue.ints_size(); ++attriNum)
                                       {
                                           attriList.push_back(CHECKED_NON_NEGATIVE(CHECKED_INT32(attrValue.ints().Get(attriNum))));
                                       }
                                   });
    return attriList;
}

uint32_t ReadOptionalNodeUint32Attribute(const onnx::NodeProto& node,
                                         const std::string& name,
                                         const uint32_t defaultVal = 0u)
{
    uint32_t attribValue = defaultVal;
    ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::INT,
                                  [&attribValue](const onnx::AttributeProto& attrValue)
                                  {
                                      attribValue = CHECKED_NON_NEGATIVE(CHECKED_INT32((attrValue.i())));
                                  });
    return attribValue;
}

std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const onnx::NodeProto& node,
                                                          const std::string& name)
{
    std::vector<uint32_t> attriList;
    ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::INTS,
                                  [&attriList](const onnx::AttributeProto& attrValue)
                                  {
                                      for (int attriNum = 0; attriNum < attrValue.ints_size(); ++attriNum)
                                      {
                                          attriList.push_back(CHECKED_NON_NEGATIVE(CHECKED_INT32(attrValue.ints().Get(attriNum))));
                                      }
                                  });

    return attriList;
}

float ReadOptionalNodeFloatAttribute(const onnx::NodeProto& node,
                                     const std::string& name,
                                     const float defaultValue = 0.0f)
{
    float attribValue = defaultValue;
    ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::FLOAT,
                                  [&attribValue](const onnx::AttributeProto& attrValue)
                                  {
                                      attribValue = attrValue.f();
                                  });
    return attribValue;
}

std::string ReadOptionalNodeStringAttribute(const onnx::NodeProto& node, const std::string& name)
{
    std::string attribValue = "";
    ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::STRING,
                                  [&attribValue](const onnx::AttributeProto& attrValue)
                                  {
                                      attribValue = attrValue.s();
                                  });
    return attribValue;
}

armnn::TensorInfo ToTensorInfo(const std::string& name, std::vector<unsigned int>& shape, int data_type)
{
    DataType type;
    switch(data_type)
    {
        case onnx::TensorProto::FLOAT:
        {
            type = DataType::Float32;
            break;
        }
        case onnx::TensorProto::INT32:
        case onnx::TensorProto::INT64:
        {
            type = DataType::Signed32;
            break;
        }
        default:
        {
            throw ParseException(
                fmt::format("'{}' is not a currently supported datatype for tensor {}."
                            " Supported dataTypes are FLOAT, INT32 and INT64. {}",
                            onnx::TensorProto::DataType_Name(static_cast<onnx::TensorProto::DataType>(data_type)),
                            name,
                            CHECK_LOCATION().AsString() ));
        }
    }

    // Scalar Tensor
    if (shape.empty())
    {
        return TensorInfo(TensorShape(Dimensionality::Scalar), type);
    }

    // Dynamic Tensor
    if(std::find(shape.begin(), shape.end(), 0) != shape.end())
    {
        return TensorInfo(TensorShape(Dimensionality::NotSpecified), type);
    }

    return TensorInfo(TensorShape(static_cast<unsigned int>(shape.size()), shape.data()), type);
}

armnn::TensorInfo ToTensorInfo(const onnx::ValueInfoProto& info)
{
    const onnx::TensorShapeProto onnxShape = info.type().tensor_type().shape();
    std::vector<unsigned int> shapeDims;
    for (int i = 0; i < onnxShape.dim_size(); ++i)
    {
        shapeDims.push_back(CHECKED_NON_NEGATIVE(CHECKED_INT32(onnxShape.dim(i).dim_value())));
    }

    return ToTensorInfo(info.name(), shapeDims, info.type().tensor_type().elem_type());
}

armnn::TensorInfo ToTensorInfo(const onnx::TensorProto& tensor)
{
    std::vector<unsigned int> shapeDims;

    for (auto dim: tensor.dims())
    {
        shapeDims.push_back(CHECKED_NON_NEGATIVE(CHECKED_INT32(dim)));
    }

    return ToTensorInfo(tensor.name(), shapeDims, tensor.data_type());
}

std::string TensorInfoAsString(const TensorInfo& info,
                               const std::string& name,
                               const onnx::TensorProto::DataType& type)
{
    const TensorShape shape = info.GetShape();
    std::stringstream ss;
    ss << "tensor '" << name << "' contains "
       << onnx::TensorProto::DataType_Name(type)
       << " and has shape [";

    for (uint32_t i = 0; i < shape.GetNumDimensions() - 1; ++i)
    {
        ss << shape[i] << ", ";
    }
    ss << shape[shape.GetNumDimensions() - 1] << "]";
    return ss.str();
}

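// Computes ONNX SAME_UPPER/SAME_LOWER padding: the output size is ceil(inputSize / stride), and the
// total padding is whatever is needed to reach that size with the (dilated) filter. Any odd pixel
// goes to the back for SAME_UPPER (isUpper == true) and to the front for SAME_LOWER. For example,
// inputSize = 4, filterSize = 3, stride = 2, dilation = 1 gives outputSize = 2 and total padding = 1,
// so SAME_UPPER yields {front = 0, back = 1} and SAME_LOWER yields {front = 1, back = 0}.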
void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t dilation,
                 uint32_t* paddingFront,
                 uint32_t* paddingBack,
                 bool isUpper)
{
    uint32_t outputSize = (inputSize + stride - 1) / stride;
    uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
    uint32_t temp = (outputSize - 1) * stride + dilatedSize;
    *paddingFront = (temp - inputSize) / 2;
    *paddingBack = *paddingFront;
    if((temp - inputSize) % 2 == 1)
    {
        if (isUpper)
        {
            *paddingBack += 1;
        }
        else
        {
            *paddingFront += 1;
        }
    }
}

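// Builds the TensorInfo of a Reshape output from the target shape tensor, following ONNX Reshape
// semantics: a dimension of 0 copies the corresponding input dimension, and a single -1 is inferred
// so that the total element count is preserved. More than one -1 is rejected.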
TensorInfo ComputeReshapeInfo(const TensorShape& targetShapeTensor,
                              const TensorShape& inShape,
                              const std::string& outName,
                              DataType dataType = DataType::Float32)
{
    std::vector<int> targetDims;
    for(uint i = 0; i < targetShapeTensor.GetNumDimensions(); ++i)
    {
        int val = CHECKED_INT32(targetShapeTensor[i]);
        if(val == 0)
        {
            targetDims.push_back(static_cast<int>(inShape[static_cast<uint>(i)]));
        }
        else
        {
            targetDims.push_back(val);
        }
    }

    std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
    const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);
    if (stretchDim != targetDims.end())
    {
        if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
        {
            std::stringstream ss;
            ss << "[ ";
            for(uint i = 0; i < targetDims.size() - 1; ++i)
            {
                ss << targetDims[i] << ", ";
            }
            ss << targetDims[targetDims.size() - 1] << " ]";

            throw ParseException(
                fmt::format("Error during creation of reshaped tensor '{}'. At most one component of shape can be "
                            " -1 and here, shape is {} {}",
                            outName,
                            ss.str(),
                            CHECK_LOCATION().AsString()));
        }

        auto targetNumElements = armnn::numeric_cast<unsigned int>(std::accumulate(targetDims.begin(), targetDims.end(),
                                                                                   -1, std::multiplies<int32_t>()));
        auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
        outDims[stretchIndex] = inShape.GetNumElements() / targetNumElements;
    }
    TensorShape outShape = TensorShape{static_cast<unsigned int>(outDims.size()), outDims.data()};
    return TensorInfo(outShape, dataType);
}

} //namespace

const std::map<std::string, OnnxParserImpl::OperationParsingFunction> OnnxParserImpl::m_ParserFunctions = {
    { "BatchNormalization", &OnnxParserImpl::ParseBatchNormalization},
    { "GlobalAveragePool", &OnnxParserImpl::ParseGlobalAveragePool},
    { "AveragePool", &OnnxParserImpl::ParseAveragePool },
    { "Clip", &OnnxParserImpl::ParseClip },
    { "Constant", &OnnxParserImpl::ParseConstant },
    { "MaxPool", &OnnxParserImpl::ParseMaxPool },
    { "Reshape", &OnnxParserImpl::ParseReshape },
    { "Sigmoid", &OnnxParserImpl::ParseSigmoid },
    { "Tanh", &OnnxParserImpl::ParseTanh },
    { "Relu", &OnnxParserImpl::ParseRelu },
    { "LeakyRelu", &OnnxParserImpl::ParseLeakyRelu },
    { "Conv", &OnnxParserImpl::ParseConv },
    { "Add", &OnnxParserImpl::ParseAdd },
    { "Flatten", &OnnxParserImpl::ParseFlatten },
    { "Shape", &OnnxParserImpl::ParseShape },
    { "Gather", &OnnxParserImpl::ParseGather },
    { "Unsqueeze", &OnnxParserImpl::ParseUnsqueeze },
    { "Concat", &OnnxParserImpl::ParseConcat },
    { "Gemm", &OnnxParserImpl::ParseGemm }
};

template<typename TypePair, typename Location>
void OnnxParserImpl::ValidateInputs(const onnx::NodeProto& node,
                                    TypePair validInputs,
                                    const Location& location)
{
    for(auto input : node.input())
    {
        CheckValidDataType(validInputs.second,
                           m_TensorsInfo[input].m_dtype,
                           validInputs.first,
                           node.name(),
                           input,
                           location);
    }
}

#define VALID_INPUTS(NODE, VALID_INPUTS) \
    OnnxParserImpl::ValidateInputs(NODE, \
                                   VALID_INPUTS, \
                                   CHECK_LOCATION())

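// Returns the TensorInfo for each named output. If an output's shape is unknown (or only dynamically
// specified), it is inferred from the layer via InferOutputShapes using the supplied input shapes and
// cached in m_TensorsInfo with the requested ONNX data type (FLOAT -> Float32, INT32/INT64 -> Signed32).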
std::vector<TensorInfo> OnnxParserImpl::ComputeOutputInfo(std::vector<std::string> outNames,
                                                          const IConnectableLayer* layer,
                                                          std::vector<TensorShape> inputShapes,
                                                          const onnx::TensorProto::DataType& dataType)
{
    ARMNN_ASSERT(! outNames.empty());
    bool needCompute = std::any_of(outNames.begin(),
                                   outNames.end(),
                                   [this](std::string name)
                                   {
                                       return (m_TensorsInfo.count(name) == 0 || m_TensorsInfo[name].m_info == nullptr
                                               || m_TensorsInfo[name].m_info->GetShape().GetDimensionality() ==
                                                  Dimensionality::NotSpecified);
                                   });
    std::vector<TensorInfo> outInfo;
    //if the output info(s) are not here, we need to compute them
    std::vector<TensorShape> inferredShapes;
    DataType armnnType = DataType::Float32;
    if(needCompute) {
        inferredShapes = layer->InferOutputShapes(inputShapes);
        ARMNN_ASSERT(inferredShapes.size() == outNames.size());
        switch (dataType) {
            case onnx::TensorProto::FLOAT: {
                armnnType = DataType::Float32;
                break;
            }
            case onnx::TensorProto::INT32:
            case onnx::TensorProto::INT64: {
                armnnType = DataType::Signed32;
                break;
            }
            default: {
                throw ParseException(
                    fmt::format("'{}' is not a currently supported datatype for {}."
                                " Supported dataTypes are FLOAT, INT32 and INT64. {}",
                                onnx::TensorProto::DataType_Name(static_cast<onnx::TensorProto::DataType>(dataType)),
                                layer->GetName(),
                                CHECK_LOCATION().AsString()));
            }
        }
    }
    for (uint i = 0; i < outNames.size(); ++i)
    {
        if(needCompute)
        {
            m_TensorsInfo[outNames[i]] = OnnxTensor();
            m_TensorsInfo[outNames[i]].m_info = std::make_unique<TensorInfo>(
                TensorInfo(inferredShapes[i], armnnType));
            m_TensorsInfo[outNames[i]].m_dtype = dataType;
        }
        outInfo.push_back(*m_TensorsInfo[outNames[i]].m_info);
    }
    return outInfo;
}

OnnxParserImpl::OnnxParserImpl()
    : m_Network(nullptr, nullptr)
{
}

void OnnxParserImpl::ResetParser()
{
    m_Network = armnn::INetworkPtr(nullptr, nullptr);
    m_Graph = nullptr;
    m_InputInfos.clear();
    m_OutputInfos.clear();
}

void OnnxParserImpl::Cleanup()
{
    m_TensorConnections.clear();
    m_TensorsInfo.clear();
    m_OutputsMap.clear();
    m_OutputsFusedAndUsed.clear();
    m_InputShapes.clear();
}

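// Copies (and optionally permutes) raw tensor data into freshly allocated storage and wraps it in an
// armnn::ConstTensor. The returned unique_ptr owns the copied buffer and must outlive the ConstTensor,
// which only references the data.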
template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(const T* bufferPtr,
                      armnn::TensorInfo& tensorInfo,
                      const armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    ARMNN_ASSERT_MSG(bufferPtr != nullptr, fmt::format("Buffer for permutation is null").c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr), data.get(), sizeof(T));
    }
    else
    {
        ::memcpy(data.get(), bufferPtr, tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}

std::pair<ConstTensor, std::unique_ptr<float[]>>
OnnxParserImpl::CreateConstTensor(const std::string name,
                                  armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    TensorInfo tensorInfo = *m_TensorsInfo[name].m_info;
    onnx::TensorProto onnxTensor = *m_TensorsInfo[name].m_tensor;

    //ONNX can have Float16 and double constant nodes but ArmNN only supports float32
    CHECK_VALID_DATATYPE(name, onnxTensor.name(),
                         static_cast<onnx::TensorProto::DataType>(onnxTensor.data_type()), onnx::TensorProto::FLOAT);

    // Makes sure IsConstant flag is set.
    tensorInfo.SetConstant();

    // Const tensors require at least a list of values
    if (tensorInfo.GetNumElements() == 0)
    {
        throw ParseException(fmt::format("No tensor data found for Const tensor '{}' {}",
                                         name,
                                         CHECK_LOCATION().AsString()));
    }

    auto srcData = onnxTensor.float_data().data();
    // Copy the value list entries into the destination
    if (!onnxTensor.has_raw_data())
    {
        if(tensorInfo.GetNumElements() != static_cast<uint>(onnxTensor.float_data_size()))
        {
            throw ParseException(
                fmt::format("The number of data provided ({}) does not match the tensor '{}' number of "
                            "elements ({}) {}",
                            onnxTensor.float_data_size(),
                            name,
                            tensorInfo.GetNumElements(),
                            CHECK_LOCATION().AsString()));
        }
        return CreateConstTensorImpl<float>(srcData, tensorInfo, permutationVector);
    }
    else
    {
        return CreateConstTensorImpl<float>(reinterpret_cast<const float*>(onnxTensor.raw_data().c_str()),
                                            tensorInfo,
                                            permutationVector);
    }
}

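// Same as CreateConstTensor but for INT64 initializers: each 64-bit value is range-checked and
// narrowed to int32_t, since the resulting ArmNN constant tensor is Signed32.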
std::pair<ConstTensor, std::unique_ptr<int32_t[]>>
OnnxParserImpl::CreateInt64ConstTensor(const std::string name,
                                       armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    TensorInfo tensorInfo = *m_TensorsInfo[name].m_info;
    onnx::TensorProto onnxTensor = *m_TensorsInfo[name].m_tensor;

    CHECK_VALID_DATATYPE(name, onnxTensor.name(),
                         static_cast<onnx::TensorProto::DataType>(onnxTensor.data_type()), onnx::TensorProto::INT64);

    // Makes sure IsConstant flag is set.
    tensorInfo.SetConstant();
    uint numElements = tensorInfo.GetNumElements();

    // Const tensors require at least a list of values
    if (numElements == 0)
    {
        throw ParseException(fmt::format("No tensor data found for Const tensor '{}' {}",
                                         name,
                                         CHECK_LOCATION().AsString()));
    }

    // Copy the value list entries into the destination
    if (!onnxTensor.has_raw_data())
    {
        auto srcData = onnxTensor.int64_data().data();
        if(numElements != static_cast<uint>(onnxTensor.int64_data_size()))
        {
            throw ParseException(
                fmt::format("The number of data provided ({}) does not match the tensor '{}' number of "
                            "elements ({}) {}",
                            onnxTensor.int64_data_size(),
                            name,
                            tensorInfo.GetNumElements(),
                            CHECK_LOCATION().AsString()));
        }

        std::vector<int32_t> int32Data;
        for(uint i = 0; i < numElements; i++)
        {
            int32_t int32Value = CHECKED_INT32(srcData[i]);
            int32Data.push_back(int32Value);
        }

        return CreateConstTensorImpl<int32_t>(int32Data.data(), tensorInfo, permutationVector);
    }
    else
    {
        auto srcData = reinterpret_cast<const int64_t*>(onnxTensor.raw_data().c_str());
        std::vector<int32_t> int32Data;
        for(uint i = 0; i < numElements; i++)
        {
            int32_t int32Value = CHECKED_INT32(srcData[i]);
            int32Data.push_back(int32Value);
        }
        return CreateConstTensorImpl<int32_t>(int32Data.data(), tensorInfo, permutationVector);
    }
}

ModelPtr OnnxParserImpl::LoadModelFromTextFile(const char* graphFile)
{
    FILE* fd = fopen(graphFile, "r");

    if (fd == nullptr)
    {
        throw FileNotFoundException(fmt::format("Invalid (null) filename {}", CHECK_LOCATION().AsString()));
    }

    // Parse the file into a message
    ModelPtr modelProto = std::make_unique<onnx::ModelProto>();
    using google::protobuf::io::FileInputStream;
    std::unique_ptr<FileInputStream> input = std::make_unique<FileInputStream>(fileno(fd));
    bool success = google::protobuf::TextFormat::Parse(input.get(), modelProto.get());
    fclose(fd);

    if (!success)
    {
        std::stringstream error;
        error << "Failed to parse graph file";
        throw ParseException(fmt::format("{} {}", error.str(), CHECK_LOCATION().AsString()));
    }
    return modelProto;
}

INetworkPtr OnnxParserImpl::CreateNetworkFromTextFile(const char* graphFile)
{
    ResetParser();
    ModelPtr modelProto = LoadModelFromTextFile(graphFile);
    return CreateNetworkFromModel(*modelProto);
}

INetworkPtr OnnxParserImpl::CreateNetworkFromTextFile(const char* graphFile,
                                                      const std::map<std::string, armnn::TensorShape>& inputShapes)
{
    ResetParser();
    m_InputShapes = inputShapes;
    ModelPtr modelProto = LoadModelFromTextFile(graphFile);
    return CreateNetworkFromModel(*modelProto);
}

ModelPtr OnnxParserImpl::LoadModelFromBinaryFile(const char* graphFile)
{
    FILE* fd = fopen(graphFile, "rb");

    if (fd == nullptr)
    {
        throw FileNotFoundException(fmt::format("Invalid (null) filename {}", CHECK_LOCATION().AsString()));
    }

    // Parse the file into a message
    ModelPtr modelProto = std::make_unique<onnx::ModelProto>();

    google::protobuf::io::FileInputStream inStream(fileno(fd));
    google::protobuf::io::CodedInputStream codedStream(&inStream);
    codedStream.SetTotalBytesLimit(INT_MAX);
    bool success = modelProto.get()->ParseFromCodedStream(&codedStream);
    fclose(fd);

    if (!success)
    {
        std::stringstream error;
        error << "Failed to parse graph file";
        throw ParseException(fmt::format("{} {}", error.str(), CHECK_LOCATION().AsString()));
    }
    return modelProto;

}

INetworkPtr OnnxParserImpl::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    ModelPtr modelProto = LoadModelFromBinaryFile(graphFile);
    return CreateNetworkFromModel(*modelProto);
}

INetworkPtr OnnxParserImpl::CreateNetworkFromBinaryFile(const char* graphFile,
                                                        const std::map<std::string, armnn::TensorShape>& inputShapes)
{
    ResetParser();
    m_InputShapes = inputShapes;
    ModelPtr modelProto = LoadModelFromBinaryFile(graphFile);
    return CreateNetworkFromModel(*modelProto);
}

ModelPtr OnnxParserImpl::LoadModelFromString(const std::string& protoText)
{
    if (protoText == "")
    {
        throw InvalidArgumentException(fmt::format("Invalid (empty) string for model parameter {}",
                                                   CHECK_LOCATION().AsString()));
    }
    // Parse the string into a message
    ModelPtr modelProto = std::make_unique<onnx::ModelProto>();
    bool success = google::protobuf::TextFormat::ParseFromString(protoText, modelProto.get());
    if (!success)
    {
        std::stringstream error;
        error << "Failed to parse graph file";
        throw ParseException(fmt::format("{} {}", error.str(), CHECK_LOCATION().AsString()));
    }
    return modelProto;
}

INetworkPtr OnnxParserImpl::CreateNetworkFromString(const std::string& protoText)
{
    ResetParser();
    ModelPtr modelProto = LoadModelFromString(protoText);
    return CreateNetworkFromModel(*modelProto);
}

INetworkPtr OnnxParserImpl::CreateNetworkFromString(const std::string& protoText,
                                                    const std::map<std::string, armnn::TensorShape>& inputShapes)
{
    ResetParser();
    m_InputShapes = inputShapes;
    ModelPtr modelProto = LoadModelFromString(protoText);
    return CreateNetworkFromModel(*modelProto);
}

INetworkPtr OnnxParserImpl::CreateNetworkFromModel(onnx::ModelProto& model)
{
    m_Network = INetwork::Create();
    try
    {
        m_Graph = std::make_unique<onnx::GraphProto>(*model.mutable_graph());
        LoadGraph();
    }
    catch (const ParseException& e)
    {
        Cleanup();
        throw e;
    }
    Cleanup();
    return std::move(m_Network);
}

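// Walks the ONNX graph: records tensor infos and initializers, creates ArmNN input/output layers,
// detects MatMul+Add pairs that can be fused into FullyConnected, dispatches every remaining node to
// its parser function via m_ParserFunctions, then wires up all recorded slot connections and stores
// the final output TensorInfos.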
void OnnxParserImpl::LoadGraph()
{
    ARMNN_ASSERT(m_Graph.get() != nullptr);

    //Fill m_TensorsInfo with the shapes and values of every tensor
    SetupInfo(m_Graph->mutable_output());
    SetupInfo(m_Graph->mutable_input());
    SetupInfo(m_Graph->mutable_value_info());

    for (auto tensor : m_Graph->initializer())
    {
        m_TensorsInfo[tensor.name()].m_tensor = std::make_unique<const onnx::TensorProto>(tensor);
        m_TensorsInfo[tensor.name()].m_info = std::make_unique<TensorInfo>(ToTensorInfo(tensor));
        m_TensorsInfo[tensor.name()].m_dtype =
            static_cast<onnx::TensorProto::DataType>(tensor.data_type());
    }

    SetupInputLayers();
    SetupOutputLayers();

    //Detect FullyConnected layers with bias and update the FusedAndUsed map accordingly
    DetectFullyConnected();

    //Parsing the graph
    for(size_t nodeIndex = 0; nodeIndex < static_cast<size_t>(m_Graph->node_size()); nodeIndex++)
    {
        auto node = m_Graph->node(static_cast<int>(nodeIndex));
        const std::string& operation = node.op_type();

        // check which layers we handled already (add and matmul fused as FC)
        if (operation == "MatMul" )
        {
            if(m_OutputsFusedAndUsed[nodeIndex].inputForNodes != m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes.size())
            {
                //Node which cannot be fused as a FullyConnected layer (used in layers as a simple matmul output)
                AddFullyConnected(node);
            }
        }
        else if (!(m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes.empty()) && operation == "Add")
        {
            int matmulIndex = static_cast<int> (m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes[0]);
            AddFullyConnected(m_Graph->node(matmulIndex), &node);
        }
        else if (m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes.empty()) //node is not part of a fused layer
        {
            auto it = m_ParserFunctions.find(operation);
            if (it != m_ParserFunctions.end())
            {
                auto func = it->second;
                (this->*func)(node);
            }
            else
            {
                throw ParseException(fmt::format("Unsupported operation {} for node '{}' {}",
                                                 operation,
                                                 node.name(),
                                                 CHECK_LOCATION().AsString()));
            }
        }
    }

    //Making the connections between outputs and inputs of each layer
    for (const auto& tensorCon : m_TensorConnections)
    {
        if (tensorCon.second.outputSlot != nullptr)
        {
            for (size_t inputSlotIdx = 0; inputSlotIdx < tensorCon.second.inputSlots.size(); ++inputSlotIdx)
            {
                tensorCon.second.outputSlot->Connect(*(tensorCon.second.inputSlots[inputSlotIdx]));
            }
        }
    }

    // Get output info.
    for(int outputIndex = 0; outputIndex < m_Graph->output_size(); ++outputIndex)
    {
        auto output = m_Graph->output(outputIndex);
        m_OutputInfos[output.name()] = *m_TensorsInfo[output.name()].m_info;
    }
}

void OnnxParserImpl::SetupInfo(const google::protobuf::RepeatedPtrField<onnx::ValueInfoProto >* list)
{
    for (auto tensor : *list)
    {
        m_TensorsInfo[tensor.name()] = OnnxTensor();
        m_TensorsInfo[tensor.name()].m_info = std::make_unique<TensorInfo>(ToTensorInfo(tensor));
        m_TensorsInfo[tensor.name()].m_dtype =
            static_cast<onnx::TensorProto::DataType>(tensor.type().tensor_type().elem_type());
    }
}

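// ONNX has no FullyConnected operator, so the parser looks for an Add node that combines the output
// of a MatMul with a constant tensor (the bias). Such pairs are recorded in m_OutputsFusedAndUsed so
// that LoadGraph can emit a single FullyConnected layer instead of separate MatMul and Add layers.
// The structure also counts how many consumers each node's output has, so a MatMul that feeds
// anything other than the fused Add still gets its own layer.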
void OnnxParserImpl::DetectFullyConnected()
{
    m_OutputsFusedAndUsed = std::vector<UsageSummary> (static_cast<size_t>(m_Graph->node_size()), UsageSummary());
    auto matmulAndConstant = [&](const std::string& constInput,
                                 const std::string& matmulInput,
                                 int& nodeIndex)
    {
        auto matmulIt = m_OutputsMap.find(matmulInput);
        if(matmulIt != m_OutputsMap.end() && matmulIt->second.first->op_type() == "MatMul"
            && m_TensorsInfo[constInput].isConstant())
        {
            nodeIndex = matmulIt->second.second;
            return true;
        }
        return false;
    };

    for(int nodeIndex = 0; nodeIndex < m_Graph->node_size(); nodeIndex++)
    {
        const onnx::NodeProto* node = &m_Graph->node(nodeIndex);
        for (const std::string& output : node->output())
        {
            m_OutputsMap[output] = std::make_pair(node, nodeIndex);
        }

        for (const std::string& input : node->input()) //count how many times a node is used as input
        {
            auto matmulIt = m_OutputsMap.find(input);
            if(matmulIt != m_OutputsMap.end()){
                ++m_OutputsFusedAndUsed[static_cast<size_t>(matmulIt->second.second)].inputForNodes; //node used
            }
        }

        if (node->op_type() == "Add")
        {
            int matmulIndex = 0;
            if (matmulAndConstant(node->input(0), node->input(1), matmulIndex) ||
                matmulAndConstant(node->input(1), node->input(0), matmulIndex))
            {
                //matmul and add were fused
                m_OutputsFusedAndUsed[static_cast<size_t>(matmulIndex)].fusedWithNodes
                    .push_back(static_cast<size_t>(nodeIndex));

                m_OutputsFusedAndUsed[static_cast<size_t>(nodeIndex)].fusedWithNodes
                    .push_back(static_cast<size_t>(matmulIndex));
            }
        }
    }

    for (auto output: m_Graph->output()) { //graph outputs count as usages as well
        auto matmulIt = m_OutputsMap.find(output.name());
        if(matmulIt != m_OutputsMap.end()){
            ++m_OutputsFusedAndUsed[static_cast<size_t>(matmulIt->second.second)].inputForNodes;
        }
    }
}

template<typename Location>
void OnnxParserImpl::GetInputAndParam(const onnx::NodeProto& node,
                                      std::string* inputName,
                                      std::string* constName,
                                      const Location& location)
{
    int cstIndex;
    if (m_TensorsInfo[node.input(0)].isConstant())
    {
        cstIndex = 0;
    }
    else if (m_TensorsInfo[node.input(1)].isConstant())
    {
        cstIndex = 1;
    }
    else
    {
        throw ParseException(fmt::format("One of the input tensors ('{}' or '{}') should be constant in node '{}' {}",
                                         node.input(0),
                                         node.input(1),
                                         node.name(),
                                         location.AsString()));
    }
    if(constName)
    {
        *constName = node.input(cstIndex);
    }
    if(inputName)
    {
        *inputName = node.input(!cstIndex);
    }
}

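// FullyConnected biases in ArmNN must be 1D. ONNX 1D vectors often arrive as [1, ..., 1, X], so this
// squeezes away all leading 1-dimensions and rejects any tensor that cannot be reduced to [X].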
template<typename Location>
void OnnxParserImpl::To1DTensor(const std::string& name, const Location& location)
{
    TensorShape shape = m_TensorsInfo[name].m_info->GetShape();
    std::vector<uint32_t> newShape;
    for(uint i = 0; i < shape.GetNumDimensions() - 1; ++i)
    {
        if(shape[i] != 1)
        {
            throw ParseException(
                fmt::format("Only tensors with shape [1, ..., 1, X] can be converted to 1D and {} {}",
                            TensorInfoAsString(*m_TensorsInfo[name].m_info, name, m_TensorsInfo[name].m_dtype),
                            location.AsString()));
        }
    }
    newShape.push_back(shape[shape.GetNumDimensions() - 1]);

    m_TensorsInfo[name].m_info->SetShape(TensorShape(static_cast<unsigned int>(newShape.size()), newShape.data()));
}

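// Builds a DepthwiseConvolution2d layer from a Conv node, reusing the padding, stride and bias
// settings already collected in convDesc. Weights (and the optional constant bias) are attached as
// ConstantLayers on input slots 1 and 2; see the comment below for the required weight permutation.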
void OnnxParserImpl::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node, const Convolution2dDescriptor& convDesc)
{
    ARMNN_ASSERT(node.op_type() == "Conv");

    DepthwiseConvolution2dDescriptor desc;
    desc.m_PadLeft = convDesc.m_PadLeft;
    desc.m_PadRight = convDesc.m_PadRight;
    desc.m_PadTop = convDesc.m_PadTop;
    desc.m_PadBottom = convDesc.m_PadBottom;
    desc.m_StrideX = convDesc.m_StrideX;
    desc.m_StrideY = convDesc.m_StrideY;
    desc.m_BiasEnabled = convDesc.m_BiasEnabled;

    armnn::IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc, node.name().c_str());
    std::vector<std::string> tensorIndexes = {node.input(0), node.input(1)};

    // weights come in as [O,1,H,W] from ONNX and need to be converted to ArmNN's depthwise weights layout [1,H,W,O]
    armnn::PermutationVector perVec {3,0,1,2};
    auto weightTensor = CreateConstTensor(node.input(1), perVec);

    IConnectableLayer* weightsLayer = m_Network->AddConstantLayer(weightTensor.first);
    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightTensor.first.GetInfo());
    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));

    if (node.input_size() == 3)
    {
        if(!m_TensorsInfo[node.input(2)].isConstant())
        {
            throw ParseException(fmt::format("Bias '{}' should be constant in Conv layer '{}' {}",
                                             node.input(2),
                                             node.name(),
                                             CHECK_LOCATION().AsString()));
        }

        desc.m_BiasEnabled = true;
        auto biasTensor = CreateConstTensor(node.input(2));
        tensorIndexes.emplace_back(node.input(2));

        IConnectableLayer* biasLayer = m_Network->AddConstantLayer(biasTensor.first);
        biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensor.first.GetInfo());
        biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
    }

    ARMNN_ASSERT(layer != nullptr);

    auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
                                        { m_TensorsInfo[node.input(0)].m_info->GetShape(),
                                          weightTensor.first.GetInfo().GetShape() });

    layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    RegisterInputSlots(layer, tensorIndexes);

    // register the output connection slots for the layer, connections are made after all layers have been created
    RegisterOutputSlots(layer, {node.output(0)});
}

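// Creates the FullyConnected layer for a MatMul (and optional Add) pair found by DetectFullyConnected.
// When addNode is non-null the Add supplies a constant bias, which is converted to 1D via To1DTensor;
// constant weights and bias are attached as ConstantLayers on input slots 1 and 2.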
void OnnxParserImpl::AddFullyConnected(const onnx::NodeProto& matmulNode, const onnx::NodeProto* addNode)
{

    // find matmul inputs
    std::string weightName;
    std::string inputName;
    CHECK_VALID_SIZE(static_cast<size_t>(matmulNode.input_size()), 2);
    CHECK_VALID_SIZE(static_cast<size_t>(matmulNode.output_size()), 1);
    VALID_INPUTS(matmulNode, STR_LIST(onnx::TensorProto::FLOAT));

    GetInputAndParam(matmulNode, &inputName, &weightName, CHECK_LOCATION());

    FullyConnectedDescriptor desc;
    desc.m_BiasEnabled = addNode != nullptr;

    IConnectableLayer* layer = nullptr;
    if(desc.m_BiasEnabled)
    {
        // find bias const
        std::string biasName;
        CHECK_VALID_SIZE(static_cast<size_t>(addNode->input_size()), 2);
        CHECK_VALID_SIZE(static_cast<size_t>(addNode->output_size()), 1);
        VALID_INPUTS(*addNode, STR_LIST(onnx::TensorProto::FLOAT));

        GetInputAndParam(*addNode, nullptr, &biasName, CHECK_LOCATION());

        //Output shape is [1, weights[1]] and 1d vec in ONNX can be [1,X] so we convert biases to "armnn" 1D
        To1DTensor(biasName, CHECK_LOCATION());
        TensorInfo weightInfo = *m_TensorsInfo[weightName].m_info;
        TensorInfo biasInfo = *m_TensorsInfo[biasName].m_info;

        if (weightInfo.GetShape()[1] != biasInfo.GetShape()[0])
        {
            throw ParseException(
                fmt::format("Shape of weights '{}' and bias of following Add node '{}' do not match : {}"
                            " and {} ( /!\\ bias should be a 1D tensor) {}",
                            weightName,
                            addNode->name(),
                            TensorInfoAsString(*m_TensorsInfo[weightName].m_info, weightName,
                                               m_TensorsInfo[weightName].m_dtype),
                            TensorInfoAsString(*m_TensorsInfo[biasName].m_info, biasName,
                                               m_TensorsInfo[biasName].m_dtype ),
                            CHECK_LOCATION().AsString()));
        }

        // Just add a FullyConnected layer, weights and biases are handled as inputs now.
        layer = m_Network->AddFullyConnectedLayer(desc, matmulNode.name().c_str());
        ARMNN_ASSERT(layer != nullptr);

        auto outputInfo = ComputeOutputInfo({addNode->output(0)}, layer,
                                            {m_TensorsInfo[inputName].m_info->GetShape(),
                                             m_TensorsInfo[weightName].m_info->GetShape()});
        layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);

        // Add constant layer to store weights/biases and connect to FullyConnected layer.
        if(m_TensorsInfo[weightName].isConstant())
        {
            IConnectableLayer* weightsLayer = m_Network->AddConstantLayer(CreateConstTensor(weightName).first);

            weightInfo.SetConstant();
            weightsLayer->GetOutputSlot(0).SetTensorInfo(weightInfo);
            weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        }

        if(m_TensorsInfo[biasName].isConstant())
        {
            IConnectableLayer* biasLayer = m_Network->AddConstantLayer(CreateConstTensor(biasName).first);

            biasInfo.SetConstant();
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
        }

        RegisterInputSlots(layer, {inputName, weightName, biasName});
        RegisterOutputSlots(layer, {addNode->output(0)});
    }
    else
    {
        layer = m_Network->AddFullyConnectedLayer(desc, matmulNode.name().c_str());
        ARMNN_ASSERT(layer != nullptr);

        auto outputInfo = ComputeOutputInfo({matmulNode.output(0)}, layer,
                                            {m_TensorsInfo[inputName].m_info->GetShape(),
                                             m_TensorsInfo[weightName].m_info->GetShape()});
        layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);

        // Add constant layer to store weights and connect to FullyConnected layer.
        if(m_TensorsInfo[weightName].isConstant())
        {
            TensorInfo weightInfo = *m_TensorsInfo[weightName].m_info;
            IConnectableLayer* weightsLayer = m_Network->AddConstantLayer(CreateConstTensor(weightName).first);

            weightInfo.SetConstant();
            weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
            weightsLayer->GetOutputSlot(0).SetTensorInfo(weightInfo);
        }

        RegisterInputSlots(layer, {inputName, weightName});
        RegisterOutputSlots(layer, {matmulNode.output(0)});
    }
}

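// Common pooling handler: reads kernel_shape, strides and pads (falling back to the deprecated
// auto_pad attribute, which is resolved through CalcPadding) and emits a Pooling2d layer. Note the
// NCHW ordering: explicit pads come in as [top, left, bottom, right].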
void OnnxParserImpl::AddPoolingLayer(const onnx::NodeProto& node, Pooling2dDescriptor& desc)
{

    CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 1);
    CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);

    VALID_INPUTS(node, STR_LIST(onnx::TensorProto::FLOAT));

    std::vector<uint32_t> kernel_shape = ReadMandatoryNodeUint32ListAttribute(node, "kernel_shape"); //size of pool win
    std::vector<uint32_t> strides = ReadOptionalNodeUint32ListAttribute(node, "strides");
    std::vector<uint32_t> pads = ReadOptionalNodeUint32ListAttribute(node, "pads");

    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_PoolWidth = kernel_shape[1];
    desc.m_PoolHeight = kernel_shape[0];

    if(strides.empty())
    {
        desc.m_StrideX = 1;
        desc.m_StrideY = 1;
    }
    else
    {
        desc.m_StrideX = strides[1];
        desc.m_StrideY = strides[0];
    }

    //Check new padding version first
    if(pads.empty())
    {
        //Check deprecated version
        std::string paddingString = ReadOptionalNodeStringAttribute(node, "auto_pad");
        if(paddingString != "VALID" && paddingString != "" && paddingString != "NOTSET")
        {
            bool isUpper;
            if( paddingString == "SAME_LOWER")
            {
                isUpper = false;
            }
            else if (paddingString == "SAME_UPPER")
            {
                isUpper = true;
            }
            else
            {
                throw ParseException(fmt::format("Invalid auto_pad attribute for node {}. "
                                                 "Only SAME_UPPER, SAME_LOWER or VALID supported and found {} {}",
                                                 node.name(),
                                                 paddingString,
                                                 CHECK_LOCATION().AsString()));
            }
            auto inputInfo = *m_TensorsInfo[node.input(0)].m_info;
            uint32_t inputHeight = inputInfo.GetShape()[2];
            uint32_t inputWidth = inputInfo.GetShape()[3];
            CalcPadding(inputHeight,
                        desc.m_PoolHeight,
                        desc.m_StrideY,
                        1u,
                        &desc.m_PadTop,
                        &desc.m_PadBottom,
                        isUpper);
            CalcPadding(inputWidth,
                        desc.m_PoolWidth,
                        desc.m_StrideX,
                        1u,
                        &desc.m_PadLeft,
                        &desc.m_PadRight,
                        isUpper);
        }
    }
    else
    {
        desc.m_PadTop = pads[0];
        desc.m_PadLeft = pads[1];
        desc.m_PadBottom = pads[2];
        desc.m_PadRight = pads[3];
    }

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str());
    ARMNN_ASSERT(layer != nullptr);

    auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    RegisterInputSlots(layer, {node.input(0)});

    // register the output connection slots for the layer, connections are made after all layers have been created
    RegisterOutputSlots(layer, {node.output(0)});
}

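// Add requires both inputs to have the same number of dimensions, so when the ranks differ the
// lower-rank tensor is routed through a reshape (PrependForBroadcast) and the returned pair holds
// the tensor names to use as the actual layer inputs (the reshaped one is named "reshape_output_<input>").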
std::pair<std::string, std::string> OnnxParserImpl::AddPrepareBroadcast(const std::string& input0,
                                                                        const std::string& input1)
{
    std::pair<std::string, std::string> inputs = std::make_pair(input0, input1);

    TensorShape input0Shape = m_TensorsInfo[input0].m_info->GetShape();
    TensorShape input1Shape = m_TensorsInfo[input1].m_info->GetShape();

    if(input1Shape.GetNumDimensions() < input0Shape.GetNumDimensions())
    {
        auto outputName = fmt::format("reshape_output_{}", input1);
        PrependForBroadcast(outputName, input1, input0);
        inputs.second = outputName;
    }
    else if(input0Shape.GetNumDimensions() < input1Shape.GetNumDimensions())
    {
        auto outputName = fmt::format("reshape_output_{}", input0);
        PrependForBroadcast(outputName, input0, input1);
        inputs.first = outputName;
    }
    return inputs;
}

void OnnxParserImpl::CreateConstantLayer(const std::string& tensorName, const std::string& layerName)
{
    auto armnnTensor = CreateConstTensor(tensorName);
    IConnectableLayer* layer = m_Network->AddConstantLayer(armnnTensor.first, layerName.c_str());
    layer->GetOutputSlot(0).SetTensorInfo(armnnTensor.first.GetInfo());
    RegisterOutputSlots(layer, {tensorName});
}

void OnnxParserImpl::CreateInt64ConstantLayer(const std::string& tensorName, const std::string& layerName)
{
    auto armnnTensor = CreateInt64ConstTensor(tensorName);
    IConnectableLayer* layer = m_Network->AddConstantLayer(armnnTensor.first, layerName.c_str());
    layer->GetOutputSlot(0).SetTensorInfo(armnnTensor.first.GetInfo());
    RegisterOutputSlots(layer, {tensorName});
}

void OnnxParserImpl::CreateReshapeLayer(const std::string& inputName,
                                        const std::string& outputName,
                                        const std::string& layerName)
{
    const TensorInfo outputTensorInfo = *m_TensorsInfo[outputName].m_info;
    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();

    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    RegisterInputSlots(layer, {inputName});

    // register the output connection slots for the layer, connections are made after all layers have been created
    RegisterOutputSlots(layer, {outputName});
}

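// Common handler for the activation parsers below. For Clip (BoundedReLu) the min/max bounds can
// come either as attributes (older opsets) or as the optional second and third inputs; missing
// values default to the full float range.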
Kevin Mayef33cb12021-01-29 14:24:57 +00001344void OnnxParserImpl::ParseActivation(const onnx::NodeProto& node, const armnn::ActivationFunction func)
telsoa01c577f2c2018-08-31 09:22:23 +01001345{
Finn Williams7ee5d2c2020-03-27 11:11:50 +00001346 CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 1, 3);
telsoa01c577f2c2018-08-31 09:22:23 +01001347 CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
1348
1349 VALID_INPUTS(node, STR_LIST(onnx::TensorProto::FLOAT));
1350
1351 ActivationDescriptor desc;
Tee Jung7ff9a602019-11-01 07:04:42 +00001352 desc.m_Function = func;
telsoa01c577f2c2018-08-31 09:22:23 +01001353
Finn Williams7ee5d2c2020-03-27 11:11:50 +00001354 if (func == ActivationFunction::BoundedReLu)
1355 {
Narumol Prangnawaratf106ab72021-09-15 17:30:37 +01001356 if (node.input_size() == 1 && node.attribute_size() > 0)
1357 {
1358 desc.m_A = ReadOptionalNodeFloatAttribute(node, "max", std::numeric_limits<float>::max());
1359 desc.m_B = ReadOptionalNodeFloatAttribute(node, "min", std::numeric_limits<float>::lowest());
1360 }
1361 else
1362 {
1363 desc.m_A = node.input(2).empty() ? std::numeric_limits<float>::max() : std::stof(node.input(2));
1364 desc.m_B = node.input(1).empty() ? std::numeric_limits<float>::lowest() : std::stof(node.input(1));
1365 }
Finn Williams7ee5d2c2020-03-27 11:11:50 +00001366 }
1367
telsoa01c577f2c2018-08-31 09:22:23 +01001368 IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, node.name().c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001369 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001370
1371 auto outputInfo = ComputeOutputInfo({ node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
1372 layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
1373
1374 // register the input connection slots for the layer, connections are made after all layers have been created
1375 // only the tensors for the inputs are relevant, exclude the const tensors
1376 RegisterInputSlots(layer, {node.input(0)});
1377
1378 // register the output connection slots for the layer, connections are made after all layers have been created
1379 RegisterOutputSlots(layer, {node.output(0)});
1380}
1381
Kevin Mayef33cb12021-01-29 14:24:57 +00001382void OnnxParserImpl::ParseClip(const onnx::NodeProto& node)
Finn Williams7ee5d2c2020-03-27 11:11:50 +00001383{
1384 ParseActivation(node, ActivationFunction::BoundedReLu);
1385}
1386
Kevin Mayef33cb12021-01-29 14:24:57 +00001387void OnnxParserImpl::ParseSigmoid(const onnx::NodeProto& node)
Tee Jung7ff9a602019-11-01 07:04:42 +00001388{
1389 ParseActivation(node, ActivationFunction::Sigmoid);
1390}
1391
Kevin Mayef33cb12021-01-29 14:24:57 +00001392void OnnxParserImpl::ParseTanh(const onnx::NodeProto& node)
Tee Jung7ff9a602019-11-01 07:04:42 +00001393{
1394 ParseActivation(node, ActivationFunction::TanH);
1395}
1396
Kevin Mayef33cb12021-01-29 14:24:57 +00001397void OnnxParserImpl::ParseRelu(const onnx::NodeProto& node)
Tee Jung7ff9a602019-11-01 07:04:42 +00001398{
1399 ParseActivation(node, ActivationFunction::ReLu);
1400}
1401
Kevin Mayef33cb12021-01-29 14:24:57 +00001402void OnnxParserImpl::ParseLeakyRelu(const onnx::NodeProto& node)
Tee Jung7ff9a602019-11-01 07:04:42 +00001403{
1404 ParseActivation(node, ActivationFunction::LeakyReLu);
1405}
telsoa01c577f2c2018-08-31 09:22:23 +01001406
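// Handles the Add operator. Broadcasting is limited to scalar or 1D tensors: the lower-rank
// input is first padded with leading 1s so both inputs have the same rank, after which each
// dimension pair must match or be 1. Constant inputs are materialised as Constant layers.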
Kevin Mayef33cb12021-01-29 14:24:57 +00001407void OnnxParserImpl::ParseAdd(const onnx::NodeProto& node)
telsoa01c577f2c2018-08-31 09:22:23 +01001408{
Ryan OSheaed27ee72020-04-22 16:37:29 +01001409 CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 2);
1410 CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
telsoa01c577f2c2018-08-31 09:22:23 +01001411
Ryan OSheaed27ee72020-04-22 16:37:29 +01001412 VALID_INPUTS(node, STR_LIST(onnx::TensorProto::FLOAT));
telsoa01c577f2c2018-08-31 09:22:23 +01001413
Ryan OSheaed27ee72020-04-22 16:37:29 +01001414 // TODO: unify broadcast validation code across layers
1415 // tracked by: IVGCVSW-1576
telsoa01c577f2c2018-08-31 09:22:23 +01001416
Ryan OSheaed27ee72020-04-22 16:37:29 +01001417 // Checking broadcast compatibility : only scalar or 1D tensors
1418 auto inputs = AddPrepareBroadcast(node.input(0), node.input(1));
1419 auto input0 = *m_TensorsInfo[inputs.first].m_info;
1420 auto input1 = *m_TensorsInfo[inputs.second].m_info;
1421 ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
1422
1423 unsigned int numDims = input0.GetNumDimensions();
1424 for (unsigned int i = 0; i < numDims; i++)
telsoa01c577f2c2018-08-31 09:22:23 +01001425 {
Ryan OSheaed27ee72020-04-22 16:37:29 +01001426 unsigned int dim0 = input0.GetShape()[i];
1427 unsigned int dim1 = input1.GetShape()[i];
1428 if (dim0 != dim1 && dim0 != 1 && dim1 != 1)
telsoa01c577f2c2018-08-31 09:22:23 +01001429 {
James Ward58dec6b2020-09-11 17:32:44 +01001430 throw ParseException(
1431 fmt::format("Broadcast is only supported for scalar or 1D tensors in Add node '{}'. "
1432 "Input dimensions should either match or one should be of size 1 and here, "
1433 "{} and {} {}",
1434 node.name(),
1435 TensorInfoAsString(*m_TensorsInfo[inputs.first].m_info, inputs.first,
1436 m_TensorsInfo[inputs.first].m_dtype),
1437 TensorInfoAsString(*m_TensorsInfo[inputs.second].m_info, inputs.second,
1438 m_TensorsInfo[inputs.second].m_dtype),
1439 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01001440 }
telsoa01c577f2c2018-08-31 09:22:23 +01001441 }
Ryan OSheaed27ee72020-04-22 16:37:29 +01001442
1443
1444 IConnectableLayer* layer = m_Network->AddAdditionLayer(node.name().c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001445 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001446
1447 auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
Ryan OSheaed27ee72020-04-22 16:37:29 +01001448 { m_TensorsInfo[inputs.first].m_info->GetShape(),
1449 m_TensorsInfo[inputs.second].m_info->GetShape() });
telsoa01c577f2c2018-08-31 09:22:23 +01001450 layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
1451
Ryan OSheaed27ee72020-04-22 16:37:29 +01001452 // register the input connections; constant inputs (reshaped for broadcast if needed) require an explicit constant layer
1453 if(m_TensorsInfo[inputs.first].isConstant()) {
James Ward58dec6b2020-09-11 17:32:44 +01001454 CreateConstantLayer(inputs.first, fmt::format("Add:constant_of_{}", node.input(0)));
Ryan OSheaed27ee72020-04-22 16:37:29 +01001455 }
1456 if(m_TensorsInfo[inputs.second].isConstant()) {
James Ward58dec6b2020-09-11 17:32:44 +01001457 CreateConstantLayer(inputs.second, fmt::format("Add:constant_of_{}", node.input(1)));
Ryan OSheaed27ee72020-04-22 16:37:29 +01001458 }
1459 RegisterInputSlots(layer, {inputs.first, inputs.second});
telsoa01c577f2c2018-08-31 09:22:23 +01001460
Ryan OSheaed27ee72020-04-22 16:37:29 +01001461 // register the output connection
telsoa01c577f2c2018-08-31 09:22:23 +01001462 RegisterOutputSlots(layer, {node.output(0)});
1463}
1464
Kevin Mayef33cb12021-01-29 14:24:57 +00001465void OnnxParserImpl::ParseAveragePool(const onnx::NodeProto& node)
Ryan OSheaed27ee72020-04-22 16:37:29 +01001466{
1467 Pooling2dDescriptor desc;
1468 desc.m_PoolType = PoolingAlgorithm::Average;
1469
1470 uint32_t count_include_pad = 0;
1471 count_include_pad = ReadOptionalNodeUint32Attribute(node, "count_include_pad");
1472 if(count_include_pad) {
1473 desc.m_PaddingMethod = PaddingMethod::IgnoreValue;
1474 }
1475 AddPoolingLayer(node, desc);
1476}
1477
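// Handles BatchNormalization. The scale, bias, mean and variance inputs (inputs 1-4) must be
// constant tensors; only the 'epsilon' attribute is honoured.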
Kevin Mayef33cb12021-01-29 14:24:57 +00001478void OnnxParserImpl::ParseBatchNormalization(const onnx::NodeProto& node)
Ryan OSheaed27ee72020-04-22 16:37:29 +01001479{
1480 //IGNORE momentum parameter and spatial parameters
1481
1482 CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 5);
1483 CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
1484
1485 VALID_INPUTS(node, STR_LIST(onnx::TensorProto::FLOAT));
1486 for(int ind = 1; ind < node.input_size(); ++ind)
1487 {
1488 auto tensor = node.input(ind);
1489 if(! m_TensorsInfo[tensor].isConstant())
1490 {
James Ward58dec6b2020-09-11 17:32:44 +01001491 throw ParseException(
1492 fmt::format("Input tensor '{}' should be constant in BatchNormalization node '{}' {}",
1493 tensor,
1494 node.name(),
1495 CHECK_LOCATION().AsString()));
Ryan OSheaed27ee72020-04-22 16:37:29 +01001496 }
1497 }
1498
1499 float epsilon = ReadOptionalNodeFloatAttribute(node, "epsilon", 1e-5f);
1500 BatchNormalizationDescriptor desc;
1501 desc.m_Eps = epsilon;
1502
1503 auto scaleTensor = CreateConstTensor(node.input(1));
1504 auto biasTensor = CreateConstTensor(node.input(2));
1505 auto meanTensor = CreateConstTensor(node.input(3));
1506 auto varTensor = CreateConstTensor(node.input(4));
1507
1508 IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
1509 meanTensor.first,
1510 varTensor.first,
1511 biasTensor.first,
1512 scaleTensor.first,
1513 node.name().c_str());
1514 ARMNN_ASSERT(layer != nullptr);
1515
1516 auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
1517 layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
1518
1519 RegisterInputSlots(layer, {node.input(0)}); //don't register constant inputs
1520
1521 // register the output connection
1522 RegisterOutputSlots(layer, {node.output(0)});
1523}
1524
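// Handles Concat. The mandatory 'axis' attribute may be negative and is normalised against the
// input rank before the per-view origins of the OriginsDescriptor are computed.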
Narumol Prangnawaratbc3bb622021-09-24 16:08:34 +01001525void OnnxParserImpl::ParseConcat(const onnx::NodeProto& node)
1526{
1527 CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
1528
1529 uint32_t numConcatView = static_cast<uint32_t>(node.input_size());
1530 uint32_t inputRank = m_TensorsInfo[node.input(0)].m_info->GetNumDimensions();
1531
1532 int axisInt = ReadMandatoryNodeIntAttribute(node, "axis");
1533
1534 unsigned int concatDimInput = static_cast<unsigned int>(
1535 (static_cast<int>(inputRank) + axisInt) % static_cast<int>(inputRank));
1536
1537 OriginsDescriptor concatDescriptor(numConcatView, inputRank);
1538 concatDescriptor.SetConcatAxis(concatDimInput);
1539
1540 unsigned int mergeDimOrigin = 0;
1541
1542 std::vector<TensorShape> inputShapes;
1543 std::vector<std::string> tensorIds;
1544
1545 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
1546 {
1547 std::string nodeName = node.input(static_cast<int>(viewIndex));
1548 auto inputTensorInfo = *m_TensorsInfo[nodeName].m_info;
1549 inputShapes.push_back(inputTensorInfo.GetShape());
1550 tensorIds.push_back(nodeName);
1551
1552 // Set up concatDescriptor view origin
1553 armnnUtils::ProcessConcatInputTensorInfo(
1554 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
1555 }
1556
1557 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, node.name().c_str());
1558 ARMNN_ASSERT(layer != nullptr);
1559
Narumol Prangnawarat452274c2021-09-23 16:12:19 +01001560 auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, inputShapes,
1561 m_TensorsInfo[node.input(0)].m_dtype);
Narumol Prangnawaratbc3bb622021-09-24 16:08:34 +01001562
1563 layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
1564
1565 // register the input connection slots for the layer, connections are made after all layers have been created
1566 RegisterInputSlots(layer, tensorIds);
1567
1568 // register the output connection slots for the layer, connections are made after all layers have been created
1569 RegisterOutputSlots(layer, { node.output(0) });
1570}
1571
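// Handles the Constant operator. The embedded tensor is cached in m_TensorsInfo so later nodes
// can use it as a constant parameter; FLOAT and INT64 tensors are also emitted as Constant
// layers, any other data type is rejected.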
Kevin Mayef33cb12021-01-29 14:24:57 +00001572void OnnxParserImpl::ParseConstant(const onnx::NodeProto& node)
Ryan OSheaed27ee72020-04-22 16:37:29 +01001573{
1574 CHECK_VALID_SIZE(static_cast<size_t>(node.attribute_size()), 1);
1575 if (!node.attribute(0).has_t())
1576 {
James Ward58dec6b2020-09-11 17:32:44 +01001577 throw ParseException(fmt::format("Value not found for Constant node '{}' {}",
1578 node.name(),
1579 CHECK_LOCATION().AsString()));
Ryan OSheaed27ee72020-04-22 16:37:29 +01001580 }
1581 const onnx::TensorProto& onnxTensor = node.attribute(0).t();
1582
Ryan OSheaed27ee72020-04-22 16:37:29 +01001583 //Register this as a m_ConstParam so we know we can use it as a constant param in future layers.
1584 m_TensorsInfo[node.output(0)].m_tensor = std::make_unique<const onnx::TensorProto>(onnxTensor);
1585 m_TensorsInfo[node.output(0)].m_info = std::make_unique<TensorInfo>(ToTensorInfo(onnxTensor));
1586 m_TensorsInfo[node.output(0)].m_dtype = static_cast<onnx::TensorProto::DataType>(onnxTensor.data_type());
1587
Narumol Prangnawaratf10b15a2021-09-17 21:08:57 +01001588 if (m_TensorsInfo[node.output(0)].m_dtype == onnx::TensorProto_DataType_FLOAT)
1589 {
1590 CreateConstantLayer(node.output(0), node.name());
1591 }
1592 else if (m_TensorsInfo[node.output(0)].m_dtype == onnx::TensorProto_DataType_INT64)
1593 {
1594 CreateInt64ConstantLayer(node.output(0), node.name());
1595 }
1596 else
1597 {
1598 throw ParseException(fmt::format("Data type not supported for Constant node '{}' {}",
1599 node.name(),
1600 CHECK_LOCATION().AsString()));
1601 }
Ryan OSheaed27ee72020-04-22 16:37:29 +01001602}
1603
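// Handles Conv. Only 2D convolutions with constant weights (and optional constant bias) are
// supported. Padding comes from the 'pads' attribute or, failing that, from the deprecated
// 'auto_pad' attribute. A 'group' equal to the input channel count is lowered to a depthwise
// convolution; any other group value greater than 1 is rejected.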
Kevin Mayef33cb12021-01-29 14:24:57 +00001604void OnnxParserImpl::ParseConv(const onnx::NodeProto& node)
telsoa01c577f2c2018-08-31 09:22:23 +01001605{
1606 CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 2, 3); //input, weight, (bias)
1607 CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
1608
1609 VALID_INPUTS(node, STR_LIST(onnx::TensorProto::FLOAT));
1610
1611 if(m_TensorsInfo[node.input(0)].m_info->GetNumDimensions() != 4)
1612 {
James Ward58dec6b2020-09-11 17:32:44 +01001613 throw ParseException(
1614 fmt::format("ArmNN only supports 2D convolution and Conv layer '{}' input {} {}",
1615 node.name(),
1616 TensorInfoAsString(*m_TensorsInfo[node.input(0)].m_info, node.input(0),
1617 m_TensorsInfo[node.input(0)].m_dtype),
1618 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01001619 }
1620
1621 if(!m_TensorsInfo[node.input(1)].isConstant())
1622 {
James Ward58dec6b2020-09-11 17:32:44 +01001623 throw ParseException(
1624 fmt::format("Weights '{}' should be constant in Conv layer '{}' {}",
1625 node.input(1),
1626 node.name(),
1627 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01001628 }
1629
1630 auto inputInfo = *m_TensorsInfo[node.input(0)].m_info;
1631
telsoa01c577f2c2018-08-31 09:22:23 +01001632 Convolution2dDescriptor desc;
1633 desc.m_BiasEnabled = false;
1634
1635 std::vector<uint32_t> strides = ReadOptionalNodeUint32ListAttribute(node, "strides");
1636 if(strides.empty())
1637 {
1638 desc.m_StrideX = 1;
1639 desc.m_StrideY = 1;
1640 }
1641 else
1642 {
1643 desc.m_StrideX = strides[1];
1644 desc.m_StrideY = strides[0];
1645 }
1646
Sadik Armagan60bb9d82021-01-11 15:15:01 +00001647 std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(node, "dilations");
1648 if(!dilations.empty())
1649 {
1650 desc.m_DilationX = dilations[1];
1651 desc.m_DilationY = dilations[0];
1652 }
1653
telsoa01c577f2c2018-08-31 09:22:23 +01001654 std::vector<uint32_t> pads = ReadOptionalNodeUint32ListAttribute(node, "pads");
1655 //Check new padding version first
1656 if(pads.empty())
1657 {
1658 //Check deprecated version
1659 std::string paddingString = ReadOptionalNodeStringAttribute(node, "auto_pad");
1660 if(paddingString != "VALID" && paddingString != "" && paddingString != "NOTSET")
1661 {
1662 bool isUpper;
1663 if( paddingString == "SAME_LOWER")
1664 {
1665 isUpper = false;
1666 }
1667 else if (paddingString == "SAME_UPPER")
1668 {
1669 isUpper = true;
1670 }
1671 else
1672 {
James Ward58dec6b2020-09-11 17:32:44 +01001673 throw ParseException(
1674 fmt::format("Invalid auto_pad attribute for node {}. Only SAME_UPPER, SAME_LOWER or VALID "
1675 "supported and found {} {}",
1676 node.name(),
1677 paddingString,
1678 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01001679 }
1680 uint32_t inputHeight = inputInfo.GetShape()[2];
1681 uint32_t inputWidth = inputInfo.GetShape()[3];
1682
1683 uint32_t weightHeight;
1684 uint32_t weightWidth;
1685 std::vector<uint32_t> kernel_shape = ReadOptionalNodeUint32ListAttribute(node, "kernel_shape");
1686 if (kernel_shape.empty())
1687 {
1688 const TensorInfo weightTensorInfo = *m_TensorsInfo[node.input(1)].m_info;
1689 weightHeight = weightTensorInfo.GetShape()[2];
1690 weightWidth = weightTensorInfo.GetShape()[3];
1691 }
1692 else
1693 {
1694 weightHeight = kernel_shape[0];
1695 weightWidth = kernel_shape[1];
1696 }
Sadik Armagan60bb9d82021-01-11 15:15:01 +00001697 CalcPadding(inputHeight,
1698 weightHeight,
1699 desc.m_StrideY,
1700 desc.m_DilationY,
1701 &desc.m_PadTop,
1702 &desc.m_PadBottom,
1703 isUpper);
1704 CalcPadding(inputWidth,
1705 weightWidth,
1706 desc.m_StrideX,
1707 desc.m_DilationX,
1708 &desc.m_PadLeft,
1709 &desc.m_PadRight,
1710 isUpper);
telsoa01c577f2c2018-08-31 09:22:23 +01001711 }
1712 }
1713 else
1714 {
1715 desc.m_PadTop = pads[0];
1716 desc.m_PadLeft = pads[1];
1717 desc.m_PadBottom = pads[2];
1718 desc.m_PadRight = pads[3];
1719 }
1720
1721 uint32_t group = ReadOptionalNodeUint32Attribute(node, "group", 1);
1722 if(group > 1)
1723 {
1724 if (group > inputInfo.GetShape()[1])
1725 {
1726 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001727 fmt::format("Error parsing Convolution node: {}. "
1728 "The 'group'={} parameter cannot be larger than the "
1729 "channel of the input shape={} (in NCHW format). {}",
1730 node.name(),
1731 group,
1732 inputInfo.GetShape()[1],
1733 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01001734 }
1735 else if (group == inputInfo.GetShape()[1])
1736 {
1737 // we use a depthwise convolution here, because the number of groups equals to the
1738 // input channels
1739 AddConvLayerWithDepthwiseConv(node, desc);
1740 return;
1741 }
1742 else
1743 {
1744 // TODO: split the input by channels into channels/groups separate convolutions
Jim Flynne242f2d2019-05-22 14:24:13 +01001745 // and concatenate the results afterwards
James Ward58dec6b2020-09-11 17:32:44 +01001746 throw ParseException(fmt::format("Error parsing Convolution node: {}. "
1747 "The 'group'={} parameter should be 1 or be equal to the "
1748 "channel of the input shape={} (in NCHW format). {}",
1749 node.name(),
1750 group,
1751 inputInfo.GetShape()[1],
1752 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01001753 }
1754 }
1755
1756 armnn::IConnectableLayer* layer;
1757 auto weightTensor = CreateConstTensor(node.input(1));
1758
1759 if (node.input_size() == 3)
1760 {
1761 if(!m_TensorsInfo[node.input(2)].isConstant())
1762 {
James Ward58dec6b2020-09-11 17:32:44 +01001763 throw ParseException(fmt::format("Bias '{}' should be constant in Conv layer '{}' {}",
1764 node.input(2),
1765 node.name(),
1766 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01001767 }
1768 desc.m_BiasEnabled = true;
1769 auto biasTensor = CreateConstTensor(node.input(2));
1770 layer = m_Network->AddConvolution2dLayer(desc,
1771 weightTensor.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01001772 Optional<ConstTensor>(biasTensor.first),
telsoa01c577f2c2018-08-31 09:22:23 +01001773 node.name().c_str());
1774 }
1775 else
1776 {
1777 layer = m_Network->AddConvolution2dLayer(desc,
1778 weightTensor.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01001779 EmptyOptional(),
telsoa01c577f2c2018-08-31 09:22:23 +01001780 node.name().c_str());
1781 }
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001782 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001783
1784 auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
1785 { m_TensorsInfo[node.input(0)].m_info->GetShape(),
1786 m_TensorsInfo[node.input(1)].m_info->GetShape() });
1787 layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
1788
1789 // register the input connection slots for the layer, connections are made after all layers have been created
1790 // only the tensors for the inputs are relevant, exclude the const tensors
1791 RegisterInputSlots(layer, {node.input(0)});
1792
1793 // register the output connection slots for the layer, connections are made after all layers have been created
1794 RegisterOutputSlots(layer, {node.output(0)});
1795}
1796
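// Handles Flatten by collapsing the input into a 2D shape
// (d_0 * ... * d_(axis-1), d_axis * ... * d_n) and emitting a Reshape layer,
// e.g. with axis=2 a (2,3,4,5) input becomes (6,20).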
Kevin Mayef33cb12021-01-29 14:24:57 +00001797void OnnxParserImpl::ParseFlatten(const onnx::NodeProto& node)
Ryan OSheaed27ee72020-04-22 16:37:29 +01001798{
1799 CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 1);
1800 CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
1801
1802 CHECK_VALID_DATATYPE(node.name(), node.input(0),
1803 m_TensorsInfo[node.input(0)].m_dtype,
1804 onnx::TensorProto::FLOAT);
1805
1806 int64_t axis = ReadOptionalNodeInt64Attribute(node, "axis", 1);
1807 TensorShape inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();
1808
1809 /// Negative axis conversion
1810 if (axis < 0)
1811 {
1812 axis += inputShape.GetNumDimensions();
1813 }
1814
1815 /// Check Axis is within dimensions
1816 if (axis < 0 || axis >= inputShape.GetNumDimensions())
1817 {
James Ward58dec6b2020-09-11 17:32:44 +01001818 throw ParseException(fmt::format("Axis '{}' invalid. Tensor has '{}' dimensions in FlattenLayer '{}'",
1819 axis, inputShape.GetNumDimensions(), node.name()));
Ryan OSheaed27ee72020-04-22 16:37:29 +01001820 }
1821
1822 /// If the chosen axis is 0, dimension1 will always be 1 in the output; default dimension2 to 1 because 0 is invalid
1823 uint dimension1{1};
1824 uint dimension2{1};
1825 uint i{0};
1826
1827 /// dimension1 = (d_0 * d_1 ... d_(axis-1))
1828 for (i = 0; i < axis; i++){
1829 dimension1 *= inputShape[i];
1830 }
1831
1832 /// dimension2 = (d_axis * d_(axis+1) ... d_n)
1833 for (i = static_cast<uint>(axis); i < inputShape.GetNumDimensions(); i++){
1834 dimension2 *= inputShape[i];
1835 }
1836
1837 TensorShape outputShape{dimension1, dimension2};
1838
1839 auto outInfo = ComputeReshapeInfo(outputShape, inputShape, node.output(0));
1840 m_TensorsInfo[node.output(0)].m_info = std::make_unique<TensorInfo>(outInfo);
1841 CreateReshapeLayer(node.input(0), node.output(0), node.name());
1842}
1843
Narumol Prangnawaratf10b15a2021-09-17 21:08:57 +01001844void OnnxParserImpl::ParseGather(const onnx::NodeProto& node)
1845{
1846 CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 2);
1847 CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
1848
1849 armnn::GatherDescriptor gatherDescriptor;
1850 gatherDescriptor.m_Axis = static_cast<int>(ReadOptionalNodeInt64Attribute(node, "axis", 0));
1851
1852 IConnectableLayer* layer = m_Network->AddGatherLayer(gatherDescriptor, node.name().c_str());
1853 ARMNN_ASSERT(layer != nullptr);
1854
Narumol Prangnawarat452274c2021-09-23 16:12:19 +01001855 const TensorShape& inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();
1856 const TensorShape& indicesShape = m_TensorsInfo[node.input(1)].m_info->GetShape();
1857 auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, { inputShape, indicesShape },
1858 m_TensorsInfo[node.input(0)].m_dtype);
Narumol Prangnawaratf10b15a2021-09-17 21:08:57 +01001859 layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
1860
1861 // register the input connection slots for the layer, connections are made after all layers have been created
1862 RegisterInputSlots(layer, { node.input(0), node.input(1) });
1863
1864 // register the output connection slots for the layer, connections are made after all layers have been created
1865 RegisterOutputSlots(layer, { node.output(0) });
1866}
1867
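// Handles Gemm (alpha * A' * B' + beta * C) by lowering it to a FullyConnected layer:
// transB maps onto the descriptor's transposed-weights flag, transA inserts an explicit
// Transpose layer on input A, and non-unit alpha/beta are applied through linear Activation
// layers on the weight and bias paths.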
Narumol Prangnawarat1112b012021-09-30 12:10:50 +01001868void OnnxParserImpl::ParseGemm(const onnx::NodeProto& node)
1869{
1870 CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 2, 3);
1871 CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
1872
1873 int transA = static_cast<int>(ReadOptionalNodeUint32Attribute(node, "transA", 0));
1874 int transB = static_cast<int>(ReadOptionalNodeUint32Attribute(node, "transB", 0));
1875 float alpha = ReadOptionalNodeFloatAttribute(node, "alpha", 1.0);
1876 float beta = ReadOptionalNodeFloatAttribute(node, "beta", 1.0);
1877 bool biasEnabled = node.input_size() == 3;
1878
1879 TensorShape input0Shape = m_TensorsInfo[node.input(0)].m_info->GetShape();
1880 TensorShape input1Shape = m_TensorsInfo[node.input(1)].m_info->GetShape();
1881
1882 // if transB != 0, add transpose to the input1 (transpose the weight matrix in FullyConnected)
1883 armnn::FullyConnectedDescriptor fullyConnectedDescriptor;
1884 fullyConnectedDescriptor.m_BiasEnabled = biasEnabled;
1885 fullyConnectedDescriptor.m_TransposeWeightMatrix = transB;
1886
1887 IConnectableLayer* layer = nullptr;
1888
1889 // Just add a FullyConnected layer, weights and biases are handled as inputs now.
1890 layer = m_Network->AddFullyConnectedLayer(fullyConnectedDescriptor, node.name().c_str());
1891 ARMNN_ASSERT(layer != nullptr);
1892
1893 // if transA != 0, add transpose to the input0
1894 if (transA != 0)
1895 {
1896 std::string transAName = "transpose_" + node.input(0);
1897 armnn::TransposeDescriptor transposeADescriptor;
1898 transposeADescriptor.m_DimMappings = { 1, 0 };
1899 IConnectableLayer* transALayer = m_Network->AddTransposeLayer(transposeADescriptor, transAName.c_str());
1900 ARMNN_ASSERT(transALayer != nullptr);
1901 auto transAInfo = ComputeOutputInfo({ transAName }, transALayer, { input0Shape });
1902 transALayer->GetOutputSlot(0).SetTensorInfo(transAInfo[0]);
1903 transALayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
1904 // register the input connection slots for the layer, connections are made after all layers have been created
1905 RegisterInputSlot(transALayer, node.input(0), 0);
1906 input0Shape = transAInfo[0].GetShape();
1907 }
1908 else
1909 {
1910 RegisterInputSlot(layer, node.input(0), 0);
1911 }
1912
1913 // Add constant layer to store weights/biases and connect to FullyConnected layer.
1914 if(m_TensorsInfo[node.input(1)].isConstant())
1915 {
1916 IConnectableLayer* weightsLayer = m_Network->AddConstantLayer(CreateConstTensor(node.input(1)).first);
1917 TensorInfo weightInfo = *m_TensorsInfo[node.input(1)].m_info;
1918 weightInfo.SetConstant();
1919 weightsLayer->GetOutputSlot(0).SetTensorInfo(weightInfo);
1920
1921 // if alpha != 1, multiply to the weight
1922 if (alpha != 1)
1923 {
1924 std::string activationName = "activation_" + node.input(1);
1925 armnn::ActivationDescriptor activationDescriptor;
1926 activationDescriptor.m_A = alpha;
1927 activationDescriptor.m_Function = ActivationFunction::Linear;
1928 IConnectableLayer* actLayer = m_Network->AddActivationLayer(activationDescriptor, activationName.c_str());
1929 ARMNN_ASSERT(actLayer != nullptr);
1930
1931 auto actInfo = ComputeOutputInfo({ activationName }, actLayer, { weightInfo.GetShape() });
1932 actLayer->GetOutputSlot(0).SetTensorInfo(actInfo[0]);
1933 actLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
1934 weightsLayer->GetOutputSlot(0).Connect(actLayer->GetInputSlot(0u));
1935 input1Shape = actInfo[0].GetShape();
1936 }
1937 else
1938 {
1939 weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
1940 input1Shape = weightInfo.GetShape();
1941 }
1942 }
1943 else
1944 {
1945 // if alpha != 1, multiply to the weight
1946 if (alpha != 1)
1947 {
1948 std::string activationName = "activation_" + node.input(1);
1949 armnn::ActivationDescriptor activationDescriptor;
1950 activationDescriptor.m_A = alpha;
1951 activationDescriptor.m_Function = ActivationFunction::Linear;
1952 IConnectableLayer* actLayer = m_Network->AddActivationLayer(activationDescriptor, activationName.c_str());
1953 ARMNN_ASSERT(actLayer != nullptr);
1954
1955 auto actInfo = ComputeOutputInfo({ activationName }, actLayer, { input1Shape });
1956 actLayer->GetOutputSlot(0).SetTensorInfo(actInfo[0]);
1957 actLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
1958 RegisterInputSlot(actLayer, node.input(1), 0);
1959 input1Shape = actInfo[0].GetShape();
1960 }
1961 else
1962 {
1963 RegisterInputSlot(layer, node.input(1), 1);
1964 }
1965 }
1966
1967 if(biasEnabled && m_TensorsInfo[node.input(2)].isConstant())
1968 {
1969 To1DTensor(node.input(2), CHECK_LOCATION());
1970 IConnectableLayer* biasLayer = m_Network->AddConstantLayer(CreateConstTensor(node.input(2)).first);
1971 TensorInfo biasInfo = *m_TensorsInfo[node.input(2)].m_info;
1972 biasInfo.SetConstant();
1973 biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
1974
1975 // if beta != 1, multiply to the bias
1976 if (beta != 1)
1977 {
1978 std::string activationName = "activation_" + node.input(2);
1979 armnn::ActivationDescriptor activationDescriptor;
1980 activationDescriptor.m_A = beta;
1981 activationDescriptor.m_Function = ActivationFunction::Linear;
1982 IConnectableLayer* actLayer = m_Network->AddActivationLayer(activationDescriptor, activationName.c_str());
1983 ARMNN_ASSERT(actLayer != nullptr);
1984
1985 auto actInfo = ComputeOutputInfo({ activationName }, actLayer, { biasInfo.GetShape() });
1986 actLayer->GetOutputSlot(0).SetTensorInfo(actInfo[0]);
1987 actLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
1988 biasLayer->GetOutputSlot(0).Connect(actLayer->GetInputSlot(0u));
1989 }
1990 else
1991 {
1992 biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
1993 }
1994 }
1995 else if (biasEnabled)
1996 {
1997 // A non-constant input C (bias) of Gemm is currently only supported when it has 1 dimension
1998 if (m_TensorsInfo[node.input(2)].m_info->GetNumDimensions() != 1)
1999 {
2000 throw ParseException(fmt::format("Input C (bias) of Gemm must be constant, or non-constant with "
2001 "1 dimension. Input '{}' in '{}' is not supported {}",
2002 node.input(2),
2003 node.name(),
2004 CHECK_LOCATION().AsString()));
2005 }
2006 // if beta != 1, multiply to the bias
2007 if (beta != 1)
2008 {
2009 std::string activationName = "activation_" + node.input(2);
2010 armnn::ActivationDescriptor activationDescriptor;
2011 activationDescriptor.m_A = beta;
2012 activationDescriptor.m_Function = ActivationFunction::Linear;
2013 IConnectableLayer* actLayer = m_Network->AddActivationLayer(activationDescriptor, activationName.c_str());
2014 ARMNN_ASSERT(actLayer != nullptr);
2015
2016 auto actInfo = ComputeOutputInfo({ activationName },
2017 actLayer,
2018 { m_TensorsInfo[node.input(2)].m_info->GetShape() });
2019 actLayer->GetOutputSlot(0).SetTensorInfo(actInfo[0]);
2020 actLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
2021 RegisterInputSlot(actLayer, node.input(2), 0);
2022 }
2023 else
2024 {
2025 RegisterInputSlot(layer, node.input(2), 2);
2026 }
2027 }
2028
2029 // Set final output of the FullyConnected layer
2030 auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
2031 { input0Shape, input1Shape });
2032 layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
2033
2034 RegisterOutputSlots(layer, {node.output(0)});
2035}
2036
Kevin Mayef33cb12021-01-29 14:24:57 +00002037void OnnxParserImpl::ParseGlobalAveragePool(const onnx::NodeProto& node)
Ryan OSheaed27ee72020-04-22 16:37:29 +01002038{
2039 Pooling2dDescriptor desc = Pooling2dDescriptor();
2040 desc.m_PoolType = PoolingAlgorithm::Average;
2041
2042 //kernel size is the same as input
2043 TensorShape inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();
2044 desc.m_PoolWidth = inputShape[3];
2045 desc.m_PoolHeight = inputShape[2];
2046
2047 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str());
2048 ARMNN_ASSERT(layer != nullptr);
2049
2050 auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {inputShape});
2051 layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
2052
2053 // register the input connection slots for the layer, connections are made after all layers have been created
2054 // only the tensors for the inputs are relevant, exclude the const tensors
2055 RegisterInputSlots(layer, {node.input(0)});
2056
2057 // register the output connection slots for the layer, connections are made after all layers have been created
2058 RegisterOutputSlots(layer, {node.output(0)});
2059}
2060
Kevin Mayef33cb12021-01-29 14:24:57 +00002061void OnnxParserImpl::ParseMaxPool(const onnx::NodeProto& node)
Ryan OSheaed27ee72020-04-22 16:37:29 +01002062{
2063 Pooling2dDescriptor desc;
2064 desc.m_PoolType = PoolingAlgorithm::Max;
2065 desc.m_PaddingMethod = PaddingMethod::Exclude;
2066 AddPoolingLayer(node, desc);
2067}
2068
Narumol Prangnawaratcdc495e2021-09-16 18:13:39 +01002069void OnnxParserImpl::ParseShape(const onnx::NodeProto& node)
2070{
2071 CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 1);
2072 CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
2073
Narumol Prangnawaratcdc495e2021-09-16 18:13:39 +01002074 IConnectableLayer* layer = m_Network->AddShapeLayer(node.name().c_str());
2075 ARMNN_ASSERT(layer != nullptr);
2076
2077 TensorShape inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();
Narumol Prangnawarat452274c2021-09-23 16:12:19 +01002078 auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {inputShape}, onnx::TensorProto::INT64);
Narumol Prangnawaratcdc495e2021-09-16 18:13:39 +01002079 layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
2080
2081 // register the input connection slots for the layer, connections are made after all layers have been created
2082 RegisterInputSlots(layer, {node.input(0)});
2083
2084 // register the output connection slots for the layer, connections are made after all layers have been created
2085 RegisterOutputSlots(layer, {node.output(0)});
2086}
2087
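// Handles Reshape. The target shape is normally a constant INT64 tensor; for a non-constant
// shape input only the forms (-1) and (batch, -1) are supported, and the new shape is then
// derived from the total number of input elements.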
Kevin Mayef33cb12021-01-29 14:24:57 +00002088void OnnxParserImpl::ParseReshape(const onnx::NodeProto& node)
Ryan OSheaed27ee72020-04-22 16:37:29 +01002089{
2090 CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 2);
2091 CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
2092
2093 CHECK_VALID_DATATYPE(node.name(), node.input(0),
2094 m_TensorsInfo[node.input(0)].m_dtype,
2095 onnx::TensorProto::FLOAT); //input
2096 CHECK_VALID_DATATYPE(node.name(), node.input(1),
2097 m_TensorsInfo[node.input(1)].m_dtype,
2098 onnx::TensorProto::INT64); //shape
2099
Narumol Prangnawarat4b536e32021-10-18 12:35:19 +01002100 TensorShape inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();
2101
2102 std::vector<unsigned int> targetShape;
2103 if(m_TensorsInfo[node.input(1)].isConstant())
Ryan OSheaed27ee72020-04-22 16:37:29 +01002104 {
Narumol Prangnawarat4b536e32021-10-18 12:35:19 +01002105 unsigned int dims = static_cast<unsigned int>(m_TensorsInfo[node.input(1)].m_tensor->int64_data_size());
2106 targetShape.reserve(dims);
2107
2108 for(uint i = 0; i < dims; i++)
2109 {
2110 int val = CHECKED_INT32(m_TensorsInfo[node.input(1)].m_tensor->int64_data(static_cast<int>(i)));
2111 targetShape.push_back(static_cast<unsigned int>(val));
2112 }
2113 }
2114 else
2115 {
2116 // The parser only supports shape (batch, -1) or (-1) for non-constant shape input.
2117 unsigned int dims = m_TensorsInfo[node.input(1)].m_info->GetNumDimensions();
2118 TensorShape shapes = m_TensorsInfo[node.input(1)].m_info->GetShape();
2119 if (dims != 1 || shapes[0] > 2)
2120 {
2121 throw ParseException(fmt::format("Invalid input shape '{}' in Reshape layer '{}' {}",
2122 node.input(1),
2123 node.name(),
2124 CHECK_LOCATION().AsString()));
2125 }
2126
2127 unsigned int numInputElements = m_TensorsInfo[node.input(0)].m_info->GetNumElements();
2128 if (shapes[0] == 1)
2129 {
2130 targetShape = { numInputElements };
2131 }
2132 else if (shapes[0] == 2)
2133 {
2134 targetShape = { inputShape[0] , numInputElements / inputShape[0] };
2135 }
Ryan OSheaed27ee72020-04-22 16:37:29 +01002136 }
2137
2138 if(m_TensorsInfo[node.input(0)].isConstant())
2139 {
2140 //make a new cst tensor -> move the data to the output tensor (the shape is already good in the output tensor)
2141 if(m_TensorsInfo.count(node.output(0)) == 0)
2142 {
2143 m_TensorsInfo[node.output(0)] = OnnxTensor();
2144 }
2145 m_TensorsInfo[node.output(0)].m_tensor =
2146 std::make_unique<onnx::TensorProto>(*m_TensorsInfo[node.input(0)].m_tensor);
2147 }
2148 else
2149 {
Ryan OSheaed27ee72020-04-22 16:37:29 +01002150 if(m_TensorsInfo.count(node.output(0)) == 0 || m_TensorsInfo[node.output(0)].m_info == nullptr)
2151 {
Narumol Prangnawarat4b536e32021-10-18 12:35:19 +01002152 auto outInfo = ComputeReshapeInfo(
2153 TensorShape(static_cast<unsigned int>(targetShape.size()), targetShape.data()),
2154 inputShape, node.output(0));
Ryan OSheaed27ee72020-04-22 16:37:29 +01002155 m_TensorsInfo[node.output(0)].m_info = std::make_unique<TensorInfo>(outInfo);
2156 }
2157
2158 CreateReshapeLayer(node.input(0), node.output(0), node.name());
2159 }
2160}
2161
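// Handles Unsqueeze. The axes come either from the 'axes' attribute or from a constant INT64
// second input; they are sorted and each one inserts a dimension of size 1 into the input
// shape before a Reshape layer is created.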
Narumol Prangnawaratfe6aa2f2021-09-23 16:11:17 +01002162void OnnxParserImpl::ParseUnsqueeze(const onnx::NodeProto& node)
2163{
2164 CHECK_VALID_SIZE(armnn::numeric_cast<size_t>(node.input_size()), 1, 2);
2165 CHECK_VALID_SIZE(armnn::numeric_cast<size_t>(node.output_size()), 1);
2166
Narumol Prangnawaratfe6aa2f2021-09-23 16:11:17 +01002167 TensorShape inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();
2168 std::vector<uint32_t> dims;
2169 if (node.input_size() == 1 && node.attribute_size() > 0)
2170 {
2171 dims = ReadMandatoryNodeUint32ListAttribute(node, "axes");
2172 }
2173 else
2174 {
2175 CHECK_VALID_DATATYPE(node.name(), node.input(1),
2176 m_TensorsInfo[node.input(1)].m_dtype,
2177 onnx::TensorProto::INT64); //axes
2178
2179 auto int64Axes = m_TensorsInfo[node.input(1)].m_tensor->int64_data().data();
2180 uint numDim = armnn::numeric_cast<uint>(m_TensorsInfo[node.input(1)].m_tensor->int64_data_size());
2181
2182 for(uint i = 0; i < numDim; i++)
2183 {
2184 uint32_t uint32Value = CHECKED_NON_NEGATIVE(CHECKED_INT32(int64Axes[i]));
2185 dims.push_back(uint32Value);
2186 }
2187 }
2188
2189 // Ensure that the axes are sorted
2190 std::sort(dims.begin(), dims.end());
2191
2192 std::vector<unsigned int> targetShape;
2193
Narumol Prangnawarat452274c2021-09-23 16:12:19 +01002194 if (inputShape.GetDimensionality() != Dimensionality::Scalar)
Narumol Prangnawaratfe6aa2f2021-09-23 16:11:17 +01002195 {
Narumol Prangnawarat452274c2021-09-23 16:12:19 +01002196 for(uint i = 0; i < inputShape.GetNumDimensions(); i++)
2197 {
2198 targetShape.push_back(inputShape[i]);
2199 }
Narumol Prangnawaratfe6aa2f2021-09-23 16:11:17 +01002200 }
2201
2202 for(uint i = 0; i < dims.size(); i++)
2203 {
2204 targetShape.insert(targetShape.begin() + armnn::numeric_cast<int>(dims[i]), 1);
2205 }
2206
Narumol Prangnawarat452274c2021-09-23 16:12:19 +01002207 auto outInfo = ComputeReshapeInfo(TensorShape(static_cast<unsigned int>(targetShape.size()), targetShape.data()),
2208 inputShape, node.output(0), m_TensorsInfo[node.input(0)].m_info->GetDataType());
Narumol Prangnawaratfe6aa2f2021-09-23 16:11:17 +01002209 m_TensorsInfo[node.output(0)].m_info = std::make_unique<TensorInfo>(outInfo);
Narumol Prangnawarat452274c2021-09-23 16:12:19 +01002210 m_TensorsInfo[node.output(0)].m_dtype = m_TensorsInfo[node.input(0)].m_dtype;
Narumol Prangnawaratfe6aa2f2021-09-23 16:11:17 +01002211
2212 CreateReshapeLayer(node.input(0), node.output(0), node.name());
2213}
2214
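// Prepends dimensions of size 1 to input0 until it has the same rank as input1 and records the
// result under 'outputName', e.g. a (3) tensor broadcast against a (2,3) tensor becomes (1,3).
// Non-constant inputs get an explicit Reshape layer; constant inputs are re-registered with the
// new shape and materialised later.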
Kevin Mayef33cb12021-01-29 14:24:57 +00002215void OnnxParserImpl::PrependForBroadcast(const std::string& outputName,
2216 const std::string& input0,
2217 const std::string& input1)
telsoa01c577f2c2018-08-31 09:22:23 +01002218{
2219 //input0 should be reshaped to have same number of dim as input1
2220 TensorInfo outputTensorInfo = TensorInfo(*m_TensorsInfo[input0].m_info);
2221
2222 TensorShape input0Shape = m_TensorsInfo[input0].m_info->GetShape();
2223 TensorShape input1Shape = m_TensorsInfo[input1].m_info->GetShape();
2224
2225 uint32_t diff = input1Shape.GetNumDimensions() - input0Shape.GetNumDimensions();
2226 std::vector<uint32_t> newShape;
2227 while(diff > 0)
2228 {
2229 newShape.push_back(1);
2230 diff--;
2231 }
2232 for (uint dim = 0; dim < input0Shape.GetNumDimensions(); ++dim)
2233 {
2234 newShape.push_back(input0Shape[dim]);
2235 }
2236 outputTensorInfo.SetShape(TensorShape(static_cast<unsigned int>(newShape.size()), newShape.data()));
2237
2238 //add the new tensor to m_TensorsInfo
2239 m_TensorsInfo[outputName] = OnnxTensor();
2240 m_TensorsInfo[outputName].m_info = std::make_unique<TensorInfo>(outputTensorInfo);
2241
2242 //add reshape layer if the parent was not constant...
2243 if( ! m_TensorsInfo[input0].isConstant())
2244 {
James Ward58dec6b2020-09-11 17:32:44 +01002245 CreateReshapeLayer(input0, outputName, fmt::format("Add:reshapeOf{}", input0));
telsoa01c577f2c2018-08-31 09:22:23 +01002246 }
2247 else //make it constant and it will be created in Add
2248 {
2249 m_TensorsInfo[outputName].m_tensor = std::make_unique<onnx::TensorProto>(*m_TensorsInfo[input0].m_tensor);
2250
2251 }
2252}
2253
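// Creates an Input layer for every graph input that is not an initializer. Inputs whose shape
// is not specified in the model must be resolved through the user-supplied m_InputShapes map,
// otherwise parsing fails.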
Kevin Mayef33cb12021-01-29 14:24:57 +00002254void OnnxParserImpl::SetupInputLayers()
telsoa01c577f2c2018-08-31 09:22:23 +01002255{
2256 //Find user input and add their layers
2257 for(int inputIndex = 0; inputIndex < m_Graph->input_size(); ++inputIndex)
2258 {
2259 auto input = m_Graph->input(inputIndex);
Narumol Prangnawarat1b11f322021-10-13 11:44:50 +01002260 if (!m_TensorsInfo[input.name()].isConstant())
telsoa01c577f2c2018-08-31 09:22:23 +01002261 {
2262 IConnectableLayer* layer =
Narumol Prangnawarat1b11f322021-10-13 11:44:50 +01002263 m_Network->AddInputLayer(static_cast<armnn::LayerBindingId>(inputIndex), input.name().c_str());
2264 TensorInfo tensorInfo = *m_TensorsInfo[input.name()].m_info;
2265 if (tensorInfo.GetShape().GetDimensionality() == Dimensionality::NotSpecified)
2266 {
2267 if (m_InputShapes.find(input.name()) == m_InputShapes.end())
2268 {
2269 throw ParseException(fmt::format("The parser does not support dynamic tensors; "
2270 "please specify an input shape for {}. {}",
2271 input.name(),
2272 CHECK_LOCATION().AsString()));
2273 }
2274 else
2275 {
2276 tensorInfo.SetShape(m_InputShapes[input.name()]);
2277 m_TensorsInfo[input.name()].m_info = std::make_unique<TensorInfo>(tensorInfo);
2278 }
2279
2280 }
telsoa01c577f2c2018-08-31 09:22:23 +01002281 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2282
Narumol Prangnawarat1b11f322021-10-13 11:44:50 +01002283 m_InputInfos[input.name()] = tensorInfo;
2284
telsoa01c577f2c2018-08-31 09:22:23 +01002285 RegisterOutputSlots(layer,{ input.name() });
2286 }
2287 }
2288}
2289
Kevin Mayef33cb12021-01-29 14:24:57 +00002290void OnnxParserImpl::SetupOutputLayers()
telsoa01c577f2c2018-08-31 09:22:23 +01002291{
2292 if(m_Graph->output_size() == 0)
2293 {
James Ward58dec6b2020-09-11 17:32:44 +01002294 throw ParseException(fmt::format("The given model does not have any outputs {}", CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01002295 }
2296
2297 for(int outputIndex = 0; outputIndex < m_Graph->output_size(); ++outputIndex)
2298 {
2299 IConnectableLayer* layer =
2300 m_Network->AddOutputLayer(static_cast<armnn::LayerBindingId>(outputIndex),
2301 m_Graph->output(outputIndex).name().c_str());
2302
2303 RegisterInputSlots(layer, { m_Graph->output(outputIndex).name() });
2304 }
2305}
2306
Narumol Prangnawarat1112b012021-09-30 12:10:50 +01002307void OnnxParserImpl::RegisterInputSlot(IConnectableLayer* layer,
2308 const std::string& tensorId,
2309 unsigned int slotIndex)
2310{
2311 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
2312
2313 auto it = m_TensorConnections.find(tensorId);
2314
2315 if (it == m_TensorConnections.end())
2316 {
Narumol Prangnawarat1b11f322021-10-13 11:44:50 +01002317 //First time seeing this tensor, we need to map it
Narumol Prangnawarat1112b012021-09-30 12:10:50 +01002318 m_TensorConnections[tensorId] = TensorSlots();
2319 }
2320 m_TensorConnections[tensorId].inputSlots.push_back(slot);
2321}
2322
Kevin Mayef33cb12021-01-29 14:24:57 +00002323void OnnxParserImpl::RegisterInputSlots(IConnectableLayer* layer, const std::vector<std::string>& tensorIds)
telsoa01c577f2c2018-08-31 09:22:23 +01002324{
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002325 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01002326 if (tensorIds.size() != layer->GetNumInputSlots())
2327 {
2328 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002329 fmt::format("The number of tensor inputs ({}) does not match the number expected ({}) {}",
2330 tensorIds.size(),
2331 layer->GetNumInputSlots(),
2332 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01002333 }
Matthew Sloyan81beae32021-07-13 19:46:11 +01002334
telsoa01c577f2c2018-08-31 09:22:23 +01002335 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
2336 {
2337 std::string tensorId = tensorIds[slotIndex];
2338 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
2339
2340 auto it = m_TensorConnections.find(tensorId);
2341
2342 if (it == m_TensorConnections.end())
2343 {
Narumol Prangnawarat1b11f322021-10-13 11:44:50 +01002344 // First time seeing this tensor, we need to map it
telsoa01c577f2c2018-08-31 09:22:23 +01002345 m_TensorConnections[tensorId] = TensorSlots();
2346 }
2347 m_TensorConnections[tensorId].inputSlots.push_back(slot);
2348 }
2349}
2350
Kevin Mayef33cb12021-01-29 14:24:57 +00002351void OnnxParserImpl::RegisterOutputSlots(IConnectableLayer* layer, const std::vector<std::string>& tensorIds)
telsoa01c577f2c2018-08-31 09:22:23 +01002352{
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002353 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01002354 if (tensorIds.size() != layer->GetNumOutputSlots())
2355 {
2356 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002357 fmt::format("The number of tensor outputs ({}) does not match the number expected ({}) {} ",
2358 tensorIds.size(),
2359 layer->GetNumOutputSlots(),
2360 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01002361 }
2362
2363 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
2364 {
2365 std::string tensorId = tensorIds[slotIndex];
2366 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
2367
2368 auto it = m_TensorConnections.find(tensorId);
2369
2370 if (it == m_TensorConnections.end())
2371 {
2372 // First time seeing this tensor, we need to map it
2373 m_TensorConnections[tensorId] = TensorSlots();
2374 }
2375
Ryan OShea337c17f2020-02-21 12:33:17 +00002376 TensorSlots& tensorSlots = m_TensorConnections[tensorId];
telsoa01c577f2c2018-08-31 09:22:23 +01002377
2378 // assuming there is only one producer for that tensor
2379 if (tensorSlots.outputSlot != nullptr)
2380 {
James Ward58dec6b2020-09-11 17:32:44 +01002381 throw ParseException(fmt::format("Another layer has already registered itself as the producer of "
2382 "tensor:{} {}",
2383 tensorId,
2384 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01002385 }
2386 tensorSlots.outputSlot = slot;
2387 }
Narumol Prangnawarat1b11f322021-10-13 11:44:50 +01002388
telsoa01c577f2c2018-08-31 09:22:23 +01002389}
2390
Kevin Mayef33cb12021-01-29 14:24:57 +00002391BindingPointInfo OnnxParserImpl::GetNetworkInputBindingInfo(const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01002392{
2393 for(int i = 0; i < m_Graph->input_size(); ++i)
2394 {
2395 auto input = m_Graph->input(i);
2396 if(input.name() == name)
2397 {
Narumol Prangnawarat1b11f322021-10-13 11:44:50 +01002398 auto it = m_InputInfos.find(name);
2399
2400 if (it != m_InputInfos.end())
2401 {
2402 return std::make_pair(static_cast<armnn::LayerBindingId>(i), it->second);
2403 }
telsoa01c577f2c2018-08-31 09:22:23 +01002404 }
2405 }
James Ward58dec6b2020-09-11 17:32:44 +01002406 throw InvalidArgumentException(fmt::format("The input layer '{}' does not exist {}",
2407 name, CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01002408}
2409
Kevin Mayef33cb12021-01-29 14:24:57 +00002410BindingPointInfo OnnxParserImpl::GetNetworkOutputBindingInfo(const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01002411{
2412 for(int i = 0; i < m_Graph->output_size(); ++i)
2413 {
2414 auto output = m_Graph->output(i);
2415 if(output.name() == name)
2416 {
Narumol Prangnawarat1b11f322021-10-13 11:44:50 +01002417 auto it = m_OutputInfos.find(name);
2418
2419 if (it != m_OutputInfos.end())
2420 {
2421 return std::make_pair(static_cast<armnn::LayerBindingId>(i), it->second);
2422 }
telsoa01c577f2c2018-08-31 09:22:23 +01002423 }
2424 }
James Ward58dec6b2020-09-11 17:32:44 +01002425 throw InvalidArgumentException(fmt::format("The output layer '{}' does not exist {}",
2426 name, CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01002427}
2428
Kevin Mayef33cb12021-01-29 14:24:57 +00002429std::vector<std::string> OnnxParserImpl::GetInputs(ModelPtr& model)
telsoa01c577f2c2018-08-31 09:22:23 +01002430{
2431 if(model == nullptr) {
James Ward58dec6b2020-09-11 17:32:44 +01002432 throw InvalidArgumentException(fmt::format("The given model cannot be null {}",
2433 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01002434 }
2435
2436 std::vector<std::string> inputNames;
2437 std::map<std::string, bool> isConstant;
2438 for(auto tensor : model->graph().initializer())
2439 {
2440 isConstant[tensor.name()] = true;
2441 }
2442 for(auto input : model->graph().input())
2443 {
2444 auto it = isConstant.find(input.name());
2445 if(it == isConstant.end())
2446 {
2447 inputNames.push_back(input.name());
2448 }
2449 }
2450 return inputNames;
2451}
2452
Kevin Mayef33cb12021-01-29 14:24:57 +00002453std::vector<std::string> OnnxParserImpl::GetOutputs(ModelPtr& model)
telsoa01c577f2c2018-08-31 09:22:23 +01002454{
2455 if(model == nullptr) {
James Ward58dec6b2020-09-11 17:32:44 +01002456 throw InvalidArgumentException(fmt::format("The given model cannot be null {}",
2457 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01002458 }
2459
2460 std::vector<std::string> outputNames;
2461 for(auto output : model->graph().output())
2462 {
2463 outputNames.push_back(output.name());
2464 }
2465 return outputNames;
2466}
2467
Matthew Sloyanac001ee2021-02-03 10:43:04 +00002468const std::string OnnxParserImpl::GetVersion()
2469{
2470 return ONNX_PARSER_VERSION;
2471}
2472
telsoa01c577f2c2018-08-31 09:22:23 +01002473} // namespace armnnOnnxParser