//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "OnnxParser.hpp"

#include "armnnOnnxParser/Version.hpp"

#include <armnn/Descriptors.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <VerificationHelpers.hpp>

#include <fmt/format.h>

#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>

#include <iostream>
#include <numeric>
#include <armnnUtils/Permute.hpp>

using namespace armnn;

namespace armnnOnnxParser
{

IOnnxParser::IOnnxParser() : pOnnxParserImpl(new OnnxParserImpl()) {}

IOnnxParser::~IOnnxParser() = default;

IOnnxParser* IOnnxParser::CreateRaw()
{
    return new IOnnxParser();
}

IOnnxParserPtr IOnnxParser::Create()
{
    return IOnnxParserPtr(CreateRaw(), &IOnnxParser::Destroy);
}

void IOnnxParser::Destroy(IOnnxParser* parser)
{
    delete parser;
}

armnn::INetworkPtr IOnnxParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    return pOnnxParserImpl->CreateNetworkFromBinaryFile(graphFile);
}

armnn::INetworkPtr IOnnxParser::CreateNetworkFromTextFile(const char* graphFile)
{
    return pOnnxParserImpl->CreateNetworkFromTextFile(graphFile);
}

armnn::INetworkPtr IOnnxParser::CreateNetworkFromString(const std::string& protoText)
{
    return pOnnxParserImpl->CreateNetworkFromString(protoText);
}

BindingPointInfo IOnnxParser::GetNetworkInputBindingInfo(const std::string& name) const
{
    return pOnnxParserImpl->GetNetworkInputBindingInfo(name);
}

BindingPointInfo IOnnxParser::GetNetworkOutputBindingInfo(const std::string& name) const
{
    return pOnnxParserImpl->GetNetworkOutputBindingInfo(name);
}

namespace
{
void CheckValidDataType(std::initializer_list<onnx::TensorProto::DataType> validInputTypes,
                        const onnx::TensorProto::DataType actualValue,
                        const char* validExpr,
                        std::string nodeName,
                        std::string tensorName,
                        const armnn::CheckLocation& location)
{
    bool isValid = std::any_of(validInputTypes.begin(),
                               validInputTypes.end(),
                               [&actualValue](onnx::TensorProto::DataType x) { return x == actualValue; } );
    if (!isValid)
    {
        throw ParseException(
            fmt::format("Datatype {} is not valid for tensor '{}' of node '{}', not in {{{}}}. {}",
                        onnx::TensorProto::DataType_Name(actualValue),
                        tensorName,
                        nodeName,
                        validExpr,
                        location.AsString()));
    }
}

#define CHECK_VALID_DATATYPE(NODE, TENSOR, ACTUAL, ...) \
CheckValidDataType({__VA_ARGS__}, ACTUAL, #__VA_ARGS__, NODE, TENSOR, CHECK_LOCATION())

using StrTypeListPair = std::pair<const char*, std::initializer_list<onnx::TensorProto::DataType>>;
#define STR_LIST(...) StrTypeListPair(#__VA_ARGS__, {__VA_ARGS__})
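
// Example use of the two macros above, as seen throughout this file: the
// variadic argument is stringified so error messages can echo the accepted
// type list back verbatim, e.g.
//     CHECK_VALID_DATATYPE(node.name(), onnxTensor.name(),
//                          static_cast<onnx::TensorProto::DataType>(onnxTensor.data_type()),
//                          onnx::TensorProto::FLOAT);
//     VALID_INPUTS(node, STR_LIST(onnx::TensorProto::FLOAT)); // VALID_INPUTS is defined below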

template <typename Callable>
void ReadMandatoryNodeAttributeImpl(const onnx::NodeProto& node,
                                    const std::string& attribName,
                                    onnx::AttributeProto::AttributeType expectedType,
                                    Callable callable)
{
    auto attribs = node.attribute();
    int attriNum = 0;
    while (attriNum < node.attribute_size())
    {
        if (attribs.Get(attriNum).name() == attribName)
        {
            if (attribs.Get(attriNum).type() == expectedType)
            {
                callable(attribs.Get(attriNum));
            }
            else
            {
                throw ParseException(fmt::format("Attribute {} of node {} expected to have {} as "
                                                 "onnx::AttributeProto::AttributeType, but found {} instead {}",
                                                 attribName,
                                                 node.name(),
                                                 onnx::AttributeProto::AttributeType_Name(expectedType),
                                                 onnx::AttributeProto::AttributeType_Name(attribs.Get(attriNum).type()),
                                                 CHECK_LOCATION().AsString()));
            }
            break;
        }
        ++attriNum;
    }
    if (attriNum == node.attribute_size())
    {
        throw ParseException(fmt::format("Could not find required attribute {} in node {} {}",
                                         attribName, node.name(), CHECK_LOCATION().AsString()));
    }
}

template <typename Callable>
void ReadOptionalNodeAttributeImpl(const onnx::NodeProto& node,
                                   const std::string& attribName,
                                   onnx::AttributeProto::AttributeType expectedType,
                                   Callable callable)
{
    auto attribs = node.attribute();
    for (int attriNum = 0; attriNum < node.attribute_size(); ++attriNum)
    {
        if (attribs.Get(attriNum).name() == attribName)
        {
            if (attribs.Get(attriNum).type() == expectedType)
            {
                callable(attribs.Get(attriNum));
            }
            else
            {
                throw ParseException(
                    fmt::format("Attribute {} of node {} expected to have {} as onnx::AttributeProto::AttributeType, "
                                "but found {} instead {}",
                                attribName,
                                node.name(),
                                onnx::AttributeProto::AttributeType_Name(expectedType),
                                onnx::AttributeProto::AttributeType_Name(attribs.Get(attriNum).type()),
                                CHECK_LOCATION().AsString()));
            }
        }
    }
}

int64_t ReadOptionalNodeInt64Attribute(const onnx::NodeProto& node,
                                       const std::string& name,
                                       const int64_t defaultValue = 0)
{
    int64_t attribValue = defaultValue;
    ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::INT,
                                  [&attribValue](const onnx::AttributeProto& attrValue)
                                  {
                                      attribValue = attrValue.i();
                                  });
    return attribValue;
}

std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const onnx::NodeProto& node,
                                                           const std::string& name)
{
    std::vector<uint32_t> attriList;
    ReadMandatoryNodeAttributeImpl(node, name, onnx::AttributeProto::INTS,
                                   [&attriList](const onnx::AttributeProto& attrValue)
                                   {
                                       for (int attriNum = 0; attriNum < attrValue.ints_size(); ++attriNum)
                                       {
                                           attriList.push_back(CHECKED_NON_NEGATIVE(CHECKED_INT32(attrValue.ints().Get(attriNum))));
                                       }
                                   });
    return attriList;
}

uint32_t ReadOptionalNodeUint32Attribute(const onnx::NodeProto& node,
                                         const std::string& name,
                                         const uint32_t defaultVal = 0u)
{
    uint32_t attribValue = defaultVal;
    ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::INT,
                                  [&attribValue](const onnx::AttributeProto& attrValue)
                                  {
                                      attribValue = CHECKED_NON_NEGATIVE(CHECKED_INT32((attrValue.i())));
                                  });
    return attribValue;
}

std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const onnx::NodeProto& node,
                                                          const std::string& name)
{
    std::vector<uint32_t> attriList;
    ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::INTS,
                                  [&attriList](const onnx::AttributeProto& attrValue)
                                  {
                                      for (int attriNum = 0; attriNum < attrValue.ints_size(); ++attriNum)
                                      {
                                          attriList.push_back(CHECKED_NON_NEGATIVE(CHECKED_INT32(attrValue.ints().Get(attriNum))));
                                      }
                                  });

    return attriList;
}

float ReadOptionalNodeFloatAttribute(const onnx::NodeProto& node,
                                     const std::string& name,
                                     const float defaultValue = 0.0f)
{
    float attribValue = defaultValue;
    ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::FLOAT,
                                  [&attribValue](const onnx::AttributeProto& attrValue)
                                  {
                                      attribValue = attrValue.f();
                                  });
    return attribValue;
}

std::string ReadOptionalNodeStringAttribute(const onnx::NodeProto& node, const std::string& name)
{
    std::string attribValue = "";
    ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::STRING,
                                  [&attribValue](const onnx::AttributeProto& attrValue)
                                  {
                                      attribValue = attrValue.s();
                                  });
    return attribValue;
}
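
// The typed readers above are how parse functions fetch ONNX node attributes;
// representative calls taken from the parse functions further down:
//     std::vector<uint32_t> strides = ReadOptionalNodeUint32ListAttribute(node, "strides");
//     float epsilon = ReadOptionalNodeFloatAttribute(node, "epsilon", 1e-5f);
// A missing optional attribute yields the supplied default, while a missing
// mandatory attribute or a type mismatch throws a ParseException.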

armnn::TensorInfo ToTensorInfo(const std::string& name, std::vector<unsigned int>& shape, int data_type)
{
    DataType type;
    switch(data_type)
    {
        case onnx::TensorProto::FLOAT:
        {
            type = DataType::Float32;
            break;
        }
        case onnx::TensorProto::INT32:
        case onnx::TensorProto::INT64:
        {
            type = DataType::Signed32;
            break;
        }
        default:
        {
            throw ParseException(
                fmt::format("'{}' is not a currently supported datatype for tensor {}."
                            " Supported dataTypes are FLOAT, INT32 and INT64. {}",
                            onnx::TensorProto::DataType_Name(static_cast<onnx::TensorProto::DataType>(data_type)),
                            name,
                            CHECK_LOCATION().AsString()));
        }
    }

    // To avoid crashes caused by trivial tensors
    if (shape.empty())
    {
        return TensorInfo(TensorShape(), type);
    }

    return TensorInfo(TensorShape(static_cast<unsigned int>(shape.size()), shape.data()), type);
}

armnn::TensorInfo ToTensorInfo(const onnx::ValueInfoProto& info)
{
    const onnx::TensorShapeProto onnxShape = info.type().tensor_type().shape();
    std::vector<unsigned int> shapeDims;
    for (int i = 0; i < onnxShape.dim_size(); ++i)
    {
        shapeDims.push_back(CHECKED_NON_NEGATIVE(CHECKED_INT32(onnxShape.dim(i).dim_value())));
    }

    if (shapeDims.empty())
    {
        shapeDims.push_back(1);
    }

    return ToTensorInfo(info.name(), shapeDims, info.type().tensor_type().elem_type());
}

armnn::TensorInfo ToTensorInfo(const onnx::TensorProto& tensor)
{
    std::vector<unsigned int> shapeDims;

    for (auto dim: tensor.dims())
    {
        shapeDims.push_back(CHECKED_NON_NEGATIVE(CHECKED_INT32(dim)));
    }

    if (shapeDims.empty())
    {
        shapeDims.push_back(1);
    }

    return ToTensorInfo(tensor.name(), shapeDims, tensor.data_type());
}

std::string TensorInfoAsString(const TensorInfo& info,
                               const std::string& name,
                               const onnx::TensorProto::DataType& type)
{
    const TensorShape shape = info.GetShape();
    std::stringstream ss;
    ss << "tensor '" << name << "' contains "
       << onnx::TensorProto::DataType_Name(type)
       << " and has shape [";

    for (uint32_t i = 0; i < shape.GetNumDimensions() - 1; ++i)
    {
        ss << shape[i] << ", ";
    }
    ss << shape[shape.GetNumDimensions() - 1] << "]";
    return ss.str();
}

void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t dilation,
                 uint32_t* paddingFront,
                 uint32_t* paddingBack,
                 bool isUpper)
{
    uint32_t outputSize = (inputSize + stride - 1) / stride;
    uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
    uint32_t temp = (outputSize - 1) * stride + dilatedSize;
    *paddingFront = (temp - inputSize) / 2;
    *paddingBack = *paddingFront;
    if((temp - inputSize) % 2 == 1)
    {
        if (isUpper)
        {
            *paddingBack += 1;
        }
        else
        {
            *paddingFront += 1;
        }
    }
}
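
// Worked example for CalcPadding above (SAME padding, as used for auto_pad):
// inputSize=5, filterSize=3, stride=2, dilation=1 gives
//     outputSize  = (5 + 2 - 1) / 2  = 3
//     dilatedSize = 3 + 0 * 2        = 3
//     temp        = (3 - 1) * 2 + 3  = 7
// so the total padding is temp - inputSize = 2, split as front=1, back=1.
// When the total is odd, the extra cell goes to the back for SAME_UPPER
// (isUpper == true) and to the front for SAME_LOWER.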

TensorInfo ComputeReshapeInfo(const TensorShape& targetShapeTensor,
                              const TensorShape& inShape,
                              const std::string& outName)
{
    std::vector<int> targetDims;
    for(uint i = 0; i < targetShapeTensor.GetNumDimensions(); ++i)
    {
        int val = CHECKED_INT32(targetShapeTensor[i]);
        if(val == 0)
        {
            targetDims.push_back(static_cast<int>(inShape[static_cast<uint>(i)]));
        }
        else
        {
            targetDims.push_back(val);
        }
    }

    std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
    const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);
    if (stretchDim != targetDims.end())
    {
        if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
        {
            std::stringstream ss;
            ss << "[ ";
            for(uint i = 0; i < targetDims.size() - 1; ++i)
            {
                ss << targetDims[i] << ", ";
            }
            ss << targetDims[targetDims.size() - 1] << " ]";

            throw ParseException(
                fmt::format("Error during creation of reshaped tensor '{}'. At most one component of shape can be "
                            " -1 and here, shape is {} {}",
                            outName,
                            ss.str(),
                            CHECK_LOCATION().AsString()));
        }

        auto targetNumElements = armnn::numeric_cast<unsigned int>(std::accumulate(targetDims.begin(), targetDims.end(),
                                                                                   -1, std::multiplies<int32_t>()));
        auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
        outDims[stretchIndex] = inShape.GetNumElements() / targetNumElements;
    }
    TensorShape outShape = TensorShape{static_cast<unsigned int>(outDims.size()), outDims.data()};
    return TensorInfo(outShape, DataType::Float32);
}
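
// Worked example for ComputeReshapeInfo above: with inShape = [2,3,4]
// (24 elements) and target shape [0,-1], the 0 copies the corresponding input
// dimension (-> 2) and the single -1 is stretched to 24 / 2 = 12, giving
// [2,12]. Two or more -1 entries are rejected with the ParseException above.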

} //namespace

const std::map<std::string, OnnxParserImpl::OperationParsingFunction> OnnxParserImpl::m_ParserFunctions = {
    { "BatchNormalization",    &OnnxParserImpl::ParseBatchNormalization },
    { "GlobalAveragePool",     &OnnxParserImpl::ParseGlobalAveragePool },
    { "AveragePool",           &OnnxParserImpl::ParseAveragePool },
    { "Clip",                  &OnnxParserImpl::ParseClip },
    { "Constant",              &OnnxParserImpl::ParseConstant },
    { "MaxPool",               &OnnxParserImpl::ParseMaxPool },
    { "Reshape",               &OnnxParserImpl::ParseReshape },
    { "Sigmoid",               &OnnxParserImpl::ParseSigmoid },
    { "Tanh",                  &OnnxParserImpl::ParseTanh },
    { "Relu",                  &OnnxParserImpl::ParseRelu },
    { "LeakyRelu",             &OnnxParserImpl::ParseLeakyRelu },
    { "Conv",                  &OnnxParserImpl::ParseConv },
    { "Add",                   &OnnxParserImpl::ParseAdd },
    { "Flatten",               &OnnxParserImpl::ParseFlatten },
};

template<typename TypePair, typename Location>
void OnnxParserImpl::ValidateInputs(const onnx::NodeProto& node,
                                    TypePair validInputs,
                                    const Location& location)
{
    for(auto input : node.input())
    {
        CheckValidDataType(validInputs.second,
                           m_TensorsInfo[input].m_dtype,
                           validInputs.first,
                           node.name(),
                           input,
                           location);
    }
}

#define VALID_INPUTS(NODE, VALID_INPUTS) \
    OnnxParserImpl::ValidateInputs(NODE, \
                                   VALID_INPUTS, \
                                   CHECK_LOCATION())

std::vector<TensorInfo> OnnxParserImpl::ComputeOutputInfo(std::vector<std::string> outNames,
                                                          const IConnectableLayer* layer,
                                                          std::vector<TensorShape> inputShapes)
{
    ARMNN_ASSERT(!outNames.empty());
    bool needCompute = std::any_of(outNames.begin(),
                                   outNames.end(),
                                   [this](std::string name)
                                   {
                                       return (m_TensorsInfo.count(name) == 0 || m_TensorsInfo[name].m_info == nullptr);
                                   });
    std::vector<TensorInfo> outInfo;
    // if the output info(s) are not here, we need to compute them
    std::vector<TensorShape> inferredShapes;
    if(needCompute)
    {
        inferredShapes = layer->InferOutputShapes(inputShapes);
        ARMNN_ASSERT(inferredShapes.size() == outNames.size());
    }
    for (uint i = 0; i < outNames.size(); ++i)
    {
        if(needCompute)
        {
            m_TensorsInfo[outNames[i]] = OnnxTensor();
            m_TensorsInfo[outNames[i]].m_info = std::make_unique<TensorInfo>(
                TensorInfo(inferredShapes[i], DataType::Float32));
        }
        outInfo.push_back(*m_TensorsInfo[outNames[i]].m_info);
    }
    return outInfo;
}

OnnxParserImpl::OnnxParserImpl()
    : m_Network(nullptr, nullptr)
{
}

void OnnxParserImpl::ResetParser()
{
    m_Network = armnn::INetworkPtr(nullptr, nullptr);
    m_Graph = nullptr;
}

void OnnxParserImpl::Cleanup()
{
    m_TensorConnections.clear();
    m_TensorsInfo.clear();
    m_OutputsMap.clear();
    m_OutputsFusedAndUsed.clear();
}

template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(const T* bufferPtr,
                      armnn::TensorInfo& tensorInfo,
                      const armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    ARMNN_ASSERT_MSG(bufferPtr != nullptr, fmt::format("Buffer for permutation is null").c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr), data.get(), sizeof(T));
    }
    else
    {
        ::memcpy(data.get(), bufferPtr, tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}

std::pair<ConstTensor, std::unique_ptr<float[]>>
OnnxParserImpl::CreateConstTensor(const std::string name,
                                  armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    TensorInfo tensorInfo = *m_TensorsInfo[name].m_info;
    onnx::TensorProto onnxTensor = *m_TensorsInfo[name].m_tensor;

    // Makes sure IsConstant flag is set.
    tensorInfo.SetConstant();

    // Const tensors require at least a list of values
    if (tensorInfo.GetNumElements() == 0)
    {
        throw ParseException(fmt::format("No tensor data found for Const tensor '{}' {}",
                                         name,
                                         CHECK_LOCATION().AsString()));
    }

    auto srcData = onnxTensor.float_data().data();
    // Copy the value list entries into the destination
    if (!onnxTensor.has_raw_data())
    {
        if(tensorInfo.GetNumElements() != static_cast<uint>(onnxTensor.float_data_size()))
        {
            throw ParseException(
                fmt::format("The number of data provided ({}) does not match the tensor '{}' number of "
                            "elements ({}) {}",
                            onnxTensor.float_data_size(),
                            name,
                            tensorInfo.GetNumElements(),
                            CHECK_LOCATION().AsString()));
        }
        return CreateConstTensorImpl<float>(srcData, tensorInfo, permutationVector);
    }
    else
    {
        return CreateConstTensorImpl<float>(reinterpret_cast<const float*>(onnxTensor.raw_data().c_str()),
                                            tensorInfo,
                                            permutationVector);
    }
}

ModelPtr OnnxParserImpl::LoadModelFromTextFile(const char* graphFile)
{
    FILE* fd = fopen(graphFile, "r");

    if (fd == nullptr)
    {
        throw FileNotFoundException(fmt::format("Invalid (null) filename {}", CHECK_LOCATION().AsString()));
    }

    // Parse the file into a message
    ModelPtr modelProto = std::make_unique<onnx::ModelProto>();
    using google::protobuf::io::FileInputStream;
    std::unique_ptr<FileInputStream> input = std::make_unique<FileInputStream>(fileno(fd));
    bool success = google::protobuf::TextFormat::Parse(input.get(), modelProto.get());
    fclose(fd);

    if (!success)
    {
        std::stringstream error;
        error << "Failed to parse graph file";
        throw ParseException(fmt::format("{} {}", error.str(), CHECK_LOCATION().AsString()));
    }
    return modelProto;
}

INetworkPtr OnnxParserImpl::CreateNetworkFromTextFile(const char* graphFile)
{
    ResetParser();
    ModelPtr modelProto = LoadModelFromTextFile(graphFile);
    return CreateNetworkFromModel(*modelProto);
}

ModelPtr OnnxParserImpl::LoadModelFromBinaryFile(const char* graphFile)
{
    FILE* fd = fopen(graphFile, "rb");

    if (fd == nullptr)
    {
        throw FileNotFoundException(fmt::format("Invalid (null) filename {}", CHECK_LOCATION().AsString()));
    }

    // Parse the file into a message
    ModelPtr modelProto = std::make_unique<onnx::ModelProto>();

    google::protobuf::io::FileInputStream inStream(fileno(fd));
    google::protobuf::io::CodedInputStream codedStream(&inStream);
    codedStream.SetTotalBytesLimit(INT_MAX);
    bool success = modelProto.get()->ParseFromCodedStream(&codedStream);
    fclose(fd);

    if (!success)
    {
        std::stringstream error;
        error << "Failed to parse graph file";
        throw ParseException(fmt::format("{} {}", error.str(), CHECK_LOCATION().AsString()));
    }
    return modelProto;
}

INetworkPtr OnnxParserImpl::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    ModelPtr modelProto = LoadModelFromBinaryFile(graphFile);
    return CreateNetworkFromModel(*modelProto);
}

ModelPtr OnnxParserImpl::LoadModelFromString(const std::string& protoText)
{
    if (protoText == "")
    {
        throw InvalidArgumentException(fmt::format("Invalid (empty) string for model parameter {}",
                                                   CHECK_LOCATION().AsString()));
    }
    // Parse the string into a message
    ModelPtr modelProto = std::make_unique<onnx::ModelProto>();
    bool success = google::protobuf::TextFormat::ParseFromString(protoText, modelProto.get());
    if (!success)
    {
        std::stringstream error;
        error << "Failed to parse graph file";
        throw ParseException(fmt::format("{} {}", error.str(), CHECK_LOCATION().AsString()));
    }
    return modelProto;
}

INetworkPtr OnnxParserImpl::CreateNetworkFromString(const std::string& protoText)
{
    ResetParser();
    ModelPtr modelProto = LoadModelFromString(protoText);
    return CreateNetworkFromModel(*modelProto);
}
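
// A minimal caller-side sketch of the factory entry points above (assuming a
// hypothetical "model.onnx" on disk; error handling omitted):
//     armnnOnnxParser::IOnnxParserPtr parser = armnnOnnxParser::IOnnxParser::Create();
//     armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.onnx");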

INetworkPtr OnnxParserImpl::CreateNetworkFromModel(onnx::ModelProto& model)
{
    m_Network = INetwork::Create();
    try
    {
        m_Graph = std::make_unique<onnx::GraphProto>(*model.mutable_graph());
        LoadGraph();
    }
    catch (const ParseException& e)
    {
        Cleanup();
        throw e;
    }
    Cleanup();
    return std::move(m_Network);
}

void OnnxParserImpl::LoadGraph()
{
    ARMNN_ASSERT(m_Graph.get() != nullptr);

    // Fill m_TensorsInfo with the shapes and value of every tensor
    SetupInfo(m_Graph->mutable_output());
    SetupInfo(m_Graph->mutable_input());
    SetupInfo(m_Graph->mutable_value_info());

    for (auto tensor : m_Graph->initializer())
    {
        m_TensorsInfo[tensor.name()].m_tensor = std::make_unique<const onnx::TensorProto>(tensor);
        m_TensorsInfo[tensor.name()].m_info = std::make_unique<TensorInfo>(ToTensorInfo(tensor));
        m_TensorsInfo[tensor.name()].m_dtype =
            static_cast<onnx::TensorProto::DataType>(tensor.data_type());
    }

    SetupInputLayers();
    SetupOutputLayers();

    // Detect FullyConnected layers with bias and update the FusedAndUsed map accordingly
    DetectFullyConnected();

    // Parsing the graph
    for(size_t nodeIndex = 0; nodeIndex < static_cast<size_t>(m_Graph->node_size()); nodeIndex++)
    {
        auto node = m_Graph->node(static_cast<int>(nodeIndex));
        const std::string& operation = node.op_type();

        // check which layers we handled already (add and matmul fused as FC)
        if (operation == "MatMul" )
        {
            if(m_OutputsFusedAndUsed[nodeIndex].inputForNodes != m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes.size())
            {
                // Node which can not be fused as a FullyConnected layer (used in layers as a simple matmul output)
                AddFullyConnected(node);
            }
        }
        else if (!(m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes.empty()) && operation == "Add")
        {
            int matmulIndex = static_cast<int> (m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes[0]);
            AddFullyConnected(m_Graph->node(matmulIndex), &node);
        }
        else if (m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes.empty()) // node is not part of a fused layer
        {
            auto it = m_ParserFunctions.find(operation);
            if (it != m_ParserFunctions.end())
            {
                auto func = it->second;
                (this->*func)(node);
            }
            else
            {
                throw ParseException(fmt::format("Unsupported operation {} for node '{}' {}",
                                                 operation,
                                                 node.name(),
                                                 CHECK_LOCATION().AsString()));
            }
        }
    }

    // Making the connections between outputs and inputs of each layer
    for (const auto& tensorCon : m_TensorConnections)
    {
        if (tensorCon.second.outputSlot != nullptr)
        {
            for (size_t inputSlotIdx = 0; inputSlotIdx < tensorCon.second.inputSlots.size(); ++inputSlotIdx)
            {
                tensorCon.second.outputSlot->Connect(*(tensorCon.second.inputSlots[inputSlotIdx]));
            }
        }
    }
}

void OnnxParserImpl::SetupInfo(const google::protobuf::RepeatedPtrField<onnx::ValueInfoProto>* list)
{
    for (auto tensor : *list)
    {
        m_TensorsInfo[tensor.name()] = OnnxTensor();
        m_TensorsInfo[tensor.name()].m_info = std::make_unique<TensorInfo>(ToTensorInfo(tensor));
        m_TensorsInfo[tensor.name()].m_dtype =
            static_cast<onnx::TensorProto::DataType>(tensor.type().tensor_type().elem_type());
    }
}

void OnnxParserImpl::DetectFullyConnected()
{
    m_OutputsFusedAndUsed = std::vector<UsageSummary> (static_cast<size_t>(m_Graph->node_size()), UsageSummary());
    auto matmulAndConstant = [&](const std::string& constInput,
                                 const std::string& matmulInput,
                                 int& nodeIndex)
    {
        auto matmulIt = m_OutputsMap.find(matmulInput);
        if(matmulIt != m_OutputsMap.end() && matmulIt->second.first->op_type() == "MatMul"
            && m_TensorsInfo[constInput].isConstant())
        {
            nodeIndex = matmulIt->second.second;
            return true;
        }
        return false;
    };

    for(int nodeIndex = 0; nodeIndex < m_Graph->node_size(); nodeIndex++)
    {
        const onnx::NodeProto* node = &m_Graph->node(nodeIndex);
        for (const std::string& output : node->output())
        {
            m_OutputsMap[output] = std::make_pair(node, nodeIndex);
        }

        for (const std::string& input : node->input()) // count how many times a node is used as input
        {
            auto matmulIt = m_OutputsMap.find(input);
            if(matmulIt != m_OutputsMap.end()){
                ++m_OutputsFusedAndUsed[static_cast<size_t>(matmulIt->second.second)].inputForNodes; // node used
            }
        }

        if (node->op_type() == "Add")
        {
            int matmulIndex = 0;
            if (matmulAndConstant(node->input(0), node->input(1), matmulIndex) ||
                matmulAndConstant(node->input(1), node->input(0), matmulIndex))
            {
                // matmul and add were fused
                m_OutputsFusedAndUsed[static_cast<size_t>(matmulIndex)].fusedWithNodes
                    .push_back(static_cast<size_t>(nodeIndex));

                m_OutputsFusedAndUsed[static_cast<size_t>(nodeIndex)].fusedWithNodes
                    .push_back(static_cast<size_t>(matmulIndex));
            }
        }
    }

    for (auto output: m_Graph->output()) { // Add usages as output of the graph in count of usages
        auto matmulIt = m_OutputsMap.find(output.name());
        if(matmulIt != m_OutputsMap.end()){
            ++m_OutputsFusedAndUsed[static_cast<size_t>(matmulIt->second.second)].inputForNodes;
        }
    }
}
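
// Example of the fusion bookkeeping above: for the ONNX pattern
//     Y = MatMul(X, W);  Z = Add(Y, B)        // W and B constant
// the MatMul and Add nodes are cross-registered in m_OutputsFusedAndUsed, and
// LoadGraph later emits a single FullyConnected layer for the pair via
// AddFullyConnected (see below). A MatMul whose output is also consumed
// elsewhere keeps its own layer, because its inputForNodes count then exceeds
// the number of fused usages.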

template<typename Location>
void OnnxParserImpl::GetInputAndParam(const onnx::NodeProto& node,
                                      std::string* inputName,
                                      std::string* constName,
                                      const Location& location)
{
    int cstIndex;
    if (m_TensorsInfo[node.input(0)].isConstant())
    {
        cstIndex = 0;
    }
    else if (m_TensorsInfo[node.input(1)].isConstant())
    {
        cstIndex = 1;
    }
    else
    {
        throw ParseException(fmt::format("One of the input tensors ('{}' or '{}') should be constant in node '{}' {}",
                                         node.input(0),
                                         node.input(1),
                                         node.name(),
                                         location.AsString()));
    }
    if(constName)
    {
        *constName = node.input(cstIndex);
    }
    if(inputName)
    {
        *inputName = node.input(!cstIndex);
    }
}

template<typename Location>
void OnnxParserImpl::To1DTensor(const std::string& name, const Location& location)
{
    TensorShape shape = m_TensorsInfo[name].m_info->GetShape();
    std::vector<uint32_t> newShape;
    for(uint i = 0; i < shape.GetNumDimensions() - 1; ++i)
    {
        if(shape[i] != 1)
        {
            throw ParseException(
                fmt::format("Only tensors with shape [1, ..., 1, X] can be converted to 1D and {} {}",
                            TensorInfoAsString(*m_TensorsInfo[name].m_info, name, m_TensorsInfo[name].m_dtype),
                            location.AsString()));
        }
    }
    newShape.push_back(shape[shape.GetNumDimensions() - 1]);

    m_TensorsInfo[name].m_info->SetShape(TensorShape(static_cast<unsigned int>(newShape.size()), newShape.data()));
}
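
// Example for To1DTensor above: a bias stored as [1,1,256] is reshaped to
// [256]; a shape such as [1,2,256] throws, since every dimension except the
// last must be 1.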

void OnnxParserImpl::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node, const Convolution2dDescriptor& convDesc)
{
    ARMNN_ASSERT(node.op_type() == "Conv");

    DepthwiseConvolution2dDescriptor desc;
    desc.m_PadLeft = convDesc.m_PadLeft;
    desc.m_PadRight = convDesc.m_PadRight;
    desc.m_PadTop = convDesc.m_PadTop;
    desc.m_PadBottom = convDesc.m_PadBottom;
    desc.m_StrideX = convDesc.m_StrideX;
    desc.m_StrideY = convDesc.m_StrideY;
    desc.m_BiasEnabled = convDesc.m_BiasEnabled;

    armnn::IConnectableLayer* layer;

    // weights come in as [O,1,H,W] from ONNX and need to be converted to ArmNN's depthwise weights layout [1,H,W,O]
    armnn::PermutationVector perVec {3,0,1,2};
    auto weightTensor = CreateConstTensor(node.input(1), perVec);

    if (node.input_size() == 3)
    {
        if(!m_TensorsInfo[node.input(2)].isConstant())
        {
            throw ParseException(fmt::format("Bias '{}' should be constant in Conv layer '{}' {}",
                                             node.input(2),
                                             node.name(),
                                             CHECK_LOCATION().AsString()));
        }
        desc.m_BiasEnabled = true;
        auto biasTensor = CreateConstTensor(node.input(2));
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          weightTensor.first,
                                                          Optional<ConstTensor>(biasTensor.first),
                                                          node.name().c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          weightTensor.first,
                                                          EmptyOptional(),
                                                          node.name().c_str());
    }
    ARMNN_ASSERT(layer != nullptr);

    auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
                                        { m_TensorsInfo[node.input(0)].m_info->GetShape(),
                                          weightTensor.first.GetInfo().GetShape() });

    layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    RegisterInputSlots(layer, {node.input(0)});

    // register the output connection slots for the layer, connections are made after all layers have been created
    RegisterOutputSlots(layer, {node.output(0)});
}
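
// Worked example for the weight permutation above: a depthwise weight tensor
// arriving as [O,1,H,W] = [16,1,3,3] is remapped by PermutationVector{3,0,1,2}
// (source dimension i moves to destination dimension perVec[i], consistent
// with the [O,1,H,W] -> [1,H,W,O] comment) into the ArmNN layout
// [1,H,W,O] = [1,3,3,16].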

void OnnxParserImpl::AddFullyConnected(const onnx::NodeProto& matmulNode, const onnx::NodeProto* addNode)
{

    // find matmul inputs
    std::string weightName;
    std::string inputName;
    CHECK_VALID_SIZE(static_cast<size_t>(matmulNode.input_size()), 2);
    CHECK_VALID_SIZE(static_cast<size_t>(matmulNode.output_size()), 1);
    VALID_INPUTS(matmulNode, STR_LIST(onnx::TensorProto::FLOAT));

    GetInputAndParam(matmulNode, &inputName, &weightName, CHECK_LOCATION());

    FullyConnectedDescriptor desc;
    desc.m_BiasEnabled = addNode != nullptr;

    IConnectableLayer* layer = nullptr;
    if(desc.m_BiasEnabled)
    {
        // find bias const
        std::string biasName;
        CHECK_VALID_SIZE(static_cast<size_t>(addNode->input_size()), 2);
        CHECK_VALID_SIZE(static_cast<size_t>(addNode->output_size()), 1);
        VALID_INPUTS(*addNode, STR_LIST(onnx::TensorProto::FLOAT));

        GetInputAndParam(*addNode, nullptr, &biasName, CHECK_LOCATION());

        // Output shape is [1, weights[1]] and a 1d vec in ONNX can be [1,X], so we convert biases to "armnn" 1D
        To1DTensor(biasName, CHECK_LOCATION());
        TensorInfo weightInfo = *m_TensorsInfo[weightName].m_info;
        TensorInfo biasInfo = *m_TensorsInfo[biasName].m_info;

        if (weightInfo.GetShape()[1] != biasInfo.GetShape()[0])
        {
            throw ParseException(
                fmt::format("Shape of weights '{}' and bias of following Add node '{}' do not match : {}"
                            " and {} ( /!\\ bias should be a 1D tensor) {}",
                            weightName,
                            addNode->name(),
                            TensorInfoAsString(*m_TensorsInfo[weightName].m_info, weightName,
                                               m_TensorsInfo[weightName].m_dtype),
                            TensorInfoAsString(*m_TensorsInfo[biasName].m_info, biasName,
                                               m_TensorsInfo[biasName].m_dtype),
                            CHECK_LOCATION().AsString()));
        }

        // Just add a FullyConnected layer, weights and biases are handled as inputs now.
        layer = m_Network->AddFullyConnectedLayer(desc, matmulNode.name().c_str());
        ARMNN_ASSERT(layer != nullptr);

        auto outputInfo = ComputeOutputInfo({addNode->output(0)}, layer,
                                            {m_TensorsInfo[inputName].m_info->GetShape(),
                                             m_TensorsInfo[weightName].m_info->GetShape()});
        layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);

        // Add constant layers to store weights/biases and connect them to the FullyConnected layer.
        if(m_TensorsInfo[weightName].isConstant())
        {
            IConnectableLayer* weightsLayer = m_Network->AddConstantLayer(CreateConstTensor(weightName).first);

            weightInfo.SetConstant();
            weightsLayer->GetOutputSlot(0).SetTensorInfo(weightInfo);
            weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        }

        if(m_TensorsInfo[biasName].isConstant())
        {
            IConnectableLayer* biasLayer = m_Network->AddConstantLayer(CreateConstTensor(biasName).first);

            biasInfo.SetConstant();
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
        }

        RegisterInputSlots(layer, {inputName, weightName, biasName});
        RegisterOutputSlots(layer, {addNode->output(0)});
    }
    else
    {
        layer = m_Network->AddFullyConnectedLayer(desc, matmulNode.name().c_str());
        ARMNN_ASSERT(layer != nullptr);

        auto outputInfo = ComputeOutputInfo({matmulNode.output(0)}, layer,
                                            {m_TensorsInfo[inputName].m_info->GetShape(),
                                             m_TensorsInfo[weightName].m_info->GetShape()});
        layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);

        // Add a constant layer to store weights and connect it to the FullyConnected layer.
        if(m_TensorsInfo[weightName].isConstant())
        {
            TensorInfo weightInfo = *m_TensorsInfo[weightName].m_info;
            IConnectableLayer* weightsLayer = m_Network->AddConstantLayer(CreateConstTensor(weightName).first);

            weightInfo.SetConstant();
            weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
            weightsLayer->GetOutputSlot(0).SetTensorInfo(weightInfo);
        }

        RegisterInputSlots(layer, {inputName, weightName});
        RegisterOutputSlots(layer, {matmulNode.output(0)});
    }
}

void OnnxParserImpl::AddPoolingLayer(const onnx::NodeProto& node, Pooling2dDescriptor& desc)
{

    CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 1);
    CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);

    VALID_INPUTS(node, STR_LIST(onnx::TensorProto::FLOAT));

    std::vector<uint32_t> kernel_shape = ReadMandatoryNodeUint32ListAttribute(node, "kernel_shape"); // size of the pooling window
    std::vector<uint32_t> strides = ReadOptionalNodeUint32ListAttribute(node, "strides");
    std::vector<uint32_t> pads = ReadOptionalNodeUint32ListAttribute(node, "pads");

    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_PoolWidth = kernel_shape[1];
    desc.m_PoolHeight = kernel_shape[0];

    if(strides.empty())
    {
        desc.m_StrideX = 1;
        desc.m_StrideY = 1;
    }
    else
    {
        desc.m_StrideX = strides[1];
        desc.m_StrideY = strides[0];
    }

    // Check new padding version first
    if(pads.empty())
    {
        // Check deprecated version
        std::string paddingString = ReadOptionalNodeStringAttribute(node, "auto_pad");
        if(paddingString != "VALID" && paddingString != "" && paddingString != "NOTSET")
        {
            bool isUpper;
            if (paddingString == "SAME_LOWER")
            {
                isUpper = false;
            }
            else if (paddingString == "SAME_UPPER")
            {
                isUpper = true;
            }
            else
            {
                throw ParseException(fmt::format("Invalid auto_pad attribute for node {}. "
                                                 "Only SAME_UPPER, SAME_LOWER or VALID supported and found {} {}",
                                                 node.name(),
                                                 paddingString,
                                                 CHECK_LOCATION().AsString()));
            }
            auto inputInfo = *m_TensorsInfo[node.input(0)].m_info;
            uint32_t inputHeight = inputInfo.GetShape()[2];
            uint32_t inputWidth = inputInfo.GetShape()[3];
            CalcPadding(inputHeight,
                        desc.m_PoolHeight,
                        desc.m_StrideY,
                        1u,
                        &desc.m_PadTop,
                        &desc.m_PadBottom,
                        isUpper);
            CalcPadding(inputWidth,
                        desc.m_PoolWidth,
                        desc.m_StrideX,
                        1u,
                        &desc.m_PadLeft,
                        &desc.m_PadRight,
                        isUpper);
        }
    }
    else
    {
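        // ONNX 'pads' for a 2D operation is laid out [top, left, bottom, right].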
        desc.m_PadTop = pads[0];
        desc.m_PadLeft = pads[1];
        desc.m_PadBottom = pads[2];
        desc.m_PadRight = pads[3];
    }

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str());
    ARMNN_ASSERT(layer != nullptr);

    auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    RegisterInputSlots(layer, {node.input(0)});

    // register the output connection slots for the layer, connections are made after all layers have been created
    RegisterOutputSlots(layer, {node.output(0)});
}

std::pair<std::string, std::string> OnnxParserImpl::AddPrepareBroadcast(const std::string& input0,
                                                                        const std::string& input1)
{
    std::pair<std::string, std::string> inputs = std::make_pair(input0, input1);

    TensorShape input0Shape = m_TensorsInfo[input0].m_info->GetShape();
    TensorShape input1Shape = m_TensorsInfo[input1].m_info->GetShape();

    if(input1Shape.GetNumDimensions() < input0Shape.GetNumDimensions())
    {
        auto outputName = fmt::format("reshape_output_{}", input1);
        PrependForBroadcast(outputName, input1, input0);
        inputs.second = outputName;
    }
    else if(input0Shape.GetNumDimensions() < input1Shape.GetNumDimensions())
    {
        auto outputName = fmt::format("reshape_output_{}", input0);
        PrependForBroadcast(outputName, input0, input1);
        inputs.first = outputName;
    }
    return inputs;
}

void OnnxParserImpl::CreateConstantLayer(const std::string& tensorName, const std::string& layerName)
{
    auto armnnTensor = CreateConstTensor(tensorName);

    IConnectableLayer* layer = m_Network->AddConstantLayer(armnnTensor.first, layerName.c_str());
    layer->GetOutputSlot(0).SetTensorInfo(armnnTensor.first.GetInfo());
    RegisterOutputSlots(layer, {tensorName});
}

void OnnxParserImpl::CreateReshapeLayer(const std::string& inputName,
                                        const std::string& outputName,
                                        const std::string& layerName)
{
    const TensorInfo outputTensorInfo = *m_TensorsInfo[outputName].m_info;
    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();

    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    RegisterInputSlots(layer, {inputName});

    // register the output connection slots for the layer, connections are made after all layers have been created
    RegisterOutputSlots(layer, {outputName});
}

void OnnxParserImpl::ParseActivation(const onnx::NodeProto& node, const armnn::ActivationFunction func)
{
    CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 1, 3);
    CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);

    VALID_INPUTS(node, STR_LIST(onnx::TensorProto::FLOAT));

    ActivationDescriptor desc;
    desc.m_Function = func;

    if (func == ActivationFunction::BoundedReLu)
    {
        desc.m_A = node.input(2).empty() ? std::numeric_limits<float>::max() : std::stof(node.input(2));
        desc.m_B = node.input(1).empty() ? std::numeric_limits<float>::lowest() : std::stof(node.input(1));
    }

    IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, node.name().c_str());
    ARMNN_ASSERT(layer != nullptr);

    auto outputInfo = ComputeOutputInfo({ node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    RegisterInputSlots(layer, {node.input(0)});

    // register the output connection slots for the layer, connections are made after all layers have been created
    RegisterOutputSlots(layer, {node.output(0)});
}
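
// Note on the BoundedReLu case above: ONNX Clip (opset 11 and later) carries
// min/max as optional inputs 1 and 2 rather than attributes; they map onto
// ArmNN's ActivationDescriptor as m_B (lower bound) and m_A (upper bound),
// defaulting to the widest representable float range when absent.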

void OnnxParserImpl::ParseClip(const onnx::NodeProto& node)
{
    ParseActivation(node, ActivationFunction::BoundedReLu);
}

void OnnxParserImpl::ParseSigmoid(const onnx::NodeProto& node)
{
    ParseActivation(node, ActivationFunction::Sigmoid);
}

void OnnxParserImpl::ParseTanh(const onnx::NodeProto& node)
{
    ParseActivation(node, ActivationFunction::TanH);
}

void OnnxParserImpl::ParseRelu(const onnx::NodeProto& node)
{
    ParseActivation(node, ActivationFunction::ReLu);
}

void OnnxParserImpl::ParseLeakyRelu(const onnx::NodeProto& node)
{
    ParseActivation(node, ActivationFunction::LeakyReLu);
}

void OnnxParserImpl::ParseAdd(const onnx::NodeProto& node)
{
    CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 2);
    CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);

    VALID_INPUTS(node, STR_LIST(onnx::TensorProto::FLOAT));

    // TODO: unify broadcast validation code across layers
    // tracked by: IVGCVSW-1576

    // Checking broadcast compatibility: only scalar or 1D tensors
    auto inputs = AddPrepareBroadcast(node.input(0), node.input(1));
    auto input0 = *m_TensorsInfo[inputs.first].m_info;
    auto input1 = *m_TensorsInfo[inputs.second].m_info;
    ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());

    unsigned int numDims = input0.GetNumDimensions();
    for (unsigned int i = 0; i < numDims; i++)
    {
        unsigned int dim0 = input0.GetShape()[i];
        unsigned int dim1 = input1.GetShape()[i];
        if (dim0 != dim1 && dim0 != 1 && dim1 != 1)
        {
            throw ParseException(
                fmt::format("Broadcast is only supported for scalar or 1D tensors in Add node '{}'. "
                            "Input dimensions should either match or one should be of size 1 and here, "
                            "{} and {} {}",
                            node.name(),
                            TensorInfoAsString(*m_TensorsInfo[inputs.first].m_info, inputs.first,
                                               m_TensorsInfo[inputs.first].m_dtype),
                            TensorInfoAsString(*m_TensorsInfo[inputs.second].m_info, inputs.second,
                                               m_TensorsInfo[inputs.second].m_dtype),
                            CHECK_LOCATION().AsString()));
        }
    }

    IConnectableLayer* layer = m_Network->AddAdditionLayer(node.name().c_str());
    ARMNN_ASSERT(layer != nullptr);

    auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
                                        { m_TensorsInfo[inputs.first].m_info->GetShape(),
                                          m_TensorsInfo[inputs.second].m_info->GetShape() });
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);

    // register the input connections -> for constant inputs, create a matching constant layer
    if(m_TensorsInfo[inputs.first].isConstant()) {
        CreateConstantLayer(inputs.first, fmt::format("Add:constant_of_{}", node.input(0)));
    }
    if(m_TensorsInfo[inputs.second].isConstant()) {
        CreateConstantLayer(inputs.second, fmt::format("Add:constant_of_{}", node.input(1)));
    }
    RegisterInputSlots(layer, {inputs.first, inputs.second});

    // register the output connection
    RegisterOutputSlots(layer, {node.output(0)});
}
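
// Broadcast example for ParseAdd above: adding a [256] bias to a [1,3,256]
// tensor first reshapes the bias to rank 3 (here [1,1,256]) via
// AddPrepareBroadcast and the PrependForBroadcast helper (defined elsewhere
// in this file), after which each dimension either matches or is 1, so the
// AdditionLayer can broadcast it.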

void OnnxParserImpl::ParseAveragePool(const onnx::NodeProto& node)
{
    Pooling2dDescriptor desc;
    desc.m_PoolType = PoolingAlgorithm::Average;

    uint32_t count_include_pad = 0;
    count_include_pad = ReadOptionalNodeUint32Attribute(node, "count_include_pad");
    if(count_include_pad) {
        desc.m_PaddingMethod = PaddingMethod::IgnoreValue;
    }
    AddPoolingLayer(node, desc);
}

void OnnxParserImpl::ParseBatchNormalization(const onnx::NodeProto& node)
{
    // The momentum and spatial attributes are ignored.

    CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 5);
    CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);

    VALID_INPUTS(node, STR_LIST(onnx::TensorProto::FLOAT));
    for(int ind = 1; ind < node.input_size(); ++ind)
    {
        auto tensor = node.input(ind);
        if(! m_TensorsInfo[tensor].isConstant())
        {
            throw ParseException(
                fmt::format("Input tensor '{}' should be constant in BatchNormalization node '{}' {}",
                            tensor,
                            node.name(),
                            CHECK_LOCATION().AsString()));
        }
    }

    float epsilon = ReadOptionalNodeFloatAttribute(node, "epsilon", 1e-5f);
    BatchNormalizationDescriptor desc;
    desc.m_Eps = epsilon;

    auto scaleTensor = CreateConstTensor(node.input(1));
    auto biasTensor = CreateConstTensor(node.input(2));
    auto meanTensor = CreateConstTensor(node.input(3));
    auto varTensor = CreateConstTensor(node.input(4));

    IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
                                                                     meanTensor.first,
                                                                     varTensor.first,
                                                                     biasTensor.first,
                                                                     scaleTensor.first,
                                                                     node.name().c_str());
    ARMNN_ASSERT(layer != nullptr);

    auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);

    RegisterInputSlots(layer, {node.input(0)}); // don't register constant inputs

    // register the output connection
    RegisterOutputSlots(layer, {node.output(0)});
}

void OnnxParserImpl::ParseConstant(const onnx::NodeProto& node)
{
    CHECK_VALID_SIZE(static_cast<size_t>(node.attribute_size()), 1);
    if (!node.attribute(0).has_t())
    {
        throw ParseException(fmt::format("Value not found for Constant node '{}' {}",
                                         node.name(),
                                         CHECK_LOCATION().AsString()));
    }
    const onnx::TensorProto& onnxTensor = node.attribute(0).t();

    // ONNX can have Float16 and double constant nodes but ArmNN only supports float32
    CHECK_VALID_DATATYPE(node.name(), onnxTensor.name(),
                         static_cast<onnx::TensorProto::DataType>(onnxTensor.data_type()), onnx::TensorProto::FLOAT);

    // Register this as a m_ConstParam so we know we can use it as a constant param in future layers.
    m_TensorsInfo[node.output(0)].m_tensor = std::make_unique<const onnx::TensorProto>(onnxTensor);
    m_TensorsInfo[node.output(0)].m_info = std::make_unique<TensorInfo>(ToTensorInfo(onnxTensor));
    m_TensorsInfo[node.output(0)].m_dtype = static_cast<onnx::TensorProto::DataType>(onnxTensor.data_type());

    CreateConstantLayer(node.output(0), node.name());
}

void OnnxParserImpl::ParseConv(const onnx::NodeProto& node)
{
    CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 2, 3); // input, weight, (bias)
    CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);

    VALID_INPUTS(node, STR_LIST(onnx::TensorProto::FLOAT));

    if(m_TensorsInfo[node.input(0)].m_info->GetNumDimensions() != 4)
    {
        throw ParseException(
            fmt::format("ArmNN only supports 2D convolution, but Conv layer '{}' has input {} {}",
                        node.name(),
                        TensorInfoAsString(*m_TensorsInfo[node.input(0)].m_info, node.input(0),
                                           m_TensorsInfo[node.input(0)].m_dtype),
                        CHECK_LOCATION().AsString()));
    }

    if(!m_TensorsInfo[node.input(1)].isConstant())
    {
        throw ParseException(
            fmt::format("Weights '{}' should be constant in Conv layer '{}' {}",
                        node.input(1),
                        node.name(),
                        CHECK_LOCATION().AsString()));
    }

    auto inputInfo = *m_TensorsInfo[node.input(0)].m_info;

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    // ONNX lists spatial attributes in [height, width] order for NCHW tensors.
    std::vector<uint32_t> strides = ReadOptionalNodeUint32ListAttribute(node, "strides");
    if(strides.empty())
    {
        desc.m_StrideX = 1;
        desc.m_StrideY = 1;
    }
    else
    {
        desc.m_StrideX = strides[1];
        desc.m_StrideY = strides[0];
    }

    std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(node, "dilations");
    if(!dilations.empty())
    {
        desc.m_DilationX = dilations[1];
        desc.m_DilationY = dilations[0];
    }

    std::vector<uint32_t> pads = ReadOptionalNodeUint32ListAttribute(node, "pads");
    // Check the explicit "pads" attribute first.
    if(pads.empty())
    {
        // Fall back to the deprecated "auto_pad" attribute.
        std::string paddingString = ReadOptionalNodeStringAttribute(node, "auto_pad");
        if(paddingString != "VALID" && paddingString != "" && paddingString != "NOTSET")
        {
            bool isUpper;
            if(paddingString == "SAME_LOWER")
            {
                isUpper = false;
            }
            else if (paddingString == "SAME_UPPER")
            {
                isUpper = true;
            }
            else
            {
                throw ParseException(
                    fmt::format("Invalid auto_pad attribute for node {}. Only SAME_UPPER, SAME_LOWER or VALID "
                                "are supported; found {} {}",
                                node.name(),
                                paddingString,
                                CHECK_LOCATION().AsString()));
            }
            uint32_t inputHeight = inputInfo.GetShape()[2];
            uint32_t inputWidth  = inputInfo.GetShape()[3];

            uint32_t weightHeight;
            uint32_t weightWidth;
            std::vector<uint32_t> kernel_shape = ReadOptionalNodeUint32ListAttribute(node, "kernel_shape");
            if (kernel_shape.empty())
            {
                const TensorInfo weightTensorInfo = *m_TensorsInfo[node.input(1)].m_info;
                weightHeight = weightTensorInfo.GetShape()[2];
                weightWidth  = weightTensorInfo.GetShape()[3];
            }
            else
            {
                weightHeight = kernel_shape[0];
                weightWidth  = kernel_shape[1];
            }
            CalcPadding(inputHeight,
                        weightHeight,
                        desc.m_StrideY,
                        desc.m_DilationY,
                        &desc.m_PadTop,
                        &desc.m_PadBottom,
                        isUpper);
            CalcPadding(inputWidth,
                        weightWidth,
                        desc.m_StrideX,
                        desc.m_DilationX,
                        &desc.m_PadLeft,
                        &desc.m_PadRight,
                        isUpper);
        }
    }
    else
    {
        // ONNX "pads" order is [top, left, bottom, right].
        desc.m_PadTop    = pads[0];
        desc.m_PadLeft   = pads[1];
        desc.m_PadBottom = pads[2];
        desc.m_PadRight  = pads[3];
    }
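
    // Worked example for the auto_pad branch above (editorial, illustrative values):
    // SAME padding uses totalPad = (ceil(in / stride) - 1) * stride + kernel - in.
    // With in = 4, kernel = 3, stride = 2 this gives totalPad = 1; SAME_UPPER places
    // the odd element at the end (padBefore = 0, padAfter = 1) while SAME_LOWER
    // places it at the start (padBefore = 1, padAfter = 0).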

    uint32_t group = ReadOptionalNodeUint32Attribute(node, "group", 1);
    if(group > 1)
    {
        if (group > inputInfo.GetShape()[1])
        {
            throw ParseException(
                fmt::format("Error parsing Convolution node: {}. "
                            "The 'group'={} parameter cannot be larger than the "
                            "number of channels in the input shape={} (in NCHW format). {}",
                            node.name(),
                            group,
                            inputInfo.GetShape()[1],
                            CHECK_LOCATION().AsString()));
        }
        else if (group == inputInfo.GetShape()[1])
        {
            // Use a depthwise convolution here, because the number of groups equals
            // the number of input channels.
            AddConvLayerWithDepthwiseConv(node, desc);
            return;
        }
        else
        {
            // TODO: split the input by channels into channels/groups separate convolutions
            //       and concatenate the results afterwards
            throw ParseException(fmt::format("Error parsing Convolution node: {}. "
                                             "The 'group'={} parameter should be 1 or equal to the "
                                             "number of channels in the input shape={} (in NCHW format). {}",
                                             node.name(),
                                             group,
                                             inputInfo.GetShape()[1],
                                             CHECK_LOCATION().AsString()));
        }
    }

    armnn::IConnectableLayer* layer;
    auto weightTensor = CreateConstTensor(node.input(1));

    if (node.input_size() == 3)
    {
        if(!m_TensorsInfo[node.input(2)].isConstant())
        {
            throw ParseException(fmt::format("Bias '{}' should be constant in Conv layer '{}' {}",
                                             node.input(2),
                                             node.name(),
                                             CHECK_LOCATION().AsString()));
        }
        desc.m_BiasEnabled = true;
        auto biasTensor = CreateConstTensor(node.input(2));
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 weightTensor.first,
                                                 Optional<ConstTensor>(biasTensor.first),
                                                 node.name().c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 weightTensor.first,
                                                 EmptyOptional(),
                                                 node.name().c_str());
    }
    ARMNN_ASSERT(layer != nullptr);

    auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
                                        { m_TensorsInfo[node.input(0)].m_info->GetShape(),
                                          m_TensorsInfo[node.input(1)].m_info->GetShape() });
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    RegisterInputSlots(layer, {node.input(0)});

    // register the output connection slots for the layer, connections are made after all layers have been created
    RegisterOutputSlots(layer, {node.output(0)});
}
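
// Worked example for ParseConv above (editorial, illustrative values): an input of
// shape [1, 3, 224, 224] with weights [64, 3, 7, 7], strides of 2 and pads of 3 on
// every side yields floor((224 + 3 + 3 - 7) / 2) + 1 = 112 per spatial axis, i.e.
// an output of shape [1, 64, 112, 112], following standard convolution arithmetic.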

void OnnxParserImpl::ParseFlatten(const onnx::NodeProto& node)
{
    CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 1);
    CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);

    CHECK_VALID_DATATYPE(node.name(), node.input(0),
                         m_TensorsInfo[node.input(0)].m_dtype,
                         onnx::TensorProto::FLOAT);

    int64_t axis = ReadOptionalNodeInt64Attribute(node, "axis", 1);
    TensorShape inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();

    /// Negative axis conversion
    if (axis < 0)
    {
        axis += inputShape.GetNumDimensions();
    }

    /// Check that the axis is within the tensor's dimensions
    if (axis < 0 || axis >= inputShape.GetNumDimensions())
    {
        throw ParseException(fmt::format("Axis '{}' invalid. Tensor has '{}' dimensions in FlattenLayer '{}'",
                                         axis, inputShape.GetNumDimensions(), node.name()));
    }

    /// If the chosen axis is 0, dimension1 will always be 1 in the output; both default to 1 because 0 is invalid
    uint dimension1{1};
    uint dimension2{1};
    uint i{0};

    /// dimension1 = (d_0 * d_1 ... d_(axis-1))
    for (i = 0; i < axis; i++)
    {
        dimension1 *= inputShape[i];
    }

    /// dimension2 = (d_axis * d_(axis+1) ... d_n)
    for (i = static_cast<uint>(axis); i < inputShape.GetNumDimensions(); i++)
    {
        dimension2 *= inputShape[i];
    }

    TensorShape outputShape{dimension1, dimension2};

    auto outInfo = ComputeReshapeInfo(outputShape, inputShape, node.output(0));
    m_TensorsInfo[node.output(0)].m_info = std::make_unique<TensorInfo>(outInfo);
    CreateReshapeLayer(node.input(0), node.output(0), node.name());
}
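
// Worked example for ParseFlatten above (editorial, illustrative values): flattening
// an input of shape [2, 3, 4, 5] with axis = 2 gives dimension1 = 2 * 3 = 6 and
// dimension2 = 4 * 5 = 20, so the output shape is [6, 20].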

void OnnxParserImpl::ParseGlobalAveragePool(const onnx::NodeProto& node)
{
    Pooling2dDescriptor desc = Pooling2dDescriptor();
    desc.m_PoolType = PoolingAlgorithm::Average;

    // The kernel size is the same as the input's spatial dimensions (NCHW).
    TensorShape inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();
    desc.m_PoolWidth  = inputShape[3];
    desc.m_PoolHeight = inputShape[2];

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str());
    ARMNN_ASSERT(layer != nullptr);

    auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {inputShape});
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    RegisterInputSlots(layer, {node.input(0)});

    // register the output connection slots for the layer, connections are made after all layers have been created
    RegisterOutputSlots(layer, {node.output(0)});
}
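
// Worked example for ParseGlobalAveragePool above (editorial, illustrative shapes):
// an NCHW input of shape [1, 512, 7, 7] is pooled with a 7x7 kernel, producing one
// average per channel and an output of shape [1, 512, 1, 1].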

void OnnxParserImpl::ParseMaxPool(const onnx::NodeProto& node)
{
    Pooling2dDescriptor desc;
    desc.m_PoolType = PoolingAlgorithm::Max;
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    AddPoolingLayer(node, desc);
}

void OnnxParserImpl::ParseReshape(const onnx::NodeProto& node)
{
    CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 2);
    CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);

    CHECK_VALID_DATATYPE(node.name(), node.input(0),
                         m_TensorsInfo[node.input(0)].m_dtype,
                         onnx::TensorProto::FLOAT); //input
    CHECK_VALID_DATATYPE(node.name(), node.input(1),
                         m_TensorsInfo[node.input(1)].m_dtype,
                         onnx::TensorProto::INT64); //shape

    if(!m_TensorsInfo[node.input(1)].isConstant())
    {
        throw ParseException(fmt::format("Shape '{}' should be constant in Reshape layer '{}' {}",
                                         node.input(1),
                                         node.name(),
                                         CHECK_LOCATION().AsString()));
    }

    if(m_TensorsInfo[node.input(0)].isConstant())
    {
        // Make a new constant tensor: copy the data to the output tensor, whose shape is already correct.
        if(m_TensorsInfo.count(node.output(0)) == 0)
        {
            m_TensorsInfo[node.output(0)] = OnnxTensor();
        }
        m_TensorsInfo[node.output(0)].m_tensor =
            std::make_unique<onnx::TensorProto>(*m_TensorsInfo[node.input(0)].m_tensor);
    }
    else
    {
        TensorShape inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();

        if(m_TensorsInfo.count(node.output(0)) == 0 || m_TensorsInfo[node.output(0)].m_info == nullptr)
        {
            uint64_t dims = static_cast<uint64_t>(m_TensorsInfo[node.input(1)].m_tensor->int64_data_size());

            // Build the target shape from the INT64 shape tensor. A vector is used here
            // so that shape tensors with more than two entries are handled correctly.
            std::vector<unsigned int> targetDims(dims, 1);
            for(uint i = 0; i < dims; i++)
            {
                int val = CHECKED_INT32(m_TensorsInfo[node.input(1)].m_tensor->int64_data(static_cast<int>(i)));
                targetDims[i] = static_cast<unsigned int>(val);
            }
            TensorShape targetShape(static_cast<unsigned int>(dims), targetDims.data());

            auto outInfo = ComputeReshapeInfo(targetShape, inputShape, node.output(0));
            m_TensorsInfo[node.output(0)].m_info = std::make_unique<TensorInfo>(outInfo);
        }

        CreateReshapeLayer(node.input(0), node.output(0), node.name());
    }
}
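
// Worked example for ParseReshape above (editorial, illustrative values): a Reshape
// node whose constant INT64 shape input holds [6, 20] turns a non-constant float
// input of shape [2, 3, 4, 5] into a [6, 20] output via a reshape layer; if the
// data input is itself constant, no layer is added and the constant is simply
// re-registered under the output name.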

void OnnxParserImpl::PrependForBroadcast(const std::string& outputName,
                                         const std::string& input0,
                                         const std::string& input1)
{
    // input0 is reshaped to have the same number of dimensions as input1.
    TensorInfo outputTensorInfo = TensorInfo(*m_TensorsInfo[input0].m_info);

    TensorShape input0Shape = m_TensorsInfo[input0].m_info->GetShape();
    TensorShape input1Shape = m_TensorsInfo[input1].m_info->GetShape();

    uint32_t diff = input1Shape.GetNumDimensions() - input0Shape.GetNumDimensions();
    std::vector<uint32_t> newShape;
    while(diff > 0)
    {
        newShape.push_back(1);
        diff--;
    }
    for (uint dim = 0; dim < input0Shape.GetNumDimensions(); ++dim)
    {
        newShape.push_back(input0Shape[dim]);
    }
    outputTensorInfo.SetShape(TensorShape(static_cast<unsigned int>(newShape.size()), newShape.data()));

    // add the new tensor to m_TensorsInfo
    m_TensorsInfo[outputName] = OnnxTensor();
    m_TensorsInfo[outputName].m_info = std::make_unique<TensorInfo>(outputTensorInfo);

    // add a reshape layer if the parent was not constant...
    if(!m_TensorsInfo[input0].isConstant())
    {
        CreateReshapeLayer(input0, outputName, fmt::format("Add:reshapeOf{}", input0));
    }
    else // make it constant and it will be created in Add
    {
        m_TensorsInfo[outputName].m_tensor = std::make_unique<onnx::TensorProto>(*m_TensorsInfo[input0].m_tensor);
    }
}
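
// Worked example for PrependForBroadcast above (editorial, illustrative shapes):
// for input0 of shape [3] and input1 of shape [2, 5, 3], input0 is re-registered
// under outputName with shape [1, 1, 3], so a following elementwise layer can
// broadcast it against input1.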

void OnnxParserImpl::SetupInputLayers()
{
    // Find the user inputs (non-constant graph inputs) and add an input layer for each.
    for(int inputIndex = 0; inputIndex < m_Graph->input_size(); ++inputIndex)
    {
        auto input = m_Graph->input(inputIndex);
        if (!m_TensorsInfo[input.name()].isConstant())
        {
            IConnectableLayer* layer =
                m_Network->AddInputLayer(static_cast<armnn::LayerBindingId>(inputIndex), input.name().c_str());
            auto tensorInfo = ToTensorInfo(input);
            layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

            RegisterOutputSlots(layer, { input.name() });
        }
    }
}

void OnnxParserImpl::SetupOutputLayers()
{
    if(m_Graph->output_size() == 0)
    {
        throw ParseException(fmt::format("The given model does not have any outputs {}",
                                         CHECK_LOCATION().AsString()));
    }

    for(int outputIndex = 0; outputIndex < m_Graph->output_size(); ++outputIndex)
    {
        IConnectableLayer* layer =
            m_Network->AddOutputLayer(static_cast<armnn::LayerBindingId>(outputIndex),
                                      m_Graph->output(outputIndex).name().c_str());

        RegisterInputSlots(layer, { m_Graph->output(outputIndex).name() });
    }
}

void OnnxParserImpl::RegisterInputSlots(IConnectableLayer* layer, const std::vector<std::string>& tensorIds)
{
    ARMNN_ASSERT(layer != nullptr);
    if (tensorIds.size() != layer->GetNumInputSlots())
    {
        throw ParseException(
            fmt::format("The number of tensor inputs ({}) does not match the number expected ({}) {}",
                        tensorIds.size(),
                        layer->GetNumInputSlots(),
                        CHECK_LOCATION().AsString()));
    }

    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
    {
        std::string tensorId = tensorIds[slotIndex];
        armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));

        auto it = m_TensorConnections.find(tensorId);

        if (it == m_TensorConnections.end())
        {
            // First time seeing this tensor, we need to map it
            m_TensorConnections[tensorId] = TensorSlots();
        }
        m_TensorConnections[tensorId].inputSlots.push_back(slot);
    }
}

void OnnxParserImpl::RegisterOutputSlots(IConnectableLayer* layer, const std::vector<std::string>& tensorIds)
{
    ARMNN_ASSERT(layer != nullptr);
    if (tensorIds.size() != layer->GetNumOutputSlots())
    {
        throw ParseException(
            fmt::format("The number of tensor outputs ({}) does not match the number expected ({}) {}",
                        tensorIds.size(),
                        layer->GetNumOutputSlots(),
                        CHECK_LOCATION().AsString()));
    }

    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
    {
        std::string tensorId = tensorIds[slotIndex];
        armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));

        auto it = m_TensorConnections.find(tensorId);

        if (it == m_TensorConnections.end())
        {
            // First time seeing this tensor, we need to map it
            m_TensorConnections[tensorId] = TensorSlots();
        }

        TensorSlots& tensorSlots = m_TensorConnections[tensorId];

        // assuming there is only one producer for that tensor
        if (tensorSlots.outputSlot != nullptr)
        {
            throw ParseException(fmt::format("Another layer has already registered itself as the producer of "
                                             "tensor:{} {}",
                                             tensorId,
                                             CHECK_LOCATION().AsString()));
        }
        tensorSlots.outputSlot = slot;
    }
}
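
// Note (editorial): RegisterInputSlots and RegisterOutputSlots only record slots in
// m_TensorConnections; the actual connections are assumed to be made in a later
// pass, once every producer and consumer of each tensor name is known, with the
// single recorded output slot feeding all recorded input slots for that name.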

BindingPointInfo OnnxParserImpl::GetNetworkInputBindingInfo(const std::string& name) const
{
    for(int i = 0; i < m_Graph->input_size(); ++i)
    {
        auto input = m_Graph->input(i);
        if(input.name() == name)
        {
            return std::make_pair(static_cast<armnn::LayerBindingId>(i), ToTensorInfo(input));
        }
    }
    throw InvalidArgumentException(fmt::format("The input layer '{}' does not exist {}",
                                               name, CHECK_LOCATION().AsString()));
}

BindingPointInfo OnnxParserImpl::GetNetworkOutputBindingInfo(const std::string& name) const
{
    for(int i = 0; i < m_Graph->output_size(); ++i)
    {
        auto output = m_Graph->output(i);
        if(output.name() == name)
        {
            return std::make_pair(static_cast<armnn::LayerBindingId>(i), ToTensorInfo(output));
        }
    }
    throw InvalidArgumentException(fmt::format("The output layer '{}' does not exist {}",
                                               name, CHECK_LOCATION().AsString()));
}

std::vector<std::string> OnnxParserImpl::GetInputs(ModelPtr& model)
{
    if(model == nullptr)
    {
        throw InvalidArgumentException(fmt::format("The given model cannot be null {}",
                                                   CHECK_LOCATION().AsString()));
    }

    std::vector<std::string> inputNames;
    std::map<std::string, bool> isConstant;
    for(const auto& tensor : model->graph().initializer())
    {
        isConstant[tensor.name()] = true;
    }
    for(const auto& input : model->graph().input())
    {
        auto it = isConstant.find(input.name());
        if(it == isConstant.end())
        {
            inputNames.push_back(input.name());
        }
    }
    return inputNames;
}
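
// Note (editorial): ONNX models may list initializers (weights and other constants)
// in graph().input() as well, so GetInputs() reports only the names that are not
// also initializers; e.g. a hypothetical graph with inputs {"data", "conv1.weight"}
// and initializer {"conv1.weight"} yields just {"data"}.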

std::vector<std::string> OnnxParserImpl::GetOutputs(ModelPtr& model)
{
    if(model == nullptr)
    {
        throw InvalidArgumentException(fmt::format("The given model cannot be null {}",
                                                   CHECK_LOCATION().AsString()));
    }

    std::vector<std::string> outputNames;
    for(const auto& output : model->graph().output())
    {
        outputNames.push_back(output.name());
    }
    return outputNames;
}

const std::string OnnxParserImpl::GetVersion()
{
    return ONNX_PARSER_VERSION;
}

} // namespace armnnOnnxParser