//
// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Network.hpp"
#include "Graph.hpp"
#include "Layer.hpp"
#include "DeviceSpec.hpp"
#include "Optimizer.hpp"
#include "SubgraphViewSelector.hpp"
#include "BackendSettings.hpp"
#include "optimizations/All.hpp"
#include "armnnUtils/Filesystem.hpp"
#include "armnn/utility/Timer.hpp"

#include <armnn/backends/TensorHandle.hpp>
#include <armnn/backends/WorkloadFactory.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/TensorHandleFactoryRegistry.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <client/include/IProfilingService.hpp>

#include <common/include/ProfilingGuid.hpp>

#include <fmt/format.h>

#include <fcntl.h>
#include <algorithm>
#include <memory>
#include <vector>
#include <armnn/ArmNN.hpp>

namespace armnn
{

INetwork::INetwork(NetworkOptions networkOptions) : pNetworkImpl(new NetworkImpl(networkOptions)) {}

INetwork::~INetwork() = default;

OptimizerOptionsOpaque::OptimizerOptionsOpaque()
    : p_OptimizerOptionsImpl(std::make_unique<OptimizerOptionsOpaqueImpl>())
{
}

OptimizerOptionsOpaque::OptimizerOptionsOpaque(OptimizerOptionsOpaque const& other)
    : p_OptimizerOptionsImpl(std::make_unique<OptimizerOptionsOpaqueImpl>(*other.p_OptimizerOptionsImpl))
{
}

OptimizerOptionsOpaque::~OptimizerOptionsOpaque() = default;

OptimizerOptionsOpaque::OptimizerOptionsOpaque(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16,
                                               bool importEnabled, ModelOptions modelOptions, bool exportEnabled,
                                               bool debugToFile)
    : p_OptimizerOptionsImpl(std::make_unique<OptimizerOptionsOpaqueImpl>(reduceFp32ToFp16, debug, reduceFp32ToBf16,
                                                                          importEnabled, modelOptions,
                                                                          exportEnabled, debugToFile))
{
}

OptimizerOptionsOpaque::OptimizerOptionsOpaque(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16,
                                               ShapeInferenceMethod shapeInferenceMethod,
                                               bool importEnabled, ModelOptions modelOptions, bool exportEnabled,
                                               bool debugToFile, bool allowExpandedDims)
    : p_OptimizerOptionsImpl(std::make_unique<OptimizerOptionsOpaqueImpl>(reduceFp32ToFp16, debug, reduceFp32ToBf16,
                                                                          shapeInferenceMethod, importEnabled,
                                                                          modelOptions, exportEnabled,
                                                                          debugToFile, allowExpandedDims))
{
}

OptimizerOptionsOpaque::OptimizerOptionsOpaque(const OptimizerOptions& OptimizerStruct)
    : p_OptimizerOptionsImpl(std::make_unique<OptimizerOptionsOpaqueImpl>())
{
    p_OptimizerOptionsImpl->m_ImportEnabled = OptimizerStruct.m_ImportEnabled;
    p_OptimizerOptionsImpl->m_shapeInferenceMethod = OptimizerStruct.m_shapeInferenceMethod;
    p_OptimizerOptionsImpl->m_ModelOptions = OptimizerStruct.m_ModelOptions;
    p_OptimizerOptionsImpl->m_ProfilingEnabled = OptimizerStruct.m_ProfilingEnabled;
    p_OptimizerOptionsImpl->m_DebugToFile = OptimizerStruct.m_DebugToFile;
    p_OptimizerOptionsImpl->m_Debug = OptimizerStruct.m_Debug;
    p_OptimizerOptionsImpl->m_ReduceFp32ToFp16 = OptimizerStruct.m_ReduceFp32ToFp16;
    p_OptimizerOptionsImpl->m_ExportEnabled = OptimizerStruct.m_ExportEnabled;
    p_OptimizerOptionsImpl->m_AllowExpandedDims = OptimizerStruct.m_AllowExpandedDims;
    p_OptimizerOptionsImpl->m_ReduceFp32ToBf16 = OptimizerStruct.m_ReduceFp32ToBf16;
}

OptimizerOptionsOpaque& OptimizerOptionsOpaque::operator=(OptimizerOptionsOpaque other)
{
    p_OptimizerOptionsImpl->m_ImportEnabled = other.GetImportEnabled();
    p_OptimizerOptionsImpl->m_shapeInferenceMethod = other.GetShapeInferenceMethod();
    p_OptimizerOptionsImpl->m_ModelOptions = other.GetModelOptions();
    p_OptimizerOptionsImpl->m_ProfilingEnabled = other.GetProfilingEnabled();
    p_OptimizerOptionsImpl->m_DebugToFile = other.GetDebugToFileEnabled();
    p_OptimizerOptionsImpl->m_Debug = other.GetDebugEnabled();
    p_OptimizerOptionsImpl->m_ReduceFp32ToFp16 = other.GetReduceFp32ToFp16();
    p_OptimizerOptionsImpl->m_ExportEnabled = other.GetExportEnabled();
    p_OptimizerOptionsImpl->m_AllowExpandedDims = other.GetAllowExpandedDims();
    p_OptimizerOptionsImpl->m_ReduceFp32ToBf16 = other.GetReduceFp32ToBf16();
    return *this;
}

void OptimizerOptionsOpaque::SetImportEnabled(bool ImportState)
{
    p_OptimizerOptionsImpl->m_ImportEnabled = ImportState;
}

void OptimizerOptionsOpaque::SetExportEnabled(bool ExportState)
{
    p_OptimizerOptionsImpl->m_ExportEnabled = ExportState;
}

void OptimizerOptionsOpaque::SetProfilingEnabled(bool ProfilingState)
{
    p_OptimizerOptionsImpl->m_ProfilingEnabled = ProfilingState;
}

void OptimizerOptionsOpaque::SetDebugEnabled(bool DebugState)
{
    p_OptimizerOptionsImpl->m_Debug = DebugState;
}

void OptimizerOptionsOpaque::SetDebugToFileEnabled(bool DebugFileState)
{
    p_OptimizerOptionsImpl->m_DebugToFile = DebugFileState;
}

void OptimizerOptionsOpaque::SetReduceFp32ToFp16(bool ReduceFp32ToFp16State)
{
    p_OptimizerOptionsImpl->m_ReduceFp32ToFp16 = ReduceFp32ToFp16State;
}

void OptimizerOptionsOpaque::SetShapeInferenceMethod(armnn::ShapeInferenceMethod ShapeInferenceMethodType)
{
    p_OptimizerOptionsImpl->m_shapeInferenceMethod = ShapeInferenceMethodType;
}

void OptimizerOptionsOpaque::SetAllowExpandedDims(bool ExpandedDimsAllowed)
{
    p_OptimizerOptionsImpl->m_AllowExpandedDims = ExpandedDimsAllowed;
}

void OptimizerOptionsOpaque::AddModelOption(armnn::BackendOptions NewModelOption)
{
    p_OptimizerOptionsImpl->m_ModelOptions.push_back(NewModelOption);
}

bool OptimizerOptionsOpaque::GetProfilingEnabled() const
{
    return p_OptimizerOptionsImpl->m_ProfilingEnabled;
}

bool OptimizerOptionsOpaque::GetImportEnabled() const
{
    return p_OptimizerOptionsImpl->m_ImportEnabled;
}

bool OptimizerOptionsOpaque::GetExportEnabled() const
{
    return p_OptimizerOptionsImpl->m_ExportEnabled;
}

bool OptimizerOptionsOpaque::GetReduceFp32ToFp16() const
{
    return p_OptimizerOptionsImpl->m_ReduceFp32ToFp16;
}

bool OptimizerOptionsOpaque::GetReduceFp32ToBf16() const
{
    return p_OptimizerOptionsImpl->m_ReduceFp32ToBf16;
}

bool OptimizerOptionsOpaque::GetDebugEnabled() const
{
    return p_OptimizerOptionsImpl->m_Debug;
}

bool OptimizerOptionsOpaque::GetDebugToFileEnabled() const
{
    return p_OptimizerOptionsImpl->m_DebugToFile;
}

bool OptimizerOptionsOpaque::GetAllowExpandedDims() const
{
    return p_OptimizerOptionsImpl->m_AllowExpandedDims;
}

armnn::ModelOptions OptimizerOptionsOpaque::GetModelOptions() const
{
    return p_OptimizerOptionsImpl->m_ModelOptions;
}

armnn::ShapeInferenceMethod OptimizerOptionsOpaque::GetShapeInferenceMethod() const
{
    return p_OptimizerOptionsImpl->m_shapeInferenceMethod;
}

const std::string OptimizerOptionsOpaque::ToString() const
{
    std::stringstream stream;
    stream << "OptimizerOptions: \n";
    stream << "\tReduceFp32ToFp16: " << p_OptimizerOptionsImpl->m_ReduceFp32ToFp16 << "\n";
    stream << "\tReduceFp32ToBf16: " << p_OptimizerOptionsImpl->m_ReduceFp32ToBf16 << "\n";
    stream << "\tDebug: " << p_OptimizerOptionsImpl->m_Debug << "\n";
    stream << "\tDebug to file: " << p_OptimizerOptionsImpl->m_DebugToFile << "\n";
    stream << "\tShapeInferenceMethod: " <<
           (p_OptimizerOptionsImpl->m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly ?
            "ValidateOnly" : "InferAndValidate") << "\n";
    stream << "\tImportEnabled: " << p_OptimizerOptionsImpl->m_ImportEnabled << "\n";
    stream << "\tExportEnabled: " << p_OptimizerOptionsImpl->m_ExportEnabled << "\n";
    stream << "\tProfilingEnabled: " << p_OptimizerOptionsImpl->m_ProfilingEnabled << "\n";
    stream << "\tAllowExpandedDims: " << p_OptimizerOptionsImpl->m_AllowExpandedDims << "\n";

    stream << "\tModelOptions: \n";
    for (auto optionsGroup : p_OptimizerOptionsImpl->m_ModelOptions)
    {
        for (size_t i = 0; i < optionsGroup.GetOptionCount(); i++)
        {
            const armnn::BackendOptions::BackendOption option = optionsGroup.GetOption(i);
            stream << "\t\tBackend: "  << optionsGroup.GetBackendId() << "\n"
                   << "\t\t\tOption: " << option.GetName() << "\n"
                   << "\t\t\tValue: "  << std::string(option.GetValue().ToString()) << "\n";
        }
    }

    return stream.str();
}

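// Illustrative usage sketch (comment only, not part of this file's build): a caller
// might configure the options exposed above before invoking armnn::Optimize().
// The "FastMathEnabled" option name is shown purely as an example backend option.
//
//     OptimizerOptionsOpaque options;
//     options.SetReduceFp32ToFp16(true);   // run FP32 graphs through FP16 kernels where supported
//     options.SetDebugEnabled(false);
//     options.AddModelOption(BackendOptions("GpuAcc", {{"FastMathEnabled", true}}));
//     ARMNN_LOG(info) << options.ToString();   // dump the effective configuration
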
Status INetwork::PrintGraph()
{
    return pNetworkImpl->PrintGraph();
}

IConnectableLayer* INetwork::AddInputLayer(LayerBindingId id, const char* name)
{
    return pNetworkImpl->AddInputLayer(id, name);
}

IConnectableLayer* INetwork::AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
                                               const char* name)
{
    return pNetworkImpl->AddArgMinMaxLayer(desc, name);
}

IConnectableLayer* INetwork::AddCastLayer(const char* name)
{
    return pNetworkImpl->AddCastLayer(name);
}

IConnectableLayer* INetwork::AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
                                                const char* name)
{
    return pNetworkImpl->AddComparisonLayer(comparisonDescriptor, name);
}

IConnectableLayer* INetwork::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
                                            const char* name)
{
    return pNetworkImpl->AddConcatLayer(concatDescriptor, name);
}

IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                   const char* name)
{
    return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor, name);
}

IConnectableLayer* INetwork::AddConvolution3dLayer(const Convolution3dDescriptor& convolution3dDescriptor,
                                                   const char* name)
{
    return pNetworkImpl->AddConvolution3dLayer(convolution3dDescriptor, name);
}

IConnectableLayer* INetwork::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
                                                  const char* name)
{
    return pNetworkImpl->AddDepthToSpaceLayer(depthToSpaceDescriptor, name);
}

IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const char* name)
{
    return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, name);
}

IConnectableLayer* INetwork::AddDequantizeLayer(const char* name)
{
    return pNetworkImpl->AddDequantizeLayer(name);
}

IConnectableLayer* INetwork::AddDetectionPostProcessLayer(
    const DetectionPostProcessDescriptor& descriptor,
    const ConstTensor& anchors,
    const char* name)
{
    return pNetworkImpl->AddDetectionPostProcessLayer(descriptor, anchors, name);
}

IConnectableLayer* INetwork::AddElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& elementwiseBinaryDescriptor,
                                                       const char* name)
{
    return pNetworkImpl->AddElementwiseBinaryLayer(elementwiseBinaryDescriptor, name);
}

IConnectableLayer* INetwork::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
                                                      const char* name)
{
    return pNetworkImpl->AddElementwiseUnaryLayer(elementwiseUnaryDescriptor, name);
}

IConnectableLayer* INetwork::AddFillLayer(const FillDescriptor& fillDescriptor,
                                          const char* name)
{
    return pNetworkImpl->AddFillLayer(fillDescriptor, name);
}

IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                    const char* name)
{
    return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, name);
}

IConnectableLayer* INetwork::AddFusedLayer(const FusedDescriptor& fusedDescriptor,
                                           const char* name)
{
    return pNetworkImpl->AddFusedLayer(fusedDescriptor, name);
}

IConnectableLayer* INetwork::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                             const char* name)
{
    return pNetworkImpl->AddPermuteLayer(permuteDescriptor, name);
}

IConnectableLayer* INetwork::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                                    const char* name)
{
    return pNetworkImpl->AddBatchToSpaceNdLayer(batchToSpaceNdDescriptor, name);
}

IConnectableLayer* INetwork::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
                                               const char* name)
{
    return pNetworkImpl->AddPooling2dLayer(pooling2dDescriptor, name);
}

IConnectableLayer* INetwork::AddPooling3dLayer(const Pooling3dDescriptor& pooling3dDescriptor,
                                               const char* name)
{
    return pNetworkImpl->AddPooling3dLayer(pooling3dDescriptor, name);
}

IConnectableLayer* INetwork::AddPrecompiledLayer(const PreCompiledDescriptor& preCompiledDescriptor,
                                                 CompiledBlobPtr compiledBlobPtr,
                                                 const Optional<BackendId>& backend,
                                                 const char* name)
{
    return pNetworkImpl->AddPrecompiledLayer(preCompiledDescriptor, std::move(compiledBlobPtr), backend, name);
}

IConnectableLayer* INetwork::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
                                                const char* name)
{
    return pNetworkImpl->AddActivationLayer(activationDescriptor, name);
}

IConnectableLayer* INetwork::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
                                                   const char* name)
{
    return pNetworkImpl->AddNormalizationLayer(normalizationDescriptor, name);
}

IConnectableLayer* INetwork::AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name)
{
    return pNetworkImpl->AddSliceLayer(sliceDescriptor, name);
}

IConnectableLayer* INetwork::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
                                             const char* name)
{
    return pNetworkImpl->AddSoftmaxLayer(softmaxDescriptor, name);
}

IConnectableLayer* INetwork::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
                                              const char* name)
{
    return pNetworkImpl->AddSplitterLayer(splitterDescriptor, name);
}

IConnectableLayer* INetwork::AddMergeLayer(const char* name)
{
    return pNetworkImpl->AddMergeLayer(name);
}

IConnectableLayer* INetwork::AddAdditionLayer(const char* name)
{
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    return pNetworkImpl->AddAdditionLayer(name);
    ARMNN_NO_DEPRECATE_WARN_END
}

IConnectableLayer* INetwork::AddMultiplicationLayer(const char* name)
{
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    return pNetworkImpl->AddMultiplicationLayer(name);
    ARMNN_NO_DEPRECATE_WARN_END
}

IConnectableLayer* INetwork::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                        const ConstTensor& mean,
                                                        const ConstTensor& variance,
                                                        const ConstTensor& beta,
                                                        const ConstTensor& gamma,
                                                        const char* name)
{
    return pNetworkImpl->AddBatchNormalizationLayer(desc, mean, variance, beta, gamma, name);
}

IConnectableLayer* INetwork::AddRankLayer(const char* name)
{
    return pNetworkImpl->AddRankLayer(name);
}

IConnectableLayer* INetwork::AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
                                            const char* name)
{
    return pNetworkImpl->AddResizeLayer(resizeDescriptor, name);
}

IConnectableLayer* INetwork::AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
                                            const char* name)
{
    return pNetworkImpl->AddReduceLayer(reduceDescriptor, name);
}

IConnectableLayer* INetwork::AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
                                                           const char* name)
{
    return pNetworkImpl->AddInstanceNormalizationLayer(desc, name);
}

IConnectableLayer* INetwork::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                     const char* name)
{
    return pNetworkImpl->AddL2NormalizationLayer(desc, name);
}

IConnectableLayer* INetwork::AddLogSoftmaxLayer(const LogSoftmaxDescriptor& logSoftmaxDescriptor,
                                                const char* name)
{
    return pNetworkImpl->AddLogSoftmaxLayer(logSoftmaxDescriptor, name);
}

IConnectableLayer* INetwork::AddConstantLayer(const ConstTensor& input,
                                              const char* name)
{
    return pNetworkImpl->AddConstantLayer(input, name);
}

IConnectableLayer* INetwork::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                             const char* name)
{
    return pNetworkImpl->AddReshapeLayer(reshapeDescriptor, name);
}

IConnectableLayer* INetwork::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                    const char* name)
{
    return pNetworkImpl->AddSpaceToBatchNdLayer(spaceToBatchNdDescriptor, name);
}

IConnectableLayer* INetwork::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
                                                  const char* name)
{
    return pNetworkImpl->AddSpaceToDepthLayer(spaceToDepthDescriptor, name);
}

IConnectableLayer* INetwork::AddFloorLayer(const char* name)
{
    return pNetworkImpl->AddFloorLayer(name);
}

IConnectableLayer* INetwork::AddOutputLayer(LayerBindingId id, const char* name)
{
    return pNetworkImpl->AddOutputLayer(id, name);
}

IConnectableLayer* INetwork::AddLstmLayer(const LstmDescriptor& descriptor,
                                          const LstmInputParams& params,
                                          const char* name)
{
    return pNetworkImpl->AddLstmLayer(descriptor, params, name);
}

IConnectableLayer* INetwork::AddDivisionLayer(const char* name)
{
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    return pNetworkImpl->AddDivisionLayer(name);
    ARMNN_NO_DEPRECATE_WARN_END
}

IConnectableLayer* INetwork::AddSubtractionLayer(const char* name)
{
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    return pNetworkImpl->AddSubtractionLayer(name);
    ARMNN_NO_DEPRECATE_WARN_END
}

IConnectableLayer* INetwork::AddMaximumLayer(const char* name)
{
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    return pNetworkImpl->AddMaximumLayer(name);
    ARMNN_NO_DEPRECATE_WARN_END
}

IConnectableLayer* INetwork::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
{
    return pNetworkImpl->AddMeanLayer(meanDescriptor, name);
}

IConnectableLayer* INetwork::AddPadLayer(const PadDescriptor& padDescriptor,
                                         const char* name)
{
    return pNetworkImpl->AddPadLayer(padDescriptor, name);
}

IConnectableLayer* INetwork::AddQuantizeLayer(const char* name)
{
    return pNetworkImpl->AddQuantizeLayer(name);
}

IConnectableLayer* INetwork::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                                  const char* name)
{
    return pNetworkImpl->AddStridedSliceLayer(stridedSliceDescriptor, name);
}

IConnectableLayer* INetwork::AddMinimumLayer(const char* name)
{
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    return pNetworkImpl->AddMinimumLayer(name);
    ARMNN_NO_DEPRECATE_WARN_END
}

IConnectableLayer* INetwork::AddGatherLayer(const GatherDescriptor& descriptor,
                                            const char* name)
{
    return pNetworkImpl->AddGatherLayer(descriptor, name);
}

IConnectableLayer* INetwork::AddGatherNdLayer(const char* name)
{
    return pNetworkImpl->AddGatherNdLayer(name);
}

IConnectableLayer* INetwork::AddSwitchLayer(const char* name)
{
    return pNetworkImpl->AddSwitchLayer(name);
}

IConnectableLayer* INetwork::AddPreluLayer(const char* name)
{
    return pNetworkImpl->AddPreluLayer(name);
}

IConnectableLayer* INetwork::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
                                                            const ConstTensor& weights,
                                                            const Optional<ConstTensor>& biases,
                                                            const char* name)
{
    return pNetworkImpl->AddTransposeConvolution2dLayer(descriptor, weights, biases, name);
}

IConnectableLayer* INetwork::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
                                               const char* name)
{
    return pNetworkImpl->AddTransposeLayer(transposeDescriptor, name);
}

IConnectableLayer* INetwork::AddShapeLayer(const char* name)
{
    return pNetworkImpl->AddShapeLayer(name);
}

IConnectableLayer* INetwork::AddStackLayer(const StackDescriptor& descriptor,
                                           const char* name)
{
    return pNetworkImpl->AddStackLayer(descriptor, name);
}

IConnectableLayer* INetwork::AddStandInLayer(const StandInDescriptor& descriptor,
                                             const char* name)
{
    return pNetworkImpl->AddStandInLayer(descriptor, name);
}

IConnectableLayer* INetwork::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
                                                   const char* name)
{
    return pNetworkImpl->AddQuantizedLstmLayer(params, name);
}

IConnectableLayer* INetwork::AddQLstmLayer(const QLstmDescriptor& descriptor,
                                           const LstmInputParams& params,
                                           const char* name)
{
    return pNetworkImpl->AddQLstmLayer(descriptor, params, name);
}

IConnectableLayer* INetwork::AddLogicalBinaryLayer(const LogicalBinaryDescriptor& descriptor,
                                                   const char* name)
{
    return pNetworkImpl->AddLogicalBinaryLayer(descriptor, name);
}

IConnectableLayer* INetwork::AddUnidirectionalSequenceLstmLayer(
    const UnidirectionalSequenceLstmDescriptor& descriptor,
    const LstmInputParams& params,
    const char* name)
{
    return pNetworkImpl->AddUnidirectionalSequenceLstmLayer(descriptor, params, name);
}

IConnectableLayer* INetwork::AddChannelShuffleLayer(const ChannelShuffleDescriptor& descriptor,
                                                    const char* name)
{
    return pNetworkImpl->AddChannelShuffleLayer(descriptor, name);
}

IConnectableLayer* INetwork::AddBatchMatMulLayer(const BatchMatMulDescriptor& descriptor,
                                                 const char* name)
{
    return pNetworkImpl->AddBatchMatMulLayer(descriptor, name);
}

IConnectableLayer* INetwork::AddReverseV2Layer(const char* name)
{
    return pNetworkImpl->AddReverseV2Layer(name);
}

IConnectableLayer* INetwork::AddTileLayer(const TileDescriptor& descriptor,
                                          const char* name)
{
    return pNetworkImpl->AddTileLayer(descriptor, name);
}

IConnectableLayer* INetwork::AddBroadcastToLayer(const BroadcastToDescriptor& descriptor,
                                                 const char* name)
{
    return pNetworkImpl->AddBroadcastToLayer(descriptor, name);
}

IConnectableLayer* INetwork::AddScatterNdLayer(const ScatterNdDescriptor& descriptor,
                                               const char* name)
{
    return pNetworkImpl->AddScatterNdLayer(descriptor, name);
}

void INetwork::ExecuteStrategy(IStrategy& strategy) const
{
    return pNetworkImpl->ExecuteStrategy(strategy);
}

armnn::INetwork* INetwork::CreateRaw(const NetworkOptions& networkOptions)
{
    return new INetwork(networkOptions);
}

armnn::INetworkPtr INetwork::Create(const NetworkOptions& networkOptions)
{
    return INetworkPtr(CreateRaw(networkOptions), &INetwork::Destroy);
}

void INetwork::Destroy(INetwork* network)
{
    delete network;
}

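// Illustrative usage sketch (comment only): the factory methods above are the
// public entry points for assembling a graph. A minimal network with a single
// activation layer could be built like this; the tensor shape is arbitrary.
//
//     INetworkPtr net = INetwork::Create();
//     IConnectableLayer* input  = net->AddInputLayer(0, "input");
//     ActivationDescriptor reluDesc;
//     reluDesc.m_Function = ActivationFunction::ReLu;
//     IConnectableLayer* relu   = net->AddActivationLayer(reluDesc, "relu");
//     IConnectableLayer* output = net->AddOutputLayer(0, "output");
//     input->GetOutputSlot(0).Connect(relu->GetInputSlot(0));
//     relu->GetOutputSlot(0).Connect(output->GetInputSlot(0));
//     input->GetOutputSlot(0).SetTensorInfo(TensorInfo({1, 4}, DataType::Float32));
//     relu->GetOutputSlot(0).SetTensorInfo(TensorInfo({1, 4}, DataType::Float32));
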
IOptimizedNetwork::IOptimizedNetwork(const IOptimizedNetwork& other, const ModelOptions& modelOptions)
    : pOptimizedNetworkImpl(new OptimizedNetworkImpl(*other.pOptimizedNetworkImpl.get(), modelOptions)) {}

IOptimizedNetwork::IOptimizedNetwork(std::unique_ptr<Graph> graph)
    : pOptimizedNetworkImpl(new OptimizedNetworkImpl(std::move(graph))) {}

IOptimizedNetwork::IOptimizedNetwork(std::unique_ptr<OptimizedNetworkImpl> impl)
    : pOptimizedNetworkImpl(std::move(impl)) {}

IOptimizedNetwork::IOptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions)
    : pOptimizedNetworkImpl(new OptimizedNetworkImpl(std::move(graph), modelOptions)) {}

IOptimizedNetwork::~IOptimizedNetwork() = default;

void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
{
    delete network;
}

Status IOptimizedNetwork::PrintGraph()
{
    return pOptimizedNetworkImpl->PrintGraph();
}

Status IOptimizedNetwork::SerializeToDot(std::ostream& stream) const
{
    return pOptimizedNetworkImpl->SerializeToDot(stream);
}

const std::shared_ptr<IProfiler>& IOptimizedNetwork::GetProfiler() const
{
    return pOptimizedNetworkImpl->GetGraph().GetProfiler();
}

arm::pipe::ProfilingGuid IOptimizedNetwork::GetGuid() const
{
    return pOptimizedNetworkImpl->GetGuid();
}

size_t IOptimizedNetwork::GetNumInputs() const
{
    return pOptimizedNetworkImpl->GetNumInputs();
}

size_t IOptimizedNetwork::GetNumOutputs() const
{
    return pOptimizedNetworkImpl->GetNumOutputs();
}

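// Illustrative usage sketch (comment only, assumes <fstream> and that optNet is
// the IOptimizedNetworkPtr returned by armnn::Optimize()): the accessors above
// can be used to inspect the optimized network, e.g. to dump its graph for
// visualization with Graphviz:
//
//     std::ofstream dotFile("optimized_graph.dot");
//     optNet->SerializeToDot(dotFile);
//     ARMNN_LOG(info) << "inputs: "   << optNet->GetNumInputs()
//                     << ", outputs: " << optNet->GetNumOutputs();
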
Status OptimizedNetworkImpl::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

Status OptimizedNetworkImpl::SerializeToDot(std::ostream& stream) const
{
    return m_Graph->SerializeToDot(stream);
}

size_t OptimizedNetworkImpl::GetNumInputs() const
{
    return m_Graph->GetNumInputs();
}

size_t OptimizedNetworkImpl::GetNumOutputs() const
{
    return m_Graph->GetNumOutputs();
}

void ReportError(const std::string& errorMessage,
                 Optional<std::vector<std::string>&> errorMessages)
{
    std::stringstream fullErrorMessage;
    fullErrorMessage << "ERROR: " << errorMessage;
    ARMNN_LOG(warning) << fullErrorMessage.str();
    if (errorMessages)
    {
        errorMessages.value().push_back(fullErrorMessage.str());
    }
}

void ReportWarning(const std::string& warningMessage,
                   Optional<std::vector<std::string>&> warningMessages)
{
    std::stringstream fullWarningMessage;
    fullWarningMessage << "WARNING: " << warningMessage;
    ARMNN_LOG(warning) << fullWarningMessage.str();
    if (warningMessages)
    {
        warningMessages.value().push_back(fullWarningMessage.str());
    }
}

OptimizationResult ReturnWithError(OptimizationResult res,
                                   const Layer* layer,
                                   const BackendSettings& backendSettings,
                                   Optional<std::vector<std::string>&> errMessages)
{
    std::stringstream failureMsg;
    failureMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
               << " is not supported on any preferred backend " << backendSettings.m_PreferredBackends;
    ReportError(failureMsg.str(), errMessages);

    res.m_Error = true;
    return res;
}

bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
{
    bool noErrors = true;
    unsigned int numOutputs = layer->GetNumOutputSlots();
    for (unsigned int i = 0; i < numOutputs; i++)
    {
        OutputSlot& outputSlot = layer->GetOutputSlot(i);
        TensorInfo info = outputSlot.GetTensorInfo();
        auto quantizationDataType = info.GetDataType();
        auto quantizationScales = info.GetQuantizationScales();
        // For any quantized tensor ensure scale(s) are set
        switch (quantizationDataType)
        {
            case DataType::QAsymmU8:
            case DataType::QSymmS16:
            case DataType::QSymmS8:
            case DataType::QAsymmS8:
                if ((quantizationDataType == DataType::QAsymmU8 || quantizationDataType == DataType::QAsymmS8)
                    && info.HasPerAxisQuantization())
                {
                    throw InvalidArgumentException("Per Axis Quantization is not supported in "
                                                   "Asymmetric Quantization Datatype.");
                }
                if ((!info.HasPerAxisQuantization() && info.GetQuantizationScale() == 0.f)
                    || (info.HasPerAxisQuantization() && (quantizationScales.end() !=
                        std::find(quantizationScales.begin(), quantizationScales.end(), 0.f))))
                {
                    noErrors = false;
                    std::stringstream ss;
                    ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
                       << " (" << layer->GetNameStr() << ") is of type"
                       << " Quantized value but the scale parameter has not been set";
                    ReportError(ss.str(), errMessages);
                }
                // Softmax under QAsymmU8 must always have scale (1.0f/256.0f) and offset 0
                if (!info.HasPerAxisQuantization() && quantizationDataType == DataType::QAsymmU8 &&
                    (info.GetQuantizationScale() != (1.0f / 256.0f) ||
                     info.GetQuantizationOffset() != 0) &&
                    layer->GetType() == armnn::LayerType::Softmax)
                {
                    std::stringstream ss;
                    ss << "Quantization parameters for Softmax layer (Scale: " <<
                       info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
                       ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
                    ARMNN_LOG(warning) << ss.str();
                    info.SetQuantizationScale((1.0f / 256.0f));
                    info.SetQuantizationOffset(0);
                    outputSlot.SetTensorInfo(info);
                }
                break;
            default:
                break;
        }
    }
    return noErrors;
}

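// Illustrative sketch (comment only): CheckScaleSetOnQuantizedType() above
// rejects quantized outputs whose scale was never set. A producer of a
// quantized graph would normally populate the output TensorInfo like this
// (layer is a hypothetical Layer* whose output is being described):
//
//     TensorInfo info(TensorShape({1, 8}), DataType::QAsymmU8);
//     info.SetQuantizationScale(1.0f / 256.0f);  // a zero scale would be reported as an error
//     info.SetQuantizationOffset(0);             // Softmax under QAsymmU8 additionally requires offset 0
//     layer->GetOutputSlot(0).SetTensorInfo(info);
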
OptimizationResult AttemptBackendAssignment(BackendSettings& backendSettings,
                                            Graph& graph,
                                            Layer* layer,
                                            BackendId backend,
                                            DataType dataTypeIn,
                                            DataType dataTypeOut,
                                            const std::vector<BackendId>& availablePreferredBackends,
                                            std::string& reasonIfUnsupported,
                                            Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose a meaningful error message before returning with error
    auto ReturnError = [&](const Layer* layer)
    {
        return ReturnWithError(result, layer, backendSettings, errMessages);
    };

    // Need to set the compute device on the layer before we can check if it is supported
    layer->SetBackendId(backend);
    std::string currentReasonIfUnsupported;

    // To run FP16 operations on CpuAcc we need at least a v8.2 architecture. If the available architecture
    // is older than v8.2, we can check if the operator is supported by changing the operator's inputs and
    // outputs to FP32 and inserting convert layers around the FP32 operator.
    bool isLayerSupported = IWorkloadFactory::IsLayerSupported(*layer, EmptyOptional(), currentReasonIfUnsupported);
    reasonIfUnsupported += currentReasonIfUnsupported;
    // This string matches the error message that is produced by ACL when attempting to run FP16 kernels on
    // a CPU or build that does not have FP16 support. We use this to check whether we should add
    // conversion layers or not.
    std::string checkStr = "This CPU architecture does not support F16 data type, you need v8.2 or above";
    if (!isLayerSupported || currentReasonIfUnsupported.find(checkStr) != std::string::npos)
    {
        if (dataTypeIn == DataType::Float16 || dataTypeOut == DataType::Float16)
        {
            if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                && layer->GetType() != LayerType::ConvertFp32ToFp16
                && layer->GetType() != LayerType::ConvertFp16ToFp32)
            {
                auto ConstantLayerFromFp16ToFp32 = [](Layer& layer)
                {
                    if (layer.GetType() == LayerType::Constant)
                    {
                        ConstantLayer* constantLayer = PolymorphicDowncast<ConstantLayer*>(&layer);

                        auto& info = constantLayer->m_LayerOutput->GetTensorInfo();

                        if (info.GetDataType() == DataType::Float16)
                        {
                            std::vector<float> newValues(info.GetNumElements());

                            armnnUtils::FloatingPointConverter::ConvertFloat16To32(
                                constantLayer->m_LayerOutput->GetConstTensor<Half>(),
                                info.GetNumElements(),
                                newValues.data());

                            TensorInfo newInfo(info);
                            newInfo.SetDataType(DataType::Float32);
                            ConstTensor newInput(newInfo, newValues);
                            constantLayer->m_LayerOutput.reset(new ScopedTensorHandle(newInput));

                            layer.GetOutputSlot(0).SetTensorInfo(newInfo);
                        }
                    }
                };

                bool checkType = false;

                for (auto inputSlot : layer->GetInputSlots())
                {
                    auto connectedOutputSlot = inputSlot.GetConnectedOutputSlot();
                    if (connectedOutputSlot->GetOwningLayer().GetType() == LayerType::Constant)
                    {
                        if (connectedOutputSlot->GetNumConnections() == 1)
                        {
                            checkType = true;
                            ConstantLayerFromFp16ToFp32(connectedOutputSlot->GetOwningLayer());
                        }
                    }
                }

                // Insert FP16 -> FP32 conversion layer before current layer
                std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers;
                if (dataTypeIn == DataType::Float16)
                {
                    convertFp16ToFp32Layers =
                        InsertConvertFp16ToFp32LayersBefore(graph, *layer, checkType);
                }

                // Insert FP32 -> FP16 conversion layer after current layer
                std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers;
                if (dataTypeOut == DataType::Float16)
                {
                    convertFp32ToFp16Layers =
                        InsertConvertFp32ToFp16LayersAfter(graph, *layer);
                }

                // Assign a supported backend to the newly introduced conversion layers
                auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                {
                    bool supportedBackendFound = false;
                    std::string reasonIfUnsupported;

                    // Try preferred backend first
                    layer->SetBackendId(preferredBackend);
                    if (IWorkloadFactory::IsLayerSupported(*layer,
                                                           EmptyOptional(),
                                                           reasonIfUnsupported))
                    {
                        supportedBackendFound = true;
                    }
                    else
                    {
                        for (const auto& backend : availablePreferredBackends)
                        {
                            // Skip preferred backend (we already determined that it is not supported)
                            if (backend == preferredBackend)
                            {
                                continue;
                            }

                            layer->SetBackendId(backend);
                            if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                   EmptyOptional(),
                                                                   reasonIfUnsupported))
                            {
                                supportedBackendFound = true;
                                break;
                            }
                        }
                    }

                    return supportedBackendFound;
                };

                for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                return result;
            }
        }

        std::stringstream warningMsg;
        warningMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                   << " is not supported on requested backend " << layer->GetBackendId().Get()
                   << " for input data type " << GetDataTypeName(dataTypeIn)
                   << " and output data type " << GetDataTypeName(dataTypeOut)
                   << " (reason: " << reasonIfUnsupported
                   << "), falling back to the next backend.";
        ReportWarning(warningMsg.str(), errMessages);

        return OptimizationResult(true, false);
    }
    else
    {
        return result;
    }
}

inline std::vector<DataType> GetLayerInOutDatatype(const Layer* layer)
{
    DataType dataTypeIn = layer->GetNumInputSlots() == 0 ? DataType::Float32 :
                          layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType();
    DataType dataTypeOut = layer->GetNumOutputSlots() == 0 ? DataType::Float32 :
                           layer->GetOutputSlot(0).GetTensorInfo().GetDataType();
    return {dataTypeIn, dataTypeOut};
}

bool CheckFp16Support(BackendsMap& backends,
                      const std::vector<BackendId>& availablePreferredBackends)
{
    bool hasFp16 = false;
    // Check if the first preferred backend has FP16 support
    auto firstBackend = availablePreferredBackends[0];
    auto backendObjPtr = backends.find(firstBackend)->second.get();
    ARMNN_ASSERT(backendObjPtr);
    auto hasFp16Capability = BackendOptions::BackendOption{"HasFp16", true};
    auto backendCapabilities = backendObjPtr->GetCapabilities();

    if (HasMatchingCapability(hasFp16Capability, backendCapabilities))
    {
        // First preferred backend has FP16 support. Enable reduce FP32 to FP16 when fp16-turbo-mode is enabled.
        hasFp16 = true;
        ARMNN_LOG(debug) << "The first available preferred backend: " << firstBackend
                         << ", has FP16 support.";
    }
    else
    {
        ARMNN_LOG(warning) << "The first available preferred backend: " << firstBackend
                           << ", does not have FP16 support. "
                           << "The FP16 turbo mode option will be disabled. The network will run using FP32.";
    }

    // Check if the rest of the available preferred backends have FP16 support
    for (size_t i = 1; i < availablePreferredBackends.size(); ++i)
    {
        auto backend = availablePreferredBackends[i];
        backendObjPtr = backends.find(backend)->second.get();
        backendCapabilities = backendObjPtr->GetCapabilities();
        if (!HasMatchingCapability(hasFp16Capability, backendCapabilities))
        {
            ARMNN_LOG(warning) << "Next preferred backend: " << backend << ", does not have FP16 support. "
                               << "It will run using FP32 when falling back to this backend.";
        }
        else
        {
            ARMNN_LOG(debug) << "Next preferred backend: " << backend << ", has FP16 support.";
        }
    }

    return hasFp16;
}

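// Illustrative sketch (comment only): CheckFp16Support() gates the automatic
// FP32 -> FP16 reduction. A caller opts in through the optimizer options defined
// earlier in this file, and the request is only honoured when the first
// preferred backend reports the "HasFp16" capability checked above:
//
//     OptimizerOptionsOpaque options;
//     options.SetReduceFp32ToFp16(true);  // ignored (with a warning) if the backend lacks FP16
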
// Refactored to allow passing the IConnectableLayer*, rather than a Layer iterator,
// since Graph and SubgraphView iterate over different types.
void AssignBackendsIConnectable(OptimizedNetworkImpl* optNetObjPtr,
                                IConnectableLayer* it,
                                Optional<std::vector<std::string>&> errMessages,
                                OptimizationResult& result,
                                BackendSettings& backendSettings,
                                std::vector<BackendId>& availablePreferredBackends)
{
    auto ReturnError = [&](const Layer* layer)
    {
        return ReturnWithError(result, layer, backendSettings, errMessages);
    };

    auto layer = PolymorphicDowncast<Layer*>(it);

    if (layer->GetType() == LayerType::Input)
    {
        return;
    }

    std::vector<DataType> inOutDataType = GetLayerInOutDatatype(layer);

    std::string reasonIfUnsupported;
    bool found = false;
    if (!CheckScaleSetOnQuantizedType(layer, errMessages))
    {
        // Don't bomb immediately, find all the quantized outputs
        // which haven't had a scale set and report them all back.
        result.m_Error = true;
    }

    // First try to assign the layer to the hinted backend
    if (layer->GetBackendHint().has_value() &&
        backendSettings.IsBackendSupported(layer->GetBackendHint().value()) &&
        AttemptBackendAssignment(backendSettings,
                                 optNetObjPtr->GetGraph(),
                                 layer,
                                 layer->GetBackendHint().value(),
                                 inOutDataType[0],
                                 inOutDataType[1],
                                 availablePreferredBackends,
                                 reasonIfUnsupported,
                                 errMessages).IsOk())
    {
        found = true;
        backendSettings.m_SelectedBackends.insert(layer->GetBackendHint().value());
    }
    else
    {
        // Try to assign the layer to the preferred list of backends
        for (const auto& backend : availablePreferredBackends)
        {
            if (layer->GetBackendHint().has_value() &&
                layer->GetBackendHint().value() == backend)
            {
                continue; // Don't re-test the backend hint
            }

            OptimizationResult res = AttemptBackendAssignment(backendSettings,
                                                              optNetObjPtr->GetGraph(),
                                                              layer,
                                                              backend,
                                                              inOutDataType[0],
                                                              inOutDataType[1],
                                                              availablePreferredBackends,
                                                              reasonIfUnsupported,
                                                              errMessages);

            if (res.IsOk())
            {
                found = true;
                backendSettings.m_SelectedBackends.insert(backend);
                break;
            }
            else if (res.IsError())
            {
                result = res; // Cannot continue.
                // Note: we don't need to log the error as it would already
                // be logged in AttemptBackendAssignment().
            }
            else
            {
                ARMNN_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
            }
        }
    }

    // If the layer is unsupported by any devices, log and return a null network.
    if (!found)
    {
        // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
        // fallback we should set the compute device on the layer to CpuRef (these are not
        // available as accelerated operations, or are only available under certain
        // conditions, currently they comprise MemCopy, Constant, Permute)
        armnn::LayerType layerType = layer->GetType();
        if (!backendSettings.IsCpuRefUsed() && (layerType == armnn::LayerType::MemCopy ||
                                                layerType == armnn::LayerType::Constant ||
                                                layerType == armnn::LayerType::Permute))
        {
            BackendId cpuBackendId(armnn::Compute::CpuRef);
            layer->SetBackendId(cpuBackendId);
            backendSettings.m_SelectedBackends.insert(cpuBackendId);
        }
        else
        {
            result = ReturnError(layer);
        }
    }
}

OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  Graph::Iterator& firstLayer,
                                  Graph::Iterator& lastLayer,
                                  Optional<std::vector<std::string>&> errMessages)
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_AssignBackends");
    OptimizationResult result;

    auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
    if (availablePreferredBackends.empty())
    {
        std::stringstream failureMsg;
        failureMsg << "No preferred backends are available";
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    }

    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        auto layer = PolymorphicDowncast<Layer*>(*it);
        std::vector<DataType> inOutDataType = GetLayerInOutDatatype(layer);

        // In AttemptBackendAssignment() we check:
        //     - if the input/output datatypes of the layer are float16
        //     - if the layer is supported with these datatypes
        // If the layer is not supported (failing on ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED() in
        // clframework), we attempt to insert conversion layers either side of the new fp32 layer.
        bool isFloat16 = false;
        for (auto type : inOutDataType)
        {
            if (type == DataType::Float16)
            {
                isFloat16 = true;
                break;
            }
        }

        if (layer->GetBackendId() == "Unknown" || isFloat16)
        {
            AssignBackendsIConnectable(optNetObjPtr,
                                       *it,
                                       errMessages,
                                       result,
                                       backendSettings,
                                       availablePreferredBackends);
        }
    }

    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        auto layer = PolymorphicDowncast<Layer*>(*it);

        if (layer->GetType() == LayerType::Input)
        {
            BackendId connectedBackendId = layer->GetOutputSlot(0).GetConnection(0)->GetOwningLayer().GetBackendId();
            layer->SetBackendId(connectedBackendId);
        }
    }

    return result;
}

OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  SubgraphView::IConnectableLayerIterator& firstLayer,
                                  SubgraphView::IConnectableLayerIterator& lastLayer,
                                  Optional<std::vector<std::string>&> errMessages)
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_AssignBackends");
    OptimizationResult result;

    auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
    if (availablePreferredBackends.empty())
    {
        std::stringstream failureMsg;
        failureMsg << "No preferred backends are available";
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    }

    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        AssignBackendsIConnectable(optNetObjPtr,
                                   *it,
                                   errMessages,
                                   result,
                                   backendSettings,
                                   availablePreferredBackends);
    }

    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        auto layer = PolymorphicDowncast<Layer*>(*it);

        if (layer->GetType() == LayerType::Input)
        {
            BackendId connectedBackendId = layer->GetOutputSlot(0).GetConnection(0)->GetOwningLayer().GetBackendId();
            layer->SetBackendId(connectedBackendId);
        }
    }

    return result;
}

OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  SubgraphView& subgraph,
                                  Optional<std::vector<std::string>&> errMessages)
{
    SubgraphView::IConnectableLayerIterator firstLayer = subgraph.begin();
    SubgraphView::IConnectableLayerIterator lastLayer = subgraph.end();
    return AssignBackends(optNetObjPtr,
                          backendSettings,
                          firstLayer,
                          lastLayer,
                          errMessages);
}

BackendsMap CreateSupportedBackends(TensorHandleFactoryRegistry& handleFactoryRegistry,
                                    BackendSettings& backendSettings)
{
    BackendsMap backends;
    auto const& backendRegistry = BackendRegistryInstance();
    for (auto&& selectedBackend : backendSettings.m_SupportedBackends)
    {
        auto backendFactory = backendRegistry.GetFactory(selectedBackend);
        auto backendObjPtr = backendFactory();
        ARMNN_ASSERT(backendObjPtr);

        backendObjPtr->RegisterTensorHandleFactories(handleFactoryRegistry);

        backends[backendObjPtr->GetId()] = std::move(backendObjPtr);
    }

    return backends;
}

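// Illustrative sketch (comment only): CreateSupportedBackends() is driven by the
// backend preference list handed to armnn::Optimize(). The deviceSpec variable
// below is assumed to come from the runtime, and the BackendSettings constructor
// shown is the two-argument (preferred backends, device spec) form:
//
//     TensorHandleFactoryRegistry registry;
//     BackendSettings settings({Compute::CpuAcc, Compute::CpuRef}, deviceSpec);
//     BackendsMap backends = CreateSupportedBackends(registry, settings);
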
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001334OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl* optNetObjPtr,
Matteo Martincighadddddb2019-01-24 14:06:23 +00001335 BackendSettings& backendSettings,
Derek Lamberti84da38b2019-06-13 11:40:08 +01001336 BackendsMap& backends,
Mike Kelly07810fc2020-11-12 10:58:48 +00001337 const ModelOptions& modelOptions,
Matteo Martincighadddddb2019-01-24 14:06:23 +00001338 Optional<std::vector<std::string>&> errMessages)
1339{
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001340 ARMNN_ASSERT(optNetObjPtr);
Derek Lambertif1e0ad32021-10-13 18:02:25 +01001341 ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_ApplyBackendOptimizations")
Matteo Martincigh49124022019-01-11 13:25:59 +00001342 OptimizationResult result;
1343
Matteo Martincighadddddb2019-01-24 14:06:23 +00001344 // Get the optimized graph
1345 Graph& optGraph = optNetObjPtr->GetGraph();
Matteo Martincigh49124022019-01-11 13:25:59 +00001346
Matteo Martincighadddddb2019-01-24 14:06:23 +00001347 // Run backend specific optimizations
Matteo Martincighadddddb2019-01-24 14:06:23 +00001348 for (auto&& selectedBackend : backendSettings.m_SelectedBackends)
Matteo Martincigh49124022019-01-11 13:25:59 +00001349 {
Derek Lamberti84da38b2019-06-13 11:40:08 +01001350 auto backendObjPtr = backends.find(selectedBackend)->second.get();
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001351 ARMNN_ASSERT(backendObjPtr);
Matteo Martincighadddddb2019-01-24 14:06:23 +00001352
Francis Murtaghd97db7e2023-01-16 13:11:29 +00001353 if (selectedBackend == armnn::Compute::GpuAcc || selectedBackend == armnn::Compute::CpuAcc)
Cathal Corbett4b19d222022-05-11 20:12:17 +01001354 {
1355 Optimizer::Pass(optGraph, MakeOptimizations(optimizations::PermuteDepthwiseConv2dWeights()));
Cathal Corbett541880f2022-05-16 15:20:56 +01001356 Optimizer::Pass(optGraph, MakeOptimizations(optimizations::FusePermuteIntoConstLayer()));
Cathal Corbett4b19d222022-05-11 20:12:17 +01001357 }
1358
Matteo Martincighadddddb2019-01-24 14:06:23 +00001359 // Select sub-graphs based on backend
Derek Lambertiff05cc52019-04-26 13:05:17 +01001360 SubgraphViewSelector::Subgraphs subgraphs =
Rob Hughes65c32262019-07-23 15:33:39 +01001361 SubgraphViewSelector::SelectSubgraphs(optGraph,
Matteo Martincigh602af092019-05-01 10:31:27 +01001362 // Select layers assigned to the requested backend
1363 [&backendObjPtr](const Layer& layer)
1364 {
Francis Murtagh56ccf682021-12-13 18:48:12 +00001365
Matteo Martincigh602af092019-05-01 10:31:27 +01001366 return layer.GetType() != LayerType::Input &&
1367 layer.GetType() != LayerType::Output &&
1368 layer.GetBackendId() == backendObjPtr->GetId();
1369 });
Derek Lambertiff05cc52019-04-26 13:05:17 +01001370 if (subgraphs.empty())
Matteo Martincigh49124022019-01-11 13:25:59 +00001371 {
Matteo Martincighadddddb2019-01-24 14:06:23 +00001372 // No sub-graphs found, try with next selected backend
1373 continue;
Matteo Martincigh49124022019-01-11 13:25:59 +00001374 }
Matteo Martincighadddddb2019-01-24 14:06:23 +00001375
1376 // Try to optimize each sub-graph
Derek Lambertiff05cc52019-04-26 13:05:17 +01001377 for (auto& subgraph : subgraphs)
Matteo Martincigh49124022019-01-11 13:25:59 +00001378 {
Matteo Martincighadddddb2019-01-24 14:06:23 +00001379 // Try to optimize the current sub-graph
Derek Lambertif1e0ad32021-10-13 18:02:25 +01001380 ARMNN_SCOPED_PROFILING_EVENT(backendObjPtr->GetId(), "Optimizer_OptimizeSubgraph");
Mike Kelly07810fc2020-11-12 10:58:48 +00001381 OptimizationViews optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraph, modelOptions);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001382 ARMNN_ASSERT(optimizationViews.Validate(*subgraph));
Matteo Martincighadddddb2019-01-24 14:06:23 +00001383
1384 // Optimization attempted, check the resulting optimized sub-graph
Matteo Martincigh84924332019-05-09 12:46:16 +01001385 for (auto& substitution : optimizationViews.GetSubstitutions())
Matteo Martincighadddddb2019-01-24 14:06:23 +00001386 {
1387 // Sub-graph optimized, substitute the sub-graph with the new optimized one in the main optimized graph
Matteo Martincigh84924332019-05-09 12:46:16 +01001388 SubgraphView& replacementSubgraph = substitution.m_ReplacementSubgraph;
1389 SubgraphView& substitutableSubgraph = substitution.m_SubstitutableSubgraph;
1390 optGraph.SubstituteSubgraph(substitutableSubgraph, replacementSubgraph);
Matteo Martincighadddddb2019-01-24 14:06:23 +00001391
1392 // Assign the current backend to the optimized sub-graph
Francis Murtagh56ccf682021-12-13 18:48:12 +00001393 const SubgraphView::IConnectableLayers& subgraphLayers = replacementSubgraph.GetIConnectableLayers();
1394 std::for_each(subgraphLayers.begin(), subgraphLayers.end(), [&selectedBackend](IConnectableLayer* l)
Derek Lambertic2fe5fb2019-05-08 10:23:08 +01001395 {
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001396 ARMNN_ASSERT(l);
Francis Murtagh56ccf682021-12-13 18:48:12 +00001397 PolymorphicDowncast<Layer*>(l)->SetBackendId(selectedBackend);
Derek Lambertic2fe5fb2019-05-08 10:23:08 +01001398 });
Matteo Martincighadddddb2019-01-24 14:06:23 +00001399 }
Derek Lambertic2fe5fb2019-05-08 10:23:08 +01001400
Mike Kelly4cc341c2023-07-07 15:43:06 +01001401 // Remove deleted sub-graphs
1402 for (auto& deletedSubgraph : optimizationViews.GetDeletedSubgraphs())
1403 {
1404 for (auto& l : deletedSubgraph.GetIConnectableLayers())
1405 {
1406 Layer* deletedLayer = PolymorphicDowncast<Layer*>(l);
1407 for (unsigned int in = deletedLayer->GetNumInputSlots(); in > 0; --in)
1408 {
                        // Bind by reference so the Disconnect()/Connect() calls below act on the
                        // graph's actual slots rather than on a local copy of the InputSlot.
 1409                        auto& inputSlot = deletedLayer->GetInputSlot(in - 1);
 1410                        OutputSlot* parentOut = inputSlot.GetConnectedOutputSlot();
 1411                        parentOut->Disconnect(inputSlot);
 1412                        for (unsigned int out = deletedLayer->GetOutputSlot(in - 1).GetNumConnections(); out > 0; --out)
 1413                        {
Mike Kellyb6de7a12023-07-18 12:03:41 +01001414                            InputSlot* childIn = deletedLayer->GetOutputSlot(in - 1).GetConnection(out - 1);
Mike Kelly4cc341c2023-07-07 15:43:06 +01001415 deletedLayer->GetOutputSlot(in - 1).Disconnect(*childIn);
1416 parentOut->Connect(*childIn);
1417 }
1418 }
1419 optGraph.EraseLayer(deletedLayer);
1420 }
1421 }
1422
Matteo Martincigh84924332019-05-09 12:46:16 +01001423 if (!optimizationViews.GetFailedSubgraphs().empty())
Matteo Martincighadddddb2019-01-24 14:06:23 +00001424 {
Matteo Martincighadddddb2019-01-24 14:06:23 +00001425 std::stringstream warningMsg;
Derek Lambertic2fe5fb2019-05-08 10:23:08 +01001426            warningMsg << "Some sub-graph(s) failed to optimize on " << backendObjPtr->GetId() << " backend.";
Matteo Martincighadddddb2019-01-24 14:06:23 +00001427 ReportWarning(warningMsg.str(), errMessages);
1428
1429 // Failed to optimize the given sub-graph, re-assign the sub-graph layers to other available backends
Derek Lambertic2fe5fb2019-05-08 10:23:08 +01001430 BackendSettings settingsCopy(backendSettings);
Matteo Martincighadddddb2019-01-24 14:06:23 +00001431 if (!backendObjPtr->GetId().IsCpuRef())
1432 {
1433 // Add the current backend to the list of backends to ignore
Derek Lambertic2fe5fb2019-05-08 10:23:08 +01001434 settingsCopy.m_IgnoredBackends.insert(backendObjPtr->GetId());
Matteo Martincighadddddb2019-01-24 14:06:23 +00001435 }
Derek Lambertic2fe5fb2019-05-08 10:23:08 +01001436
 1437            int count = 0;
Matteo Martincigh84924332019-05-09 12:46:16 +01001438 for (auto& failedSubgraph : optimizationViews.GetFailedSubgraphs())
Matteo Martincighadddddb2019-01-24 14:06:23 +00001439 {
Derek Lambertic2fe5fb2019-05-08 10:23:08 +01001440 // An error occurred: the optimization was attempted but not performed, try different backends
1441 std::stringstream subgraphMsg;
Francis Murtagh56ccf682021-12-13 18:48:12 +00001442 subgraphMsg << "Re-assigning backends to " << failedSubgraph.GetIConnectableLayers().size()
Derek Lambertic2fe5fb2019-05-08 10:23:08 +01001443 << " layers inside sub-graph " << count++;
Matteo Martincigh328d92b2019-07-04 17:52:55 +01001444 ReportWarning(subgraphMsg.str(), errMessages);
Derek Lambertic2fe5fb2019-05-08 10:23:08 +01001445
1446 OptimizationResult reassignmentResult = AssignBackends(optNetObjPtr,
1447 settingsCopy,
1448 *subgraph,
1449 errMessages);
1450 if (reassignmentResult.m_Error)
1451 {
1452 // Failed to re-assign one of the remaining backends to each layer of the sub-graph
1453 result.m_Error = true;
1454 return result;
1455 }
Matteo Martincighadddddb2019-01-24 14:06:23 +00001456 }
Matteo Martincigh49124022019-01-11 13:25:59 +00001457 }
1458 }
1459 }
1460
1461 return result;
1462}
1463
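// Illustrative sketch (comment only, not part of the build): the substitution flow
// above, seen from the caller's side. The calls mirror the real APIs used in
// ApplyBackendOptimizations(); the graph contents are hypothetical.
//
//     OptimizationViews views = backendObjPtr->OptimizeSubgraphView(*subgraph, modelOptions);
//     for (auto& sub : views.GetSubstitutions())
//     {
//         // Swap the original layers for the backend's fused replacement in place
//         optGraph.SubstituteSubgraph(sub.m_SubstitutableSubgraph, sub.m_ReplacementSubgraph);
//     }
//     // Failed sub-graphs are re-assigned to the remaining backend preferences,
//     // and deleted sub-graphs are unhooked edge by edge before being erased.
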
Derek Lamberti84da38b2019-06-13 11:40:08 +01001464bool RequiresCopy(ITensorHandleFactory::FactoryId src,
1465 ITensorHandleFactory::FactoryId dst,
1466 TensorHandleFactoryRegistry& registry)
1467{
1468 if (src != dst)
1469 {
1470 ITensorHandleFactory* srcFactory = registry.GetFactory(src);
1471 ITensorHandleFactory* dstFactory = registry.GetFactory(dst);
1472
Matteo Martincigha6539ed2019-08-27 13:43:32 +01001473 if (srcFactory && dstFactory &&
1474 (srcFactory->GetExportFlags() & dstFactory->GetImportFlags()) != 0)
Derek Lamberti84da38b2019-06-13 11:40:08 +01001475 {
1476 return false;
1477 }
1478 return true;
1479 }
1480 return false;
1481}
1482
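// For reference, RequiresCopy() reduces to a single flag test: a zero-copy hand-over is
// possible only when the producer can export into a memory type the consumer can import.
// A sketch of the same condition, with srcFactory/dstFactory obtained from the registry
// exactly as above:
//
//     bool zeroCopy = (srcFactory->GetExportFlags() & dstFactory->GetImportFlags()) != 0;
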
1483// Find the handle factory for the input layer which results in fewest required copies.
1484ITensorHandleFactory::FactoryId CalculateSlotOptionForInput(BackendsMap& backends,
1485 OutputSlot& slot,
Narumol Prangnawarate5f0b242021-05-07 17:52:36 +01001486 TensorHandleFactoryRegistry& registry,
1487 bool importEnabled)
Derek Lamberti84da38b2019-06-13 11:40:08 +01001488{
1489 Layer& layer = slot.GetOwningLayer();
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001490 ARMNN_ASSERT(layer.GetType() == LayerType::Input);
Derek Lamberti84da38b2019-06-13 11:40:08 +01001491
 1492    // Explicitly select the tensor handle factory for InputLayer because the rules for it are slightly different. It
1493 // doesn't matter which backend it is assigned to because they all use the same implementation, which
1494 // requires Map/Unmap support. This means that, so long as the handle type supports map/unmap semantics, we can
1495 // select a factory with maximum compatibility with the layers connected to the InputLayer.
1496
 1497    // First ensure the source backend supports the TensorHandle API
1498 auto frmBackend = backends.find(layer.GetBackendId());
1499 if (frmBackend == backends.end() ||
1500 !frmBackend->second->SupportsTensorAllocatorAPI())
1501 {
1502 return ITensorHandleFactory::LegacyFactoryId;
1503 }
1504
1505 // Go through all connections to the output slot and determine the TensorHandleFactory which results in the
1506 // fewest copies.
1507 std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
1508 int topScore = 0;
1509 ITensorHandleFactory::FactoryId topChoice = ITensorHandleFactory::LegacyFactoryId;
1510
1511 for (auto&& connection : slot.GetConnections())
1512 {
Narumol Prangnawarate5f0b242021-05-07 17:52:36 +01001513
Derek Lamberti84da38b2019-06-13 11:40:08 +01001514 const Layer& connectedLayer = connection->GetOwningLayer();
1515
1516 auto toBackend = backends.find(connectedLayer.GetBackendId());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001517 ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
Derek Lamberti84da38b2019-06-13 11:40:08 +01001518
1519 if (!toBackend->second.get()->SupportsTensorAllocatorAPI())
1520 {
1521 // The destination backend does not support the tensor allocator API, move to the next one
1522 continue;
1523 }
1524
1525 auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
1526 for (auto&& dst : dstPrefs)
1527 {
Derek Lambertif674aa02019-08-01 15:56:25 +01001528            // Input layers use the MemCopy workload or import, so the selected factory must
1529 // support either the map/unmap API or Import API
Derek Lamberti84da38b2019-06-13 11:40:08 +01001530 ITensorHandleFactory* factory = registry.GetFactory(dst);
Narumol Prangnawarate5f0b242021-05-07 17:52:36 +01001531 if (importEnabled && factory->GetImportFlags() == 0)
Derek Lamberti84da38b2019-06-13 11:40:08 +01001532 {
Narumol Prangnawarate5f0b242021-05-07 17:52:36 +01001533 continue;
1534 }
1535 else if (!importEnabled && !factory->SupportsMapUnmap())
1536 {
Derek Lamberti84da38b2019-06-13 11:40:08 +01001537 continue;
1538 }
1539
1540 auto it = factoryScores.find(dst);
1541 if (it == factoryScores.end())
1542 {
1543 // Add new score to the table
1544 factoryScores[dst] = 0;
1545 if (topChoice == ITensorHandleFactory::LegacyFactoryId)
1546 {
1547 topChoice = dst;
1548 }
1549 }
1550 else
1551 {
1552 // Increase the score
1553 factoryScores[dst]++;
1554
1555 // Track the best option
1556 if (factoryScores[dst] > topScore)
1557 {
1558 topScore = factoryScores[dst];
1559 topChoice = dst;
1560 }
1561 }
1562 }
1563 }
1564
1565 return topChoice;
1566}
1567
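// Worked example for the scoring above (hypothetical factories A and B): with three
// consumers whose backends prefer {A}, {A, B} and {B}, factory A is re-encountered
// first and reaches the top score of 1 before B can match it, so A is chosen. Note the
// contrast with CalculateSlotOption() below: here a higher score (more compatible
// consumers) wins, while there a lower score (fewer required copies) wins.
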
1568// Find the handle factory for the output layer which results in fewest required copies.
1569ITensorHandleFactory::FactoryId CalculateSlotOptionForOutput(BackendsMap& backends,
1570 OutputSlot& slot,
1571 TensorHandleFactoryRegistry& registry)
1572{
Jan Eilers8eb25602020-03-09 12:13:48 +00001573 IgnoreUnused(backends, slot, registry);
Derek Lamberti94a88d22019-12-10 21:12:59 +00001574 return ITensorHandleFactory::DeferredFactoryId;
Derek Lamberti84da38b2019-06-13 11:40:08 +01001575}
1576
1577// For all handle factories supported on the source backend, we wish to find the one which requires the fewest copies
1578// when considering all connections.
1579ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
1580 OutputSlot& outputSlot,
Narumol Prangnawarate5f0b242021-05-07 17:52:36 +01001581 TensorHandleFactoryRegistry& registry,
Francis Murtagh626bd902022-06-21 13:16:23 +00001582 bool exportEnabled)
Derek Lamberti84da38b2019-06-13 11:40:08 +01001583{
 1584    // First ensure the source backend supports the TensorHandle API
1585 Layer& layer = outputSlot.GetOwningLayer();
1586 auto frmBackend = backends.find(layer.GetBackendId());
1587 if (frmBackend == backends.end() ||
1588 !frmBackend->second->SupportsTensorAllocatorAPI())
1589 {
1590 return ITensorHandleFactory::LegacyFactoryId;
1591 }
1592
Narumol Prangnawarate5f0b242021-05-07 17:52:36 +01001593 bool outputConnection = false;
Derek Lamberti84da38b2019-06-13 11:40:08 +01001594 for (auto&& connection : outputSlot.GetConnections())
1595 {
1596 const Layer& connectedLayer = connection->GetOwningLayer();
1597 if (connectedLayer.GetType() == LayerType::Output)
1598 {
Narumol Prangnawarate5f0b242021-05-07 17:52:36 +01001599 outputConnection = true;
Derek Lamberti84da38b2019-06-13 11:40:08 +01001600 }
1601 }
1602
1603 IBackendInternal* srcBackend = frmBackend->second.get();
1604 auto srcPrefs = srcBackend->GetHandleFactoryPreferences();
1605
1606 // Initialize the scores
1607 std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
1608 for (auto&& pref : srcPrefs)
1609 {
Francis Murtagh626bd902022-06-21 13:16:23 +00001610 if (exportEnabled)
Derek Lamberti84da38b2019-06-13 11:40:08 +01001611 {
1612 ITensorHandleFactory* factory = registry.GetFactory(pref);
Narumol Prangnawarate5f0b242021-05-07 17:52:36 +01001613 if (outputConnection)
1614 {
 1615            // Check if this is a fallback case
1616 bool fallbackConnection = false;
1617 for (auto&& inputSlot : layer.GetInputSlots())
1618 {
1619 if (inputSlot.GetConnectedOutputSlot()->GetOwningLayer().GetBackendId() != layer.GetBackendId())
1620 {
1621 fallbackConnection = true;
1622 }
1623 }
1624 if (fallbackConnection)
1625 {
1626 auto factoryCap = factory->GetCapabilities(&layer, &layer, CapabilityClass::FallbackImportDisabled);
1627 // Cannot use factory import if fallback import is not supported.
1628 if (!factoryCap.empty())
1629 {
1630 continue;
1631 }
1632 }
1633 else if (factory->GetExportFlags() == 0)
1634 {
1635 continue;
1636 }
1637 }
1638 if (!outputConnection)
1639 {
1640 auto factoryCap = factory->GetCapabilities(&layer, &layer, CapabilityClass::FallbackImportDisabled);
1641 // Cannot use factory import if fallback import is not supported.
1642 if (!factoryCap.empty())
1643 {
1644 continue;
1645 }
1646 }
1647
1648 }
1649 else
1650 {
1651 // Only consider factories that support map/unmap
1652 ITensorHandleFactory* factory = registry.GetFactory(pref);
Derek Lamberti84da38b2019-06-13 11:40:08 +01001653 if (!factory->SupportsMapUnmap())
1654 {
1655 // The current tensor handle factory does not support the map/unmap strategy, move to the next one
1656 continue;
1657 }
1658 }
1659
Narumol Prangnawarate5f0b242021-05-07 17:52:36 +01001660
Derek Lamberti84da38b2019-06-13 11:40:08 +01001661 auto it = factoryScores.find(pref);
1662 if (it == factoryScores.end())
1663 {
1664 // Add new score to the table
1665 factoryScores[pref] = 0;
1666 }
1667 }
1668
1669 // Score each handle factory based on how many times it requires copies on the slot connections
1670 for (auto&& connection : outputSlot.GetConnections())
1671 {
1672 const Layer& connectedLayer = connection->GetOwningLayer();
1673
1674 auto toBackend = backends.find(connectedLayer.GetBackendId());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001675 ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
Derek Lamberti84da38b2019-06-13 11:40:08 +01001676
1677 auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
1678 for (auto&& src : srcPrefs)
1679 {
1680 if (factoryScores.find(src) == factoryScores.end()) // Don't consider excluded factories
1681 {
1682 continue;
1683 }
1684
1685 for (auto&& dst : dstPrefs)
1686 {
1687 if (RequiresCopy(src, dst, registry))
1688 {
 1689                // Copy required, increase the score (the factory needing the fewest copies wins below)
1690 factoryScores[src]++;
1691 break;
1692 }
1693 }
1694 }
1695 }
1696
1697 // Find the lowest score
1698 int minScore = std::numeric_limits<int>::max();
1699 for (auto it : factoryScores)
1700 {
1701 minScore = std::min(minScore, it.second);
1702 }
1703
 1704    // Collect factories matching the best (lowest) score
1705 std::vector<ITensorHandleFactory::FactoryId> optimalFactories;
1706 for (auto it : factoryScores)
1707 {
1708 if (it.second == minScore)
1709 {
1710 optimalFactories.push_back(it.first);
1711 }
1712 }
1713
1714 // For all compatible Factories matching the best score, find the preferred one for the current layer.
1715 for (auto&& srcPref : srcPrefs)
1716 {
1717 for (auto&& comp : optimalFactories)
1718 {
1719 if (comp == srcPref)
1720 {
1721 return comp;
1722 }
1723 }
1724 }
1725
1726 return ITensorHandleFactory::LegacyFactoryId;
1727}
1728
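// Selection summary for CalculateSlotOption() (sketch): every factory offered by the
// source backend is scored by how many connections would force a copy, the set with the
// minimum score is kept, and the first member of that set found in srcPrefs order is
// returned, falling back to LegacyFactoryId. For example, with srcPrefs = {A, B, C} and
// scores {A:2, B:0, C:0}, the tie between B and C resolves to B.
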
Derek Lambertif674aa02019-08-01 15:56:25 +01001729EdgeStrategy CalculateEdgeStrategy(BackendsMap& backends,
1730 ITensorHandleFactory::FactoryId srcFactoryId,
1731 const Layer& layer,
1732 const Layer& connectedLayer,
Narumol Prangnawarata2493a02020-08-19 14:39:07 +01001733 TensorHandleFactoryRegistry& registry,
1734 bool importEnabled)
Derek Lamberti84da38b2019-06-13 11:40:08 +01001735{
1736 auto toBackend = backends.find(connectedLayer.GetBackendId());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001737 ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
Derek Lamberti84da38b2019-06-13 11:40:08 +01001738
1739 auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
1740
1741 // Legacy API check for backward compatibility
1742 if (srcFactoryId == ITensorHandleFactory::LegacyFactoryId || dstPrefs.empty())
1743 {
1744 if (layer.GetBackendId() != connectedLayer.GetBackendId())
1745 {
Derek Lambertif674aa02019-08-01 15:56:25 +01001746 return EdgeStrategy::CopyToTarget;
Derek Lamberti84da38b2019-06-13 11:40:08 +01001747 }
1748 else
1749 {
Derek Lambertif674aa02019-08-01 15:56:25 +01001750 return EdgeStrategy::DirectCompatibility;
Derek Lamberti84da38b2019-06-13 11:40:08 +01001751 }
1752 }
1753
1754 // TensorHandleFactory API present, so perform more sophisticated strategies.
Derek Lambertif674aa02019-08-01 15:56:25 +01001755    // Dst Output layers don't require a copy because they use import or map/unmap
Derek Lamberti84da38b2019-06-13 11:40:08 +01001756 if (connectedLayer.GetType() == LayerType::Output)
1757 {
Derek Lambertif674aa02019-08-01 15:56:25 +01001758 return EdgeStrategy::DirectCompatibility;
Derek Lamberti84da38b2019-06-13 11:40:08 +01001759 }
1760
1761 // Search for direct match in prefs
1762 for (auto&& pref : dstPrefs)
1763 {
1764 if (pref == srcFactoryId)
1765 {
Derek Lambertif674aa02019-08-01 15:56:25 +01001766 return EdgeStrategy::DirectCompatibility;
Derek Lamberti84da38b2019-06-13 11:40:08 +01001767 }
1768 }
1769
1770 // Search for export/import options
1771 ITensorHandleFactory* srcFactory = registry.GetFactory(srcFactoryId);
Narumol Prangnawarata2493a02020-08-19 14:39:07 +01001772 if (srcFactory->GetExportFlags() != 0 && importEnabled)
Derek Lamberti84da38b2019-06-13 11:40:08 +01001773 {
1774 for (auto&& pref : dstPrefs)
1775 {
1776 ITensorHandleFactory* dstFactory = registry.GetFactory(pref);
James Conroyffab16f2019-11-07 14:37:09 +00001777
James Conroy47e863d2019-11-18 17:07:43 +00001778            // Handle the case where a destination preference is not registered in the TensorHandleFactoryRegistry
James Conroyffab16f2019-11-07 14:37:09 +00001779            if (!dstFactory)
            {
James Conroy47e863d2019-11-18 17:07:43 +00001780                continue;
James Conroyffab16f2019-11-07 14:37:09 +00001781            }
Derek Lambertif674aa02019-08-01 15:56:25 +01001782 if ((dstFactory->GetImportFlags() & srcFactory->GetExportFlags()) != 0)
Derek Lamberti84da38b2019-06-13 11:40:08 +01001783 {
Narumol Prangnawaratb8d771a2020-08-14 11:51:12 +01001784 auto srcCapability = srcFactory->GetCapabilities(&layer, &layer, CapabilityClass::PaddingRequired);
1785 auto dstCapability = dstFactory->GetCapabilities(&connectedLayer,
1786 &connectedLayer,
1787 CapabilityClass::PaddingRequired);
Narumol Prangnawarate5f0b242021-05-07 17:52:36 +01001788 auto srcFallback = srcFactory->GetCapabilities(&layer, &layer, CapabilityClass::FallbackImportDisabled);
1789 auto dstFallback = dstFactory->GetCapabilities(&connectedLayer,
1790 &connectedLayer,
1791 CapabilityClass::FallbackImportDisabled);
Narumol Prangnawaratb8d771a2020-08-14 11:51:12 +01001792 // Do not require memory copy if the source and destination do not require padding.
Narumol Prangnawarate5f0b242021-05-07 17:52:36 +01001793 if (srcCapability.empty() && dstCapability.empty() && srcFallback.empty() && dstFallback.empty())
Narumol Prangnawaratb8d771a2020-08-14 11:51:12 +01001794 {
1795 return EdgeStrategy::ExportToTarget;
1796 }
Derek Lamberti84da38b2019-06-13 11:40:08 +01001797 }
1798 }
1799 }
1800
1801 // Search for copy options via map/unmap
1802 if (srcFactory->SupportsMapUnmap())
1803 {
1804 for (auto&& pref : dstPrefs)
1805 {
1806 ITensorHandleFactory* dstFactory = registry.GetFactory(pref);
James Conroy47e863d2019-11-18 17:07:43 +00001807 if (dstFactory && dstFactory->SupportsMapUnmap())
Derek Lamberti84da38b2019-06-13 11:40:08 +01001808 {
Derek Lambertif674aa02019-08-01 15:56:25 +01001809 return EdgeStrategy::CopyToTarget;
Derek Lamberti84da38b2019-06-13 11:40:08 +01001810 }
1811 }
1812 }
1813
Derek Lambertif674aa02019-08-01 15:56:25 +01001814 return EdgeStrategy::Undefined;
Derek Lamberti84da38b2019-06-13 11:40:08 +01001815}
1816
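// Decision ladder applied above for each edge, in order (summary):
//   1. Legacy source factory or no destination preferences -> CopyToTarget across
//      backends, DirectCompatibility within a single backend.
//   2. The connected layer is an Output layer              -> DirectCompatibility.
//   3. The destination prefers the source factory          -> DirectCompatibility.
//   4. Export/import flags overlap, importing is enabled, and neither side reports
//      PaddingRequired or FallbackImportDisabled           -> ExportToTarget.
//   5. Both sides support map/unmap                        -> CopyToTarget.
//   6. Otherwise -> EdgeStrategy::Undefined, treated as an error by the caller.
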
1817// Select the TensorHandleFactories and the corresponding memory strategy
1818OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
1819 BackendsMap& backends,
1820 TensorHandleFactoryRegistry& registry,
Narumol Prangnawarata2493a02020-08-19 14:39:07 +01001821 bool importEnabled,
Francis Murtagh626bd902022-06-21 13:16:23 +00001822 bool exportEnabled,
Derek Lamberti84da38b2019-06-13 11:40:08 +01001823 Optional<std::vector<std::string>&> errMessages)
1824{
Derek Lambertif1e0ad32021-10-13 18:02:25 +01001825 ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_SelectTensorHandleStrategy");
Derek Lamberti84da38b2019-06-13 11:40:08 +01001826 OptimizationResult result;
1827
Francis Murtagh626bd902022-06-21 13:16:23 +00001828 optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled, exportEnabled](Layer* layer)
Derek Lamberti84da38b2019-06-13 11:40:08 +01001829 {
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001830 ARMNN_ASSERT(layer);
Derek Lamberti84da38b2019-06-13 11:40:08 +01001831
 1832        // Let's make sure the backend is in our list of supported backends. Something went wrong during backend
1833 // assignment if this check fails
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001834 ARMNN_ASSERT(backends.find(layer->GetBackendId()) != backends.end());
Derek Lamberti84da38b2019-06-13 11:40:08 +01001835
1836 // Check each output separately
1837 for (unsigned int slotIdx = 0; slotIdx < layer->GetNumOutputSlots(); slotIdx++)
1838 {
1839 OutputSlot& outputSlot = layer->GetOutputSlot(slotIdx);
1840
1841 ITensorHandleFactory::FactoryId slotOption = ITensorHandleFactory::LegacyFactoryId;
1842
1843 // Calculate the factory to use which results in the fewest copies being made.
1844 switch(layer->GetType())
1845 {
1846 case LayerType::Input:
Narumol Prangnawarate5f0b242021-05-07 17:52:36 +01001847 slotOption = CalculateSlotOptionForInput(backends, outputSlot, registry, importEnabled);
Derek Lamberti84da38b2019-06-13 11:40:08 +01001848 break;
1849 case LayerType::Output:
1850 slotOption = CalculateSlotOptionForOutput(backends, outputSlot, registry);
1851 break;
1852 default:
Francis Murtagh626bd902022-06-21 13:16:23 +00001853 slotOption = CalculateSlotOption(backends, outputSlot, registry, exportEnabled);
Derek Lamberti84da38b2019-06-13 11:40:08 +01001854 break;
1855 }
1856 outputSlot.SetTensorHandleFactory(slotOption);
1857
Derek Lambertif674aa02019-08-01 15:56:25 +01001858 // Now determine the "best" edge strategy for each connection given the slotOption.
Derek Lamberti84da38b2019-06-13 11:40:08 +01001859 unsigned int connectionIdx = 0;
1860 for (auto&& connection : outputSlot.GetConnections())
1861 {
1862 const Layer& connectedLayer = connection->GetOwningLayer();
1863
Narumol Prangnawarata2493a02020-08-19 14:39:07 +01001864 EdgeStrategy strategy = CalculateEdgeStrategy(backends, slotOption, *layer, connectedLayer,
1865 registry, importEnabled);
Derek Lamberti84da38b2019-06-13 11:40:08 +01001866
Derek Lambertif674aa02019-08-01 15:56:25 +01001867 if (strategy == EdgeStrategy::Undefined)
Derek Lamberti84da38b2019-06-13 11:40:08 +01001868 {
1869 result.m_Error = true;
1870 if (errMessages)
1871 {
1872 errMessages.value().emplace_back("Could not find valid strategy required for compatibility"
1873 " between backends.");
1874 }
1875 return;
1876 }
1877
Derek Lambertif674aa02019-08-01 15:56:25 +01001878 outputSlot.SetEdgeStrategy(connectionIdx, strategy);
Derek Lamberti84da38b2019-06-13 11:40:08 +01001879
1880 connectionIdx++;
1881 }
1882 }
1883 });
1884
1885 return result;
1886}
1887
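// After this pass every OutputSlot carries a concrete tensor handle factory plus one
// EdgeStrategy per outgoing connection; AddCompatibilityLayers() (called from Optimize()
// below) then materialises CopyToTarget/ExportToTarget edges as MemCopy/MemImport
// layers. Sketch of inspecting the result for a single slot, assuming the OutputSlot
// accessors declared in Layer.hpp:
//
//     OutputSlot& slot = layer->GetOutputSlot(0);
//     ITensorHandleFactory::FactoryId id = slot.GetTensorHandleFactoryId();
//     EdgeStrategy strategy = slot.GetEdgeStrategyForConnection(0);
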
John Mcloughlinc5ee0d72023-03-24 12:07:25 +00001888// Forwarding function to remain backward compatible with legacy OptimizerOptions
Cathal Corbetta3f4fba2022-03-21 09:27:08 +00001889IOptimizedNetworkPtr Optimize(const Graph& inGraph,
Matteo Martincigh49124022019-01-11 13:25:59 +00001890 const std::vector<BackendId>& backendPreferences,
1891 const IDeviceSpec& deviceSpec,
1892 const OptimizerOptions& options,
Rob Hughes23214432019-11-05 11:27:36 +00001893 Optional<std::vector<std::string>&> messages)
Matteo Martincigh49124022019-01-11 13:25:59 +00001894{
John Mcloughlinc5ee0d72023-03-24 12:07:25 +00001895 return Optimize(inGraph,
1896 backendPreferences,
1897 deviceSpec,
1898 OptimizerOptionsOpaque(options),
1899 messages);
1900}
1901
1902IOptimizedNetworkPtr Optimize(const Graph& inGraph,
1903 const std::vector<BackendId>& backendPreferences,
1904 const IDeviceSpec& deviceSpec,
1905 const OptimizerOptionsOpaque& options,
1906 Optional<std::vector<std::string>&> messages)
1907{
Jan Eilers17d34da2021-12-08 16:15:12 +00001908 ARMNN_LOG(debug) << options.ToString();
Jan Eilers6a71bb52021-10-26 17:41:18 +01001909
Derek Lambertif1e0ad32021-10-13 18:02:25 +01001910 // Enable profiling
Cathal Corbetta3f4fba2022-03-21 09:27:08 +00001911 auto profiler = inGraph.GetProfiler();
Derek Lambertif1e0ad32021-10-13 18:02:25 +01001912 ProfilerManager::GetInstance().RegisterProfiler(profiler.get());
John Mcloughlinc5ee0d72023-03-24 12:07:25 +00001913 profiler->EnableProfiling(options.GetProfilingEnabled());
Derek Lambertif1e0ad32021-10-13 18:02:25 +01001914
David Monahan8a570462023-11-22 13:24:25 +00001915 // Some backends don't play well together. Check here before continuing.
1916 {
1917 std::set<BackendId> backendSet(backendPreferences.begin(), backendPreferences.end());
1918 // GpuFsa cannot co-exist with GpuAcc.
1919 if (backendSet.find("GpuFsa") != backendSet.end() &&
1920 backendSet.find("GpuAcc") != backendSet.end())
1921 {
1922 throw InvalidArgumentException("The backends \"GpuAcc\" and \"GpuFsa\" cannot be specified "
1923 "for the same optimized network.");
1924 }
1925 }
1926
Derek Lambertif1e0ad32021-10-13 18:02:25 +01001927 ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer");
Matteo Martincigh49124022019-01-11 13:25:59 +00001928 if (backendPreferences.empty())
1929 {
Mike Kelly3a613cc2020-09-29 20:50:35 +01001930 throw InvalidArgumentException("Invoked Optimize with no backends specified");
Matteo Martincigh49124022019-01-11 13:25:59 +00001931 }
1932
John Mcloughlinc5ee0d72023-03-24 12:07:25 +00001933 if (options.GetReduceFp32ToBf16())
Ryan OShea31441592022-11-07 16:20:48 +00001934 {
 1935        throw InvalidArgumentException("BFloat16 optimization is currently ignored. In order to use Bf16 optimization, "
 1936                                       "please use the FastMathEnabled backend option for CpuAcc or GpuAcc.");
1937 }
1938
John Mcloughlinc5ee0d72023-03-24 12:07:25 +00001939 if (options.GetReduceFp32ToFp16() && options.GetReduceFp32ToBf16())
Narumol Prangnawaratbc7ffb52020-03-20 15:01:01 +00001940 {
1941 throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
1942 }
1943
Cathal Corbett521032f2021-10-07 11:46:40 +01001944 // Ensure TensorInfo is set on all output slots of ConstantLayers in the graph
Cathal Corbetta3f4fba2022-03-21 09:27:08 +00001945 inGraph.VerifyConstantLayerSetTensorInfo();
Cathal Corbett521032f2021-10-07 11:46:40 +01001946
Cathal Corbetta3f4fba2022-03-21 09:27:08 +00001947 std::unique_ptr<Graph> graph = std::make_unique<Graph>(inGraph);
Matteo Martincigh49124022019-01-11 13:25:59 +00001948
Francis Murtagh626bd902022-06-21 13:16:23 +00001949 // We need to pass on the information about whether import and export is enabled to the LoadNetwork phase.
1950 // The mechanism to do that is to add model options to the optimized network.
1951 armnn::BackendOptions importExport("Global",
John Mcloughlinc5ee0d72023-03-24 12:07:25 +00001952 {{"ImportEnabled", options.GetImportEnabled()},
1953 {"ExportEnabled", options.GetExportEnabled()}});
1954 ModelOptions optimizedOptions(options.GetModelOptions());
Francis Murtagh626bd902022-06-21 13:16:23 +00001955 optimizedOptions.push_back(importExport);
1956
1957 auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), optimizedOptions),
Sadik Armagan045f6be2020-09-10 13:37:32 +01001958 &IOptimizedNetwork::Destroy);
Matteo Martincigh49124022019-01-11 13:25:59 +00001959
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001960 IOptimizedNetwork* optNetObjPtr = optNet.get();
Matteo Martincigh49124022019-01-11 13:25:59 +00001961
Matteo Martincighadddddb2019-01-24 14:06:23 +00001962 // Get the optimized graph
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001963 Graph& optGraph = optNetObjPtr->pOptimizedNetworkImpl->GetGraph();
Matteo Martincighadddddb2019-01-24 14:06:23 +00001964
John Mcloughlinc5ee0d72023-03-24 12:07:25 +00001965 if(options.GetShapeInferenceMethod() == ShapeInferenceMethod::InferAndValidate)
Finn Williamsd218d982021-08-09 13:00:08 +01001966 {
1967 // Infer the tensor infos for all output slots. Throws an exception on failure
1968 optGraph.InferTensorInfos();
1969 }
Finn Williams84e025a2021-08-05 17:29:32 +01001970
Idriss Chaouch98e383e2023-08-28 14:28:31 +01001971 // Perform BroadcastToOptimizationLayer and then AddBroadcastReshapeLayer optimisation
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001972 using namespace optimizations;
Idriss Chaouch98e383e2023-08-28 14:28:31 +01001973 Optimizer::Pass(optGraph, MakeOptimizations(BroadcastToOptimizationLayer()));
1974
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001975 Optimizer::Pass(optGraph, MakeOptimizations(AddBroadcastReshapeLayer()));
1976
John Mcloughlinc5ee0d72023-03-24 12:07:25 +00001977 if(options.GetShapeInferenceMethod() == ShapeInferenceMethod::ValidateOnly)
Finn Williamsd218d982021-08-09 13:00:08 +01001978 {
1979 // Validate the tensor infos for all output slots. Throws an exception on failure
1980 optGraph.InferTensorInfos();
1981 }
1982
Cathal Corbett541880f2022-05-16 15:20:56 +01001983
Francis Murtaghd97db7e2023-01-16 13:11:29 +00001984 // Group Constant Layer optimizations together where possible.
1985 // This is important as:
1986 // FusePermuteIntoConstantLayer must happen before FoldPadIntoDepthwiseConvolution2d and
1987 // FuseBatchNormIntoDepthwiseConvolution2D.
1988 // ConvertConstDequantisationLayersToConstLayers must happen before FoldPadIntoConvolution2d
1989 Optimizer::Pass(optGraph, MakeOptimizations(FusePermuteIntoConstLayer(),
1990 ConvertConstDequantisationLayersToConstLayers()));
Matteo Martincigh49124022019-01-11 13:25:59 +00001991 // Perform optimisation passes
Matteo Martincighadddddb2019-01-24 14:06:23 +00001992 Optimizer::Pass(optGraph, MakeOptimizations(SquashEqualPermuteSiblings(),
Mike Kelly490b7be2020-03-03 12:39:09 +00001993 SquashEqualTransposeSiblings(),
Matteo Martincighadddddb2019-01-24 14:06:23 +00001994 SquashEqualReshapeSiblings(),
1995 OptimizeInversePermutes(),
Mike Kelly490b7be2020-03-03 12:39:09 +00001996 OptimizeInverseTransposes(),
Matteo Martincighadddddb2019-01-24 14:06:23 +00001997 MovePermuteUp(),
Mike Kelly490b7be2020-03-03 12:39:09 +00001998 MoveTransposeUp(),
Matteo Martincighadddddb2019-01-24 14:06:23 +00001999 PermuteAsReshape(),
Mike Kelly490b7be2020-03-03 12:39:09 +00002000 TransposeAsReshape(),
Nina Drozd861985f2019-04-18 14:48:51 +01002001 OptimizeConsecutiveReshapes(),
Rob Hughes3a7d3a72019-09-24 16:59:56 +01002002 FoldPadIntoConvolution2d(),
Teresa Charlin5786eb72021-05-21 16:29:45 +01002003 FoldPadIntoDepthwiseConvolution2d(),
Diego Lopez Recasfe95d722021-03-19 12:40:16 +00002004 FoldPadIntoPooling2d(),
Idriss Chaouch98e383e2023-08-28 14:28:31 +01002005 BroadcastToOptimizationLayer(),
Mike Kelly490b7be2020-03-03 12:39:09 +00002006 PermuteAndBatchToSpaceAsDepthToSpace(),
Teresa Charlin06e03002020-10-15 13:16:07 +01002007 TransposeAndBatchToSpaceAsDepthToSpace(),
Mike Kelly90231b82020-11-05 15:44:56 +00002008 FuseBatchNormIntoConvolution2DFloat32(),
2009 FuseBatchNormIntoConvolution2DFloat16(),
2010 FuseBatchNormIntoDepthwiseConvolution2DFloat32(),
Francis Murtaghd97db7e2023-01-16 13:11:29 +00002011 FuseBatchNormIntoDepthwiseConvolution2DFloat16()));
Matteo Martincigh49124022019-01-11 13:25:59 +00002012
Matteo Martincigh49124022019-01-11 13:25:59 +00002013 // Initialize backend settings
2014 BackendSettings backendSettings(backendPreferences, deviceSpec);
Ryan OSheab4c49342023-07-25 14:28:27 +01002015 auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
2016 if (availablePreferredBackends.empty())
Matteo Martincigh49124022019-01-11 13:25:59 +00002017 {
2018 std::stringstream failureMsg;
2019 failureMsg << "None of the preferred backends " << backendPreferences
2020 << " are supported. Current platform provides " << backendSettings.m_SupportedBackends;
Rob Hughes23214432019-11-05 11:27:36 +00002021 ReportError(failureMsg.str(), messages);
Mike Kelly3a613cc2020-09-29 20:50:35 +01002022 throw InvalidArgumentException(failureMsg.str());
Matteo Martincigh49124022019-01-11 13:25:59 +00002023 }
2024
Derek Lamberti84da38b2019-06-13 11:40:08 +01002025 // Create a map to temporarily hold initialized backend objects
2026 TensorHandleFactoryRegistry tensorHandleFactoryRegistry;
2027 BackendsMap backends = CreateSupportedBackends(tensorHandleFactoryRegistry, backendSettings);
2028
Ryan OSheab4c49342023-07-25 14:28:27 +01002029 if (options.GetReduceFp32ToFp16())
2030 {
2031 bool hasFp16 = CheckFp16Support(backends, availablePreferredBackends);
2032 if (hasFp16)
2033 {
2034 ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_ReduceFp32ToFp16");
2035 Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToFp16Converter()));
2036 Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
2037 }
2038 }
2039
Matteo Martincigh49124022019-01-11 13:25:59 +00002040 // Assign an available backend to each layer
Matteo Martincighadddddb2019-01-24 14:06:23 +00002041 Graph::Iterator firstLayer = optGraph.begin();
2042 Graph::Iterator lastLayer = optGraph.end();
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002043 OptimizationResult assignBackendsResult = AssignBackends(optNetObjPtr->pOptimizedNetworkImpl.get(),
Derek Lamberti84da38b2019-06-13 11:40:08 +01002044 backendSettings,
2045 firstLayer,
2046 lastLayer,
Rob Hughes23214432019-11-05 11:27:36 +00002047 messages);
Derek Lamberti84da38b2019-06-13 11:40:08 +01002048 if (assignBackendsResult.m_Error)
Matteo Martincigh49124022019-01-11 13:25:59 +00002049 {
2050 // Failed to assign a backend to each layer
Mike Kelly3a613cc2020-09-29 20:50:35 +01002051 throw InvalidArgumentException("Failed to assign a backend to each layer");
jimfly016b0b53d2018-10-08 14:43:01 +01002052 }
telsoa01c577f2c2018-08-31 09:22:23 +01002053
Matteo Martincighadddddb2019-01-24 14:06:23 +00002054 Optimizer::Pass(optGraph, MakeOptimizations(OptimizeInverseConversionsFp16(),
2055 OptimizeInverseConversionsFp32()));
telsoa01c577f2c2018-08-31 09:22:23 +01002056
Matteo Martincighadddddb2019-01-24 14:06:23 +00002057 // Apply the backend-specific optimizations
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002058 OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(optNetObjPtr->pOptimizedNetworkImpl.get(),
Matteo Martincighadddddb2019-01-24 14:06:23 +00002059 backendSettings,
Derek Lamberti84da38b2019-06-13 11:40:08 +01002060 backends,
John Mcloughlinc5ee0d72023-03-24 12:07:25 +00002061 options.GetModelOptions(),
Rob Hughes23214432019-11-05 11:27:36 +00002062 messages);
Matteo Martincighadddddb2019-01-24 14:06:23 +00002063 if (backendOptimizationResult.m_Error)
Matteo Martincigh49124022019-01-11 13:25:59 +00002064 {
Matteo Martincighadddddb2019-01-24 14:06:23 +00002065 // Failed to apply the backend-specific optimizations
Mike Kelly3a613cc2020-09-29 20:50:35 +01002066 throw InvalidArgumentException("Failed to apply the backend-specific optimizations");
Matteo Martincigh49124022019-01-11 13:25:59 +00002067 }
2068
Keith Davis71ebf5f2022-09-28 17:13:20 +01002069 // Convert constants
2070 {
2071 ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_ConvertConstants");
2072 Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
2073 Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsHalfToFloat()));
Keith Davis71ebf5f2022-09-28 17:13:20 +01002074 }
2075
2076 // This must occur after all topological changes to the graph and any redirection of variables
Matteo Martincighadddddb2019-01-24 14:06:23 +00002077 // If the debug flag is set, then insert a DebugLayer after each layer
2078 // Doing this after applying the backend optimizations as they might have changed some layers
John Mcloughlinc5ee0d72023-03-24 12:07:25 +00002079 if (options.GetDebugEnabled() && !options.GetDebugToFileEnabled())
Matteo Martincighadddddb2019-01-24 14:06:23 +00002080 {
2081 Optimizer::Pass(optGraph, MakeOptimizations(InsertDebugLayer()));
2082 }
John Mcloughlinc5ee0d72023-03-24 12:07:25 +00002083 else if (options.GetDebugToFileEnabled())
Keith Davis15f9c682022-10-14 15:50:33 +01002084 {
Keith Davisf63b4572022-10-19 14:53:05 +01002085        // Set up the output file path
Colm Donelane27983c2023-01-16 16:45:08 +00002086 try
2087 {
Ryan OSheaa3dc95e2023-03-20 11:10:40 +00002088#if !defined(ARMNN_DISABLE_FILESYSTEM)
Colm Donelane27983c2023-01-16 16:45:08 +00002089 auto result = armnnUtils::Filesystem::CreateDirectory("/ArmNNIntermediateLayerOutputs");
2090 ARMNN_LOG(info) << "Intermediate tensors will be written to: " << result;
Ryan OSheaa3dc95e2023-03-20 11:10:40 +00002091#endif
Colm Donelane27983c2023-01-16 16:45:08 +00002092 Optimizer::Pass(optGraph, MakeOptimizations(InsertDebugToFileLayer()));
2093 }
2094 catch (const armnn::RuntimeException& e)
2095 {
2096 // If we cannot create the output directory then we'll issue a warning and continue.
 2097            ARMNN_LOG(warning) << "Unable to print intermediate layer outputs: " << e.what();
2098 }
Keith Davis15f9c682022-10-14 15:50:33 +01002099 }
Matteo Martincighadddddb2019-01-24 14:06:23 +00002100
Derek Lamberti84da38b2019-06-13 11:40:08 +01002101 // Calculate the compatibility strategies for tensor handles
2102 OptimizationResult strategyResult = SelectTensorHandleStrategy(optGraph,
2103 backends,
2104 tensorHandleFactoryRegistry,
John Mcloughlinc5ee0d72023-03-24 12:07:25 +00002105 options.GetImportEnabled(),
2106 options.GetExportEnabled(),
Rob Hughes23214432019-11-05 11:27:36 +00002107 messages);
Francis Murtagh626bd902022-06-21 13:16:23 +00002108
Derek Lamberti84da38b2019-06-13 11:40:08 +01002109 if (strategyResult.m_Error)
2110 {
 2111        // Failed to select a compatible tensor handle strategy for one or more layers
2112 return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
2113 }
2114
2115 // Based on the tensor handle strategy determined above, insert copy layers where required.
Derek Lambertif1e0ad32021-10-13 18:02:25 +01002116 {
2117 ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_AddCompatibilityLayers");
2118 optGraph.AddCompatibilityLayers(backends, tensorHandleFactoryRegistry);
2119 }
telsoa01c577f2c2018-08-31 09:22:23 +01002120
telsoa01c577f2c2018-08-31 09:22:23 +01002121 return optNet;
telsoa014fcda012018-03-09 14:13:49 +00002122}
Cathal Corbetta3f4fba2022-03-21 09:27:08 +00002123
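// Pipeline summary for the overload above, in execution order:
//   1. Validate the options and backend combinations, then snapshot the input graph.
//   2. Infer or validate tensor infos and run the global graph optimisations.
//   3. Assign a backend to every layer, optionally reducing FP32 to FP16 first.
//   4. Apply the backend-specific optimisations (ApplyBackendOptimizations above).
//   5. Convert constants and, if requested, insert debug layers.
//   6. Select tensor handle factories and edge strategies, then insert the
//      compatibility (copy/import) layers those strategies require.
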
John Mcloughlinc5ee0d72023-03-24 12:07:25 +00002124// Forwarding function to remain backward compatible with legacy OptimizerOptions
Cathal Corbetta3f4fba2022-03-21 09:27:08 +00002125IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
2126 const std::vector<BackendId>& backendPreferences,
2127 const IDeviceSpec& deviceSpec,
2128 const OptimizerOptions& options,
2129 Optional<std::vector<std::string>&> messages)
2130{
John Mcloughlinc5ee0d72023-03-24 12:07:25 +00002131 return Optimize(inNetwork,
2132 backendPreferences,
2133 deviceSpec,
2134 OptimizerOptionsOpaque(options),
2135 messages);
2136}
2137
2138IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
2139 const std::vector<BackendId>& backendPreferences,
2140 const IDeviceSpec& deviceSpec,
2141 const OptimizerOptionsOpaque& options,
2142 Optional<std::vector<std::string>&> messages)
2143{
Cathal Corbetta3f4fba2022-03-21 09:27:08 +00002144 return Optimize(inNetwork.pNetworkImpl->GetGraph(),
2145 backendPreferences,
2146 deviceSpec,
2147 options,
2148 messages);
2149}
2150
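// Typical end-to-end use of the entry point above (sketch; network construction and
// error handling omitted, backend ids are examples):
//
//     using namespace armnn;
//     IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions());
//     INetworkPtr network = INetwork::Create();
//     // ... add and connect layers ...
//     std::vector<BackendId> preferences = { Compute::CpuAcc, Compute::CpuRef };
//     OptimizerOptionsOpaque opts; // defaults: no FP16 reduction, import/export off
//     IOptimizedNetworkPtr optNet = Optimize(*network, preferences, runtime->GetDeviceSpec(), opts);
//     NetworkId netId;
//     runtime->LoadNetwork(netId, std::move(optNet));
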
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002151bool NetworkImpl::GetShapeInferenceMethod()
telsoa014fcda012018-03-09 14:13:49 +00002152{
Mike Kelly80512b02022-05-16 23:10:42 +01002153 bool shapeInferenceMethod = false;
Finn Williamsf24effa2020-07-03 10:12:03 +01002154
Mike Kelly80512b02022-05-16 23:10:42 +01002155 ParseOptions(m_NetworkOptions, "ShapeInferenceMethod", [&](std::string name, const BackendOptions::Var& value)
2156 {
2157 if (name == "InferAndValidate")
2158 {
2159 shapeInferenceMethod |= value.AsBool();
2160 }
2161 });
2162 return shapeInferenceMethod;
telsoa014fcda012018-03-09 14:13:49 +00002163}
Mike Kelly80512b02022-05-16 23:10:42 +01002164
2165bool NetworkImpl::GetAllowExpandedDims()
2166{
2167 bool allowExpandedDims = false;
2168
2169 ParseOptions(m_NetworkOptions, "AllowExpandedDims", [&](std::string name, const BackendOptions::Var& value)
2170 {
2171 if (name == "AllowExpandedDims")
2172 {
2173 allowExpandedDims |= value.AsBool();
2174 }
2175 });
2176 return allowExpandedDims;
2177}
2178
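// Sketch of how callers opt in to the two settings parsed above; the option-group and
// option names must match the strings ParseOptions() looks for:
//
//     armnn::NetworkOptions netOpts;
//     netOpts.emplace_back(armnn::BackendOptions("ShapeInferenceMethod",
//                                                {{"InferAndValidate", true}}));
//     netOpts.emplace_back(armnn::BackendOptions("AllowExpandedDims",
//                                                {{"AllowExpandedDims", true}}));
//     armnn::INetworkPtr net = armnn::INetwork::Create(netOpts);
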
Jim Flynnfcc72f52022-10-14 11:20:07 +01002179NetworkImpl::NetworkImpl(const NetworkOptions& networkOptions)
Finn Williamsf24effa2020-07-03 10:12:03 +01002180: m_NetworkOptions(networkOptions),
Mike Kelly80512b02022-05-16 23:10:42 +01002181 m_Graph(std::make_unique<Graph>(GetShapeInferenceMethod(), GetAllowExpandedDims()))
Finn Williamsf24effa2020-07-03 10:12:03 +01002182{}
telsoa014fcda012018-03-09 14:13:49 +00002183
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002184NetworkImpl::~NetworkImpl()
telsoa014fcda012018-03-09 14:13:49 +00002185{
2186}
2187
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002188Status NetworkImpl::PrintGraph()
Jan Eilers99d9d4a2019-11-06 10:02:16 +00002189{
2190 m_Graph->Print();
2191 return Status::Success;
2192}
2193
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002194IConnectableLayer* NetworkImpl::AddInputLayer(LayerBindingId id, const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002195{
2196 return m_Graph->AddLayer<InputLayer>(id, name);
2197}
2198
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002199IConnectableLayer* NetworkImpl::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00002200 const char* name)
2201{
2202 return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
2203}
2204
mathad01b392e982021-04-07 12:07:30 +01002205IConnectableLayer* NetworkImpl::AddCastLayer(const char* name)
2206{
2207 return m_Graph->AddLayer<CastLayer>(name);
2208}
Simon Obute51f67772021-09-03 15:50:13 +01002209IConnectableLayer* NetworkImpl::AddChannelShuffleLayer(const ChannelShuffleDescriptor& channelShuffleDescriptor,
2210 const char* name)
2211{
2212 return m_Graph->AddLayer<ChannelShuffleLayer>(channelShuffleDescriptor, name);
2213}
mathad01b392e982021-04-07 12:07:30 +01002214
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002215IConnectableLayer* NetworkImpl::AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01002216 const char* name)
2217{
2218 return m_Graph->AddLayer<ComparisonLayer>(comparisonDescriptor, name);
2219}
2220
Mike Kelly3ec30772023-03-08 13:47:17 +00002221IConnectableLayer* NetworkImpl::AddElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& elementwiseBinaryDesc,
2222 const char* name)
2223{
2224 return m_Graph->AddLayer<ElementwiseBinaryLayer>(elementwiseBinaryDesc, name);
2225}
2226
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002227IConnectableLayer* NetworkImpl::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
josh minor4a3c6102020-01-06 16:40:46 -06002228 const char* name)
2229{
2230 return m_Graph->AddLayer<ElementwiseUnaryLayer>(elementwiseUnaryDescriptor, name);
2231}
2232
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002233IConnectableLayer* NetworkImpl::AddFillLayer(const FillDescriptor& fillDescriptor,
Ryan OSheaec6c6802020-06-05 17:17:06 +01002234 const char* name)
2235{
2236 return m_Graph->AddLayer<FillLayer>(fillDescriptor, name);
2237}
2238
Matthew Sloyan81beae32021-07-13 19:46:11 +01002239IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
2240 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002241{
Matthew Sloyan81beae32021-07-13 19:46:11 +01002242 return m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);
telsoa014fcda012018-03-09 14:13:49 +00002243}
2244
Teresa Charlin9145e382023-08-17 18:44:58 +01002245IConnectableLayer* NetworkImpl::AddFusedLayer(const FusedDescriptor& fusedDescriptor,
2246 const char* name)
2247{
2248 return m_Graph->AddLayer<FusedLayer>(fusedDescriptor, name);
2249}
2250
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002251IConnectableLayer* NetworkImpl::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
Jim Flynn906f9462019-05-10 13:55:21 +01002252 const char* name)
2253{
Jim Flynne242f2d2019-05-22 14:24:13 +01002254 return m_Graph->AddLayer<ConcatLayer>(concatDescriptor, name);
Jim Flynn906f9462019-05-10 13:55:21 +01002255}
2256
Keith Davisb4dd5cc2022-04-07 11:32:00 +01002257IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
2258 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002259{
Keith Davisb4dd5cc2022-04-07 11:32:00 +01002260 return m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);
2261}
telsoa014fcda012018-03-09 14:13:49 +00002262
Cathal Corbetta3f4fba2022-03-21 09:27:08 +00002263IConnectableLayer* NetworkImpl::AddConvertFp16ToFp32Layer(const char* name)
2264{
2265 return m_Graph->AddLayer<ConvertFp16ToFp32Layer>(name);
2266}
2267
2268IConnectableLayer* NetworkImpl::AddConvertFp32ToFp16Layer(const char* name)
2269{
2270 return m_Graph->AddLayer<ConvertFp32ToFp16Layer>(name);
2271}
2272
Matthew Sloyanb63a3112021-09-08 13:05:51 +01002273IConnectableLayer* NetworkImpl::AddConvolution3dLayer(const Convolution3dDescriptor& convolution3dDescriptor,
Matthew Sloyanb63a3112021-09-08 13:05:51 +01002274 const char* name)
2275{
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01002276 return m_Graph->AddLayer<Convolution3dLayer>(convolution3dDescriptor, name);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01002277}
2278
2279IConnectableLayer* NetworkImpl::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
2280 const char* name)
2281{
2282 return m_Graph->AddLayer<DepthToSpaceLayer>(depthToSpaceDescriptor, name);
2283}
2284
Cathal Corbett06902652022-04-14 17:55:11 +01002285IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
2286 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
2287 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002288{
Cathal Corbett06902652022-04-14 17:55:11 +01002289 return m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);
telsoa014fcda012018-03-09 14:13:49 +00002290}
2291
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002292IConnectableLayer* NetworkImpl::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
Narumol Prangnawarat6d302bf2019-02-04 11:46:26 +00002293 const ConstTensor& anchors, const char* name)
Narumol Prangnawarat94dd5d82019-01-23 18:06:26 +00002294{
Narumol Prangnawarat6d302bf2019-02-04 11:46:26 +00002295 const auto layer = m_Graph->AddLayer<DetectionPostProcessLayer>(descriptor, name);
2296
James Conroy1f58f032021-04-27 17:13:27 +01002297 layer->m_Anchors = std::make_shared<ScopedTensorHandle>(anchors);
Narumol Prangnawarat6d302bf2019-02-04 11:46:26 +00002298
2299 return layer;
Narumol Prangnawarat94dd5d82019-01-23 18:06:26 +00002300}
2301
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002302IConnectableLayer* NetworkImpl::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00002303 const char* name)
2304{
2305 return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
2306}
2307
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002308IConnectableLayer* NetworkImpl::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00002309 const char* name)
2310{
2311 return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
2312}
2313
Tamás Nyíri7b885b32021-10-26 14:47:57 +01002314IConnectableLayer* NetworkImpl::AddPooling3dLayer(const Pooling3dDescriptor& pooling3dDescriptor,
2315 const char* name)
2316{
2317 return m_Graph->AddLayer<Pooling3dLayer>(pooling3dDescriptor, name);
2318}
2319
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002320IConnectableLayer* NetworkImpl::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00002321 const char* name)
2322{
2323 return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
2324}
2325
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002326IConnectableLayer* NetworkImpl::AddArgMinMaxLayer(const ArgMinMaxDescriptor& argMinMaxDescriptor,
Nikhil Rajee391d52019-09-05 17:50:44 +01002327 const char* name)
2328{
2329 return m_Graph->AddLayer<ArgMinMaxLayer>(argMinMaxDescriptor, name);
2330}
2331
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002332IConnectableLayer* NetworkImpl::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00002334 const char* name)
2335{
2336 return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
2337}
2338
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002339IConnectableLayer* NetworkImpl::AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name)
Aron Virginas-Tar636ab402019-09-16 14:27:45 +01002340{
2341 return m_Graph->AddLayer<SliceLayer>(sliceDescriptor, name);
2342}
2343
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002344IConnectableLayer* NetworkImpl::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00002345 const char* name)
2346{
2347 return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
2348}
2349
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002350IConnectableLayer* NetworkImpl::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00002351 const char* name)
2352{
2353 return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
2354}
2355
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002356IConnectableLayer* NetworkImpl::AddMaximumLayer(const char* name)
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00002357{
2358 return m_Graph->AddLayer<MaximumLayer>(name);
2359}
2360
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002361IConnectableLayer* NetworkImpl::AddMinimumLayer(const char* name)
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002362{
2363 return m_Graph->AddLayer<MinimumLayer>(name);
2364}
2365
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002366IConnectableLayer* NetworkImpl::AddAdditionLayer(const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002367{
2368 return m_Graph->AddLayer<AdditionLayer>(name);
2369}
2370
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002371IConnectableLayer* NetworkImpl::AddMultiplicationLayer(const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002372{
2373 return m_Graph->AddLayer<MultiplicationLayer>(name);
2374}
2375
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002376IConnectableLayer* NetworkImpl::AddOutputLayer(LayerBindingId id, const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002377{
2378 return m_Graph->AddLayer<OutputLayer>(id, name);
2379}
2380
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002381IConnectableLayer* NetworkImpl::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
telsoa014fcda012018-03-09 14:13:49 +00002382 const ConstTensor& mean,
2383 const ConstTensor& variance,
2384 const ConstTensor& beta,
2385 const ConstTensor& gamma,
2386 const char* name)
2387{
2388 const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);
2389
James Conroy1f58f032021-04-27 17:13:27 +01002390 layer->m_Mean = std::make_shared<ScopedTensorHandle>(mean);
2391 layer->m_Variance = std::make_shared<ScopedTensorHandle>(variance);
2392 layer->m_Beta = std::make_shared<ScopedTensorHandle>(beta);
2393 layer->m_Gamma = std::make_shared<ScopedTensorHandle>(gamma);
telsoa014fcda012018-03-09 14:13:49 +00002394
2395 return layer;
2396}
2397
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002398IConnectableLayer* NetworkImpl::AddRankLayer(const char* name)
Finn Williams2605b232020-06-10 15:53:46 +01002399{
2400 return m_Graph->AddLayer<RankLayer>(name);
2401}
2402
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002403IConnectableLayer* NetworkImpl::AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
2404 const char* name)
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00002405{
2406 return m_Graph->AddLayer<ReduceLayer>(reduceDescriptor, name);
2407}
2408
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002409IConnectableLayer* NetworkImpl::AddResizeLayer(const ResizeDescriptor& resizeDescriptor, const char* name)
Teresa Charlina9075df2019-06-27 15:41:57 +01002410{
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002411 return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
Teresa Charlina9075df2019-06-27 15:41:57 +01002412}
2413
Keith Davis3ae3f972021-05-21 16:33:48 +01002414IConnectableLayer* NetworkImpl::AddShapeLayer(const char* name)
2415{
2416 return m_Graph->AddLayer<ShapeLayer>(name);
2417}
2418
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002419IConnectableLayer* NetworkImpl::AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
2420 const char* name)
Kevin Mayce5045a2019-10-02 14:07:47 +01002421{
2422 return m_Graph->AddLayer<InstanceNormalizationLayer>(desc, name);
2423}
2424
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002425IConnectableLayer* NetworkImpl::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
2426 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002427{
Matteo Martincighbcd3c852018-09-28 14:14:12 +01002428 return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
telsoa014fcda012018-03-09 14:13:49 +00002429}
2430
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002431IConnectableLayer* NetworkImpl::AddLogSoftmaxLayer(const LogSoftmaxDescriptor& desc,
Aron Virginas-Tarf982dea2019-10-11 14:07:53 +01002432 const char* name)
2433{
2434 return m_Graph->AddLayer<LogSoftmaxLayer>(desc, name);
2435}
2436
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002437IConnectableLayer* NetworkImpl::AddConstantLayer(const ConstTensor& input, const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002438{
telsoa01c577f2c2018-08-31 09:22:23 +01002439 auto layer = m_Graph->AddLayer<ConstantLayer>(name);
2440
James Conroy1f58f032021-04-27 17:13:27 +01002441 layer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(input);
telsoa01c577f2c2018-08-31 09:22:23 +01002442
2443 return layer;
telsoa014fcda012018-03-09 14:13:49 +00002444}
2445
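// Usage sketch for the factory above (hypothetical one-element tensor, assuming
// `network` is an INetworkPtr built elsewhere): the graph owns the ConstantLayer, the
// caller keeps a non-owning pointer, and the payload is shared via ScopedTensorHandle.
//
//     armnn::TensorInfo info({ 1 }, armnn::DataType::Float32);
//     info.SetConstant();
//     float value = 1.0f;
//     armnn::ConstTensor tensor(info, &value);
//     armnn::IConnectableLayer* constLayer = network->AddConstantLayer(tensor, "const0");
//     constLayer->GetOutputSlot(0).SetTensorInfo(info);
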
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002446IConnectableLayer* NetworkImpl::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01002447 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002448{
2449 return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
2450}
2451
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002452IConnectableLayer* NetworkImpl::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00002453 const char* name)
2454{
2455 return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
2456}
2457
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002458IConnectableLayer* NetworkImpl::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
Aron Virginas-Tar972af152019-06-11 14:14:03 +01002459 const char* name)
2460{
2461 return m_Graph->AddLayer<SpaceToDepthLayer>(spaceToDepthDescriptor, name);
2462}
2463
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002464IConnectableLayer* NetworkImpl::AddFloorLayer(const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002465{
2466 return m_Graph->AddLayer<FloorLayer>(name);
2467}
2468
IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
                                             const LstmInputParams& params,
                                             const char* name)
{
    const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);

    // Lstm Basic Parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_shared<ScopedTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_shared<ScopedTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_shared<ScopedTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_shared<ScopedTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_shared<ScopedTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_shared<ScopedTensorHandle>(*(params.m_OutputGateBias));

    // Lstm Cifg parameters
    if (!descriptor.m_CifgEnabled)
    {
        if (params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL "
                                           "when CIFG is disabled.");
        }
        if (params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                "AddLstmLayer: Recurrent To Input Weights cannot be NULL "
                "when CIFG is disabled.");
        }
        if (params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL "
                                           "when CIFG is disabled.");
        }
        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToInputWeights));
        layer->m_CifgParameters.m_InputGateBias =
            std::make_shared<ScopedTensorHandle>(*(params.m_InputGateBias));
    }

    // Lstm projection parameters
    if (descriptor.m_ProjectionEnabled)
    {
        if (params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL "
                                           "when projection is enabled.");
        }
        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionWeights));
        if (params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    // Lstm Peephole params
    if (descriptor.m_PeepholeEnabled)
    {
        if (!descriptor.m_CifgEnabled)
        {
            if (params.m_CellToInputWeights == nullptr)
            {
                throw InvalidArgumentException("AddLstmLayer: Cell To Input Weights cannot be NULL "
                                               "when Peephole is enabled and CIFG disabled.");
            }

            layer->m_PeepholeParameters.m_CellToInputWeights =
                std::make_shared<ScopedTensorHandle>(*(params.m_CellToInputWeights));
        }

        if (params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL "
                                           "when Peephole is enabled.");
        }
        if (params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL "
                                           "when Peephole is enabled.");
        }

        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_CellToOutputWeights));
    }

    // Lstm Layer Normalization params
    if (descriptor.m_LayerNormEnabled)
    {
        if (!descriptor.m_CifgEnabled)
        {
            if (params.m_InputLayerNormWeights == nullptr)
            {
                throw InvalidArgumentException("AddLstmLayer: Input layer normalization weights cannot be NULL "
                                               "when layer normalization is enabled and CIFG disabled.");
            }
            layer->m_LayerNormParameters.m_InputLayerNormWeights =
                std::make_shared<ScopedTensorHandle>(*(params.m_InputLayerNormWeights));
        }

        if (params.m_ForgetLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Forget layer normalization weights cannot be NULL "
                                           "when layer normalization is enabled.");
        }
        if (params.m_CellLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell layer normalization weights cannot be NULL "
                                           "when layer normalization is enabled.");
        }
        if (params.m_OutputLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Output layer normalization weights cannot be NULL "
                                           "when layer normalization is enabled.");
        }
        layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_ForgetLayerNormWeights));
        layer->m_LayerNormParameters.m_CellLayerNormWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_CellLayerNormWeights));
        layer->m_LayerNormParameters.m_OutputLayerNormWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_OutputLayerNormWeights));
    }
    return layer;
}
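
// Illustrative caller-side sketch (hypothetical tensors; `network` and the
// ConstTensor weights/biases are assumed to exist elsewhere). With CIFG
// enabled, only the nine basic weights/biases above are mandatory:
//     LstmDescriptor lstmDesc;
//     lstmDesc.m_CifgEnabled = true;   // couples the input gate, so no CIFG tensors are needed
//     LstmInputParams lstmParams;
//     lstmParams.m_InputToForgetWeights = &inputToForgetWeights;
//     // ...the other eight basic weights/biases are set the same way...
//     IConnectableLayer* lstm = network.AddLstmLayer(lstmDesc, lstmParams, "lstm");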

IConnectableLayer* NetworkImpl::AddDivisionLayer(const char* name)
{
    return m_Graph->AddLayer<DivisionLayer>(name);
}

IConnectableLayer* NetworkImpl::AddSubtractionLayer(const char* name)
{
    return m_Graph->AddLayer<SubtractionLayer>(name);
}

IConnectableLayer* NetworkImpl::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
{
    return m_Graph->AddLayer<MeanLayer>(meanDescriptor, name);
}

IConnectableLayer* NetworkImpl::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
{
    return m_Graph->AddLayer<PadLayer>(padDescriptor, name);
}

IConnectableLayer* NetworkImpl::AddQuantizeLayer(const char* name)
{
    return m_Graph->AddLayer<QuantizeLayer>(name);
}

IConnectableLayer* NetworkImpl::AddDequantizeLayer(const char* name)
{
    return m_Graph->AddLayer<DequantizeLayer>(name);
}

IConnectableLayer* NetworkImpl::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                                     const char* name)
{
    return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}

IConnectableLayer* NetworkImpl::AddGatherLayer(const GatherDescriptor& gatherDescriptor,
                                               const char* name)
{
    return m_Graph->AddLayer<GatherLayer>(gatherDescriptor, name);
}

IConnectableLayer* NetworkImpl::AddGatherNdLayer(const char* name)
{
    return m_Graph->AddLayer<GatherNdLayer>(name);
}

IConnectableLayer* NetworkImpl::AddMergeLayer(const char* name)
{
    return m_Graph->AddLayer<MergeLayer>(name);
}

IConnectableLayer* NetworkImpl::AddSwitchLayer(const char* name)
{
    return m_Graph->AddLayer<SwitchLayer>(name);
}

IConnectableLayer* NetworkImpl::AddPreluLayer(const char* name)
{
    return m_Graph->AddLayer<PreluLayer>(name);
}

IConnectableLayer* NetworkImpl::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
                                                               const ConstTensor& weights,
                                                               const Optional<ConstTensor>& biases,
                                                               const char* name)
{
    if (descriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddTransposeConvolution2dLayer: Biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<TransposeConvolution2dLayer>(descriptor, name);

    layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);

    if (descriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
    }

    return layer;
}
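
// Illustrative sketch (hypothetical `network`, `weights` and `biases` tensors):
// when m_BiasEnabled is set, the bias must be supplied through the Optional:
//     TransposeConvolution2dDescriptor tcDesc;
//     tcDesc.m_BiasEnabled = true;
//     IConnectableLayer* deconv = network.AddTransposeConvolution2dLayer(
//         tcDesc, weights, Optional<ConstTensor>(biases), "deconv");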

IConnectableLayer* NetworkImpl::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
                                                  const char* name)
{
    return m_Graph->AddLayer<TransposeLayer>(transposeDescriptor, name);
}

IConnectableLayer* NetworkImpl::AddStackLayer(const StackDescriptor& stackDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
}

IConnectableLayer* NetworkImpl::AddStandInLayer(const StandInDescriptor& desc,
                                                const char* name)
{
    return m_Graph->AddLayer<StandInLayer>(desc, name);
}

IConnectableLayer* NetworkImpl::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
                                                      const char* name)
{
    const auto layer = m_Graph->AddLayer<QuantizedLstmLayer>(name);

    // InputToX weights
    layer->m_QuantizedLstmParameters.m_InputToInputWeights =
        std::make_shared<ScopedTensorHandle>(params.GetInputToInputWeights());
    layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
        std::make_shared<ScopedTensorHandle>(params.GetInputToForgetWeights());
    layer->m_QuantizedLstmParameters.m_InputToCellWeights =
        std::make_shared<ScopedTensorHandle>(params.GetInputToCellWeights());
    layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
        std::make_shared<ScopedTensorHandle>(params.GetInputToOutputWeights());

    // RecurrentToX weights
    layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
        std::make_shared<ScopedTensorHandle>(params.GetRecurrentToInputWeights());
    layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
        std::make_shared<ScopedTensorHandle>(params.GetRecurrentToForgetWeights());
    layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
        std::make_shared<ScopedTensorHandle>(params.GetRecurrentToCellWeights());
    layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
        std::make_shared<ScopedTensorHandle>(params.GetRecurrentToOutputWeights());

    // Bias
    layer->m_QuantizedLstmParameters.m_InputGateBias =
        std::make_shared<ScopedTensorHandle>(params.GetInputGateBias());
    layer->m_QuantizedLstmParameters.m_ForgetGateBias =
        std::make_shared<ScopedTensorHandle>(params.GetForgetGateBias());
    layer->m_QuantizedLstmParameters.m_CellBias =
        std::make_shared<ScopedTensorHandle>(params.GetCellBias());
    layer->m_QuantizedLstmParameters.m_OutputGateBias =
        std::make_shared<ScopedTensorHandle>(params.GetOutputGateBias());

    return layer;
}
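
// Note (an observation, not enforced here): unlike the LstmInputParams-based
// overloads in this file, every tensor in QuantizedLstmInputParams is mandatory,
// which is why this function performs no nullptr checks; the Get*() accessors
// are expected to reject missing tensors themselves.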

IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor,
                                              const LstmInputParams& params,
                                              const char* name)
{
    const auto layer = m_Graph->AddLayer<QLstmLayer>(descriptor, name);

    // QLstm Basic Parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_shared<ScopedTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_shared<ScopedTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_shared<ScopedTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_shared<ScopedTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_shared<ScopedTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_shared<ScopedTensorHandle>(*(params.m_OutputGateBias));

    // QLstm Cifg parameters
    if (!descriptor.m_CifgEnabled)
    {
        if (params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Input To Input Weights cannot be NULL");
        }

        if (params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                "AddQLstmLayer: Recurrent To Input Weights cannot be NULL");
        }

        if (params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Input Gate Bias cannot be NULL");
        }

        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToInputWeights));
        layer->m_CifgParameters.m_InputGateBias =
            std::make_shared<ScopedTensorHandle>(*(params.m_InputGateBias));
    }

    // QLstm Projection parameters
    if (descriptor.m_ProjectionEnabled)
    {
        if (params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Projection Weights cannot be NULL");
        }

        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionWeights));

        // Projection bias is optional even if projection is enabled
        if (params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    // QLstm Peephole params
    if (descriptor.m_PeepholeEnabled)
    {
        if (params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Cell To Forget Weights cannot be NULL");
        }

        if (params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Cell To Output Weights cannot be NULL");
        }

        if (!descriptor.m_CifgEnabled)
        {
            if (params.m_CellToInputWeights == nullptr)
            {
                throw InvalidArgumentException("AddQLstmLayer: Cell To Input Weights cannot be NULL");
            }

            layer->m_PeepholeParameters.m_CellToInputWeights =
                std::make_shared<ScopedTensorHandle>(*(params.m_CellToInputWeights));
        }

        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_CellToOutputWeights));
    }

    // QLstm Layer Normalization params
    if (descriptor.m_LayerNormEnabled)
    {
        if (params.m_ForgetLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Forget layer normalization weights cannot be NULL");
        }

        if (params.m_CellLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Cell layer normalization weights cannot be NULL");
        }

        if (params.m_OutputLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Output layer normalization weights cannot be NULL");
        }

        if (!descriptor.m_CifgEnabled)
        {
            if (params.m_InputLayerNormWeights == nullptr)
            {
                throw InvalidArgumentException("AddQLstmLayer: Input layer normalization weights cannot be NULL");
            }

            layer->m_LayerNormParameters.m_InputLayerNormWeights =
                std::make_shared<ScopedTensorHandle>(*(params.m_InputLayerNormWeights));
        }

        layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_ForgetLayerNormWeights));
        layer->m_LayerNormParameters.m_CellLayerNormWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_CellLayerNormWeights));
        layer->m_LayerNormParameters.m_OutputLayerNormWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_OutputLayerNormWeights));
    }
    return layer;
}
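
// Illustrative sketch (hypothetical `network` and tensors; basic weights/biases
// omitted for brevity): projection may be enabled without a projection bias,
// since the bias is optional:
//     QLstmDescriptor qDesc;
//     qDesc.m_ProjectionEnabled = true;
//     LstmInputParams qParams;
//     qParams.m_ProjectionWeights = &projectionWeights;   // required
//     // qParams.m_ProjectionBias may stay nullptr        // allowed
//     IConnectableLayer* qLstm = network.AddQLstmLayer(qDesc, qParams, "qlstm");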

IConnectableLayer* NetworkImpl::AddLogicalBinaryLayer(const LogicalBinaryDescriptor& logicalBinaryDescriptor,
                                                      const char* name)
{
    return m_Graph->AddLayer<LogicalBinaryLayer>(logicalBinaryDescriptor, name);
}

IConnectableLayer* NetworkImpl::AddUnidirectionalSequenceLstmLayer(
    const UnidirectionalSequenceLstmDescriptor& descriptor,
    const LstmInputParams& params,
    const char* name)
{
    const auto layer = m_Graph->AddLayer<UnidirectionalSequenceLstmLayer>(descriptor, name);

    // Lstm Basic Parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_shared<ScopedTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_shared<ScopedTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_shared<ScopedTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_shared<ScopedTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_shared<ScopedTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_shared<ScopedTensorHandle>(*(params.m_OutputGateBias));

    // Lstm Cifg parameters
    if (!descriptor.m_CifgEnabled)
    {
        if (params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddUnidirectionalSequenceLstmLayer: Input To Input Weights cannot be NULL "
                                           "when CIFG is disabled.");
        }
        if (params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                "AddUnidirectionalSequenceLstmLayer: Recurrent To Input Weights cannot be NULL "
                "when CIFG is disabled.");
        }
        if (params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddUnidirectionalSequenceLstmLayer: Input Gate Bias cannot be NULL "
                                           "when CIFG is disabled.");
        }
        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToInputWeights));
        layer->m_CifgParameters.m_InputGateBias =
            std::make_shared<ScopedTensorHandle>(*(params.m_InputGateBias));
    }

    // Lstm projection parameters
    if (descriptor.m_ProjectionEnabled)
    {
        if (params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddUnidirectionalSequenceLstmLayer: Projection Weights cannot be NULL "
                                           "when projection is enabled.");
        }
        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionWeights));
        if (params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    // Lstm Peephole params
    if (descriptor.m_PeepholeEnabled)
    {
        if (!descriptor.m_CifgEnabled)
        {
            if (params.m_CellToInputWeights == nullptr)
            {
                throw InvalidArgumentException("AddUnidirectionalSequenceLstmLayer: Cell To Input Weights "
                                               "cannot be NULL when Peephole is enabled and CIFG disabled.");
            }

            layer->m_PeepholeParameters.m_CellToInputWeights =
                std::make_shared<ScopedTensorHandle>(*(params.m_CellToInputWeights));
        }

        if (params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddUnidirectionalSequenceLstmLayer: Cell To Forget Weights cannot be NULL "
                                           "when Peephole is enabled.");
        }
        if (params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddUnidirectionalSequenceLstmLayer: Cell To Output Weights cannot be NULL "
                                           "when Peephole is enabled.");
        }

        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_CellToOutputWeights));
    }

    // Lstm Layer Normalization params
    if (descriptor.m_LayerNormEnabled)
    {
        if (!descriptor.m_CifgEnabled)
        {
            if (params.m_InputLayerNormWeights == nullptr)
            {
                throw InvalidArgumentException("AddUnidirectionalSequenceLstmLayer: Input layer normalization weights "
                                               "cannot be NULL when layer normalization is enabled and CIFG disabled.");
            }
            layer->m_LayerNormParameters.m_InputLayerNormWeights =
                std::make_shared<ScopedTensorHandle>(*(params.m_InputLayerNormWeights));
        }

        if (params.m_ForgetLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddUnidirectionalSequenceLstmLayer: Forget layer normalization weights "
                                           "cannot be NULL when layer normalization is enabled.");
        }
        if (params.m_CellLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddUnidirectionalSequenceLstmLayer: Cell layer normalization weights "
                                           "cannot be NULL when layer normalization is enabled.");
        }
        if (params.m_OutputLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddUnidirectionalSequenceLstmLayer: Output layer normalization weights "
                                           "cannot be NULL when layer normalization is enabled.");
        }
        layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_ForgetLayerNormWeights));
        layer->m_LayerNormParameters.m_CellLayerNormWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_CellLayerNormWeights));
        layer->m_LayerNormParameters.m_OutputLayerNormWeights =
            std::make_shared<ScopedTensorHandle>(*(params.m_OutputLayerNormWeights));
    }
    return layer;
}

IConnectableLayer* NetworkImpl::AddBatchMatMulLayer(const BatchMatMulDescriptor& desc, const char* name)
{
    return m_Graph->AddLayer<BatchMatMulLayer>(desc, name);
}

IConnectableLayer* NetworkImpl::AddReverseV2Layer(const char* name)
{
    return m_Graph->AddLayer<ReverseV2Layer>(name);
}

IConnectableLayer* NetworkImpl::AddTileLayer(const TileDescriptor& desc, const char* name)
{
    return m_Graph->AddLayer<TileLayer>(desc, name);
}

IConnectableLayer* NetworkImpl::AddPrecompiledLayer(const PreCompiledDescriptor& preCompiledDescriptor,
                                                    CompiledBlobPtr compiledBlobPtr,
                                                    const Optional<BackendId>& backend,
                                                    const char* name)
{
    // This method is intended for backend users.
    PreCompiledLayer* layer;
    if (name)
    {
        layer = m_Graph->AddLayer<PreCompiledLayer>(preCompiledDescriptor, name);
    }
    else
    {
        layer = m_Graph->AddLayer<PreCompiledLayer>(preCompiledDescriptor, "pre-compiled");
    }

    // Assign the pre-compiled object to the layer. Pass only one compiled
    // network: Arm NN does not currently handle multiple pre-compiled objects
    // in a single pre-compiled layer.
    layer->SetPreCompiledObject(std::move(compiledBlobPtr));

    if (backend.has_value())
    {
        layer->SetBackendId(backend.value());
    }
    else if (layer->GetBackendHint().has_value())
    {
        layer->SetBackendId(layer->GetBackendHint().value());
    }

    return layer;
}
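
// Illustrative backend-side sketch (hypothetical blob and backend id; `network`
// is assumed to exist):
//     PreCompiledDescriptor pcDesc(1, 1);        // one input slot, one output slot
//     CompiledBlobPtr blob = /* produced by a backend's compiler */;
//     IConnectableLayer* pcLayer = network.AddPrecompiledLayer(
//         pcDesc, std::move(blob), Optional<BackendId>("SampleBackend"), "my-precompiled");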

IConnectableLayer* NetworkImpl::AddBroadcastToLayer(const BroadcastToDescriptor& desc, const char* name)
{
    return m_Graph->AddLayer<BroadcastToLayer>(desc, name);
}

IConnectableLayer* NetworkImpl::AddScatterNdLayer(const ScatterNdDescriptor& desc, const char* name)
{
    return m_Graph->AddLayer<ScatterNdLayer>(desc, name);
}

void NetworkImpl::ExecuteStrategy(IStrategy& strategy) const
{
    for (auto layer : GetGraph())
    {
        layer->ExecuteStrategy(strategy);
    }
}
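
// Illustrative sketch of a strategy (hypothetical, assuming IStrategy's
// ExecuteStrategy(layer, descriptor, constants, name, id) overload): a minimal
// visitor that counts the layers in a network.
//     class LayerCounter : public IStrategy
//     {
//     public:
//         void ExecuteStrategy(const IConnectableLayer* layer,
//                              const BaseDescriptor& descriptor,
//                              const std::vector<ConstTensor>& constants,
//                              const char* name,
//                              const LayerBindingId id) override
//         {
//             IgnoreUnused(layer, descriptor, constants, name, id);
//             ++m_Count;
//         }
//         unsigned int m_Count = 0;
//     };
//     LayerCounter counter;
//     network.ExecuteStrategy(counter);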

OptimizedNetworkImpl::OptimizedNetworkImpl(const OptimizedNetworkImpl& other, const ModelOptions& modelOptions)
    : m_Graph(new Graph(*other.m_Graph.get()))
    , m_Guid(arm::pipe::IProfilingService::GetNextGuid())
    , m_ModelOptions(modelOptions)
{
}

OptimizedNetworkImpl::OptimizedNetworkImpl(std::unique_ptr<Graph> graph)
    : m_Graph(std::move(graph)), m_Guid(arm::pipe::IProfilingService::GetNextGuid())
{
}

OptimizedNetworkImpl::OptimizedNetworkImpl(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions)
    : m_Graph(std::move(graph)), m_Guid(arm::pipe::IProfilingService::GetNextGuid()), m_ModelOptions(modelOptions)
{
}

OptimizedNetworkImpl::~OptimizedNetworkImpl()
{
}

void IOptimizedNetwork::ExecuteStrategy(IStrategy& strategy) const
{
    pOptimizedNetworkImpl->ExecuteStrategy(strategy);
}

void OptimizedNetworkImpl::ExecuteStrategy(IStrategy& strategy) const
{
    for (auto layer : GetGraph())
    {
        layer->ExecuteStrategy(strategy);
    }
}

} // namespace armnn