//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Network.hpp"
#include "Graph.hpp"
#include "Layer.hpp"
#include "DeviceSpec.hpp"
#include "Optimizer.hpp"
#include "SubgraphViewSelector.hpp"
#include "BackendSettings.hpp"
#include "optimizations/All.hpp"

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/TensorHandleFactoryRegistry.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <ProfilingService.hpp>

#include <fcntl.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <vector>

namespace armnn
{

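// INetwork is a thin facade over NetworkImpl (the pimpl idiom): it owns the
// implementation object and every method below simply forwards to it, keeping
// implementation details out of the public header.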
INetwork::INetwork(NetworkOptions networkOptions) : pNetworkImpl(new NetworkImpl(networkOptions)) {}

INetwork::~INetwork() = default;

Status INetwork::PrintGraph()
{
    return pNetworkImpl->PrintGraph();
}

IConnectableLayer* INetwork::AddInputLayer(LayerBindingId id, const char* name)
{
    return pNetworkImpl->AddInputLayer(id, name);
}

IConnectableLayer* INetwork::AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
                                               const char* name)
{
    return pNetworkImpl->AddArgMinMaxLayer(desc, name);
}

IConnectableLayer* INetwork::AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
                                                const char* name)
{
    return pNetworkImpl->AddComparisonLayer(comparisonDescriptor, name);
}

IConnectableLayer* INetwork::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
                                            const char* name)
{
    return pNetworkImpl->AddConcatLayer(concatDescriptor, name);
}

IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                   const ConstTensor& weights,
                                                   const Optional<ConstTensor>& biases,
                                                   const char* name)
{
    return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                   const ConstTensor& weights,
                                                   const char* name)
{
    Optional<ConstTensor> biases;
    return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                   const ConstTensor& weights,
                                                   const ConstTensor& biases,
                                                   const char* name)
{
    return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor,
                                               weights,
                                               armnn::Optional<ConstTensor>(biases),
                                               name);
}

IConnectableLayer* INetwork::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
                                                  const char* name)
{
    return pNetworkImpl->AddDepthToSpaceLayer(depthToSpaceDescriptor, name);
}

IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const Optional<ConstTensor>& biases,
    const char* name)
{
    return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const char* name)
{
    Optional<ConstTensor> biases;
    return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor& biases,
    const char* name)
{
    return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights,
                                                        armnn::Optional<ConstTensor>(biases), name);
}

IConnectableLayer* INetwork::AddDequantizeLayer(const char* name)
{
    return pNetworkImpl->AddDequantizeLayer(name);
}

IConnectableLayer* INetwork::AddDetectionPostProcessLayer(
    const DetectionPostProcessDescriptor& descriptor,
    const ConstTensor& anchors,
    const char* name)
{
    return pNetworkImpl->AddDetectionPostProcessLayer(descriptor, anchors, name);
}

IConnectableLayer* INetwork::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
                                                      const char* name)
{
    return pNetworkImpl->AddElementwiseUnaryLayer(elementwiseUnaryDescriptor, name);
}

IConnectableLayer* INetwork::AddFillLayer(const FillDescriptor& fillDescriptor,
                                          const char* name)
{
    return pNetworkImpl->AddFillLayer(fillDescriptor, name);
}

IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                    const ConstTensor& weights,
                                                    const Optional<ConstTensor>& biases,
                                                    const char* name)
{
    return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor,
                                                armnn::Optional<ConstTensor>(weights),
                                                biases,
                                                name);
}

IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                    const ConstTensor& weights,
                                                    const char* name)
{
    armnn::Optional<ConstTensor> biases;
    return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor,
                                                armnn::Optional<ConstTensor>(weights),
                                                biases,
                                                name);
}

IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                    const ConstTensor& weights,
                                                    const ConstTensor& biases,
                                                    const char* name)
{
    return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor,
                                                armnn::Optional<ConstTensor>(weights),
                                                armnn::Optional<ConstTensor>(biases),
                                                name);
}

IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                    const Optional<ConstTensor>& weights,
                                                    const Optional<ConstTensor>& biases,
                                                    const char* name)
{
    return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, weights, biases, name);
}

IConnectableLayer* INetwork::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                             const char* name)
{
    return pNetworkImpl->AddPermuteLayer(permuteDescriptor, name);
}

IConnectableLayer* INetwork::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                                    const char* name)
{
    return pNetworkImpl->AddBatchToSpaceNdLayer(batchToSpaceNdDescriptor, name);
}

IConnectableLayer* INetwork::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
                                               const char* name)
{
    return pNetworkImpl->AddPooling2dLayer(pooling2dDescriptor, name);
}

IConnectableLayer* INetwork::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
                                                const char* name)
{
    return pNetworkImpl->AddActivationLayer(activationDescriptor, name);
}

IConnectableLayer* INetwork::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
                                                   const char* name)
{
    return pNetworkImpl->AddNormalizationLayer(normalizationDescriptor, name);
}

IConnectableLayer* INetwork::AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name)
{
    return pNetworkImpl->AddSliceLayer(sliceDescriptor, name);
}

IConnectableLayer* INetwork::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
                                             const char* name)
{
    return pNetworkImpl->AddSoftmaxLayer(softmaxDescriptor, name);
}

IConnectableLayer* INetwork::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
                                              const char* name)
{
    return pNetworkImpl->AddSplitterLayer(splitterDescriptor, name);
}

IConnectableLayer* INetwork::AddMergeLayer(const char* name)
{
    return pNetworkImpl->AddMergeLayer(name);
}

IConnectableLayer* INetwork::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
                                            const char* name)
{
    return pNetworkImpl->AddConcatLayer(mergerDescriptor, name);
}

IConnectableLayer* INetwork::AddAbsLayer(const char* name)
{
    return pNetworkImpl->AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
}

IConnectableLayer* INetwork::AddAdditionLayer(const char* name)
{
    return pNetworkImpl->AddAdditionLayer(name);
}

IConnectableLayer* INetwork::AddMultiplicationLayer(const char* name)
{
    return pNetworkImpl->AddMultiplicationLayer(name);
}

IConnectableLayer* INetwork::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                        const ConstTensor& mean,
                                                        const ConstTensor& variance,
                                                        const ConstTensor& beta,
                                                        const ConstTensor& gamma,
                                                        const char* name)
{
    return pNetworkImpl->AddBatchNormalizationLayer(desc, mean, variance, beta, gamma, name);
}

IConnectableLayer* INetwork::AddRankLayer(const char* name)
{
    return pNetworkImpl->AddRankLayer(name);
}

IConnectableLayer* INetwork::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
                                                    const char* name)
{
    ResizeDescriptor resizeDescriptor;
    resizeDescriptor.m_Method           = ResizeMethod::Bilinear;
    resizeDescriptor.m_DataLayout       = descriptor.m_DataLayout;
    resizeDescriptor.m_TargetWidth      = descriptor.m_TargetWidth;
    resizeDescriptor.m_TargetHeight     = descriptor.m_TargetHeight;
    resizeDescriptor.m_AlignCorners     = descriptor.m_AlignCorners;
    resizeDescriptor.m_HalfPixelCenters = descriptor.m_HalfPixelCenters;

    return pNetworkImpl->AddResizeLayer(resizeDescriptor, name);
}

IConnectableLayer* INetwork::AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
                                            const char* name)
{
    return pNetworkImpl->AddResizeLayer(resizeDescriptor, name);
}

IConnectableLayer* INetwork::AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
                                            const char* name)
{
    return pNetworkImpl->AddReduceLayer(reduceDescriptor, name);
}

IConnectableLayer* INetwork::AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
                                                           const char* name)
{
    return pNetworkImpl->AddInstanceNormalizationLayer(desc, name);
}

IConnectableLayer* INetwork::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                     const char* name)
{
    return pNetworkImpl->AddL2NormalizationLayer(desc, name);
}

IConnectableLayer* INetwork::AddLogSoftmaxLayer(const LogSoftmaxDescriptor& logSoftmaxDescriptor,
                                                const char* name)
{
    return pNetworkImpl->AddLogSoftmaxLayer(logSoftmaxDescriptor, name);
}

IConnectableLayer* INetwork::AddConstantLayer(const ConstTensor& input,
                                              const char* name)
{
    return pNetworkImpl->AddConstantLayer(input, name);
}

IConnectableLayer* INetwork::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                             const char* name)
{
    return pNetworkImpl->AddReshapeLayer(reshapeDescriptor, name);
}

IConnectableLayer* INetwork::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                    const char* name)
{
    return pNetworkImpl->AddSpaceToBatchNdLayer(spaceToBatchNdDescriptor, name);
}

IConnectableLayer* INetwork::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
                                                  const char* name)
{
    return pNetworkImpl->AddSpaceToDepthLayer(spaceToDepthDescriptor, name);
}

IConnectableLayer* INetwork::AddFloorLayer(const char* name)
{
    return pNetworkImpl->AddFloorLayer(name);
}

IConnectableLayer* INetwork::AddOutputLayer(LayerBindingId id, const char* name)
{
    return pNetworkImpl->AddOutputLayer(id, name);
}

IConnectableLayer* INetwork::AddLstmLayer(const LstmDescriptor& descriptor,
                                          const LstmInputParams& params,
                                          const char* name)
{
    return pNetworkImpl->AddLstmLayer(descriptor, params, name);
}

IConnectableLayer* INetwork::AddDivisionLayer(const char* name)
{
    return pNetworkImpl->AddDivisionLayer(name);
}

IConnectableLayer* INetwork::AddSubtractionLayer(const char* name)
{
    return pNetworkImpl->AddSubtractionLayer(name);
}

IConnectableLayer* INetwork::AddMaximumLayer(const char* name)
{
    return pNetworkImpl->AddMaximumLayer(name);
}

IConnectableLayer* INetwork::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
{
    return pNetworkImpl->AddMeanLayer(meanDescriptor, name);
}

IConnectableLayer* INetwork::AddPadLayer(const PadDescriptor& padDescriptor,
                                         const char* name)
{
    return pNetworkImpl->AddPadLayer(padDescriptor, name);
}

IConnectableLayer* INetwork::AddQuantizeLayer(const char* name)
{
    return pNetworkImpl->AddQuantizeLayer(name);
}

IConnectableLayer* INetwork::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                                  const char* name)
{
    return pNetworkImpl->AddStridedSliceLayer(stridedSliceDescriptor, name);
}

IConnectableLayer* INetwork::AddMinimumLayer(const char* name)
{
    return pNetworkImpl->AddMinimumLayer(name);
}

IConnectableLayer* INetwork::AddGreaterLayer(const char* name)
{
    return pNetworkImpl->AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
}

IConnectableLayer* INetwork::AddEqualLayer(const char* name)
{
    return pNetworkImpl->AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
}

IConnectableLayer* INetwork::AddRsqrtLayer(const char* name)
{
    return pNetworkImpl->AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
}

IConnectableLayer* INetwork::AddGatherLayer(const char* name)
{
    GatherDescriptor gatherDescriptor{};
    return pNetworkImpl->AddGatherLayer(gatherDescriptor, name);
}

IConnectableLayer* INetwork::AddGatherLayer(const GatherDescriptor& descriptor,
                                            const char* name)
{
    return pNetworkImpl->AddGatherLayer(descriptor, name);
}

IConnectableLayer* INetwork::AddSwitchLayer(const char* name)
{
    return pNetworkImpl->AddSwitchLayer(name);
}

IConnectableLayer* INetwork::AddPreluLayer(const char* name)
{
    return pNetworkImpl->AddPreluLayer(name);
}

IConnectableLayer* INetwork::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
                                                            const ConstTensor& weights,
                                                            const Optional<ConstTensor>& biases,
                                                            const char* name)
{
    return pNetworkImpl->AddTransposeConvolution2dLayer(descriptor, weights, biases, name);
}

IConnectableLayer* INetwork::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
                                               const char* name)
{
    return pNetworkImpl->AddTransposeLayer(transposeDescriptor, name);
}

IConnectableLayer* INetwork::AddStackLayer(const StackDescriptor& descriptor,
                                           const char* name)
{
    return pNetworkImpl->AddStackLayer(descriptor, name);
}

IConnectableLayer* INetwork::AddStandInLayer(const StandInDescriptor& descriptor,
                                             const char* name)
{
    return pNetworkImpl->AddStandInLayer(descriptor, name);
}

IConnectableLayer* INetwork::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
                                                   const char* name)
{
    return pNetworkImpl->AddQuantizedLstmLayer(params, name);
}

IConnectableLayer* INetwork::AddQLstmLayer(const QLstmDescriptor& descriptor,
                                           const LstmInputParams& params,
                                           const char* name)
{
    return pNetworkImpl->AddQLstmLayer(descriptor, params, name);
}

IConnectableLayer* INetwork::AddLogicalBinaryLayer(const LogicalBinaryDescriptor& descriptor,
                                                   const char* name)
{
    return pNetworkImpl->AddLogicalBinaryLayer(descriptor, name);
}

void INetwork::Accept(ILayerVisitor& visitor) const
{
    return pNetworkImpl->Accept(visitor);
}

void INetwork::ExecuteStrategy(IStrategy& strategy) const
{
    return pNetworkImpl->ExecuteStrategy(strategy);
}

armnn::INetwork* INetwork::CreateRaw(NetworkOptions networkOptions)
{
    return new INetwork(networkOptions);
}

armnn::INetworkPtr INetwork::Create(NetworkOptions networkOptions)
{
    return INetworkPtr(CreateRaw(networkOptions), &INetwork::Destroy);
}

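// A minimal usage sketch for the factory above (illustrative only: the binding ids and
// tensor shape are hypothetical, and error handling is omitted):
//
//     INetworkPtr net = INetwork::Create();
//     IConnectableLayer* input  = net->AddInputLayer(0);
//     IConnectableLayer* output = net->AddOutputLayer(0);
//     input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
//     input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 8 }, DataType::Float32));
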
void INetwork::Destroy(INetwork* network)
{
    delete network;
}

IOptimizedNetwork::IOptimizedNetwork(std::unique_ptr<Graph> graph)
    : pOptimizedNetworkImpl(new OptimizedNetworkImpl(std::move(graph))) {}

IOptimizedNetwork::IOptimizedNetwork(std::unique_ptr<OptimizedNetworkImpl> impl)
    : pOptimizedNetworkImpl(std::move(impl)) {}

IOptimizedNetwork::IOptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions)
    : pOptimizedNetworkImpl(new OptimizedNetworkImpl(std::move(graph), modelOptions)) {}

IOptimizedNetwork::~IOptimizedNetwork() = default;

void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
{
    delete network;
}

Status IOptimizedNetwork::PrintGraph()
{
    return pOptimizedNetworkImpl->PrintGraph();
}

Status IOptimizedNetwork::SerializeToDot(std::ostream& stream) const
{
    return pOptimizedNetworkImpl->SerializeToDot(stream);
}

profiling::ProfilingGuid IOptimizedNetwork::GetGuid() const
{
    return pOptimizedNetworkImpl->GetGuid();
}

Status OptimizedNetworkImpl::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

Status OptimizedNetworkImpl::SerializeToDot(std::ostream& stream) const
{
    return m_Graph->SerializeToDot(stream);
}

void ReportError(const std::string& errorMessage,
                 Optional<std::vector<std::string>&> errorMessages)
{
    std::stringstream fullErrorMessage;
    fullErrorMessage << "ERROR: " << errorMessage;
    ARMNN_LOG(warning) << fullErrorMessage.str();
    if (errorMessages)
    {
        errorMessages.value().push_back(fullErrorMessage.str());
    }
}

void ReportWarning(const std::string& warningMessage,
                   Optional<std::vector<std::string>&> warningMessages)
{
    std::stringstream fullWarningMessage;
    fullWarningMessage << "WARNING: " << warningMessage;
    ARMNN_LOG(warning) << fullWarningMessage.str();
    if (warningMessages)
    {
        warningMessages.value().push_back(fullWarningMessage.str());
    }
}

OptimizationResult ReturnWithError(OptimizationResult res,
                                   const Layer* layer,
                                   const BackendSettings& backendSettings,
                                   Optional<std::vector<std::string>&> errMessages)
{
    std::stringstream failureMsg;
    failureMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
               << " is not supported on any preferred backend " << backendSettings.m_PreferredBackends;
    ReportError(failureMsg.str(), errMessages);

    res.m_Error = true;
    return res;
}

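// Checks that every QAsymmU8 output of the given layer has a non-zero quantization scale,
// reporting each missing scale; Softmax outputs are additionally coerced to the fixed
// scale 1/256 and offset 0. Returns false if any scale was left unset.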
bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
{
    bool noErrors = true;
    unsigned int numOutputs = layer->GetNumOutputSlots();
    for (unsigned int i = 0; i < numOutputs; i++) {
        OutputSlot& outputSlot = layer->GetOutputSlot(i);
        TensorInfo info = outputSlot.GetTensorInfo();
        if (DataType::QAsymmU8 == info.GetDataType()) {
            if (0.f == info.GetQuantizationScale()) {
                noErrors = false;
                std::stringstream ss;
                ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
                   << " (" << layer->GetNameStr() << ") is of type"
                   << " Quantized 8 bit but its scale parameter has not been set";
                ReportError(ss.str(), errMessages);
            }
            // Softmax under QuantisedAsymm8 must always be scale (1.0f/256.0f) and offset 0
            if ((info.GetQuantizationScale() != (1.0f / 256.0f) ||
                 info.GetQuantizationOffset() != 0) &&
                 layer->GetType() == armnn::LayerType::Softmax)
            {
                std::stringstream ss;
                ss << "Quantization parameters for Softmax layer (Scale: " <<
                    info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
                    ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
                ARMNN_LOG(warning) << ss.str();
                info.SetQuantizationScale((1.0f / 256.0f));
                info.SetQuantizationOffset(0);
                outputSlot.SetTensorInfo(info);
            }
        }
    }
    return noErrors;
}

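// If a Convolution2d or FullyConnected layer still carries BFloat16 weights after a
// Bf16->Fp32 conversion layer has been inserted in front of it, rewrite the weight tensor
// itself to Float32 so that the now-Float32 workload can consume it.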
template <typename LayerT>
LayerT* ConvertBf16ToFp32Weight(Layer* l)
{
    LayerT* layer = PolymorphicDowncast<LayerT*>(l);
    if ((layer->GetType() == LayerType::Convolution2d || layer->GetType() == LayerType::FullyConnected)
         && layer->m_Weight)
    {
        const TensorInfo& info = layer->m_Weight->GetTensorInfo();

        if (info.GetDataType() == DataType::BFloat16)
        {
            std::vector<float> newValues(info.GetNumElements());

            armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(
                layer->m_Weight->template GetConstTensor<armnn::BFloat16>(), info.GetNumElements(), newValues.data());

            TensorInfo newInfo(info.GetShape(), DataType::Float32);
            ConstTensor newInput(newInfo, newValues);
            layer->m_Weight.reset(new ScopedCpuTensorHandle(newInput));
        }
    }
    return layer;
}

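// Tries to place a single layer on the given backend. If the backend rejects the layer in
// Float16 or BFloat16 but accepts it in Float32, conversion layers are inserted around it
// and given backends of their own; any other rejection is reported as a warning so the
// caller can fall back to the next preferred backend.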
OptimizationResult AttemptBackendAssignment(BackendSettings& backendSettings,
                                            Graph& graph,
                                            Layer* layer,
                                            BackendId backend,
                                            DataType dataTypeIn,
                                            DataType dataTypeOut,
                                            const std::vector<BackendId>& availablePreferredBackends,
                                            std::string& reasonIfUnsupported,
                                            Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose meaningful error message before returning with error
    auto ReturnError = [&](const Layer* layer)
    {
        return ReturnWithError(result, layer, backendSettings, errMessages);
    };

    // Need to set the compute device on the layer before we can check if it is supported
    layer->SetBackendId(backend);
    if (!IWorkloadFactory::IsLayerSupported(*layer, EmptyOptional(), reasonIfUnsupported))
    {
        if (dataTypeIn == DataType::Float16 || dataTypeOut == DataType::Float16)
        {
            if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                && layer->GetType() != LayerType::ConvertFp32ToFp16
                && layer->GetType() != LayerType::ConvertFp16ToFp32)
            {
                // Insert FP16 -> FP32 conversion layer before current layer
                std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers;
                if (dataTypeIn == DataType::Float16)
                {
                    convertFp16ToFp32Layers =
                        InsertConvertFp16ToFp32LayersBefore(graph, *layer);
                }

                // Insert FP32 -> FP16 conversion layer after current layer
                std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers;
                if (dataTypeOut == DataType::Float16)
                {
                    convertFp32ToFp16Layers =
                        InsertConvertFp32ToFp16LayersAfter(graph, *layer);
                }

                // Assign a supported backend to the newly introduced conversion layers
                auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                {
                    bool supportedBackendFound = false;
                    std::string reasonIfUnsupported;

                    // Try preferred backend first
                    layer->SetBackendId(preferredBackend);
                    if (IWorkloadFactory::IsLayerSupported(*layer,
                                                           EmptyOptional(),
                                                           reasonIfUnsupported))
                    {
                        supportedBackendFound = true;
                    }
                    else
                    {
                        for (const auto& backend : availablePreferredBackends)
                        {
                            // Skip preferred backend (we already determined that it is not supported)
                            if (backend == preferredBackend)
                            {
                                continue;
                            }

                            layer->SetBackendId(backend);
                            if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                   EmptyOptional(),
                                                                   reasonIfUnsupported))
                            {
                                supportedBackendFound = true;
                                break;
                            }
                        }
                    }

                    return supportedBackendFound;
                };

                for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                return result;
            }
        }
        else if (dataTypeIn == DataType::BFloat16 || dataTypeOut == DataType::BFloat16)
        {
            if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                && layer->GetType() != LayerType::ConvertFp32ToBf16
                && layer->GetType() != LayerType::ConvertBf16ToFp32)
            {
                // Insert BF16 -> FP32 conversion layer before current layer
                std::vector<ConvertBf16ToFp32Layer*> convertBf16ToFp32Layers;
                if (dataTypeIn == DataType::BFloat16)
                {
                    convertBf16ToFp32Layers =
                        InsertConvertBf16ToFp32LayersBefore(graph, *layer);
                    if (layer->GetType() == LayerType::Convolution2d)
                    {
                        ConvertBf16ToFp32Weight<Convolution2dLayer>(layer);
                    }
                    else if (layer->GetType() == LayerType::FullyConnected)
                    {
                        ConvertBf16ToFp32Weight<FullyConnectedLayer>(layer);
                    }
                }

                // Insert FP32 -> BF16 conversion layer after current layer
                std::vector<ConvertFp32ToBf16Layer*> convertFp32ToBf16Layers;
                if (dataTypeOut == DataType::BFloat16)
                {
                    convertFp32ToBf16Layers =
                        InsertConvertFp32ToBf16LayersAfter(graph, *layer);
                }

                // Assign a supported backend to the newly introduced conversion layers
                auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                {
                    bool supportedBackendFound = false;
                    std::string reasonIfUnsupported;

                    // Try preferred backend first
                    layer->SetBackendId(preferredBackend);
                    if (IWorkloadFactory::IsLayerSupported(*layer,
                                                           EmptyOptional(),
                                                           reasonIfUnsupported))
                    {
                        supportedBackendFound = true;
                    }
                    else
                    {
                        for (const auto& backend : availablePreferredBackends)
                        {
                            // Skip preferred backend (we already determined that it is not supported)
                            if (backend == preferredBackend)
                            {
                                continue;
                            }

                            layer->SetBackendId(backend);
                            if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                   EmptyOptional(),
                                                                   reasonIfUnsupported))
                            {
                                supportedBackendFound = true;
                                break;
                            }
                        }
                    }

                    return supportedBackendFound;
                };

                for (ConvertBf16ToFp32Layer* convertLayer : convertBf16ToFp32Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                for (ConvertFp32ToBf16Layer* convertLayer : convertFp32ToBf16Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                return result;
            }
        }

        std::stringstream warningMsg;
        warningMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                   << " is not supported on requested backend " << layer->GetBackendId().Get()
                   << " for input data type " << GetDataTypeName(dataTypeIn)
                   << " and output data type " << GetDataTypeName(dataTypeOut)
                   << " (reason: " << reasonIfUnsupported
                   << "), falling back to the next backend.";
        ReportWarning(warningMsg.str(), errMessages);

        return OptimizationResult(true, false);
    }
    else
    {
        return result;
    }
}

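// Assigns a backend to every layer in [firstLayer, lastLayer): the layer's backend hint is
// tried first, then each available preferred backend in order. Unsupported plumbing layers
// (MemCopy, Constant, Permute) fall back to CpuRef; anything else unplaceable is an error.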
OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  Graph::Iterator& firstLayer,
                                  Graph::Iterator& lastLayer,
                                  Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose meaningful error message before returning with error
    auto ReturnError = [&](const Layer* layer)
    {
        return ReturnWithError(result, layer, backendSettings, errMessages);
    };

    auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
    if (availablePreferredBackends.empty())
    {
        std::stringstream failureMsg;
        failureMsg << "No preferred backends are available";
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    }

    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        auto layer = *it;

        DataType dataTypeIn  = layer->GetNumInputSlots() == 0 ? DataType::Float32 :
            layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType();
        DataType dataTypeOut = layer->GetNumOutputSlots() == 0 ? DataType::Float32 :
            layer->GetOutputSlot(0).GetTensorInfo().GetDataType();

        std::string reasonIfUnsupported;
        bool found = false;
        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
        {
            // Don't bomb immediately, find all the quantized outputs
            // which haven't had a scale set and report them all back.
            result.m_Error = true;
        }

        // First try to assign the layer to its backend hint
        if (layer->GetBackendHint().has_value() &&
            backendSettings.IsBackendSupported(layer->GetBackendHint().value()) &&
            AttemptBackendAssignment(backendSettings,
                                     optNetObjPtr->GetGraph(),
                                     layer,
                                     layer->GetBackendHint().value(),
                                     dataTypeIn,
                                     dataTypeOut,
                                     availablePreferredBackends,
                                     reasonIfUnsupported,
                                     errMessages).IsOk())
        {
            found = true;
            backendSettings.m_SelectedBackends.insert(layer->GetBackendHint().value());
        }
        else
        {
            // Try to assign the layer to the preferred list of backends
            for (const auto& backend : availablePreferredBackends)
            {
                if (layer->GetBackendHint().has_value() &&
                    layer->GetBackendHint().value() == backend)
                {
                    continue; // Don't re-test the backend hint
                }

                OptimizationResult res = AttemptBackendAssignment(backendSettings,
                                                                  optNetObjPtr->GetGraph(),
                                                                  layer,
                                                                  backend,
                                                                  dataTypeIn,
                                                                  dataTypeOut,
                                                                  availablePreferredBackends,
                                                                  reasonIfUnsupported,
                                                                  errMessages);

                if (res.IsOk())
                {
                    found = true;
                    backendSettings.m_SelectedBackends.insert(backend);
                    break;
                }
                else if (res.IsError())
                {
                    return res; // Cannot continue.
                                // Note: we don't need to log the error as it would already
                                // be logged in AttemptBackendAssignment().
                }
                else
                {
                    ARMNN_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
                }
            }
        }

        // If the layer is unsupported by any devices, log and return a null network.
        if (!found)
        {
            // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
            // fallback we should set the compute device on the layer to CpuRef (these are not
            // available as accelerated operations, or are only available under certain
            // conditions, currently they comprise MemCopy, Constant, Permute)
            armnn::LayerType layerType = layer->GetType();
            if (!backendSettings.IsCpuRefUsed() && (layerType == armnn::LayerType::MemCopy ||
                                                    layerType == armnn::LayerType::Constant ||
                                                    layerType == armnn::LayerType::Permute))
            {
                BackendId cpuBackendId(armnn::Compute::CpuRef);
                layer->SetBackendId(cpuBackendId);
                backendSettings.m_SelectedBackends.insert(cpuBackendId);
            }
            else
            {
                return ReturnError(layer);
            }
        }
    }

    return result;
}

OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  SubgraphView& subgraph,
                                  Optional<std::vector<std::string>&> errMessages)
{
    Graph::Iterator firstLayer = subgraph.begin();
    Graph::Iterator lastLayer  = subgraph.end();
    return AssignBackends(optNetObjPtr,
                          backendSettings,
                          firstLayer,
                          lastLayer,
                          errMessages);
}

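// Instantiates each supported backend through the BackendRegistry and lets it register its
// tensor handle factories, building the id -> backend map used by the rest of optimization.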
BackendsMap CreateSupportedBackends(TensorHandleFactoryRegistry& handleFactoryRegistry,
                                    BackendSettings& backendSettings)
{
    BackendsMap backends;
    auto const& backendRegistry = BackendRegistryInstance();
    for (auto&& selectedBackend : backendSettings.m_SupportedBackends)
    {
        auto backendFactory = backendRegistry.GetFactory(selectedBackend);
        auto backendObjPtr = backendFactory();
        ARMNN_ASSERT(backendObjPtr);

        backendObjPtr->RegisterTensorHandleFactories(handleFactoryRegistry);

        backends[backendObjPtr->GetId()] = std::move(backendObjPtr);
    }

    return backends;
}

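// For every selected backend: carve out the sub-graphs assigned to it, offer each one to
// the backend's OptimizeSubgraphView(), splice accepted substitutions back into the main
// graph, and re-assign the layers of rejected sub-graphs to the remaining backends.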
OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl* optNetObjPtr,
                                             BackendSettings& backendSettings,
                                             BackendsMap& backends,
                                             const ModelOptions& modelOptions,
                                             Optional<std::vector<std::string>&> errMessages)
{
    ARMNN_ASSERT(optNetObjPtr);

    OptimizationResult result;

    // Get the optimized graph
    Graph& optGraph = optNetObjPtr->GetGraph();

    // Run backend specific optimizations
    for (auto&& selectedBackend : backendSettings.m_SelectedBackends)
    {
        auto backendObjPtr = backends.find(selectedBackend)->second.get();
        ARMNN_ASSERT(backendObjPtr);

        // Select sub-graphs based on backend
        SubgraphViewSelector::Subgraphs subgraphs =
            SubgraphViewSelector::SelectSubgraphs(optGraph,
                                                  // Select layers assigned to the requested backend
                                                  [&backendObjPtr](const Layer& layer)
                                                  {
                                                      return layer.GetType() != LayerType::Input &&
                                                             layer.GetType() != LayerType::Output &&
                                                             layer.GetBackendId() == backendObjPtr->GetId();
                                                  });
        if (subgraphs.empty())
        {
            // No sub-graphs found, try with next selected backend
            continue;
        }

        // Try to optimize each sub-graph
        for (auto& subgraph : subgraphs)
        {
            // Try to optimize the current sub-graph
            OptimizationViews optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraph, modelOptions);
            ARMNN_ASSERT(optimizationViews.Validate(*subgraph));

            // Optimization attempted, check the resulting optimized sub-graph
            for (auto& substitution : optimizationViews.GetSubstitutions())
            {
                // Sub-graph optimized, substitute the sub-graph with the new optimized one in the main optimized graph
                SubgraphView& replacementSubgraph   = substitution.m_ReplacementSubgraph;
                SubgraphView& substitutableSubgraph = substitution.m_SubstitutableSubgraph;
                optGraph.SubstituteSubgraph(substitutableSubgraph, replacementSubgraph);

                // Assign the current backend to the optimized sub-graph
                std::for_each(replacementSubgraph.begin(), replacementSubgraph.end(), [&selectedBackend](Layer* l)
                {
                    ARMNN_ASSERT(l);
                    l->SetBackendId(selectedBackend);
                });
            }

            if (!optimizationViews.GetFailedSubgraphs().empty())
            {
                std::stringstream warningMsg;
                warningMsg << "Some sub-graph(s) failed to optimize on " << backendObjPtr->GetId() << " backend.";
                ReportWarning(warningMsg.str(), errMessages);

                // Failed to optimize the given sub-graph, re-assign the sub-graph layers to other available backends
                BackendSettings settingsCopy(backendSettings);
                if (!backendObjPtr->GetId().IsCpuRef())
                {
                    // Add the current backend to the list of backends to ignore
                    settingsCopy.m_IgnoredBackends.insert(backendObjPtr->GetId());
                }

                int count = 0;
                for (auto& failedSubgraph : optimizationViews.GetFailedSubgraphs())
                {
                    // An error occurred: the optimization was attempted but not performed, try different backends
                    std::stringstream subgraphMsg;
                    subgraphMsg << "Re-assigning backends to " << failedSubgraph.GetLayers().size()
                                << " layers inside sub-graph " << count++;
                    ReportWarning(subgraphMsg.str(), errMessages);

                    OptimizationResult reassignmentResult = AssignBackends(optNetObjPtr,
                                                                           settingsCopy,
                                                                           *subgraph,
                                                                           errMessages);
                    if (reassignmentResult.m_Error)
                    {
                        // Failed to re-assign one of the remaining backends to each layer of the sub-graph
                        result.m_Error = true;
                        return result;
                    }
                }
            }
        }
    }

    return result;
}

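// Two distinct factories can still share a tensor without a copy when the source's export
// flags overlap the destination's import flags; otherwise transferring between them
// requires a memory copy.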
bool RequiresCopy(ITensorHandleFactory::FactoryId src,
                  ITensorHandleFactory::FactoryId dst,
                  TensorHandleFactoryRegistry& registry)
{
    if (src != dst)
    {
        ITensorHandleFactory* srcFactory = registry.GetFactory(src);
        ITensorHandleFactory* dstFactory = registry.GetFactory(dst);

        if (srcFactory && dstFactory &&
            (srcFactory->GetExportFlags() & dstFactory->GetImportFlags()) != 0)
        {
            return false;
        }
        return true;
    }
    return false;
}

// Find the handle factory for the input layer which results in fewest required copies.
ITensorHandleFactory::FactoryId CalculateSlotOptionForInput(BackendsMap& backends,
                                                            OutputSlot& slot,
                                                            TensorHandleFactoryRegistry& registry)
{
    Layer& layer = slot.GetOwningLayer();
    ARMNN_ASSERT(layer.GetType() == LayerType::Input);

    // Explicitly select the tensor handle factory for InputLayer because the rules for it are slightly different. It
    // doesn't matter which backend it is assigned to because they all use the same implementation, which
    // requires Map/Unmap support. This means that, so long as the handle type supports map/unmap semantics, we can
    // select a factory with maximum compatibility with the layers connected to the InputLayer.

    // First ensure the source backend can support the tensor handle API
    auto frmBackend = backends.find(layer.GetBackendId());
    if (frmBackend == backends.end() ||
        !frmBackend->second->SupportsTensorAllocatorAPI())
    {
        return ITensorHandleFactory::LegacyFactoryId;
    }

    // Go through all connections to the output slot and determine the TensorHandleFactory which results in the
    // fewest copies.
    std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
    int topScore = 0;
    ITensorHandleFactory::FactoryId topChoice = ITensorHandleFactory::LegacyFactoryId;

    for (auto&& connection : slot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();

        auto toBackend = backends.find(connectedLayer.GetBackendId());
        ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

        if (!toBackend->second.get()->SupportsTensorAllocatorAPI())
        {
            // The destination backend does not support the tensor allocator API, move to the next one
            continue;
        }

        auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
        for (auto&& dst : dstPrefs)
        {
            // Input layers use the mem copy workload or import, so the selected factory must
            // support either the map/unmap API or Import API
            ITensorHandleFactory* factory = registry.GetFactory(dst);
            if (!factory->SupportsMapUnmap() &&
                !CheckFlag(factory->GetImportFlags(), MemorySource::Malloc)) // Just support cpu mem imports for now
            {
                // The current tensor handle factory does not support the map/unmap or import
                // strategy, move to the next one
                continue;
            }

            auto it = factoryScores.find(dst);
            if (it == factoryScores.end())
            {
                // Add new score to the table
                factoryScores[dst] = 0;
                if (topChoice == ITensorHandleFactory::LegacyFactoryId)
                {
                    topChoice = dst;
                }
            }
            else
            {
                // Increase the score
                factoryScores[dst]++;

                // Track the best option
                if (factoryScores[dst] > topScore)
                {
                    topScore = factoryScores[dst];
                    topChoice = dst;
                }
            }
        }
    }

    return topChoice;
}

// Find the handle factory for the output layer which results in fewest required copies.
ITensorHandleFactory::FactoryId CalculateSlotOptionForOutput(BackendsMap& backends,
                                                             OutputSlot& slot,
                                                             TensorHandleFactoryRegistry& registry)
{
    IgnoreUnused(backends, slot, registry);
    return ITensorHandleFactory::DeferredFactoryId;
}

// For all handle factories supported on the source backend, we wish to find the one which requires the fewest copies
// when considering all connections.
ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
                                                    OutputSlot& outputSlot,
                                                    TensorHandleFactoryRegistry& registry)
{
    // First ensure the source backend can support the tensor handle API
    Layer& layer = outputSlot.GetOwningLayer();
    auto frmBackend = backends.find(layer.GetBackendId());
    if (frmBackend == backends.end() ||
        !frmBackend->second->SupportsTensorAllocatorAPI())
    {
        return ITensorHandleFactory::LegacyFactoryId;
    }

    // Connections to Output Layers require support for map/unmap on the TensorHandle.
    bool requiresMapUnmap = false;
    for (auto&& connection : outputSlot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();
        if (connectedLayer.GetType() == LayerType::Output)
        {
            requiresMapUnmap = true;
        }
    }

    IBackendInternal* srcBackend = frmBackend->second.get();
    auto srcPrefs = srcBackend->GetHandleFactoryPreferences();

    // Initialize the scores
    std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
    for (auto&& pref : srcPrefs)
    {
        if (requiresMapUnmap) // Only consider factories that support map/unmap if required
        {
            ITensorHandleFactory* factory = registry.GetFactory(pref);
            if (!factory->SupportsMapUnmap())
            {
                // The current tensor handle factory does not support the map/unmap strategy, move to the next one
                continue;
            }
        }

        auto it = factoryScores.find(pref);
        if (it == factoryScores.end())
        {
            // Add new score to the table
            factoryScores[pref] = 0;
        }
    }

    // Score each handle factory based on how many times it requires copies on the slot connections
    for (auto&& connection : outputSlot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();

        auto toBackend = backends.find(connectedLayer.GetBackendId());
        ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

        auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
        for (auto&& src : srcPrefs)
        {
            if (factoryScores.find(src) == factoryScores.end()) // Don't consider excluded factories
            {
                continue;
            }

            for (auto&& dst : dstPrefs)
            {
                if (RequiresCopy(src, dst, registry))
                {
                    // Copy required for this connection, count it against the source factory
                    factoryScores[src]++;
                    break;
                }
            }
        }
    }

    // Find the lowest score
    int minScore = std::numeric_limits<int>::max();
    for (auto it : factoryScores)
    {
        minScore = std::min(minScore, it.second);
    }

    // Collect factories matching the best (lowest) score
    std::vector<ITensorHandleFactory::FactoryId> optimalFactories;
    for (auto it : factoryScores)
    {
        if (it.second == minScore)
        {
            optimalFactories.push_back(it.first);
        }
    }

    // For all compatible factories matching the best score, find the preferred one for the current layer.
    for (auto&& srcPref : srcPrefs)
    {
        for (auto&& comp : optimalFactories)
        {
            if (comp == srcPref)
            {
                return comp;
            }
        }
    }

    return ITensorHandleFactory::LegacyFactoryId;
}

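// Decides how a tensor crosses one graph edge: direct sharing when both ends agree on a
// factory (or the destination is an Output layer), zero-copy export/import when the flags
// match and neither side needs padding, a map/unmap copy otherwise, and Undefined when no
// compatible path exists.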
Derek Lambertif674aa02019-08-01 15:56:25 +01001363EdgeStrategy CalculateEdgeStrategy(BackendsMap& backends,
1364 ITensorHandleFactory::FactoryId srcFactoryId,
1365 const Layer& layer,
1366 const Layer& connectedLayer,
Narumol Prangnawarata2493a02020-08-19 14:39:07 +01001367 TensorHandleFactoryRegistry& registry,
1368 bool importEnabled)
Derek Lamberti84da38b2019-06-13 11:40:08 +01001369{
1370 auto toBackend = backends.find(connectedLayer.GetBackendId());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001371 ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
Derek Lamberti84da38b2019-06-13 11:40:08 +01001372
1373 auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
1374
1375 // Legacy API check for backward compatibility
1376 if (srcFactoryId == ITensorHandleFactory::LegacyFactoryId || dstPrefs.empty())
1377 {
1378 if (layer.GetBackendId() != connectedLayer.GetBackendId())
1379 {
Derek Lambertif674aa02019-08-01 15:56:25 +01001380 return EdgeStrategy::CopyToTarget;
Derek Lamberti84da38b2019-06-13 11:40:08 +01001381 }
1382 else
1383 {
Derek Lambertif674aa02019-08-01 15:56:25 +01001384 return EdgeStrategy::DirectCompatibility;
Derek Lamberti84da38b2019-06-13 11:40:08 +01001385 }
1386 }
1387
1388 // TensorHandleFactory API present, so perform more sophisticated strategies.
Derek Lambertif674aa02019-08-01 15:56:25 +01001389 // Dst Output layers don't require copy because they use import or map/unmap
Derek Lamberti84da38b2019-06-13 11:40:08 +01001390 if (connectedLayer.GetType() == LayerType::Output)
1391 {
Derek Lambertif674aa02019-08-01 15:56:25 +01001392 return EdgeStrategy::DirectCompatibility;
Derek Lamberti84da38b2019-06-13 11:40:08 +01001393 }
1394
1395 // Search for direct match in prefs
1396 for (auto&& pref : dstPrefs)
1397 {
1398 if (pref == srcFactoryId)
1399 {
Derek Lambertif674aa02019-08-01 15:56:25 +01001400 return EdgeStrategy::DirectCompatibility;
Derek Lamberti84da38b2019-06-13 11:40:08 +01001401 }
1402 }
1403
1404 // Search for export/import options
1405 ITensorHandleFactory* srcFactory = registry.GetFactory(srcFactoryId);
Narumol Prangnawarata2493a02020-08-19 14:39:07 +01001406 if (srcFactory->GetExportFlags() != 0 && importEnabled)
Derek Lamberti84da38b2019-06-13 11:40:08 +01001407 {
1408 for (auto&& pref : dstPrefs)
1409 {
1410 ITensorHandleFactory* dstFactory = registry.GetFactory(pref);
James Conroyffab16f2019-11-07 14:37:09 +00001411
James Conroy47e863d2019-11-18 17:07:43 +00001412 // Handles cases when a destPref is not listed in TensorHandleFactoryRegistry
James Conroyffab16f2019-11-07 14:37:09 +00001413 if (!dstFactory) {
James Conroy47e863d2019-11-18 17:07:43 +00001414 continue;
James Conroyffab16f2019-11-07 14:37:09 +00001415 }
1416
Derek Lambertif674aa02019-08-01 15:56:25 +01001417 if ((dstFactory->GetImportFlags() & srcFactory->GetExportFlags()) != 0)
Derek Lamberti84da38b2019-06-13 11:40:08 +01001418 {
Narumol Prangnawaratb8d771a2020-08-14 11:51:12 +01001419 auto srcCapability = srcFactory->GetCapabilities(&layer, &layer, CapabilityClass::PaddingRequired);
1420 auto dstCapability = dstFactory->GetCapabilities(&connectedLayer,
1421 &connectedLayer,
1422 CapabilityClass::PaddingRequired);
1423 // Do not require memory copy if the source and destination do not require padding.
1424 if (srcCapability.empty() && dstCapability.empty())
1425 {
1426 return EdgeStrategy::ExportToTarget;
1427 }
Derek Lamberti84da38b2019-06-13 11:40:08 +01001428 }
1429 }
1430 }
1431
1432 // Search for copy options via map/unmap
1433 if (srcFactory->SupportsMapUnmap())
1434 {
1435 for (auto&& pref : dstPrefs)
1436 {
1437 ITensorHandleFactory* dstFactory = registry.GetFactory(pref);
James Conroy47e863d2019-11-18 17:07:43 +00001438 if (dstFactory && dstFactory->SupportsMapUnmap())
Derek Lamberti84da38b2019-06-13 11:40:08 +01001439 {
Derek Lambertif674aa02019-08-01 15:56:25 +01001440 return EdgeStrategy::CopyToTarget;
Derek Lamberti84da38b2019-06-13 11:40:08 +01001441 }
1442 }
1443 }
1444
Derek Lambertif674aa02019-08-01 15:56:25 +01001445 return EdgeStrategy::Undefined;
Derek Lamberti84da38b2019-06-13 11:40:08 +01001446}
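
// Illustrative sketch only (not compiled here): the export/import handshake above reduces to a
// bitmask intersection. Assuming both factories advertise MemorySource::Malloc, the edge can
// use EdgeStrategy::ExportToTarget:
//
//     MemorySourceFlags srcFlags = srcFactory->GetExportFlags();
//     MemorySourceFlags dstFlags = dstFactory->GetImportFlags();
//     bool zeroCopyPossible = (srcFlags & dstFlags) != 0;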
1447
// Select a TensorHandleFactory for each output slot and an edge strategy for each connection
1449OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
1450 BackendsMap& backends,
1451 TensorHandleFactoryRegistry& registry,
Narumol Prangnawarata2493a02020-08-19 14:39:07 +01001452 bool importEnabled,
Derek Lamberti84da38b2019-06-13 11:40:08 +01001453 Optional<std::vector<std::string>&> errMessages)
1454{
1455 OptimizationResult result;
1456
Narumol Prangnawarata2493a02020-08-19 14:39:07 +01001457 optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled](Layer* layer)
Derek Lamberti84da38b2019-06-13 11:40:08 +01001458 {
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001459 ARMNN_ASSERT(layer);
Derek Lamberti84da38b2019-06-13 11:40:08 +01001460
        // Let's make sure the backend is in our list of supported backends. Something went
        // wrong during backend assignment if this check fails.
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001463 ARMNN_ASSERT(backends.find(layer->GetBackendId()) != backends.end());
Derek Lamberti84da38b2019-06-13 11:40:08 +01001464
1465 // Check each output separately
1466 for (unsigned int slotIdx = 0; slotIdx < layer->GetNumOutputSlots(); slotIdx++)
1467 {
1468 OutputSlot& outputSlot = layer->GetOutputSlot(slotIdx);
1469
1470 ITensorHandleFactory::FactoryId slotOption = ITensorHandleFactory::LegacyFactoryId;
1471
1472 // Calculate the factory to use which results in the fewest copies being made.
1473 switch(layer->GetType())
1474 {
1475 case LayerType::Input:
1476 slotOption = CalculateSlotOptionForInput(backends, outputSlot, registry);
1477 break;
1478 case LayerType::Output:
1479 slotOption = CalculateSlotOptionForOutput(backends, outputSlot, registry);
1480 break;
1481 default:
1482 slotOption = CalculateSlotOption(backends, outputSlot, registry);
1483 break;
1484 }
1485 outputSlot.SetTensorHandleFactory(slotOption);
1486
Derek Lambertif674aa02019-08-01 15:56:25 +01001487 // Now determine the "best" edge strategy for each connection given the slotOption.
Derek Lamberti84da38b2019-06-13 11:40:08 +01001488 unsigned int connectionIdx = 0;
1489 for (auto&& connection : outputSlot.GetConnections())
1490 {
1491 const Layer& connectedLayer = connection->GetOwningLayer();
1492
Narumol Prangnawarata2493a02020-08-19 14:39:07 +01001493 EdgeStrategy strategy = CalculateEdgeStrategy(backends, slotOption, *layer, connectedLayer,
1494 registry, importEnabled);
Derek Lamberti84da38b2019-06-13 11:40:08 +01001495
Derek Lambertif674aa02019-08-01 15:56:25 +01001496 if (strategy == EdgeStrategy::Undefined)
Derek Lamberti84da38b2019-06-13 11:40:08 +01001497 {
1498 result.m_Error = true;
1499 if (errMessages)
1500 {
1501 errMessages.value().emplace_back("Could not find valid strategy required for compatibility"
1502 " between backends.");
1503 }
1504 return;
1505 }
1506
Derek Lambertif674aa02019-08-01 15:56:25 +01001507 outputSlot.SetEdgeStrategy(connectionIdx, strategy);
Derek Lamberti84da38b2019-06-13 11:40:08 +01001508
1509 connectionIdx++;
1510 }
1511 }
1512 });
1513
1514 return result;
1515}
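
// The strategies chosen above are consumed later in Optimize(): edges marked CopyToTarget get a
// compatibility (copy) layer inserted by Graph::AddCompatibilityLayers, while DirectCompatibility
// and ExportToTarget edges hand the tensor over without an extra copy.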
1516
Matteo Martincigh49124022019-01-11 13:25:59 +00001517IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
1518 const std::vector<BackendId>& backendPreferences,
1519 const IDeviceSpec& deviceSpec,
1520 const OptimizerOptions& options,
Rob Hughes23214432019-11-05 11:27:36 +00001521 Optional<std::vector<std::string>&> messages)
Matteo Martincigh49124022019-01-11 13:25:59 +00001522{
1523 if (backendPreferences.empty())
1524 {
Mike Kelly3a613cc2020-09-29 20:50:35 +01001525 throw InvalidArgumentException("Invoked Optimize with no backends specified");
Matteo Martincigh49124022019-01-11 13:25:59 +00001526 }
1527
Narumol Prangnawaratbc7ffb52020-03-20 15:01:01 +00001528 if (options.m_ReduceFp32ToFp16 && options.m_ReduceFp32ToBf16)
1529 {
1530 throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
1531 }
1532
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001533 std::unique_ptr<Graph> graph = std::make_unique<Graph>(inNetwork.pNetworkImpl->GetGraph());
Matteo Martincigh49124022019-01-11 13:25:59 +00001534
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001535 auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), options.m_ModelOptions),
Sadik Armagan045f6be2020-09-10 13:37:32 +01001536 &IOptimizedNetwork::Destroy);
Matteo Martincigh49124022019-01-11 13:25:59 +00001537
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001538 IOptimizedNetwork* optNetObjPtr = optNet.get();
Matteo Martincigh49124022019-01-11 13:25:59 +00001539
Matteo Martincighadddddb2019-01-24 14:06:23 +00001540 // Get the optimized graph
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001541 Graph& optGraph = optNetObjPtr->pOptimizedNetworkImpl->GetGraph();
Matteo Martincighadddddb2019-01-24 14:06:23 +00001542
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001543 // Perform AddBroadcastReshapeLayer optimisation
1544 using namespace optimizations;
1545 Optimizer::Pass(optGraph, MakeOptimizations(AddBroadcastReshapeLayer()));
1546
Narumol Prangnawaratbbf71a62020-09-07 14:05:22 +01001547 // Infer the tensor infos for all output slots. Throws an exception on failure
1548 optGraph.InferTensorInfos();
1549
Matteo Martincigh49124022019-01-11 13:25:59 +00001550 // Perform optimisation passes
Matteo Martincighadddddb2019-01-24 14:06:23 +00001551 Optimizer::Pass(optGraph, MakeOptimizations(SquashEqualPermuteSiblings(),
Mike Kelly490b7be2020-03-03 12:39:09 +00001552 SquashEqualTransposeSiblings(),
Matteo Martincighadddddb2019-01-24 14:06:23 +00001553 SquashEqualReshapeSiblings(),
1554 OptimizeInversePermutes(),
Mike Kelly490b7be2020-03-03 12:39:09 +00001555 OptimizeInverseTransposes(),
Matteo Martincighadddddb2019-01-24 14:06:23 +00001556 MovePermuteUp(),
Mike Kelly490b7be2020-03-03 12:39:09 +00001557 MoveTransposeUp(),
Matteo Martincighadddddb2019-01-24 14:06:23 +00001558 PermuteAsReshape(),
Mike Kelly490b7be2020-03-03 12:39:09 +00001559 TransposeAsReshape(),
Nina Drozd861985f2019-04-18 14:48:51 +01001560 OptimizeConsecutiveReshapes(),
Rob Hughes3a7d3a72019-09-24 16:59:56 +01001561 FoldPadIntoConvolution2d(),
Mike Kelly490b7be2020-03-03 12:39:09 +00001562 PermuteAndBatchToSpaceAsDepthToSpace(),
Teresa Charlin06e03002020-10-15 13:16:07 +01001563 TransposeAndBatchToSpaceAsDepthToSpace(),
Mike Kelly90231b82020-11-05 15:44:56 +00001564 FuseBatchNormIntoConvolution2DFloat32(),
1565 FuseBatchNormIntoConvolution2DFloat16(),
1566 FuseBatchNormIntoDepthwiseConvolution2DFloat32(),
1567 FuseBatchNormIntoDepthwiseConvolution2DFloat16()));
Matteo Martincigh49124022019-01-11 13:25:59 +00001568
    // If the Fp32-to-Fp16 optimization is enabled, convert the Fp32 network to Fp16
1570 if (options.m_ReduceFp32ToFp16)
1571 {
Matteo Martincighadddddb2019-01-24 14:06:23 +00001572 Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToFp16Converter()));
Derek Lambertidd6804b2019-11-27 09:29:57 +00001573 Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
Matteo Martincigh49124022019-01-11 13:25:59 +00001574 }
1575
    // If the Fp32-to-Bf16 optimization is enabled, convert the Fp32 network to Bf16.
    // The inputs of Convolution2d and FullyConnected are converted from Fp32 to Bf16,
    // and only the constant weights of those layers are converted from Fp32 to Bf16.
Narumol Prangnawaratbc7ffb52020-03-20 15:01:01 +00001579 if (options.m_ReduceFp32ToBf16)
1580 {
1581 Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToBf16Converter()));
Narumol Prangnawaratbc7ffb52020-03-20 15:01:01 +00001582 }
1583
Matteo Martincigh49124022019-01-11 13:25:59 +00001584 // Initialize backend settings
1585 BackendSettings backendSettings(backendPreferences, deviceSpec);
1586 if (backendSettings.GetAvailablePreferredBackends().empty())
1587 {
1588 std::stringstream failureMsg;
1589 failureMsg << "None of the preferred backends " << backendPreferences
1590 << " are supported. Current platform provides " << backendSettings.m_SupportedBackends;
Rob Hughes23214432019-11-05 11:27:36 +00001591 ReportError(failureMsg.str(), messages);
Mike Kelly3a613cc2020-09-29 20:50:35 +01001592 throw InvalidArgumentException(failureMsg.str());
Matteo Martincigh49124022019-01-11 13:25:59 +00001593 }
1594
Derek Lamberti84da38b2019-06-13 11:40:08 +01001595 // Create a map to temporarily hold initialized backend objects
1596 TensorHandleFactoryRegistry tensorHandleFactoryRegistry;
1597 BackendsMap backends = CreateSupportedBackends(tensorHandleFactoryRegistry, backendSettings);
1598
Matteo Martincigh49124022019-01-11 13:25:59 +00001599 // Assign an available backend to each layer
Matteo Martincighadddddb2019-01-24 14:06:23 +00001600 Graph::Iterator firstLayer = optGraph.begin();
1601 Graph::Iterator lastLayer = optGraph.end();
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001602 OptimizationResult assignBackendsResult = AssignBackends(optNetObjPtr->pOptimizedNetworkImpl.get(),
Derek Lamberti84da38b2019-06-13 11:40:08 +01001603 backendSettings,
1604 firstLayer,
1605 lastLayer,
Rob Hughes23214432019-11-05 11:27:36 +00001606 messages);
Derek Lamberti84da38b2019-06-13 11:40:08 +01001607 if (assignBackendsResult.m_Error)
Matteo Martincigh49124022019-01-11 13:25:59 +00001608 {
1609 // Failed to assign a backend to each layer
Mike Kelly3a613cc2020-09-29 20:50:35 +01001610 throw InvalidArgumentException("Failed to assign a backend to each layer");
jimfly016b0b53d2018-10-08 14:43:01 +01001611 }
telsoa01c577f2c2018-08-31 09:22:23 +01001612
Matteo Martincighadddddb2019-01-24 14:06:23 +00001613 Optimizer::Pass(optGraph, MakeOptimizations(OptimizeInverseConversionsFp16(),
1614 OptimizeInverseConversionsFp32()));
telsoa01c577f2c2018-08-31 09:22:23 +01001615
Matteo Martincighadddddb2019-01-24 14:06:23 +00001616 // Apply the backend-specific optimizations
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001617 OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(optNetObjPtr->pOptimizedNetworkImpl.get(),
Matteo Martincighadddddb2019-01-24 14:06:23 +00001618 backendSettings,
Derek Lamberti84da38b2019-06-13 11:40:08 +01001619 backends,
Mike Kelly07810fc2020-11-12 10:58:48 +00001620 options.m_ModelOptions,
Rob Hughes23214432019-11-05 11:27:36 +00001621 messages);
Matteo Martincighadddddb2019-01-24 14:06:23 +00001622 if (backendOptimizationResult.m_Error)
Matteo Martincigh49124022019-01-11 13:25:59 +00001623 {
Matteo Martincighadddddb2019-01-24 14:06:23 +00001624 // Failed to apply the backend-specific optimizations
Mike Kelly3a613cc2020-09-29 20:50:35 +01001625 throw InvalidArgumentException("Failed to apply the backend-specific optimizations");
Matteo Martincigh49124022019-01-11 13:25:59 +00001626 }
1627
Matteo Martincighadddddb2019-01-24 14:06:23 +00001628 // If the debug flag is set, then insert a DebugLayer after each layer
1629 // Doing this after applying the backend optimizations as they might have changed some layers
1630 if (options.m_Debug)
1631 {
1632 Optimizer::Pass(optGraph, MakeOptimizations(InsertDebugLayer()));
1633 }
1634
Derek Lamberti84da38b2019-06-13 11:40:08 +01001635 // Calculate the compatibility strategies for tensor handles
1636 OptimizationResult strategyResult = SelectTensorHandleStrategy(optGraph,
1637 backends,
1638 tensorHandleFactoryRegistry,
Narumol Prangnawarata2493a02020-08-19 14:39:07 +01001639 options.m_ImportEnabled,
Rob Hughes23214432019-11-05 11:27:36 +00001640 messages);
Derek Lamberti84da38b2019-06-13 11:40:08 +01001641 if (strategyResult.m_Error)
1642 {
        // Failed to select a tensor handle strategy for one or more edges
1644 return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
1645 }
1646
1647 // Based on the tensor handle strategy determined above, insert copy layers where required.
Derek Lambertif674aa02019-08-01 15:56:25 +01001648 optGraph.AddCompatibilityLayers(backends, tensorHandleFactoryRegistry);
telsoa01c577f2c2018-08-31 09:22:23 +01001649
1650 // Convert constants
Matteo Martincighadddddb2019-01-24 14:06:23 +00001651 Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
1652 Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsHalfToFloat()));
telsoa01c577f2c2018-08-31 09:22:23 +01001653
Derek Lamberti84da38b2019-06-13 11:40:08 +01001654 // Run backend specific optimizations (deprecated)
Matteo Martincigh49124022019-01-11 13:25:59 +00001655 for (auto&& chosenBackend : backendSettings.m_SelectedBackends)
David Beck263e3492018-11-09 14:46:40 +00001656 {
1657 auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
1658 auto backendPtr = factoryFun();
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001659 ARMNN_ASSERT(backendPtr.get() != nullptr);
David Beck263e3492018-11-09 14:46:40 +00001660
Matteo Martincighed735042019-05-22 09:42:43 +01001661 ARMNN_NO_DEPRECATE_WARN_BEGIN
David Beck263e3492018-11-09 14:46:40 +00001662 auto backendSpecificOptimizations = backendPtr->GetOptimizations();
Matteo Martincighed735042019-05-22 09:42:43 +01001663 ARMNN_NO_DEPRECATE_WARN_END
1664
David Beck263e3492018-11-09 14:46:40 +00001665 if (!backendSpecificOptimizations.empty())
1666 {
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001667 Optimizer::Pass(optNetObjPtr->pOptimizedNetworkImpl->GetGraph(), backendSpecificOptimizations);
David Beck263e3492018-11-09 14:46:40 +00001668 }
1669 }
1670
telsoa01c577f2c2018-08-31 09:22:23 +01001671 return optNet;
telsoa014fcda012018-03-09 14:13:49 +00001672}
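
// Minimal usage sketch for Optimize() (network and runtime are assumed to be created elsewhere;
// the backend list and options are illustrative):
//
//     armnn::OptimizerOptions options;
//     options.m_ReduceFp32ToFp16 = true;
//     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(
//         *network, {armnn::Compute::CpuAcc, armnn::Compute::CpuRef},
//         runtime->GetDeviceSpec(), options);
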
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001673bool NetworkImpl::GetShapeInferenceMethod()
telsoa014fcda012018-03-09 14:13:49 +00001674{
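    // Note: only the first network option is inspected. The returned boolean feeds the Graph
    // constructor in NetworkImpl's constructor, where true selects shape inference with validation.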
Finn Williamsf24effa2020-07-03 10:12:03 +01001675 if (m_NetworkOptions.size() > 0 && m_NetworkOptions[0].GetBackendId().Get() == "ShapeInferenceMethod")
1676 {
1677 return m_NetworkOptions[0].GetOption(0).GetValue().AsBool();
1678 }
1679
1680 return false;
telsoa014fcda012018-03-09 14:13:49 +00001681}
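
// Sketch of the network option read above (the option name "InferAndValidate" is an assumption
// for illustration; only the backend id and a boolean value are actually checked):
//
//     armnn::BackendOptions shapeOption("ShapeInferenceMethod",
//                                       {{"InferAndValidate", true}});
//     armnn::INetworkPtr net = armnn::INetwork::Create({shapeOption});
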
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001682NetworkImpl::NetworkImpl(NetworkOptions networkOptions)
Finn Williamsf24effa2020-07-03 10:12:03 +01001683: m_NetworkOptions(networkOptions),
1684 m_Graph(std::make_unique<Graph>(GetShapeInferenceMethod()))
1685{}
telsoa014fcda012018-03-09 14:13:49 +00001686
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001687NetworkImpl::~NetworkImpl()
telsoa014fcda012018-03-09 14:13:49 +00001688{
1689}
1690
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001691Status NetworkImpl::PrintGraph()
Jan Eilers99d9d4a2019-11-06 10:02:16 +00001692{
1693 m_Graph->Print();
1694 return Status::Success;
1695}
1696
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001697IConnectableLayer* NetworkImpl::AddInputLayer(LayerBindingId id, const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001698{
1699 return m_Graph->AddLayer<InputLayer>(id, name);
1700}
1701
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001702IConnectableLayer* NetworkImpl::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00001703 const char* name)
1704{
1705 return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
1706}
1707
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001708IConnectableLayer* NetworkImpl::AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001709 const char* name)
1710{
1711 return m_Graph->AddLayer<ComparisonLayer>(comparisonDescriptor, name);
1712}
1713
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001714IConnectableLayer* NetworkImpl::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
josh minor4a3c6102020-01-06 16:40:46 -06001715 const char* name)
1716{
1717 return m_Graph->AddLayer<ElementwiseUnaryLayer>(elementwiseUnaryDescriptor, name);
1718}
1719
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001720IConnectableLayer* NetworkImpl::AddFillLayer(const FillDescriptor& fillDescriptor,
Ryan OSheaec6c6802020-06-05 17:17:06 +01001721 const char* name)
1722{
1723 return m_Graph->AddLayer<FillLayer>(fillDescriptor, name);
1724}
1725
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001726IConnectableLayer* NetworkImpl::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
Sadik Armaganf0a6dec2021-03-25 07:46:55 +00001727 const Optional<ConstTensor>& weights,
1728 const Optional<ConstTensor>& biases,
1729 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001730{
    if (fullyConnectedDescriptor.m_ConstantWeights)
    {
        if (!weights.has_value())
        {
            throw InvalidArgumentException("AddFullyConnectedLayer: weights cannot be empty");
        }

        if (fullyConnectedDescriptor.m_BiasEnabled && !biases.has_value())
        {
            throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be empty");
        }
    }
1740
1741 const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);
1742
Sadik Armaganf0a6dec2021-03-25 07:46:55 +00001743 if (fullyConnectedDescriptor.m_ConstantWeights)
telsoa014fcda012018-03-09 14:13:49 +00001744 {
Finn Williams4422cec2021-03-22 17:51:06 +00001745 layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights.value());
Sadik Armaganf0a6dec2021-03-25 07:46:55 +00001746 if (fullyConnectedDescriptor.m_BiasEnabled)
1747 {
Finn Williams4422cec2021-03-22 17:51:06 +00001748 layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
Sadik Armaganf0a6dec2021-03-25 07:46:55 +00001749 }
telsoa014fcda012018-03-09 14:13:49 +00001750 }
1751
1752 return layer;
1753}
1754
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001755IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
Sadik Armaganf0a6dec2021-03-25 07:46:55 +00001756 const Optional<ConstTensor>& weights,
1757 const Optional<ConstTensor>& biases,
1758 const char* name)
1759{
1760 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
1761}
1762
1763IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001764 const ConstTensor& weights,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001765 const Optional<ConstTensor>& biases,
telsoa01c577f2c2018-08-31 09:22:23 +01001766 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001767{
Sadik Armaganf0a6dec2021-03-25 07:46:55 +00001768 Optional<ConstTensor> optionalWeights(weights);
1769 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, optionalWeights, biases, name);
telsoa014fcda012018-03-09 14:13:49 +00001770}
1771
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001772IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001773 const ConstTensor& weights,
1774 const char* name)
1775{
Sadik Armaganf0a6dec2021-03-25 07:46:55 +00001776 Optional<ConstTensor> optionalWeights(weights);
Matteo Martincighfc598e12019-05-14 10:36:13 +01001777 Optional<ConstTensor> biases;
Sadik Armaganf0a6dec2021-03-25 07:46:55 +00001778 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, optionalWeights, biases, name);
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001779}
1780
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001781IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001782 const ConstTensor& weights,
1783 const ConstTensor& biases,
1784 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001785{
Sadik Armaganf0a6dec2021-03-25 07:46:55 +00001786 Optional<ConstTensor> optionalWeights(weights);
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001787 Optional<ConstTensor> optionalBiases(biases);
Sadik Armaganf0a6dec2021-03-25 07:46:55 +00001788 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, optionalWeights, optionalBiases, name);
telsoa014fcda012018-03-09 14:13:49 +00001789}
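
// Usage sketch (network is an assumed armnn::INetworkPtr; shapes and data buffers are
// illustrative):
//
//     armnn::FullyConnectedDescriptor fcDesc;
//     fcDesc.m_BiasEnabled = true;
//     armnn::TensorInfo weightsInfo({256, 1024}, armnn::DataType::Float32);
//     armnn::TensorInfo biasInfo({256}, armnn::DataType::Float32);
//     armnn::IConnectableLayer* fc = network->AddFullyConnectedLayer(
//         fcDesc, armnn::ConstTensor(weightsInfo, weightData),
//         armnn::ConstTensor(biasInfo, biasData), "fc1");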
1790
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001791IConnectableLayer* NetworkImpl::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
Jim Flynn906f9462019-05-10 13:55:21 +01001792 const char* name)
1793{
Jim Flynne242f2d2019-05-22 14:24:13 +01001794 return m_Graph->AddLayer<ConcatLayer>(concatDescriptor, name);
Jim Flynn906f9462019-05-10 13:55:21 +01001795}
1796
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001797IConnectableLayer* NetworkImpl::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
1798 const ConstTensor& weights,
1799 const Optional<ConstTensor>& biases,
1800 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001801{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001802 if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
telsoa014fcda012018-03-09 14:13:49 +00001803 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001804 throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be empty");
telsoa014fcda012018-03-09 14:13:49 +00001805 }
1806
1807 const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);
1808
Finn Williams4422cec2021-03-22 17:51:06 +00001809 layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights);
telsoa014fcda012018-03-09 14:13:49 +00001810
1811 if (convolution2dDescriptor.m_BiasEnabled)
1812 {
Finn Williams4422cec2021-03-22 17:51:06 +00001813 layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
telsoa014fcda012018-03-09 14:13:49 +00001814 }
1815
1816 return layer;
1817}
1818
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001819IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001820 const ConstTensor& weights,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001821 const Optional<ConstTensor>& biases,
telsoa01c577f2c2018-08-31 09:22:23 +01001822 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001823{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001824 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
telsoa014fcda012018-03-09 14:13:49 +00001825}
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001826
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001827IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001828 const ConstTensor& weights,
1829 const char* name)
1830{
Matteo Martincighfc598e12019-05-14 10:36:13 +01001831 Optional<ConstTensor> biases;
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001832 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
1833}
1834
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001835IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001836 const ConstTensor& weights,
1837 const ConstTensor& biases,
1838 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001839{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001840 Optional<ConstTensor> optionalBiases(biases);
1841 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
telsoa014fcda012018-03-09 14:13:49 +00001842}
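
// Usage sketch (descriptor values and the kernel buffer are illustrative assumptions):
//
//     armnn::Convolution2dDescriptor convDesc;
//     convDesc.m_StrideX = 1;
//     convDesc.m_StrideY = 1;
//     convDesc.m_DataLayout = armnn::DataLayout::NHWC;
//     armnn::TensorInfo kernelInfo({32, 3, 3, 3}, armnn::DataType::Float32);
//     armnn::IConnectableLayer* conv = network->AddConvolution2dLayer(
//         convDesc, armnn::ConstTensor(kernelInfo, kernelData), "conv1");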
1843
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001844IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayerImpl(
telsoa014fcda012018-03-09 14:13:49 +00001845 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
1846 const ConstTensor& weights,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001847 const Optional<ConstTensor>& biases,
telsoa014fcda012018-03-09 14:13:49 +00001848 const char* name)
1849{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001850 if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
telsoa014fcda012018-03-09 14:13:49 +00001851 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001852 throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be empty");
telsoa014fcda012018-03-09 14:13:49 +00001853 }
1854
Matteo Martincigh3d6898c2019-01-15 16:11:44 +00001855 const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);
telsoa014fcda012018-03-09 14:13:49 +00001856
Finn Williams4422cec2021-03-22 17:51:06 +00001857 layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights);
telsoa014fcda012018-03-09 14:13:49 +00001858
1859 if (convolution2dDescriptor.m_BiasEnabled)
1860 {
Finn Williams4422cec2021-03-22 17:51:06 +00001861 layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
telsoa014fcda012018-03-09 14:13:49 +00001862 }
1863
1864 return layer;
1865}
1866
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001867IConnectableLayer* NetworkImpl::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
Aron Virginas-Tardd6247f2019-09-19 14:31:17 +01001868 const char* name)
1869{
1870 return m_Graph->AddLayer<DepthToSpaceLayer>(depthToSpaceDescriptor, name);
1871}
1872
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001873IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001874 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
1875 const ConstTensor& weights,
1876 const Optional<ConstTensor>& biases,
1877 const char* name)
1878{
1879 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
1880}
1881
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001882IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
telsoa014fcda012018-03-09 14:13:49 +00001883 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
1884 const ConstTensor& weights,
1885 const char* name)
1886{
Matteo Martincighfc598e12019-05-14 10:36:13 +01001887 Optional<ConstTensor> biases;
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001888 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
telsoa014fcda012018-03-09 14:13:49 +00001889}
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001890
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001891IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
telsoa014fcda012018-03-09 14:13:49 +00001892 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
1893 const ConstTensor& weights,
1894 const ConstTensor& biases,
1895 const char* name)
1896{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001897 Optional<ConstTensor> optionalBiases(biases);
1898 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
telsoa014fcda012018-03-09 14:13:49 +00001899}
1900
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001901IConnectableLayer* NetworkImpl::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
Narumol Prangnawarat6d302bf2019-02-04 11:46:26 +00001902 const ConstTensor& anchors, const char* name)
Narumol Prangnawarat94dd5d82019-01-23 18:06:26 +00001903{
Narumol Prangnawarat6d302bf2019-02-04 11:46:26 +00001904 const auto layer = m_Graph->AddLayer<DetectionPostProcessLayer>(descriptor, name);
1905
Finn Williams4422cec2021-03-22 17:51:06 +00001906 layer->m_Anchors = std::make_shared<ScopedCpuTensorHandle>(anchors);
Narumol Prangnawarat6d302bf2019-02-04 11:46:26 +00001907
1908 return layer;
Narumol Prangnawarat94dd5d82019-01-23 18:06:26 +00001909}
1910
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001911IConnectableLayer* NetworkImpl::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00001912 const char* name)
1913{
1914 return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
1915}
1916
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001917IConnectableLayer* NetworkImpl::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00001918 const char* name)
1919{
1920 return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
1921}
1922
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001923IConnectableLayer* NetworkImpl::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00001924 const char* name)
1925{
1926 return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
1927}
1928
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001929IConnectableLayer* NetworkImpl::AddArgMinMaxLayer(const ArgMinMaxDescriptor& argMinMaxDescriptor,
Nikhil Rajee391d52019-09-05 17:50:44 +01001930 const char* name)
1931{
1932 return m_Graph->AddLayer<ArgMinMaxLayer>(argMinMaxDescriptor, name);
1933}
1934
IConnectableLayer* NetworkImpl::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00001937 const char* name)
1938{
1939 return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
1940}
1941
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001942IConnectableLayer* NetworkImpl::AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name)
Aron Virginas-Tar636ab402019-09-16 14:27:45 +01001943{
1944 return m_Graph->AddLayer<SliceLayer>(sliceDescriptor, name);
1945}
1946
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001947IConnectableLayer* NetworkImpl::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00001948 const char* name)
1949{
1950 return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
1951}
1952
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001953IConnectableLayer* NetworkImpl::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00001954 const char* name)
1955{
1956 return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
1957}
1958
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001959IConnectableLayer* NetworkImpl::AddMaximumLayer(const char* name)
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00001960{
1961 return m_Graph->AddLayer<MaximumLayer>(name);
1962}
1963
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001964IConnectableLayer* NetworkImpl::AddMinimumLayer(const char* name)
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00001965{
1966 return m_Graph->AddLayer<MinimumLayer>(name);
1967}
1968
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001969IConnectableLayer* NetworkImpl::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
Jim Flynn906f9462019-05-10 13:55:21 +01001970 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001971{
Jim Flynne242f2d2019-05-22 14:24:13 +01001972 return AddConcatLayer(mergerDescriptor, name);
telsoa014fcda012018-03-09 14:13:49 +00001973}
1974
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001975IConnectableLayer* NetworkImpl::AddAbsLayer(const char * name)
Kevin May868eb142019-09-04 17:29:31 +01001976{
josh minor4a3c6102020-01-06 16:40:46 -06001977 return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
Kevin May868eb142019-09-04 17:29:31 +01001978}
1979
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001980IConnectableLayer* NetworkImpl::AddAdditionLayer(const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001981{
1982 return m_Graph->AddLayer<AdditionLayer>(name);
1983}
1984
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001985IConnectableLayer* NetworkImpl::AddMultiplicationLayer(const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001986{
1987 return m_Graph->AddLayer<MultiplicationLayer>(name);
1988}
1989
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001990IConnectableLayer* NetworkImpl::AddOutputLayer(LayerBindingId id, const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001991{
1992 return m_Graph->AddLayer<OutputLayer>(id, name);
1993}
1994
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001995IConnectableLayer* NetworkImpl::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
telsoa014fcda012018-03-09 14:13:49 +00001996 const ConstTensor& mean,
1997 const ConstTensor& variance,
1998 const ConstTensor& beta,
1999 const ConstTensor& gamma,
2000 const char* name)
2001{
2002 const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);
2003
Finn Williams4422cec2021-03-22 17:51:06 +00002004 layer->m_Mean = std::make_shared<ScopedCpuTensorHandle>(mean);
2005 layer->m_Variance = std::make_shared<ScopedCpuTensorHandle>(variance);
2006 layer->m_Beta = std::make_shared<ScopedCpuTensorHandle>(beta);
2007 layer->m_Gamma = std::make_shared<ScopedCpuTensorHandle>(gamma);
telsoa014fcda012018-03-09 14:13:49 +00002008
2009 return layer;
2010}
2011
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002012IConnectableLayer* NetworkImpl::AddRankLayer(const char* name)
Finn Williams2605b232020-06-10 15:53:46 +01002013{
2014 return m_Graph->AddLayer<RankLayer>(name);
2015}
2016
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002017IConnectableLayer* NetworkImpl::AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
2018 const char* name)
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00002019{
2020 return m_Graph->AddLayer<ReduceLayer>(reduceDescriptor, name);
2021}
2022
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002023IConnectableLayer* NetworkImpl::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
2024 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002025{
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002026 ResizeDescriptor resizeDescriptor;
David Monahan4a0c9b92020-05-30 09:48:39 +01002027 resizeDescriptor.m_Method = ResizeMethod::Bilinear;
2028 resizeDescriptor.m_DataLayout = descriptor.m_DataLayout;
2029 resizeDescriptor.m_TargetWidth = descriptor.m_TargetWidth;
2030 resizeDescriptor.m_TargetHeight = descriptor.m_TargetHeight;
2031 resizeDescriptor.m_AlignCorners = descriptor.m_AlignCorners;
2032 resizeDescriptor.m_HalfPixelCenters = descriptor.m_HalfPixelCenters;
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002033
2034 return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
telsoa014fcda012018-03-09 14:13:49 +00002035}
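
// Equivalent call via the general resize API (the overload above just forwards; the target
// size is an assumed example):
//
//     armnn::ResizeDescriptor desc;
//     desc.m_Method       = armnn::ResizeMethod::Bilinear;
//     desc.m_TargetWidth  = 224;
//     desc.m_TargetHeight = 224;
//     network->AddResizeLayer(desc, "resize");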
2036
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002037IConnectableLayer* NetworkImpl::AddResizeLayer(const ResizeDescriptor& resizeDescriptor, const char* name)
Teresa Charlina9075df2019-06-27 15:41:57 +01002038{
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002039 return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
Teresa Charlina9075df2019-06-27 15:41:57 +01002040}
2041
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002042IConnectableLayer* NetworkImpl::AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
2043 const char* name)
Kevin Mayce5045a2019-10-02 14:07:47 +01002044{
2045 return m_Graph->AddLayer<InstanceNormalizationLayer>(desc, name);
2046}
2047
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002048IConnectableLayer* NetworkImpl::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
2049 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002050{
Matteo Martincighbcd3c852018-09-28 14:14:12 +01002051 return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
telsoa014fcda012018-03-09 14:13:49 +00002052}
2053
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002054IConnectableLayer* NetworkImpl::AddLogSoftmaxLayer(const LogSoftmaxDescriptor& desc,
Aron Virginas-Tarf982dea2019-10-11 14:07:53 +01002055 const char* name)
2056{
2057 return m_Graph->AddLayer<LogSoftmaxLayer>(desc, name);
2058}
2059
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002060IConnectableLayer* NetworkImpl::AddConstantLayer(const ConstTensor& input, const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002061{
telsoa01c577f2c2018-08-31 09:22:23 +01002062 auto layer = m_Graph->AddLayer<ConstantLayer>(name);
2063
Finn Williams4422cec2021-03-22 17:51:06 +00002064 layer->m_LayerOutput = std::make_shared<ScopedCpuTensorHandle>(input);
telsoa01c577f2c2018-08-31 09:22:23 +01002065
2066 return layer;
telsoa014fcda012018-03-09 14:13:49 +00002067}
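
// Usage sketch (buffer contents are illustrative; the ConstTensor is wrapped in a
// ScopedCpuTensorHandle above, which takes its own copy of the data):
//
//     std::vector<float> constData(16, 1.0f);
//     armnn::TensorInfo constInfo({4, 4}, armnn::DataType::Float32);
//     network->AddConstantLayer(armnn::ConstTensor(constInfo, constData.data()), "const0");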
2068
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002069IConnectableLayer* NetworkImpl::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01002070 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002071{
2072 return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
2073}
2074
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002075IConnectableLayer* NetworkImpl::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00002076 const char* name)
2077{
2078 return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
2079}
2080
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002081IConnectableLayer* NetworkImpl::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
Aron Virginas-Tar972af152019-06-11 14:14:03 +01002082 const char* name)
2083{
2084 return m_Graph->AddLayer<SpaceToDepthLayer>(spaceToDepthDescriptor, name);
2085}
2086
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002087IConnectableLayer* NetworkImpl::AddFloorLayer(const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002088{
2089 return m_Graph->AddLayer<FloorLayer>(name);
2090}
2091
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002092IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01002093 const LstmInputParams& params,
2094 const char* name)
2095{
2096 const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);
2097
2098 //Lstm Basic Parameters
2099 layer->m_BasicParameters.m_InputToForgetWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002100 std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
telsoa01c577f2c2018-08-31 09:22:23 +01002101 layer->m_BasicParameters.m_InputToCellWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002102 std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
telsoa01c577f2c2018-08-31 09:22:23 +01002103 layer->m_BasicParameters.m_InputToOutputWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002104 std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
telsoa01c577f2c2018-08-31 09:22:23 +01002105 layer->m_BasicParameters.m_RecurrentToForgetWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002106 std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
telsoa01c577f2c2018-08-31 09:22:23 +01002107 layer->m_BasicParameters.m_RecurrentToCellWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002108 std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
telsoa01c577f2c2018-08-31 09:22:23 +01002109 layer->m_BasicParameters.m_RecurrentToOutputWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002110 std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
telsoa01c577f2c2018-08-31 09:22:23 +01002111 layer->m_BasicParameters.m_ForgetGateBias =
Finn Williams4422cec2021-03-22 17:51:06 +00002112 std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
telsoa01c577f2c2018-08-31 09:22:23 +01002113 layer->m_BasicParameters.m_CellBias =
Finn Williams4422cec2021-03-22 17:51:06 +00002114 std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellBias));
telsoa01c577f2c2018-08-31 09:22:23 +01002115 layer->m_BasicParameters.m_OutputGateBias =
Finn Williams4422cec2021-03-22 17:51:06 +00002116 std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));
telsoa01c577f2c2018-08-31 09:22:23 +01002117
2118 //Lstm Cifg parameters
2119 if(!descriptor.m_CifgEnabled)
2120 {
2121 if(params.m_InputToInputWeights == nullptr)
2122 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002123 throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL "
2124 "when CIFG is disabled.");
telsoa01c577f2c2018-08-31 09:22:23 +01002125 }
2126 if(params.m_RecurrentToInputWeights == nullptr)
2127 {
2128 throw InvalidArgumentException(
Jan Eilerse2062cd2020-03-30 15:07:45 +01002129 "AddLstmLayer: Recurrent To Input Weights cannot be NULL "
2130 "when CIFG is disabled.");
telsoa01c577f2c2018-08-31 09:22:23 +01002131 }
2132 if(params.m_InputGateBias == nullptr)
2133 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002134 throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL "
2135 "when CIFG is disabled.");
telsoa01c577f2c2018-08-31 09:22:23 +01002136 }
2137 layer->m_CifgParameters.m_InputToInputWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002138 std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
telsoa01c577f2c2018-08-31 09:22:23 +01002139 layer->m_CifgParameters.m_RecurrentToInputWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002140 std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
telsoa01c577f2c2018-08-31 09:22:23 +01002141 layer->m_CifgParameters.m_InputGateBias =
Finn Williams4422cec2021-03-22 17:51:06 +00002142 std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
telsoa01c577f2c2018-08-31 09:22:23 +01002143 }
2144
2145 //Lstm projection parameters
2146 if(descriptor.m_ProjectionEnabled)
2147 {
2148 if(params.m_ProjectionWeights == nullptr)
2149 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002150 throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL "
2151 "when projection is enabled.");
telsoa01c577f2c2018-08-31 09:22:23 +01002152 }
2153 layer->m_ProjectionParameters.m_ProjectionWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002154 std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
telsoa01c577f2c2018-08-31 09:22:23 +01002155 if(params.m_ProjectionBias != nullptr)
2156 {
2157 layer->m_ProjectionParameters.m_ProjectionBias =
Finn Williams4422cec2021-03-22 17:51:06 +00002158 std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
telsoa01c577f2c2018-08-31 09:22:23 +01002159 }
2160 }
2161
2162 //Lstm Peephole params
2163 if(descriptor.m_PeepholeEnabled)
2164 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002165 if(!descriptor.m_CifgEnabled)
2166 {
2167 if(params.m_CellToInputWeights == nullptr)
2168 {
2169 throw InvalidArgumentException("AddLstmLayer: Cell To Input Weights cannot be NULL "
2170 "when Peephole is enabled and CIFG disabled.");
2171 }
2172
2173 layer->m_PeepholeParameters.m_CellToInputWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002174 std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
Jan Eilerse2062cd2020-03-30 15:07:45 +01002175 }
2176
telsoa01c577f2c2018-08-31 09:22:23 +01002177 if(params.m_CellToForgetWeights == nullptr)
2178 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002179 throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL "
2180 "when Peephole is enabled.");
telsoa01c577f2c2018-08-31 09:22:23 +01002181 }
2182 if(params.m_CellToOutputWeights == nullptr)
2183 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002184 throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL "
2185 "when Peephole is enabled.");
telsoa01c577f2c2018-08-31 09:22:23 +01002186 }
Jan Eilerse2062cd2020-03-30 15:07:45 +01002187
telsoa01c577f2c2018-08-31 09:22:23 +01002188 layer->m_PeepholeParameters.m_CellToForgetWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002189 std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
telsoa01c577f2c2018-08-31 09:22:23 +01002190 layer->m_PeepholeParameters.m_CellToOutputWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002191 std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
telsoa01c577f2c2018-08-31 09:22:23 +01002192 }
Jan Eilersf8c62972019-07-17 11:07:49 +01002193
2194 //Lstm Layer Normalization params
2195 if(descriptor.m_LayerNormEnabled)
2196 {
2197 if(!descriptor.m_CifgEnabled)
2198 {
2199 if(params.m_InputLayerNormWeights == nullptr)
2200 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002201 throw InvalidArgumentException("AddLstmLayer: Input layer normalization weights cannot be NULL "
2202 "when layer normalization is enabled and CIFG disabled.");
Jan Eilersf8c62972019-07-17 11:07:49 +01002203 }
2204 layer->m_LayerNormParameters.m_InputLayerNormWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002205 std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
Jan Eilersf8c62972019-07-17 11:07:49 +01002206 }
2207
2208 if(params.m_ForgetLayerNormWeights == nullptr)
2209 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002210 throw InvalidArgumentException("AddLstmLayer: Forget layer normalization weights cannot be NULL "
2211 "when layer normalization is enabled.");
Jan Eilersf8c62972019-07-17 11:07:49 +01002212 }
2213 if(params.m_CellLayerNormWeights == nullptr)
2214 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002215 throw InvalidArgumentException("AddLstmLayer: Cell layer normalization weights cannot be NULL "
2216 "when layer normalization is enabled.");
Jan Eilersf8c62972019-07-17 11:07:49 +01002217 }
2218 if(params.m_OutputLayerNormWeights == nullptr)
2219 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002220 throw InvalidArgumentException("AddLstmLayer: Output layer normalization weights cannot be NULL "
2221 "when layer normalization is enabled.");
Jan Eilersf8c62972019-07-17 11:07:49 +01002222 }
2223 layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002224 std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
Jan Eilersf8c62972019-07-17 11:07:49 +01002225 layer->m_LayerNormParameters.m_CellLayerNormWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002226 std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
Jan Eilersf8c62972019-07-17 11:07:49 +01002227 layer->m_LayerNormParameters.m_OutputLayerNormWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002228 std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
Jan Eilersf8c62972019-07-17 11:07:49 +01002229 }
telsoa01c577f2c2018-08-31 09:22:23 +01002230 return layer;
2231}
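
// All tensors referenced through LstmInputParams are wrapped in ScopedCpuTensorHandles above,
// which take their own copies, so the caller's buffers do not need to outlive this call.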
2232
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002233IConnectableLayer* NetworkImpl::AddDivisionLayer(const char* name)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002234{
2235 return m_Graph->AddLayer<DivisionLayer>(name);
2236}
2237
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002238IConnectableLayer* NetworkImpl::AddSubtractionLayer(const char* name)
David Beck19526222018-09-12 16:00:08 +01002239{
2240 return m_Graph->AddLayer<SubtractionLayer>(name);
2241}
2242
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002243IConnectableLayer* NetworkImpl::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
narpra0132b90462018-09-13 11:07:48 +01002244{
2245 return m_Graph->AddLayer<MeanLayer>(meanDescriptor,name);
2246}
2247
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002248IConnectableLayer* NetworkImpl::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
Mohamed Nour Abouelseoud5662c202018-09-24 13:30:09 +01002249{
2250 return m_Graph->AddLayer<PadLayer>(padDescriptor,name);
2251}
2252
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002253IConnectableLayer *NetworkImpl::AddQuantizeLayer(const char *name)
Derek Lambertia9cca6a2019-03-25 15:41:58 +00002254{
2255 return m_Graph->AddLayer<QuantizeLayer>(name);
2256}
2257
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002258IConnectableLayer* NetworkImpl::AddDequantizeLayer(const char* name)
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00002259{
2260 return m_Graph->AddLayer<DequantizeLayer>(name);
2261}
2262
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002263IConnectableLayer* NetworkImpl::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
Conor Kennedy430b5d82018-11-14 15:28:28 +00002264 const char* name)
2265{
2266 return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
2267}
2268
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002269IConnectableLayer* NetworkImpl::AddGreaterLayer(const char* name)
Matteo Martincigh59a950c2018-12-13 12:48:25 +00002270{
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01002271 return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
Matteo Martincigh59a950c2018-12-13 12:48:25 +00002272}
2273
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002274IConnectableLayer* NetworkImpl::AddEqualLayer(const char* name)
FrancisMurtagh20995952018-12-17 12:11:36 +00002275{
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01002276 return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
FrancisMurtagh20995952018-12-17 12:11:36 +00002277}
2278
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002279IConnectableLayer* NetworkImpl::AddRsqrtLayer(const char * name)
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00002280{
josh minor4a3c6102020-01-06 16:40:46 -06002281 return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00002282}
2283
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002284IConnectableLayer* NetworkImpl::AddGatherLayer(const char* name)
narpra01b89b05f2019-01-16 09:53:09 +00002285{
Teresa Charlin52664732020-06-29 16:27:03 +01002286 GatherDescriptor gatherDescriptor{};
2287 return AddGatherLayer(gatherDescriptor, name);
2288}
2289
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002290IConnectableLayer* NetworkImpl::AddGatherLayer(const GatherDescriptor& gatherDescriptor,
Teresa Charlin52664732020-06-29 16:27:03 +01002291 const char* name)
2292{
2293 return m_Graph->AddLayer<GatherLayer>(gatherDescriptor, name);
narpra01b89b05f2019-01-16 09:53:09 +00002294}
2295
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002296IConnectableLayer* NetworkImpl::AddMergeLayer(const char* name)
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01002297{
2298 return m_Graph->AddLayer<MergeLayer>(name);
2299}
2300
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002301IConnectableLayer* NetworkImpl::AddSwitchLayer(const char* name)
Sadik Armaganeff363d2019-04-05 15:25:46 +01002302{
2303 return m_Graph->AddLayer<SwitchLayer>(name);
2304}
2305
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002306IConnectableLayer* NetworkImpl::AddPreluLayer(const char* name)
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01002307{
2308 return m_Graph->AddLayer<PreluLayer>(name);
2309}
2310
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002311IConnectableLayer* NetworkImpl::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01002312 const ConstTensor& weights,
2313 const Optional<ConstTensor>& biases,
2314 const char* name)
2315{
2316 if (descriptor.m_BiasEnabled && !biases.has_value())
2317 {
2318 throw InvalidArgumentException("AddTransposeConvolution2dLayer: Biases cannot be empty");
2319 }
2320
2321 const auto layer = m_Graph->AddLayer<TransposeConvolution2dLayer>(descriptor, name);
2322
Finn Williams4422cec2021-03-22 17:51:06 +00002323 layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights);
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01002324
2325 if (descriptor.m_BiasEnabled)
2326 {
Finn Williams4422cec2021-03-22 17:51:06 +00002327 layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01002328 }
2329
2330 return layer;
2331}
2332
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002333IConnectableLayer* NetworkImpl::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
Mike Kellyc9ea45a2020-02-28 18:11:58 +00002334 const char* name)
2335{
2336 return m_Graph->AddLayer<TransposeLayer>(transposeDescriptor, name);
2337}
2338
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002339IConnectableLayer* NetworkImpl::AddStackLayer(const StackDescriptor& stackDescriptor,
Matthew Jackson2b8c1da2019-07-04 14:59:16 +01002340 const char* name)
2341{
2342 return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
2343}
2344
Derek Lamberti013c3902019-10-21 10:46:16 +01002345
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002346IConnectableLayer* NetworkImpl::AddStandInLayer(const StandInDescriptor& desc,
Derek Lamberti013c3902019-10-21 10:46:16 +01002347 const char* name)
2348{
2349 return m_Graph->AddLayer<StandInLayer>(desc, name);
2350}
2351
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002352IConnectableLayer* NetworkImpl::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
James Conroyee18dc82019-07-17 11:27:46 +01002353 const char* name)
2354{
2355 const auto layer = m_Graph->AddLayer<QuantizedLstmLayer>(name);
2356
2357 // InputToX weights
2358 layer->m_QuantizedLstmParameters.m_InputToInputWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002359 std::make_shared<ScopedCpuTensorHandle>(params.GetInputToInputWeights());
James Conroyee18dc82019-07-17 11:27:46 +01002360 layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002361 std::make_shared<ScopedCpuTensorHandle>(params.GetInputToForgetWeights());
James Conroyee18dc82019-07-17 11:27:46 +01002362 layer->m_QuantizedLstmParameters.m_InputToCellWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002363 std::make_shared<ScopedCpuTensorHandle>(params.GetInputToCellWeights());
James Conroyee18dc82019-07-17 11:27:46 +01002364 layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002365 std::make_shared<ScopedCpuTensorHandle>(params.GetInputToOutputWeights());
James Conroyee18dc82019-07-17 11:27:46 +01002366
2367 // RecurrentToX weights
2368 layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002369 std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToInputWeights());
James Conroyee18dc82019-07-17 11:27:46 +01002370 layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002371 std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToForgetWeights());
James Conroyee18dc82019-07-17 11:27:46 +01002372 layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002373 std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToCellWeights());
James Conroyee18dc82019-07-17 11:27:46 +01002374 layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
Finn Williams4422cec2021-03-22 17:51:06 +00002375 std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToOutputWeights());
James Conroyee18dc82019-07-17 11:27:46 +01002376
2377 // Bias
2378 layer->m_QuantizedLstmParameters.m_InputGateBias =
Finn Williams4422cec2021-03-22 17:51:06 +00002379 std::make_shared<ScopedCpuTensorHandle>(params.GetInputGateBias());
James Conroyee18dc82019-07-17 11:27:46 +01002380 layer->m_QuantizedLstmParameters.m_ForgetGateBias =
Finn Williams4422cec2021-03-22 17:51:06 +00002381 std::make_shared<ScopedCpuTensorHandle>(params.GetForgetGateBias());
James Conroyee18dc82019-07-17 11:27:46 +01002382 layer->m_QuantizedLstmParameters.m_CellBias =
Finn Williams4422cec2021-03-22 17:51:06 +00002383 std::make_shared<ScopedCpuTensorHandle>(params.GetCellBias());
James Conroyee18dc82019-07-17 11:27:46 +01002384 layer->m_QuantizedLstmParameters.m_OutputGateBias =
Finn Williams4422cec2021-03-22 17:51:06 +00002385 std::make_shared<ScopedCpuTensorHandle>(params.GetOutputGateBias());
James Conroyee18dc82019-07-17 11:27:46 +01002386
2387 return layer;
2388}

IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor,
                                              const LstmInputParams& params,
                                              const char* name)
{
    const auto layer = m_Graph->AddLayer<QLstmLayer>(descriptor, name);

    // QLstm Basic Parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    // QLstm Cifg parameters
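    // With CIFG (Coupled Input and Forget Gate) enabled, the input gate is
    // derived from the forget gate, so dedicated input-gate tensors are only
    // required (and only validated) when CIFG is disabled.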
    if(!descriptor.m_CifgEnabled)
    {
        if(params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Input To Input Weights cannot be NULL");
        }

        if(params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                "AddQLstmLayer: Recurrent To Input Weights cannot be NULL");
        }

        if(params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Input Gate Bias cannot be NULL");
        }

        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        layer->m_CifgParameters.m_InputGateBias =
            std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    // QLstm Projection parameters
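    // Projection multiplies the output state by an extra weight matrix
    // (optionally plus a bias) to reduce its dimensionality before it is fed
    // back as the next recurrent state.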
    if(descriptor.m_ProjectionEnabled)
    {
        if(params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Projection Weights cannot be NULL");
        }

        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));

        // Projection bias is optional even if projection is enabled, so guard
        // on the bias itself (guarding on the weights, as before, would
        // dereference a null bias pointer whenever the bias is omitted)
        if(params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    // QLstm Peephole params
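    // Peephole connections let each gate read the cell state directly through
    // elementwise cell-to-gate weights; cell-to-input weights only exist when
    // CIFG is disabled.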
    if(descriptor.m_PeepholeEnabled)
    {
        if(params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Cell To Forget Weights cannot be NULL");
        }

        if(params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Cell To Output Weights cannot be NULL");
        }

        if(!descriptor.m_CifgEnabled)
        {
            if(params.m_CellToInputWeights == nullptr)
            {
                throw InvalidArgumentException("AddQLstmLayer: Cell To Input Weights cannot be NULL");
            }

            layer->m_PeepholeParameters.m_CellToInputWeights =
                std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }

        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }

    // QLstm Layer Normalization params
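    // Layer normalization applies a per-gate normalization weight vector to
    // the pre-activation value of each gate; input layer-norm weights are only
    // needed when CIFG is disabled.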
    if(descriptor.m_LayerNormEnabled)
    {
        if(params.m_ForgetLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Forget layer normalization weights cannot be NULL");
        }

        if(params.m_CellLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Cell layer normalization weights cannot be NULL");
        }

        if(params.m_OutputLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Output layer normalization weights cannot be NULL");
        }

        if(!descriptor.m_CifgEnabled)
        {
            if(params.m_InputLayerNormWeights == nullptr)
            {
                throw InvalidArgumentException("AddQLstmLayer: Input layer normalization weights cannot be NULL");
            }

            layer->m_LayerNormParameters.m_InputLayerNormWeights =
                std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
        }

        layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
            std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
        layer->m_LayerNormParameters.m_CellLayerNormWeights =
            std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
        layer->m_LayerNormParameters.m_OutputLayerNormWeights =
            std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
    }

    return layer;
}
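
// Illustrative usage sketch (hypothetical caller code, not part of this file):
// with CIFG enabled and the other optional features disabled, only the nine
// basic tensors need to be supplied:
//
//     QLstmDescriptor desc;
//     desc.m_CifgEnabled       = true;   // input-gate tensors not required
//     desc.m_ProjectionEnabled = false;
//     desc.m_PeepholeEnabled   = false;
//     desc.m_LayerNormEnabled  = false;
//
//     LstmInputParams params;
//     params.m_InputToForgetWeights     = &inputToForgetWeights;
//     params.m_InputToCellWeights       = &inputToCellWeights;
//     params.m_InputToOutputWeights     = &inputToOutputWeights;
//     params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
//     params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
//     params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
//     params.m_ForgetGateBias           = &forgetGateBias;
//     params.m_CellBias                 = &cellBias;
//     params.m_OutputGateBias           = &outputGateBias;
//
//     IConnectableLayer* qLstm = network->AddQLstmLayer(desc, params, "qLstm");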

IConnectableLayer* NetworkImpl::AddLogicalBinaryLayer(const LogicalBinaryDescriptor& logicalBinaryDescriptor,
                                                      const char* name)
{
    return m_Graph->AddLayer<LogicalBinaryLayer>(logicalBinaryDescriptor, name);
}
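
// The descriptor selects the elementwise operation to apply; for example
// (hypothetical caller code):
//
//     LogicalBinaryDescriptor desc(LogicalBinaryOperation::LogicalAnd);
//     IConnectableLayer* logicalAnd = network->AddLogicalBinaryLayer(desc, "and");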

void NetworkImpl::Accept(ILayerVisitor& visitor) const
{
    for (auto layer : GetGraph())
    {
        layer->Accept(visitor);
    }
}
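
// A minimal visitor sketch (hypothetical): deriving from
// LayerVisitorBase<VisitorNoThrowPolicy> lets a caller override only the
// Visit* callbacks of interest and pass the visitor to INetwork::Accept.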

void NetworkImpl::ExecuteStrategy(IStrategy& strategy) const
{
    for (auto layer : GetGraph())
    {
        layer->ExecuteStrategy(strategy);
    }
}
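
// ExecuteStrategy is the IStrategy-based counterpart of Accept above: both walk
// every layer of the graph and hand each one to the supplied callback object.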

OptimizedNetworkImpl::OptimizedNetworkImpl(std::unique_ptr<Graph> graph)
    : m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid())
{
}

OptimizedNetworkImpl::OptimizedNetworkImpl(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions)
    : m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid()), m_ModelOptions(modelOptions)
{
}

OptimizedNetworkImpl::~OptimizedNetworkImpl()
{
}

} // namespace armnn