//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Network.hpp"
#include "Graph.hpp"
#include "Layer.hpp"
#include "DeviceSpec.hpp"
#include "Optimizer.hpp"
#include "SubgraphViewSelector.hpp"
#include "BackendSettings.hpp"
#include "optimizations/All.hpp"

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/TensorHandleFactoryRegistry.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <ProfilingService.hpp>

#include <fcntl.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <vector>
namespace armnn
{

INetwork::INetwork(NetworkOptions networkOptions) : pNetworkImpl(new NetworkImpl(networkOptions)) {}

INetwork::~INetwork() = default;

Status INetwork::PrintGraph()
{
    return pNetworkImpl->PrintGraph();
}

IConnectableLayer* INetwork::AddInputLayer(LayerBindingId id, const char* name)
{
    return pNetworkImpl->AddInputLayer(id, name);
}

IConnectableLayer* INetwork::AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
                                               const char* name)
{
    return pNetworkImpl->AddArgMinMaxLayer(desc, name);
}

IConnectableLayer* INetwork::AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
                                                const char* name)
{
    return pNetworkImpl->AddComparisonLayer(comparisonDescriptor, name);
}

IConnectableLayer* INetwork::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
                                            const char* name)
{
    return pNetworkImpl->AddConcatLayer(concatDescriptor, name);
}

IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                   const ConstTensor& weights,
                                                   const Optional<ConstTensor>& biases,
                                                   const char* name)
{
    return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                   const ConstTensor& weights,
                                                   const char* name)
{
    Optional<ConstTensor> biases;
    return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                   const ConstTensor& weights,
                                                   const ConstTensor& biases,
                                                   const char* name)
{
    return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor,
                                               weights,
                                               armnn::Optional<ConstTensor>(biases),
                                               name);
}

IConnectableLayer* INetwork::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
                                                  const char* name)
{
    return pNetworkImpl->AddDepthToSpaceLayer(depthToSpaceDescriptor, name);
}

IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const Optional<ConstTensor>& biases,
    const char* name)
{
    return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const char* name)
{
    Optional<ConstTensor> biases;
    return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor& biases,
    const char* name)
{
    return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights,
                                                        armnn::Optional<ConstTensor>(biases), name);
}

IConnectableLayer* INetwork::AddDequantizeLayer(const char* name)
{
    return pNetworkImpl->AddDequantizeLayer(name);
}

IConnectableLayer* INetwork::AddDetectionPostProcessLayer(
    const DetectionPostProcessDescriptor& descriptor,
    const ConstTensor& anchors,
    const char* name)
{
    return pNetworkImpl->AddDetectionPostProcessLayer(descriptor, anchors, name);
}

IConnectableLayer* INetwork::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
                                                      const char* name)
{
    return pNetworkImpl->AddElementwiseUnaryLayer(elementwiseUnaryDescriptor, name);
}

IConnectableLayer* INetwork::AddFillLayer(const FillDescriptor& fillDescriptor,
                                          const char* name)
{
    return pNetworkImpl->AddFillLayer(fillDescriptor, name);
}

IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                    const ConstTensor& weights,
                                                    const Optional<ConstTensor>& biases,
                                                    const char* name)
{
    return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, weights, biases, name);
}

IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                    const ConstTensor& weights,
                                                    const char* name)
{
    Optional<ConstTensor> biases;
    return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, weights, biases, name);
}

IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                    const ConstTensor& weights,
                                                    const ConstTensor& biases,
                                                    const char* name)
{
    return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, weights,
                                                armnn::Optional<ConstTensor>(biases), name);
}

IConnectableLayer* INetwork::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                             const char* name)
{
    return pNetworkImpl->AddPermuteLayer(permuteDescriptor, name);
}

IConnectableLayer* INetwork::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                                    const char* name)
{
    return pNetworkImpl->AddBatchToSpaceNdLayer(batchToSpaceNdDescriptor, name);
}

IConnectableLayer* INetwork::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
                                               const char* name)
{
    return pNetworkImpl->AddPooling2dLayer(pooling2dDescriptor, name);
}

IConnectableLayer* INetwork::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
                                                const char* name)
{
    return pNetworkImpl->AddActivationLayer(activationDescriptor, name);
}

IConnectableLayer* INetwork::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
                                                   const char* name)
{
    return pNetworkImpl->AddNormalizationLayer(normalizationDescriptor, name);
}

IConnectableLayer* INetwork::AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name)
{
    return pNetworkImpl->AddSliceLayer(sliceDescriptor, name);
}

IConnectableLayer* INetwork::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
                                             const char* name)
{
    return pNetworkImpl->AddSoftmaxLayer(softmaxDescriptor, name);
}

IConnectableLayer* INetwork::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
                                              const char* name)
{
    return pNetworkImpl->AddSplitterLayer(splitterDescriptor, name);
}

IConnectableLayer* INetwork::AddMergeLayer(const char* name)
{
    return pNetworkImpl->AddMergeLayer(name);
}

IConnectableLayer* INetwork::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
                                            const char* name)
{
    return pNetworkImpl->AddConcatLayer(mergerDescriptor, name);
}

IConnectableLayer* INetwork::AddAbsLayer(const char* name)
{
    return pNetworkImpl->AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
}

IConnectableLayer* INetwork::AddAdditionLayer(const char* name)
{
    return pNetworkImpl->AddAdditionLayer(name);
}

IConnectableLayer* INetwork::AddMultiplicationLayer(const char* name)
{
    return pNetworkImpl->AddMultiplicationLayer(name);
}

IConnectableLayer* INetwork::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                        const ConstTensor& mean,
                                                        const ConstTensor& variance,
                                                        const ConstTensor& beta,
                                                        const ConstTensor& gamma,
                                                        const char* name)
{
    return pNetworkImpl->AddBatchNormalizationLayer(desc, mean, variance, beta, gamma, name);
}

IConnectableLayer* INetwork::AddRankLayer(const char* name)
{
    return pNetworkImpl->AddRankLayer(name);
}

IConnectableLayer* INetwork::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
                                                    const char* name)
{
    ResizeDescriptor resizeDescriptor;
    resizeDescriptor.m_Method           = ResizeMethod::Bilinear;
    resizeDescriptor.m_DataLayout       = descriptor.m_DataLayout;
    resizeDescriptor.m_TargetWidth      = descriptor.m_TargetWidth;
    resizeDescriptor.m_TargetHeight     = descriptor.m_TargetHeight;
    resizeDescriptor.m_AlignCorners     = descriptor.m_AlignCorners;
    resizeDescriptor.m_HalfPixelCenters = descriptor.m_HalfPixelCenters;

    return pNetworkImpl->AddResizeLayer(resizeDescriptor, name);
}

IConnectableLayer* INetwork::AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
                                            const char* name)
{
    return pNetworkImpl->AddResizeLayer(resizeDescriptor, name);
}

IConnectableLayer* INetwork::AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
                                            const char* name)
{
    return pNetworkImpl->AddReduceLayer(reduceDescriptor, name);
}

IConnectableLayer* INetwork::AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
                                                           const char* name)
{
    return pNetworkImpl->AddInstanceNormalizationLayer(desc, name);
}

IConnectableLayer* INetwork::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                     const char* name)
{
    return pNetworkImpl->AddL2NormalizationLayer(desc, name);
}

IConnectableLayer* INetwork::AddLogSoftmaxLayer(const LogSoftmaxDescriptor& logSoftmaxDescriptor,
                                                const char* name)
{
    return pNetworkImpl->AddLogSoftmaxLayer(logSoftmaxDescriptor, name);
}

IConnectableLayer* INetwork::AddConstantLayer(const ConstTensor& input,
                                              const char* name)
{
    return pNetworkImpl->AddConstantLayer(input, name);
}

IConnectableLayer* INetwork::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                             const char* name)
{
    return pNetworkImpl->AddReshapeLayer(reshapeDescriptor, name);
}

IConnectableLayer* INetwork::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                    const char* name)
{
    return pNetworkImpl->AddSpaceToBatchNdLayer(spaceToBatchNdDescriptor, name);
}

IConnectableLayer* INetwork::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
                                                  const char* name)
{
    return pNetworkImpl->AddSpaceToDepthLayer(spaceToDepthDescriptor, name);
}

IConnectableLayer* INetwork::AddFloorLayer(const char* name)
{
    return pNetworkImpl->AddFloorLayer(name);
}

IConnectableLayer* INetwork::AddOutputLayer(LayerBindingId id, const char* name)
{
    return pNetworkImpl->AddOutputLayer(id, name);
}

IConnectableLayer* INetwork::AddLstmLayer(const LstmDescriptor& descriptor,
                                          const LstmInputParams& params,
                                          const char* name)
{
    return pNetworkImpl->AddLstmLayer(descriptor, params, name);
}

IConnectableLayer* INetwork::AddDivisionLayer(const char* name)
{
    return pNetworkImpl->AddDivisionLayer(name);
}

IConnectableLayer* INetwork::AddSubtractionLayer(const char* name)
{
    return pNetworkImpl->AddSubtractionLayer(name);
}

IConnectableLayer* INetwork::AddMaximumLayer(const char* name)
{
    return pNetworkImpl->AddMaximumLayer(name);
}

IConnectableLayer* INetwork::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
{
    return pNetworkImpl->AddMeanLayer(meanDescriptor, name);
}

IConnectableLayer* INetwork::AddPadLayer(const PadDescriptor& padDescriptor,
                                         const char* name)
{
    return pNetworkImpl->AddPadLayer(padDescriptor, name);
}

IConnectableLayer* INetwork::AddQuantizeLayer(const char* name)
{
    return pNetworkImpl->AddQuantizeLayer(name);
}

IConnectableLayer* INetwork::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                                  const char* name)
{
    return pNetworkImpl->AddStridedSliceLayer(stridedSliceDescriptor, name);
}

IConnectableLayer* INetwork::AddMinimumLayer(const char* name)
{
    return pNetworkImpl->AddMinimumLayer(name);
}

IConnectableLayer* INetwork::AddGreaterLayer(const char* name)
{
    return pNetworkImpl->AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
}

IConnectableLayer* INetwork::AddEqualLayer(const char* name)
{
    return pNetworkImpl->AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
}

IConnectableLayer* INetwork::AddRsqrtLayer(const char* name)
{
    return pNetworkImpl->AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
}

IConnectableLayer* INetwork::AddGatherLayer(const char* name)
{
    GatherDescriptor gatherDescriptor{};
    return pNetworkImpl->AddGatherLayer(gatherDescriptor, name);
}

IConnectableLayer* INetwork::AddGatherLayer(const GatherDescriptor& descriptor,
                                            const char* name)
{
    return pNetworkImpl->AddGatherLayer(descriptor, name);
}

IConnectableLayer* INetwork::AddSwitchLayer(const char* name)
{
    return pNetworkImpl->AddSwitchLayer(name);
}

IConnectableLayer* INetwork::AddPreluLayer(const char* name)
{
    return pNetworkImpl->AddPreluLayer(name);
}

IConnectableLayer* INetwork::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
                                                            const ConstTensor& weights,
                                                            const Optional<ConstTensor>& biases,
                                                            const char* name)
{
    return pNetworkImpl->AddTransposeConvolution2dLayer(descriptor, weights, biases, name);
}

IConnectableLayer* INetwork::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
                                               const char* name)
{
    return pNetworkImpl->AddTransposeLayer(transposeDescriptor, name);
}

IConnectableLayer* INetwork::AddStackLayer(const StackDescriptor& descriptor,
                                           const char* name)
{
    return pNetworkImpl->AddStackLayer(descriptor, name);
}

IConnectableLayer* INetwork::AddStandInLayer(const StandInDescriptor& descriptor,
                                             const char* name)
{
    return pNetworkImpl->AddStandInLayer(descriptor, name);
}

IConnectableLayer* INetwork::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
                                                   const char* name)
{
    return pNetworkImpl->AddQuantizedLstmLayer(params, name);
}

IConnectableLayer* INetwork::AddQLstmLayer(const QLstmDescriptor& descriptor,
                                           const LstmInputParams& params,
                                           const char* name)
{
    return pNetworkImpl->AddQLstmLayer(descriptor, params, name);
}

IConnectableLayer* INetwork::AddLogicalBinaryLayer(const LogicalBinaryDescriptor& descriptor,
                                                   const char* name)
{
    return pNetworkImpl->AddLogicalBinaryLayer(descriptor, name);
}

void INetwork::Accept(ILayerVisitor& visitor) const
{
    return pNetworkImpl->Accept(visitor);
}

void INetwork::ExecuteStrategy(IStrategy& strategy) const
{
    return pNetworkImpl->ExecuteStrategy(strategy);
}

armnn::INetwork* INetwork::CreateRaw(NetworkOptions networkOptions)
{
    return new INetwork(networkOptions);
}

armnn::INetworkPtr INetwork::Create(NetworkOptions networkOptions)
{
    return INetworkPtr(CreateRaw(networkOptions), &INetwork::Destroy);
}

void INetwork::Destroy(INetwork* network)
{
    delete network;
}
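
// A minimal usage sketch of the INetwork facade above (illustrative only; the
// layer graph, binding ids, and tensor shape are assumptions, not taken from
// this file):
//
//     armnn::INetworkPtr net = armnn::INetwork::Create();
//     armnn::IConnectableLayer* input   = net->AddInputLayer(0, "input");
//     armnn::IConnectableLayer* softmax = net->AddSoftmaxLayer(armnn::SoftmaxDescriptor(), "softmax");
//     armnn::IConnectableLayer* output  = net->AddOutputLayer(0, "output");
//     input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
//     softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
//     armnn::TensorInfo info({1, 10}, armnn::DataType::Float32);
//     input->GetOutputSlot(0).SetTensorInfo(info);
//     softmax->GetOutputSlot(0).SetTensorInfo(info);
//
// The INetworkPtr returned by Create() carries &INetwork::Destroy as its
// deleter, so the network is released automatically when the pointer goes out
// of scope.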

IOptimizedNetwork::IOptimizedNetwork(std::unique_ptr<Graph> graph)
    : pOptimizedNetworkImpl(new OptimizedNetworkImpl(std::move(graph))) {}

IOptimizedNetwork::IOptimizedNetwork(std::unique_ptr<OptimizedNetworkImpl> impl)
    : pOptimizedNetworkImpl(std::move(impl)) {}

IOptimizedNetwork::IOptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions)
    : pOptimizedNetworkImpl(new OptimizedNetworkImpl(std::move(graph), modelOptions)) {}

IOptimizedNetwork::~IOptimizedNetwork() = default;

void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
{
    delete network;
}

Status IOptimizedNetwork::PrintGraph()
{
    return pOptimizedNetworkImpl->PrintGraph();
}

Status IOptimizedNetwork::SerializeToDot(std::ostream& stream) const
{
    return pOptimizedNetworkImpl->SerializeToDot(stream);
}

profiling::ProfilingGuid IOptimizedNetwork::GetGuid() const
{
    return pOptimizedNetworkImpl->GetGuid();
}

Status OptimizedNetworkImpl::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

Status OptimizedNetworkImpl::SerializeToDot(std::ostream& stream) const
{
    return m_Graph->SerializeToDot(stream);
}

void ReportError(const std::string& errorMessage,
                 Optional<std::vector<std::string>&> errorMessages)
{
    std::stringstream fullErrorMessage;
    fullErrorMessage << "ERROR: " << errorMessage;
    ARMNN_LOG(warning) << fullErrorMessage.str();
    if (errorMessages)
    {
        errorMessages.value().push_back(fullErrorMessage.str());
    }
}

void ReportWarning(const std::string& warningMessage,
                   Optional<std::vector<std::string>&> warningMessages)
{
    std::stringstream fullWarningMessage;
    fullWarningMessage << "WARNING: " << warningMessage;
    ARMNN_LOG(warning) << fullWarningMessage.str();
    if (warningMessages)
    {
        warningMessages.value().push_back(fullWarningMessage.str());
    }
}

OptimizationResult ReturnWithError(OptimizationResult res,
                                   const Layer* layer,
                                   const BackendSettings& backendSettings,
                                   Optional<std::vector<std::string>&> errMessages)
{
    std::stringstream failureMsg;
    failureMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
               << " is not supported on any preferred backend " << backendSettings.m_PreferredBackends;
    ReportError(failureMsg.str(), errMessages);

    res.m_Error = true;
    return res;
}

bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
{
    bool noErrors = true;
    unsigned int numOutputs = layer->GetNumOutputSlots();
    for (unsigned int i = 0; i < numOutputs; i++) {
        OutputSlot& outputSlot = layer->GetOutputSlot(i);
        TensorInfo info = outputSlot.GetTensorInfo();
        if (DataType::QAsymmU8 == info.GetDataType()) {
            if (0.f == info.GetQuantizationScale()) {
                noErrors = false;
                std::stringstream ss;
                ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
                   << " (" << layer->GetNameStr() << ") is of type"
                   << " Quantized 8 bit but its scale parameter has not been set";
                ReportError(ss.str(), errMessages);
            }
            // Softmax under QuantisedAsymm8 must always be scale (1.0f/256.0f) and offset 0
            if ((info.GetQuantizationScale() != (1.0f / 256.0f) ||
                 info.GetQuantizationOffset() != 0) &&
                 layer->GetType() == armnn::LayerType::Softmax)
            {
                std::stringstream ss;
                ss << "Quantization parameters for Softmax layer (Scale: " <<
                    info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
                    ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
                ARMNN_LOG(warning) << ss.str();
                info.SetQuantizationScale((1.0f / 256.0f));
                info.SetQuantizationOffset(0);
                outputSlot.SetTensorInfo(info);
            }
        }
    }
    return noErrors;
}

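// Illustrative sketch of the invariants enforced above (hypothetical values,
// not taken from this file): every QAsymmU8 output needs a non-zero scale,
// and a QAsymmU8 Softmax output is expected to carry scale 1/256 and offset 0.
//
//     armnn::TensorInfo info({1, 10}, armnn::DataType::QAsymmU8);
//     info.SetQuantizationScale(1.0f / 256.0f); // 0.00390625, as required for Softmax
//     info.SetQuantizationOffset(0);
//     softmaxLayer->GetOutputSlot(0).SetTensorInfo(info);
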
template <typename LayerT>
LayerT* ConvertBf16ToFp32Weight(Layer* l)
{
    LayerT* layer = PolymorphicDowncast<LayerT*>(l);
    if ((layer->GetType() == LayerType::Convolution2d || layer->GetType() == LayerType::FullyConnected)
         && layer->m_Weight)
    {
        const TensorInfo& info = layer->m_Weight->GetTensorInfo();

        if (info.GetDataType() == DataType::BFloat16)
        {
            std::vector<float> newValues(info.GetNumElements());

            armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(
                layer->m_Weight->template GetTensor<armnn::BFloat16>(), info.GetNumElements(), newValues.data());

            TensorInfo newInfo(info.GetShape(), DataType::Float32);
            ConstTensor newInput(newInfo, newValues);
            layer->m_Weight.reset(new ScopedCpuTensorHandle(newInput));
        }
    }
    return layer;
}

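// Note on the widening above (worked example; the value is illustrative):
// BFloat16 keeps the sign, exponent, and top 7 mantissa bits of an IEEE-754
// float32, so converting BF16 weights up to FP32 is exact.
//
//     BFloat16 bits: 0x4049     (~3.140625f)
//     Float32 bits:  0x40490000 (same value; mantissa zero-padded)
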
OptimizationResult AttemptBackendAssignment(BackendSettings& backendSettings,
                                            Graph& graph,
                                            Layer* layer,
                                            BackendId backend,
                                            DataType dataTypeIn,
                                            DataType dataTypeOut,
                                            const std::vector<BackendId>& availablePreferredBackends,
                                            std::string& reasonIfUnsupported,
                                            Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose meaningful error message before returning with error
    auto ReturnError = [&](const Layer* layer)
    {
        return ReturnWithError(result, layer, backendSettings, errMessages);
    };

    // Need to set the compute device on the layer
    // before we can check if it is supported.
    layer->SetBackendId(backend);
    if (!IWorkloadFactory::IsLayerSupported(*layer, EmptyOptional(), reasonIfUnsupported))
    {
        if (dataTypeIn == DataType::Float16 || dataTypeOut == DataType::Float16)
        {
            if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                && layer->GetType() != LayerType::ConvertFp32ToFp16
                && layer->GetType() != LayerType::ConvertFp16ToFp32)
            {
                // Insert FP16 -> FP32 conversion layer before current layer
                std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers;
                if (dataTypeIn == DataType::Float16)
                {
                    convertFp16ToFp32Layers =
                        InsertConvertFp16ToFp32LayersBefore(graph, *layer);
                }

                // Insert FP32 -> FP16 conversion layer after current layer
                std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers;
                if (dataTypeOut == DataType::Float16)
                {
                    convertFp32ToFp16Layers =
                        InsertConvertFp32ToFp16LayersAfter(graph, *layer);
                }

                // Assign a supported backend to the newly introduced conversion layers
                auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                {
                    bool supportedBackendFound = false;
                    std::string reasonIfUnsupported;

                    // Try preferred backend first
                    layer->SetBackendId(preferredBackend);
                    if (IWorkloadFactory::IsLayerSupported(*layer,
                                                           EmptyOptional(),
                                                           reasonIfUnsupported))
                    {
                        supportedBackendFound = true;
                    }
                    else
                    {
                        for (const auto& backend : availablePreferredBackends)
                        {
                            // Skip preferred backend (we already determined that it is not supported)
                            if (backend == preferredBackend)
                            {
                                continue;
                            }

                            layer->SetBackendId(backend);
                            if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                   EmptyOptional(),
                                                                   reasonIfUnsupported))
                            {
                                supportedBackendFound = true;
                                break;
                            }
                        }
                    }

                    return supportedBackendFound;
                };

                for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                return result;
            }
        }
        else if (dataTypeIn == DataType::BFloat16 || dataTypeOut == DataType::BFloat16)
        {
            if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                && layer->GetType() != LayerType::ConvertFp32ToBf16
                && layer->GetType() != LayerType::ConvertBf16ToFp32)
            {
                // Insert BF16 -> FP32 conversion layer before current layer
                std::vector<ConvertBf16ToFp32Layer*> convertBf16ToFp32Layers;
                if (dataTypeIn == DataType::BFloat16)
                {
                    convertBf16ToFp32Layers =
                        InsertConvertBf16ToFp32LayersBefore(graph, *layer);
                    if (layer->GetType() == LayerType::Convolution2d)
                    {
                        ConvertBf16ToFp32Weight<Convolution2dLayer>(layer);
                    }
                    else if (layer->GetType() == LayerType::FullyConnected)
                    {
                        ConvertBf16ToFp32Weight<FullyConnectedLayer>(layer);
                    }
                }

                // Insert FP32 -> BF16 conversion layer after current layer
                std::vector<ConvertFp32ToBf16Layer*> convertFp32ToBf16Layers;
                if (dataTypeOut == DataType::BFloat16)
                {
                    convertFp32ToBf16Layers =
                        InsertConvertFp32ToBf16LayersAfter(graph, *layer);
                }

                // Assign a supported backend to the newly introduced conversion layers
                auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                {
                    bool supportedBackendFound = false;
                    std::string reasonIfUnsupported;

                    // Try preferred backend first
                    layer->SetBackendId(preferredBackend);
                    if (IWorkloadFactory::IsLayerSupported(*layer,
                                                           EmptyOptional(),
                                                           reasonIfUnsupported))
                    {
                        supportedBackendFound = true;
                    }
                    else
                    {
                        for (const auto& backend : availablePreferredBackends)
                        {
                            // Skip preferred backend (we already determined that it is not supported)
                            if (backend == preferredBackend)
                            {
                                continue;
                            }

                            layer->SetBackendId(backend);
                            if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                   EmptyOptional(),
                                                                   reasonIfUnsupported))
                            {
                                supportedBackendFound = true;
                                break;
                            }
                        }
                    }

                    return supportedBackendFound;
                };

                for (ConvertBf16ToFp32Layer* convertLayer : convertBf16ToFp32Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                for (ConvertFp32ToBf16Layer* convertLayer : convertFp32ToBf16Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                return result;
            }
        }

        std::stringstream warningMsg;
        warningMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                   << " is not supported on requested backend " << layer->GetBackendId().Get()
                   << " for input data type " << GetDataTypeName(dataTypeIn)
                   << " and output data type " << GetDataTypeName(dataTypeOut)
                   << " (reason: " << reasonIfUnsupported
                   << "), falling back to the next backend.";
        ReportWarning(warningMsg.str(), errMessages);

        return OptimizationResult(true, false);
    }
    else
    {
        return result;
    }
}

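// Sketch of the graph rewrite performed above for an FP16 tensor stream
// (hypothetical layers; "L" stands for any layer the chosen backend only
// supports in FP32):
//
//     before:  ... -Float16-> L -Float16-> ...
//     after:   ... -Float16-> ConvertFp16ToFp32 -Float32-> L -Float32-> ConvertFp32ToFp16 -Float16-> ...
//
// The same pattern is applied for BFloat16 via the Bf16<->Fp32 conversion
// layers, with Convolution2d and FullyConnected weights widened in place.
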
OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  Graph::Iterator& firstLayer,
                                  Graph::Iterator& lastLayer,
                                  Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose meaningful error message before returning with error
    auto ReturnError = [&](const Layer* layer)
    {
        return ReturnWithError(result, layer, backendSettings, errMessages);
    };

    auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
    if (availablePreferredBackends.empty())
    {
        std::stringstream failureMsg;
        failureMsg << "No preferred backends are available";
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    }

    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        auto layer = *it;

        DataType dataTypeIn  = layer->GetNumInputSlots() == 0 ? DataType::Float32 :
            layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType();
        DataType dataTypeOut = layer->GetNumOutputSlots() == 0 ? DataType::Float32 :
            layer->GetOutputSlot(0).GetTensorInfo().GetDataType();

        std::string reasonIfUnsupported;
        bool found = false;
        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
        {
            // Don't bomb immediately, find all the quantized outputs
            // which haven't had a scale set and report them all back.
            result.m_Error = true;
        }

        // First try to assign the layer to its hinted backend
        if (layer->GetBackendHint().has_value() &&
            backendSettings.IsBackendSupported(layer->GetBackendHint().value()) &&
            AttemptBackendAssignment(backendSettings,
                                     optNetObjPtr->GetGraph(),
                                     layer,
                                     layer->GetBackendHint().value(),
                                     dataTypeIn,
                                     dataTypeOut,
                                     availablePreferredBackends,
                                     reasonIfUnsupported,
                                     errMessages).IsOk())
        {
            found = true;
            backendSettings.m_SelectedBackends.insert(layer->GetBackendHint().value());
        }
        else
        {
            // Try to assign the layer to the preferred list of backends
            for (const auto& backend : availablePreferredBackends)
            {
                if (layer->GetBackendHint().has_value() &&
                    layer->GetBackendHint().value() == backend)
                {
                    continue; // Don't re-test the backend hint
                }

                OptimizationResult res = AttemptBackendAssignment(backendSettings,
                                                                  optNetObjPtr->GetGraph(),
                                                                  layer,
                                                                  backend,
                                                                  dataTypeIn,
                                                                  dataTypeOut,
                                                                  availablePreferredBackends,
                                                                  reasonIfUnsupported,
                                                                  errMessages);

                if (res.IsOk())
                {
                    found = true;
                    backendSettings.m_SelectedBackends.insert(backend);
                    break;
                }
                else if (res.IsError())
                {
                    return res; // Cannot continue.
                                // Note: we don't need to log the error as it would already
                                // be logged in AttemptBackendAssignment().
                }
                else
                {
                    ARMNN_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
                }
            }
        }

        // If the layer is unsupported by every backend, log the failure and return an error.
        if (!found)
        {
            // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
            // fallback we should set the compute device on the layer to CpuRef (these are not
            // available as accelerated operations, or are only available under certain
            // conditions, currently they comprise MemCopy, Constant, Permute)
            armnn::LayerType layerType = layer->GetType();
            if (!backendSettings.IsCpuRefUsed() && (layerType == armnn::LayerType::MemCopy ||
                                                    layerType == armnn::LayerType::Constant ||
                                                    layerType == armnn::LayerType::Permute))
            {
                BackendId cpuBackendId(armnn::Compute::CpuRef);
                layer->SetBackendId(cpuBackendId);
                backendSettings.m_SelectedBackends.insert(cpuBackendId);
            }
            else
            {
                return ReturnError(layer);
            }
        }
    }

    return result;
}

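// Worked example of the assignment order above (hypothetical preference list):
// with availablePreferredBackends = { GpuAcc, CpuAcc, CpuRef }, a layer hinted
// to CpuAcc is tried on CpuAcc first; on failure the list is walked in order
// (skipping the hint), and an unassignable MemCopy, Constant, or Permute layer
// still falls back to CpuRef even when CpuRef was not requested.
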
OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  SubgraphView& subgraph,
                                  Optional<std::vector<std::string>&> errMessages)
{
    Graph::Iterator firstLayer = subgraph.begin();
    Graph::Iterator lastLayer  = subgraph.end();
    return AssignBackends(optNetObjPtr,
                          backendSettings,
                          firstLayer,
                          lastLayer,
                          errMessages);
}

BackendsMap CreateSupportedBackends(TensorHandleFactoryRegistry& handleFactoryRegistry,
                                    BackendSettings& backendSettings)
{
    BackendsMap backends;
    auto const& backendRegistry = BackendRegistryInstance();
    for (auto&& selectedBackend : backendSettings.m_SupportedBackends)
    {
        auto backendFactory = backendRegistry.GetFactory(selectedBackend);
        auto backendObjPtr  = backendFactory();
        ARMNN_ASSERT(backendObjPtr);

        backendObjPtr->RegisterTensorHandleFactories(handleFactoryRegistry);

        backends[backendObjPtr->GetId()] = std::move(backendObjPtr);
    }

    return backends;
}

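// Sketch of how a backend reaches the registry consumed above (illustrative;
// "SampleBackend" is a hypothetical backend, not one shipped with Arm NN).
// A backend library self-registers a factory under its BackendId, typically
// with a static initializer:
//
//     static armnn::BackendRegistry::StaticRegistryInitializer g_Register
//     {
//         armnn::BackendRegistryInstance(),
//         armnn::BackendId("SampleBackend"),
//         []() { return armnn::IBackendInternalUniquePtr(new SampleBackend()); }
//     };
//
// CreateSupportedBackends() then instantiates one IBackendInternal per
// supported id and collects the tensor handle factories each backend registers.
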
OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl* optNetObjPtr,
                                             BackendSettings& backendSettings,
                                             BackendsMap& backends,
                                             const ModelOptions& modelOptions,
                                             Optional<std::vector<std::string>&> errMessages)
{
    ARMNN_ASSERT(optNetObjPtr);

    OptimizationResult result;

    // Get the optimized graph
    Graph& optGraph = optNetObjPtr->GetGraph();

    // Run backend specific optimizations
    for (auto&& selectedBackend : backendSettings.m_SelectedBackends)
    {
        auto backendObjPtr = backends.find(selectedBackend)->second.get();
        ARMNN_ASSERT(backendObjPtr);

        // Select sub-graphs based on backend
        SubgraphViewSelector::Subgraphs subgraphs =
            SubgraphViewSelector::SelectSubgraphs(optGraph,
                                                  // Select layers assigned to the requested backend
                                                  [&backendObjPtr](const Layer& layer)
                                                  {
                                                      return layer.GetType() != LayerType::Input &&
                                                             layer.GetType() != LayerType::Output &&
                                                             layer.GetBackendId() == backendObjPtr->GetId();
                                                  });
        if (subgraphs.empty())
        {
            // No sub-graphs found, try with next selected backend
            continue;
        }

        // Try to optimize each sub-graph
        for (auto& subgraph : subgraphs)
        {
            // Try to optimize the current sub-graph
            OptimizationViews optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraph, modelOptions);
            ARMNN_ASSERT(optimizationViews.Validate(*subgraph));

            // Optimization attempted, check the resulting optimized sub-graph
            for (auto& substitution : optimizationViews.GetSubstitutions())
            {
                // Sub-graph optimized, substitute the sub-graph with the new optimized one in the main optimized graph
                SubgraphView& replacementSubgraph   = substitution.m_ReplacementSubgraph;
                SubgraphView& substitutableSubgraph = substitution.m_SubstitutableSubgraph;
                optGraph.SubstituteSubgraph(substitutableSubgraph, replacementSubgraph);

                // Assign the current backend to the optimized sub-graph
                std::for_each(replacementSubgraph.begin(), replacementSubgraph.end(), [&selectedBackend](Layer* l)
                    {
                        ARMNN_ASSERT(l);
                        l->SetBackendId(selectedBackend);
                    });
            }

            if (!optimizationViews.GetFailedSubgraphs().empty())
            {
                std::stringstream warningMsg;
                warningMsg << "Some sub-graph(s) failed to optimize on " << backendObjPtr->GetId() << " backend.";
                ReportWarning(warningMsg.str(), errMessages);

                // Failed to optimize the given sub-graph, re-assign the sub-graph layers to other available backends
                BackendSettings settingsCopy(backendSettings);
                if (!backendObjPtr->GetId().IsCpuRef())
                {
                    // Add the current backend to the list of backends to ignore
                    settingsCopy.m_IgnoredBackends.insert(backendObjPtr->GetId());
                }

                int count = 0;
                for (auto& failedSubgraph : optimizationViews.GetFailedSubgraphs())
                {
                    // An error occurred: the optimization was attempted but not performed, try different backends
                    std::stringstream subgraphMsg;
                    subgraphMsg << "Re-assigning backends to " << failedSubgraph.GetLayers().size()
                                << " layers inside sub-graph " << count++;
                    ReportWarning(subgraphMsg.str(), errMessages);

                    OptimizationResult reassignmentResult = AssignBackends(optNetObjPtr,
                                                                           settingsCopy,
                                                                           *subgraph,
                                                                           errMessages);
                    if (reassignmentResult.m_Error)
                    {
                        // Failed to re-assign one of the remaining backends to each layer of the sub-graph
                        result.m_Error = true;
                        return result;
                    }
                }
            }
        }
    }

    return result;
}

bool RequiresCopy(ITensorHandleFactory::FactoryId src,
                  ITensorHandleFactory::FactoryId dst,
                  TensorHandleFactoryRegistry& registry)
{
    if (src != dst)
    {
        ITensorHandleFactory* srcFactory = registry.GetFactory(src);
        ITensorHandleFactory* dstFactory = registry.GetFactory(dst);

        if (srcFactory && dstFactory &&
            (srcFactory->GetExportFlags() & dstFactory->GetImportFlags()) != 0)
        {
            return false;
        }
        return true;
    }
    return false;
}

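// Worked example of the flag test above (hypothetical masks): export and
// import capabilities are bitmasks of MemorySource values, so two distinct
// factories can still share a tensor when the masks intersect.
//
//     srcFactory->GetExportFlags() -> MemorySource::Malloc (0b01)
//     dstFactory->GetImportFlags() -> MemorySource::Malloc (0b01)
//     (0b01 & 0b01) != 0           -> RequiresCopy() returns false (zero-copy)
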
// Find the handle factory for the input layer which results in fewest required copies.
ITensorHandleFactory::FactoryId CalculateSlotOptionForInput(BackendsMap& backends,
                                                            OutputSlot& slot,
                                                            TensorHandleFactoryRegistry& registry)
{
    Layer& layer = slot.GetOwningLayer();
    ARMNN_ASSERT(layer.GetType() == LayerType::Input);

    // Explicitly select the tensorhandle factory for InputLayer because the rules for it are slightly different. It
    // doesn't matter which backend it is assigned to because they all use the same implementation, which
    // requires Map/Unmap support. This means that, so long as the handle type supports map/unmap semantics, we can
    // select a factory with maximum compatibility with the layers connected to the InputLayer.

    // First ensure the from backends can support the TensorHandle API
    auto frmBackend = backends.find(layer.GetBackendId());
    if (frmBackend == backends.end() ||
        !frmBackend->second->SupportsTensorAllocatorAPI())
    {
        return ITensorHandleFactory::LegacyFactoryId;
    }

    // Go through all connections to the output slot and determine the TensorHandleFactory which results in the
    // fewest copies.
    std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
    int topScore = 0;
    ITensorHandleFactory::FactoryId topChoice = ITensorHandleFactory::LegacyFactoryId;

    for (auto&& connection : slot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();

        auto toBackend = backends.find(connectedLayer.GetBackendId());
        ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

        if (!toBackend->second.get()->SupportsTensorAllocatorAPI())
        {
            // The destination backend does not support the tensor allocator API, move to the next one
            continue;
        }

        auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
        for (auto&& dst : dstPrefs)
        {
            // Input layers use the mem copy workload or import, so the selected factory must
            // support either the map/unmap API or Import API
            ITensorHandleFactory* factory = registry.GetFactory(dst);
            if (!factory->SupportsMapUnmap() &&
                !CheckFlag(factory->GetImportFlags(), MemorySource::Malloc)) // Just support cpu mem imports for now
            {
                // The current tensor handle factory does not support the map/unmap or import
                // strategy, move to the next one
                continue;
            }

            auto it = factoryScores.find(dst);
            if (it == factoryScores.end())
            {
                // Add new score to the table
                factoryScores[dst] = 0;
                if (topChoice == ITensorHandleFactory::LegacyFactoryId)
                {
                    topChoice = dst;
                }
            }
            else
            {
                // Increase the score
                factoryScores[dst]++;

                // Track the best option
                if (factoryScores[dst] > topScore)
                {
                    topScore = factoryScores[dst];
                    topChoice = dst;
                }
            }
        }
    }

    return topChoice;
}

// Find the handle factory for the output layer which results in fewest required copies.
ITensorHandleFactory::FactoryId CalculateSlotOptionForOutput(BackendsMap& backends,
                                                             OutputSlot& slot,
                                                             TensorHandleFactoryRegistry& registry)
{
    IgnoreUnused(backends, slot, registry);
    return ITensorHandleFactory::DeferredFactoryId;
}

// For all handle factories supported on the source backend, we wish to find the one which requires the fewest copies
// when considering all connections.
ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
                                                    OutputSlot& outputSlot,
                                                    TensorHandleFactoryRegistry& registry)
{
    // First ensure the from backends can support the TensorHandle API
    Layer& layer = outputSlot.GetOwningLayer();
    auto frmBackend = backends.find(layer.GetBackendId());
    if (frmBackend == backends.end() ||
        !frmBackend->second->SupportsTensorAllocatorAPI())
    {
        return ITensorHandleFactory::LegacyFactoryId;
    }

    // Connections to Output Layers requires support for map/unmap on the TensorHandle.
    bool requiresMapUnmap = false;
    for (auto&& connection : outputSlot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();
        if (connectedLayer.GetType() == LayerType::Output)
        {
            requiresMapUnmap = true;
        }
    }

    IBackendInternal* srcBackend = frmBackend->second.get();
    auto srcPrefs = srcBackend->GetHandleFactoryPreferences();

    // Initialize the scores
    std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
    for (auto&& pref : srcPrefs)
    {
        if (requiresMapUnmap) // Only consider factories that support map/unmap if required
        {
            ITensorHandleFactory* factory = registry.GetFactory(pref);
            if (!factory->SupportsMapUnmap())
            {
                // The current tensor handle factory does not support the map/unmap strategy, move to the next one
                continue;
            }
        }

        auto it = factoryScores.find(pref);
        if (it == factoryScores.end())
        {
            // Add new score to the table
            factoryScores[pref] = 0;
        }
    }

    // Score each handle factory based on how many times it requires copies on the slot connections
    for (auto&& connection : outputSlot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();

        auto toBackend = backends.find(connectedLayer.GetBackendId());
        ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

        auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
        for (auto&& src : srcPrefs)
        {
            if (factoryScores.find(src) == factoryScores.end()) // Don't consider excluded factories
            {
                continue;
            }

            for (auto&& dst : dstPrefs)
            {
                if (RequiresCopy(src, dst, registry))
                {
                    // A copy would be required for this pairing, count it against the source factory
                    factoryScores[src]++;
                    break;
                }
            }
        }
    }

    // Find the lowest score
    int minScore = std::numeric_limits<int>::max();
    for (auto it : factoryScores)
    {
        minScore = std::min(minScore, it.second);
    }

    // Collect factories matching the best (lowest) score
    std::vector<ITensorHandleFactory::FactoryId> optimalFactories;
    for (auto it : factoryScores)
    {
        if (it.second == minScore)
        {
            optimalFactories.push_back(it.first);
        }
    }

    // For all compatible factories matching the best score, find the preferred one for the current layer.
    for (auto&& srcPref : srcPrefs)
    {
        for (auto&& comp : optimalFactories)
        {
            if (comp == srcPref)
            {
                return comp;
            }
        }
    }

    return ITensorHandleFactory::LegacyFactoryId;
}

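// Worked example of the scoring above (hypothetical factories F1/F2 and three
// outgoing connections): each connection that would force a copy adds one to
// the factory's score, the lowest total wins, and ties are broken by the
// source backend's preference order.
//
//     connection:        c1    c2    c3    score (lower is better)
//     F1 requires copy:  yes   no    yes   2
//     F2 requires copy:  no    no    no    0   <- selected
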
EdgeStrategy CalculateEdgeStrategy(BackendsMap& backends,
                                   ITensorHandleFactory::FactoryId srcFactoryId,
                                   const Layer& layer,
                                   const Layer& connectedLayer,
                                   TensorHandleFactoryRegistry& registry,
                                   bool importEnabled)
{
    auto toBackend = backends.find(connectedLayer.GetBackendId());
    ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

    auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();

    // Legacy API check for backward compatibility
    if (srcFactoryId == ITensorHandleFactory::LegacyFactoryId || dstPrefs.empty())
    {
        if (layer.GetBackendId() != connectedLayer.GetBackendId())
        {
            return EdgeStrategy::CopyToTarget;
        }
        else
        {
            return EdgeStrategy::DirectCompatibility;
        }
    }

    // TensorHandleFactory API present, so perform more sophisticated strategies.
    // Dst Output layers don't require copy because they use import or map/unmap
    if (connectedLayer.GetType() == LayerType::Output)
    {
        return EdgeStrategy::DirectCompatibility;
    }

    // Search for direct match in prefs
    for (auto&& pref : dstPrefs)
    {
        if (pref == srcFactoryId)
        {
            return EdgeStrategy::DirectCompatibility;
        }
    }

    // Search for export/import options
    ITensorHandleFactory* srcFactory = registry.GetFactory(srcFactoryId);
    if (srcFactory->GetExportFlags() != 0 && importEnabled)
    {
        for (auto&& pref : dstPrefs)
        {
            ITensorHandleFactory* dstFactory = registry.GetFactory(pref);

            // Handles cases when a destPref is not listed in TensorHandleFactoryRegistry
            if (!dstFactory) {
                continue;
            }

            if ((dstFactory->GetImportFlags() & srcFactory->GetExportFlags()) != 0)
            {
                auto srcCapability = srcFactory->GetCapabilities(&layer, &layer, CapabilityClass::PaddingRequired);
                auto dstCapability = dstFactory->GetCapabilities(&connectedLayer,
                                                                 &connectedLayer,
                                                                 CapabilityClass::PaddingRequired);
                // Do not require memory copy if the source and destination do not require padding.
                if (srcCapability.empty() && dstCapability.empty())
                {
                    return EdgeStrategy::ExportToTarget;
                }
            }
        }
    }

    // Search for copy options via map/unmap
    if (srcFactory->SupportsMapUnmap())
    {
        for (auto&& pref : dstPrefs)
        {
            ITensorHandleFactory* dstFactory = registry.GetFactory(pref);
            if (dstFactory && dstFactory->SupportsMapUnmap())
            {
                return EdgeStrategy::CopyToTarget;
            }
        }
    }

    return EdgeStrategy::Undefined;
}

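// Summary of the decision ladder above (sketch):
//
//     src factory is among dst preferences       -> DirectCompatibility (share the handle)
//     export/import masks intersect, no padding  -> ExportToTarget      (zero-copy import)
//     both sides support map/unmap               -> CopyToTarget        (mem copy workload)
//     none of the above                          -> Undefined           (reported as an error)
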
1433// Select the TensorHandleFactories and the corresponding memory strategy
1434OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
1435 BackendsMap& backends,
1436 TensorHandleFactoryRegistry& registry,
Narumol Prangnawarata2493a02020-08-19 14:39:07 +01001437 bool importEnabled,
Derek Lamberti84da38b2019-06-13 11:40:08 +01001438 Optional<std::vector<std::string>&> errMessages)
1439{
1440 OptimizationResult result;
1441
Narumol Prangnawarata2493a02020-08-19 14:39:07 +01001442 optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled](Layer* layer)
Derek Lamberti84da38b2019-06-13 11:40:08 +01001443 {
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001444 ARMNN_ASSERT(layer);
Derek Lamberti84da38b2019-06-13 11:40:08 +01001445
 1446        // Let's make sure the backend is in our list of supported backends. Something went wrong during backend
 1447        // assignment if this check fails.
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001448 ARMNN_ASSERT(backends.find(layer->GetBackendId()) != backends.end());
Derek Lamberti84da38b2019-06-13 11:40:08 +01001449
1450 // Check each output separately
1451 for (unsigned int slotIdx = 0; slotIdx < layer->GetNumOutputSlots(); slotIdx++)
1452 {
1453 OutputSlot& outputSlot = layer->GetOutputSlot(slotIdx);
1454
1455 ITensorHandleFactory::FactoryId slotOption = ITensorHandleFactory::LegacyFactoryId;
1456
 1457            // Choose the factory that results in the fewest copies being made.
1458 switch(layer->GetType())
1459 {
1460 case LayerType::Input:
1461 slotOption = CalculateSlotOptionForInput(backends, outputSlot, registry);
1462 break;
1463 case LayerType::Output:
1464 slotOption = CalculateSlotOptionForOutput(backends, outputSlot, registry);
1465 break;
1466 default:
1467 slotOption = CalculateSlotOption(backends, outputSlot, registry);
1468 break;
1469 }
1470 outputSlot.SetTensorHandleFactory(slotOption);
1471
Derek Lambertif674aa02019-08-01 15:56:25 +01001472 // Now determine the "best" edge strategy for each connection given the slotOption.
Derek Lamberti84da38b2019-06-13 11:40:08 +01001473 unsigned int connectionIdx = 0;
1474 for (auto&& connection : outputSlot.GetConnections())
1475 {
1476 const Layer& connectedLayer = connection->GetOwningLayer();
1477
Narumol Prangnawarata2493a02020-08-19 14:39:07 +01001478 EdgeStrategy strategy = CalculateEdgeStrategy(backends, slotOption, *layer, connectedLayer,
1479 registry, importEnabled);
Derek Lamberti84da38b2019-06-13 11:40:08 +01001480
Derek Lambertif674aa02019-08-01 15:56:25 +01001481 if (strategy == EdgeStrategy::Undefined)
Derek Lamberti84da38b2019-06-13 11:40:08 +01001482 {
1483 result.m_Error = true;
1484 if (errMessages)
1485 {
 1486                        errMessages.value().emplace_back("Could not find a valid strategy required for compatibility"
1487 " between backends.");
1488 }
1489 return;
1490 }
1491
Derek Lambertif674aa02019-08-01 15:56:25 +01001492 outputSlot.SetEdgeStrategy(connectionIdx, strategy);
Derek Lamberti84da38b2019-06-13 11:40:08 +01001493
1494 connectionIdx++;
1495 }
1496 }
1497 });
1498
1499 return result;
1500}
1501
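// A minimal usage sketch for Optimize() (illustrative only; assumes a network and a
// runtime have been created elsewhere, and that the listed backends are registered):
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
//     armnn::OptimizerOptions options;
//     options.m_ReduceFp32ToFp16 = true;
//     armnn::IOptimizedNetworkPtr optNet =
//         armnn::Optimize(*network, backends, runtime->GetDeviceSpec(), options);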
Matteo Martincigh49124022019-01-11 13:25:59 +00001502IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
1503 const std::vector<BackendId>& backendPreferences,
1504 const IDeviceSpec& deviceSpec,
1505 const OptimizerOptions& options,
Rob Hughes23214432019-11-05 11:27:36 +00001506 Optional<std::vector<std::string>&> messages)
Matteo Martincigh49124022019-01-11 13:25:59 +00001507{
1508 if (backendPreferences.empty())
1509 {
Mike Kelly3a613cc2020-09-29 20:50:35 +01001510 throw InvalidArgumentException("Invoked Optimize with no backends specified");
Matteo Martincigh49124022019-01-11 13:25:59 +00001511 }
1512
Narumol Prangnawaratbc7ffb52020-03-20 15:01:01 +00001513 if (options.m_ReduceFp32ToFp16 && options.m_ReduceFp32ToBf16)
1514 {
1515 throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
1516 }
1517
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001518 std::unique_ptr<Graph> graph = std::make_unique<Graph>(inNetwork.pNetworkImpl->GetGraph());
Matteo Martincigh49124022019-01-11 13:25:59 +00001519
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001520 auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), options.m_ModelOptions),
Sadik Armagan045f6be2020-09-10 13:37:32 +01001521 &IOptimizedNetwork::Destroy);
Matteo Martincigh49124022019-01-11 13:25:59 +00001522
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001523 IOptimizedNetwork* optNetObjPtr = optNet.get();
Matteo Martincigh49124022019-01-11 13:25:59 +00001524
Matteo Martincighadddddb2019-01-24 14:06:23 +00001525 // Get the optimized graph
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001526 Graph& optGraph = optNetObjPtr->pOptimizedNetworkImpl->GetGraph();
Matteo Martincighadddddb2019-01-24 14:06:23 +00001527
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001528 // Perform AddBroadcastReshapeLayer optimisation
1529 using namespace optimizations;
1530 Optimizer::Pass(optGraph, MakeOptimizations(AddBroadcastReshapeLayer()));
1531
Narumol Prangnawaratbbf71a62020-09-07 14:05:22 +01001532 // Infer the tensor infos for all output slots. Throws an exception on failure
1533 optGraph.InferTensorInfos();
1534
Matteo Martincigh49124022019-01-11 13:25:59 +00001535 // Perform optimisation passes
Matteo Martincighadddddb2019-01-24 14:06:23 +00001536 Optimizer::Pass(optGraph, MakeOptimizations(SquashEqualPermuteSiblings(),
Mike Kelly490b7be2020-03-03 12:39:09 +00001537 SquashEqualTransposeSiblings(),
Matteo Martincighadddddb2019-01-24 14:06:23 +00001538 SquashEqualReshapeSiblings(),
1539 OptimizeInversePermutes(),
Mike Kelly490b7be2020-03-03 12:39:09 +00001540 OptimizeInverseTransposes(),
Matteo Martincighadddddb2019-01-24 14:06:23 +00001541 MovePermuteUp(),
Mike Kelly490b7be2020-03-03 12:39:09 +00001542 MoveTransposeUp(),
Matteo Martincighadddddb2019-01-24 14:06:23 +00001543 PermuteAsReshape(),
Mike Kelly490b7be2020-03-03 12:39:09 +00001544 TransposeAsReshape(),
Nina Drozd861985f2019-04-18 14:48:51 +01001545 OptimizeConsecutiveReshapes(),
Rob Hughes3a7d3a72019-09-24 16:59:56 +01001546 FoldPadIntoConvolution2d(),
Mike Kelly490b7be2020-03-03 12:39:09 +00001547 PermuteAndBatchToSpaceAsDepthToSpace(),
Teresa Charlin06e03002020-10-15 13:16:07 +01001548 TransposeAndBatchToSpaceAsDepthToSpace(),
Mike Kelly90231b82020-11-05 15:44:56 +00001549 FuseBatchNormIntoConvolution2DFloat32(),
1550 FuseBatchNormIntoConvolution2DFloat16(),
1551 FuseBatchNormIntoDepthwiseConvolution2DFloat32(),
1552 FuseBatchNormIntoDepthwiseConvolution2DFloat16()));
Matteo Martincigh49124022019-01-11 13:25:59 +00001553
Matteo Martincigh49124022019-01-11 13:25:59 +00001554    // If the Fp32-to-Fp16 optimization is enabled, convert the Fp32 network to Fp16
1555 if (options.m_ReduceFp32ToFp16)
1556 {
Matteo Martincighadddddb2019-01-24 14:06:23 +00001557 Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToFp16Converter()));
Derek Lambertidd6804b2019-11-27 09:29:57 +00001558 Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
Matteo Martincigh49124022019-01-11 13:25:59 +00001559 }
1560
Narumol Prangnawaratbc7ffb52020-03-20 15:01:01 +00001561    // If the Fp32-to-Bf16 optimization is enabled, convert the Fp32 network to Bf16:
Narumol Prangnawarat57ef0082020-03-26 09:20:43 +00001562    // the inputs of Convolution2d and FullyConnected layers are converted from Fp32 to Bf16,
 1563    // and only their constant weights are converted from Fp32 to Bf16.
Narumol Prangnawaratbc7ffb52020-03-20 15:01:01 +00001564 if (options.m_ReduceFp32ToBf16)
1565 {
1566 Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToBf16Converter()));
Narumol Prangnawaratbc7ffb52020-03-20 15:01:01 +00001567 }
1568
Matteo Martincigh49124022019-01-11 13:25:59 +00001569 // Initialize backend settings
1570 BackendSettings backendSettings(backendPreferences, deviceSpec);
1571 if (backendSettings.GetAvailablePreferredBackends().empty())
1572 {
1573 std::stringstream failureMsg;
1574 failureMsg << "None of the preferred backends " << backendPreferences
1575 << " are supported. Current platform provides " << backendSettings.m_SupportedBackends;
Rob Hughes23214432019-11-05 11:27:36 +00001576 ReportError(failureMsg.str(), messages);
Mike Kelly3a613cc2020-09-29 20:50:35 +01001577 throw InvalidArgumentException(failureMsg.str());
Matteo Martincigh49124022019-01-11 13:25:59 +00001578 }
1579
Derek Lamberti84da38b2019-06-13 11:40:08 +01001580 // Create a map to temporarily hold initialized backend objects
1581 TensorHandleFactoryRegistry tensorHandleFactoryRegistry;
1582 BackendsMap backends = CreateSupportedBackends(tensorHandleFactoryRegistry, backendSettings);
1583
Matteo Martincigh49124022019-01-11 13:25:59 +00001584 // Assign an available backend to each layer
Matteo Martincighadddddb2019-01-24 14:06:23 +00001585 Graph::Iterator firstLayer = optGraph.begin();
1586 Graph::Iterator lastLayer = optGraph.end();
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001587 OptimizationResult assignBackendsResult = AssignBackends(optNetObjPtr->pOptimizedNetworkImpl.get(),
Derek Lamberti84da38b2019-06-13 11:40:08 +01001588 backendSettings,
1589 firstLayer,
1590 lastLayer,
Rob Hughes23214432019-11-05 11:27:36 +00001591 messages);
Derek Lamberti84da38b2019-06-13 11:40:08 +01001592 if (assignBackendsResult.m_Error)
Matteo Martincigh49124022019-01-11 13:25:59 +00001593 {
1594 // Failed to assign a backend to each layer
Mike Kelly3a613cc2020-09-29 20:50:35 +01001595 throw InvalidArgumentException("Failed to assign a backend to each layer");
jimfly016b0b53d2018-10-08 14:43:01 +01001596 }
telsoa01c577f2c2018-08-31 09:22:23 +01001597
Matteo Martincighadddddb2019-01-24 14:06:23 +00001598 Optimizer::Pass(optGraph, MakeOptimizations(OptimizeInverseConversionsFp16(),
1599 OptimizeInverseConversionsFp32()));
telsoa01c577f2c2018-08-31 09:22:23 +01001600
Matteo Martincighadddddb2019-01-24 14:06:23 +00001601 // Apply the backend-specific optimizations
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001602 OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(optNetObjPtr->pOptimizedNetworkImpl.get(),
Matteo Martincighadddddb2019-01-24 14:06:23 +00001603 backendSettings,
Derek Lamberti84da38b2019-06-13 11:40:08 +01001604 backends,
Mike Kelly07810fc2020-11-12 10:58:48 +00001605 options.m_ModelOptions,
Rob Hughes23214432019-11-05 11:27:36 +00001606 messages);
Matteo Martincighadddddb2019-01-24 14:06:23 +00001607 if (backendOptimizationResult.m_Error)
Matteo Martincigh49124022019-01-11 13:25:59 +00001608 {
Matteo Martincighadddddb2019-01-24 14:06:23 +00001609 // Failed to apply the backend-specific optimizations
Mike Kelly3a613cc2020-09-29 20:50:35 +01001610 throw InvalidArgumentException("Failed to apply the backend-specific optimizations");
Matteo Martincigh49124022019-01-11 13:25:59 +00001611 }
1612
Matteo Martincighadddddb2019-01-24 14:06:23 +00001613 // If the debug flag is set, then insert a DebugLayer after each layer
1614 // Doing this after applying the backend optimizations as they might have changed some layers
1615 if (options.m_Debug)
1616 {
1617 Optimizer::Pass(optGraph, MakeOptimizations(InsertDebugLayer()));
1618 }
1619
Derek Lamberti84da38b2019-06-13 11:40:08 +01001620 // Calculate the compatibility strategies for tensor handles
1621 OptimizationResult strategyResult = SelectTensorHandleStrategy(optGraph,
1622 backends,
1623 tensorHandleFactoryRegistry,
Narumol Prangnawarata2493a02020-08-19 14:39:07 +01001624 options.m_ImportEnabled,
Rob Hughes23214432019-11-05 11:27:36 +00001625 messages);
Derek Lamberti84da38b2019-06-13 11:40:08 +01001626 if (strategyResult.m_Error)
1627 {
 1628        // Failed to select the tensor handle factories and edge strategies
1629 return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
1630 }
1631
1632 // Based on the tensor handle strategy determined above, insert copy layers where required.
Derek Lambertif674aa02019-08-01 15:56:25 +01001633 optGraph.AddCompatibilityLayers(backends, tensorHandleFactoryRegistry);
telsoa01c577f2c2018-08-31 09:22:23 +01001634
1635 // Convert constants
Matteo Martincighadddddb2019-01-24 14:06:23 +00001636 Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
1637 Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsHalfToFloat()));
telsoa01c577f2c2018-08-31 09:22:23 +01001638
Derek Lamberti84da38b2019-06-13 11:40:08 +01001639 // Run backend specific optimizations (deprecated)
Matteo Martincigh49124022019-01-11 13:25:59 +00001640 for (auto&& chosenBackend : backendSettings.m_SelectedBackends)
David Beck263e3492018-11-09 14:46:40 +00001641 {
1642 auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
1643 auto backendPtr = factoryFun();
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001644 ARMNN_ASSERT(backendPtr.get() != nullptr);
David Beck263e3492018-11-09 14:46:40 +00001645
Matteo Martincighed735042019-05-22 09:42:43 +01001646 ARMNN_NO_DEPRECATE_WARN_BEGIN
David Beck263e3492018-11-09 14:46:40 +00001647 auto backendSpecificOptimizations = backendPtr->GetOptimizations();
Matteo Martincighed735042019-05-22 09:42:43 +01001648 ARMNN_NO_DEPRECATE_WARN_END
1649
David Beck263e3492018-11-09 14:46:40 +00001650 if (!backendSpecificOptimizations.empty())
1651 {
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001652 Optimizer::Pass(optNetObjPtr->pOptimizedNetworkImpl->GetGraph(), backendSpecificOptimizations);
David Beck263e3492018-11-09 14:46:40 +00001653 }
1654 }
1655
telsoa01c577f2c2018-08-31 09:22:23 +01001656 return optNet;
telsoa014fcda012018-03-09 14:13:49 +00001657}
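
// Returns true when the caller requested the InferAndValidate shape inference method
// via the "ShapeInferenceMethod" network option, false otherwise. A sketch of how such
// an option might be constructed (the option name shown is an assumption):
//
//     armnn::BackendOptions shapeOption("ShapeInferenceMethod",
//                                       { { "InferAndValidate", true } });
//     armnn::INetworkPtr network = armnn::INetwork::Create({ shapeOption });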
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001658bool NetworkImpl::GetShapeInferenceMethod()
telsoa014fcda012018-03-09 14:13:49 +00001659{
Finn Williamsf24effa2020-07-03 10:12:03 +01001660 if (m_NetworkOptions.size() > 0 && m_NetworkOptions[0].GetBackendId().Get() == "ShapeInferenceMethod")
1661 {
1662 return m_NetworkOptions[0].GetOption(0).GetValue().AsBool();
1663 }
1664
1665 return false;
telsoa014fcda012018-03-09 14:13:49 +00001666}
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001667NetworkImpl::NetworkImpl(NetworkOptions networkOptions)
Finn Williamsf24effa2020-07-03 10:12:03 +01001668: m_NetworkOptions(networkOptions),
1669 m_Graph(std::make_unique<Graph>(GetShapeInferenceMethod()))
1670{}
telsoa014fcda012018-03-09 14:13:49 +00001671
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001672NetworkImpl::~NetworkImpl()
telsoa014fcda012018-03-09 14:13:49 +00001673{
1674}
1675
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001676Status NetworkImpl::PrintGraph()
Jan Eilers99d9d4a2019-11-06 10:02:16 +00001677{
1678 m_Graph->Print();
1679 return Status::Success;
1680}
1681
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001682IConnectableLayer* NetworkImpl::AddInputLayer(LayerBindingId id, const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001683{
1684 return m_Graph->AddLayer<InputLayer>(id, name);
1685}
1686
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001687IConnectableLayer* NetworkImpl::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00001688 const char* name)
1689{
1690 return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
1691}
1692
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001693IConnectableLayer* NetworkImpl::AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001694 const char* name)
1695{
1696 return m_Graph->AddLayer<ComparisonLayer>(comparisonDescriptor, name);
1697}
1698
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001699IConnectableLayer* NetworkImpl::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
josh minor4a3c6102020-01-06 16:40:46 -06001700 const char* name)
1701{
1702 return m_Graph->AddLayer<ElementwiseUnaryLayer>(elementwiseUnaryDescriptor, name);
1703}
1704
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001705IConnectableLayer* NetworkImpl::AddFillLayer(const FillDescriptor& fillDescriptor,
Ryan OSheaec6c6802020-06-05 17:17:06 +01001706 const char* name)
1707{
1708 return m_Graph->AddLayer<FillLayer>(fillDescriptor, name);
1709}
1710
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001711IConnectableLayer* NetworkImpl::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001712 const ConstTensor& weights,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001713 const Optional<ConstTensor>& biases,
telsoa01c577f2c2018-08-31 09:22:23 +01001714 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001715{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001716 if (fullyConnectedDescriptor.m_BiasEnabled && !biases.has_value())
telsoa014fcda012018-03-09 14:13:49 +00001717 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001718 throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be empty");
telsoa014fcda012018-03-09 14:13:49 +00001719 }
1720
1721 const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);
1722
1723 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
1724
1725 if (fullyConnectedDescriptor.m_BiasEnabled)
1726 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001727 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
telsoa014fcda012018-03-09 14:13:49 +00001728 }
1729
1730 return layer;
1731}
1732
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001733IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001734 const ConstTensor& weights,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001735 const Optional<ConstTensor>& biases,
telsoa01c577f2c2018-08-31 09:22:23 +01001736 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001737{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001738 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
telsoa014fcda012018-03-09 14:13:49 +00001739}
1740
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001741IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001742 const ConstTensor& weights,
1743 const char* name)
1744{
Matteo Martincighfc598e12019-05-14 10:36:13 +01001745 Optional<ConstTensor> biases;
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001746 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
1747}
1748
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001749IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001750 const ConstTensor& weights,
1751 const ConstTensor& biases,
1752 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001753{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001754 Optional<ConstTensor> optionalBiases(biases);
1755 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, optionalBiases, name);
telsoa014fcda012018-03-09 14:13:49 +00001756}
1757
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001758IConnectableLayer* NetworkImpl::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
Jim Flynn906f9462019-05-10 13:55:21 +01001759 const char* name)
1760{
Jim Flynne242f2d2019-05-22 14:24:13 +01001761 return m_Graph->AddLayer<ConcatLayer>(concatDescriptor, name);
Jim Flynn906f9462019-05-10 13:55:21 +01001762}
1763
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001764IConnectableLayer* NetworkImpl::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
1765 const ConstTensor& weights,
1766 const Optional<ConstTensor>& biases,
1767 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001768{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001769 if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
telsoa014fcda012018-03-09 14:13:49 +00001770 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001771 throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be empty");
telsoa014fcda012018-03-09 14:13:49 +00001772 }
1773
1774 const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);
1775
1776 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
1777
1778 if (convolution2dDescriptor.m_BiasEnabled)
1779 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001780 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
telsoa014fcda012018-03-09 14:13:49 +00001781 }
1782
1783 return layer;
1784}
1785
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001786IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001787 const ConstTensor& weights,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001788 const Optional<ConstTensor>& biases,
telsoa01c577f2c2018-08-31 09:22:23 +01001789 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001790{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001791 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
telsoa014fcda012018-03-09 14:13:49 +00001792}
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001793
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001794IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001795 const ConstTensor& weights,
1796 const char* name)
1797{
Matteo Martincighfc598e12019-05-14 10:36:13 +01001798 Optional<ConstTensor> biases;
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001799 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
1800}
1801
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001802IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001803 const ConstTensor& weights,
1804 const ConstTensor& biases,
1805 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001806{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001807 Optional<ConstTensor> optionalBiases(biases);
1808 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
telsoa014fcda012018-03-09 14:13:49 +00001809}
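
// A sketch of adding a convolution through this API (the shapes and the weightsData
// buffer are illustrative assumptions):
//
//     armnn::Convolution2dDescriptor desc;
//     desc.m_StrideX = 1;
//     desc.m_StrideY = 1;
//     armnn::TensorInfo weightsInfo({ 8, 3, 3, 3 }, armnn::DataType::Float32);
//     armnn::ConstTensor weights(weightsInfo, weightsData);
//     armnn::IConnectableLayer* conv =
//         network.AddConvolution2dLayer(desc, weights, armnn::EmptyOptional(), "conv1");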
1810
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001811IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayerImpl(
telsoa014fcda012018-03-09 14:13:49 +00001812 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
1813 const ConstTensor& weights,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001814 const Optional<ConstTensor>& biases,
telsoa014fcda012018-03-09 14:13:49 +00001815 const char* name)
1816{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001817 if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
telsoa014fcda012018-03-09 14:13:49 +00001818 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001819 throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be empty");
telsoa014fcda012018-03-09 14:13:49 +00001820 }
1821
Matteo Martincigh3d6898c2019-01-15 16:11:44 +00001822 const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);
telsoa014fcda012018-03-09 14:13:49 +00001823
1824 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
1825
1826 if (convolution2dDescriptor.m_BiasEnabled)
1827 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001828 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
telsoa014fcda012018-03-09 14:13:49 +00001829 }
1830
1831 return layer;
1832}
1833
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001834IConnectableLayer* NetworkImpl::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
Aron Virginas-Tardd6247f2019-09-19 14:31:17 +01001835 const char* name)
1836{
1837 return m_Graph->AddLayer<DepthToSpaceLayer>(depthToSpaceDescriptor, name);
1838}
1839
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001840IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001841 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
1842 const ConstTensor& weights,
1843 const Optional<ConstTensor>& biases,
1844 const char* name)
1845{
1846 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
1847}
1848
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001849IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
telsoa014fcda012018-03-09 14:13:49 +00001850 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
1851 const ConstTensor& weights,
1852 const char* name)
1853{
Matteo Martincighfc598e12019-05-14 10:36:13 +01001854 Optional<ConstTensor> biases;
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001855 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
telsoa014fcda012018-03-09 14:13:49 +00001856}
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001857
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001858IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
telsoa014fcda012018-03-09 14:13:49 +00001859 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
1860 const ConstTensor& weights,
1861 const ConstTensor& biases,
1862 const char* name)
1863{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001864 Optional<ConstTensor> optionalBiases(biases);
1865 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
telsoa014fcda012018-03-09 14:13:49 +00001866}
1867
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001868IConnectableLayer* NetworkImpl::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
Narumol Prangnawarat6d302bf2019-02-04 11:46:26 +00001869 const ConstTensor& anchors, const char* name)
Narumol Prangnawarat94dd5d82019-01-23 18:06:26 +00001870{
Narumol Prangnawarat6d302bf2019-02-04 11:46:26 +00001871 const auto layer = m_Graph->AddLayer<DetectionPostProcessLayer>(descriptor, name);
1872
1873 layer->m_Anchors = std::make_unique<ScopedCpuTensorHandle>(anchors);
1874
1875 return layer;
Narumol Prangnawarat94dd5d82019-01-23 18:06:26 +00001876}
1877
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001878IConnectableLayer* NetworkImpl::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00001879 const char* name)
1880{
1881 return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
1882}
1883
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001884IConnectableLayer* NetworkImpl::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00001885 const char* name)
1886{
1887 return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
1888}
1889
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001890IConnectableLayer* NetworkImpl::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00001891 const char* name)
1892{
1893 return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
1894}
1895
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001896IConnectableLayer* NetworkImpl::AddArgMinMaxLayer(const ArgMinMaxDescriptor& argMinMaxDescriptor,
Nikhil Rajee391d52019-09-05 17:50:44 +01001897 const char* name)
1898{
1899 return m_Graph->AddLayer<ArgMinMaxLayer>(argMinMaxDescriptor, name);
1900}
1901
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001902IConnectableLayer* NetworkImpl::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00001904 const char* name)
1905{
1906 return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
1907}
1908
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001909IConnectableLayer* NetworkImpl::AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name)
Aron Virginas-Tar636ab402019-09-16 14:27:45 +01001910{
1911 return m_Graph->AddLayer<SliceLayer>(sliceDescriptor, name);
1912}
1913
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001914IConnectableLayer* NetworkImpl::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00001915 const char* name)
1916{
1917 return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
1918}
1919
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001920IConnectableLayer* NetworkImpl::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00001921 const char* name)
1922{
1923 return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
1924}
1925
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001926IConnectableLayer* NetworkImpl::AddMaximumLayer(const char* name)
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00001927{
1928 return m_Graph->AddLayer<MaximumLayer>(name);
1929}
1930
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001931IConnectableLayer* NetworkImpl::AddMinimumLayer(const char* name)
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00001932{
1933 return m_Graph->AddLayer<MinimumLayer>(name);
1934}
1935
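// Legacy entry point: Merger has been superseded by Concat, so this simply forwards
// to AddConcatLayer with the same descriptor.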
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001936IConnectableLayer* NetworkImpl::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
Jim Flynn906f9462019-05-10 13:55:21 +01001937 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001938{
Jim Flynne242f2d2019-05-22 14:24:13 +01001939 return AddConcatLayer(mergerDescriptor, name);
telsoa014fcda012018-03-09 14:13:49 +00001940}
1941
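// Legacy entry point: Abs is now expressed as an ElementwiseUnary layer with
// UnaryOperation::Abs.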
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001942IConnectableLayer* NetworkImpl::AddAbsLayer(const char * name)
Kevin May868eb142019-09-04 17:29:31 +01001943{
josh minor4a3c6102020-01-06 16:40:46 -06001944 return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
Kevin May868eb142019-09-04 17:29:31 +01001945}
1946
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001947IConnectableLayer* NetworkImpl::AddAdditionLayer(const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001948{
1949 return m_Graph->AddLayer<AdditionLayer>(name);
1950}
1951
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001952IConnectableLayer* NetworkImpl::AddMultiplicationLayer(const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001953{
1954 return m_Graph->AddLayer<MultiplicationLayer>(name);
1955}
1956
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001957IConnectableLayer* NetworkImpl::AddOutputLayer(LayerBindingId id, const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001958{
1959 return m_Graph->AddLayer<OutputLayer>(id, name);
1960}
1961
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001962IConnectableLayer* NetworkImpl::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
telsoa014fcda012018-03-09 14:13:49 +00001963 const ConstTensor& mean,
1964 const ConstTensor& variance,
1965 const ConstTensor& beta,
1966 const ConstTensor& gamma,
1967 const char* name)
1968{
1969 const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);
1970
1971 layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
1972 layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
1973 layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
1974 layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);
1975
1976 return layer;
1977}
1978
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001979IConnectableLayer* NetworkImpl::AddRankLayer(const char* name)
Finn Williams2605b232020-06-10 15:53:46 +01001980{
1981 return m_Graph->AddLayer<RankLayer>(name);
1982}
1983
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001984IConnectableLayer* NetworkImpl::AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
1985 const char* name)
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00001986{
1987 return m_Graph->AddLayer<ReduceLayer>(reduceDescriptor, name);
1988}
1989
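// Legacy entry point: forwards to a ResizeLayer configured with ResizeMethod::Bilinear,
// copying the fields of the ResizeBilinearDescriptor across.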
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00001990IConnectableLayer* NetworkImpl::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
1991 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001992{
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001993 ResizeDescriptor resizeDescriptor;
David Monahan4a0c9b92020-05-30 09:48:39 +01001994 resizeDescriptor.m_Method = ResizeMethod::Bilinear;
1995 resizeDescriptor.m_DataLayout = descriptor.m_DataLayout;
1996 resizeDescriptor.m_TargetWidth = descriptor.m_TargetWidth;
1997 resizeDescriptor.m_TargetHeight = descriptor.m_TargetHeight;
1998 resizeDescriptor.m_AlignCorners = descriptor.m_AlignCorners;
1999 resizeDescriptor.m_HalfPixelCenters = descriptor.m_HalfPixelCenters;
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002000
2001 return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
telsoa014fcda012018-03-09 14:13:49 +00002002}
2003
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002004IConnectableLayer* NetworkImpl::AddResizeLayer(const ResizeDescriptor& resizeDescriptor, const char* name)
Teresa Charlina9075df2019-06-27 15:41:57 +01002005{
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002006 return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
Teresa Charlina9075df2019-06-27 15:41:57 +01002007}
2008
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002009IConnectableLayer* NetworkImpl::AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
2010 const char* name)
Kevin Mayce5045a2019-10-02 14:07:47 +01002011{
2012 return m_Graph->AddLayer<InstanceNormalizationLayer>(desc, name);
2013}
2014
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002015IConnectableLayer* NetworkImpl::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
2016 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002017{
Matteo Martincighbcd3c852018-09-28 14:14:12 +01002018 return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
telsoa014fcda012018-03-09 14:13:49 +00002019}
2020
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002021IConnectableLayer* NetworkImpl::AddLogSoftmaxLayer(const LogSoftmaxDescriptor& desc,
Aron Virginas-Tarf982dea2019-10-11 14:07:53 +01002022 const char* name)
2023{
2024 return m_Graph->AddLayer<LogSoftmaxLayer>(desc, name);
2025}
2026
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002027IConnectableLayer* NetworkImpl::AddConstantLayer(const ConstTensor& input, const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002028{
telsoa01c577f2c2018-08-31 09:22:23 +01002029 auto layer = m_Graph->AddLayer<ConstantLayer>(name);
2030
2031 layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);
2032
2033 return layer;
telsoa014fcda012018-03-09 14:13:49 +00002034}
2035
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002036IConnectableLayer* NetworkImpl::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01002037 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002038{
2039 return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
2040}
2041
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002042IConnectableLayer* NetworkImpl::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00002043 const char* name)
2044{
2045 return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
2046}
2047
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002048IConnectableLayer* NetworkImpl::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
Aron Virginas-Tar972af152019-06-11 14:14:03 +01002049 const char* name)
2050{
2051 return m_Graph->AddLayer<SpaceToDepthLayer>(spaceToDepthDescriptor, name);
2052}
2053
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002054IConnectableLayer* NetworkImpl::AddFloorLayer(const char* name)
telsoa014fcda012018-03-09 14:13:49 +00002055{
2056 return m_Graph->AddLayer<FloorLayer>(name);
2057}
2058
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002059IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01002060 const LstmInputParams& params,
2061 const char* name)
2062{
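    // Validation summary: the nine basic weights and biases are always required;
    // the CIFG, projection, peephole and layer-normalization tensors are only
    // required when the corresponding descriptor flag enables them (checked below).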
2063 const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);
2064
2065 //Lstm Basic Parameters
2066 layer->m_BasicParameters.m_InputToForgetWeights =
2067 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
2068 layer->m_BasicParameters.m_InputToCellWeights =
2069 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
2070 layer->m_BasicParameters.m_InputToOutputWeights =
2071 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
2072 layer->m_BasicParameters.m_RecurrentToForgetWeights =
2073 std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
2074 layer->m_BasicParameters.m_RecurrentToCellWeights =
2075 std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
2076 layer->m_BasicParameters.m_RecurrentToOutputWeights =
2077 std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
2078 layer->m_BasicParameters.m_ForgetGateBias =
2079 std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
2080 layer->m_BasicParameters.m_CellBias =
2081 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
2082 layer->m_BasicParameters.m_OutputGateBias =
2083 std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));
2084
2085 //Lstm Cifg parameters
2086 if(!descriptor.m_CifgEnabled)
2087 {
2088 if(params.m_InputToInputWeights == nullptr)
2089 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002090 throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL "
2091 "when CIFG is disabled.");
telsoa01c577f2c2018-08-31 09:22:23 +01002092 }
2093 if(params.m_RecurrentToInputWeights == nullptr)
2094 {
2095 throw InvalidArgumentException(
Jan Eilerse2062cd2020-03-30 15:07:45 +01002096 "AddLstmLayer: Recurrent To Input Weights cannot be NULL "
2097 "when CIFG is disabled.");
telsoa01c577f2c2018-08-31 09:22:23 +01002098 }
2099 if(params.m_InputGateBias == nullptr)
2100 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002101 throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL "
2102 "when CIFG is disabled.");
telsoa01c577f2c2018-08-31 09:22:23 +01002103 }
2104 layer->m_CifgParameters.m_InputToInputWeights =
2105 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
2106 layer->m_CifgParameters.m_RecurrentToInputWeights =
2107 std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
telsoa01c577f2c2018-08-31 09:22:23 +01002108 layer->m_CifgParameters.m_InputGateBias =
2109 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
2110 }
2111
2112 //Lstm projection parameters
2113 if(descriptor.m_ProjectionEnabled)
2114 {
2115 if(params.m_ProjectionWeights == nullptr)
2116 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002117 throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL "
2118 "when projection is enabled.");
telsoa01c577f2c2018-08-31 09:22:23 +01002119 }
2120 layer->m_ProjectionParameters.m_ProjectionWeights =
2121 std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
2122 if(params.m_ProjectionBias != nullptr)
2123 {
2124 layer->m_ProjectionParameters.m_ProjectionBias =
2125 std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
2126 }
2127 }
2128
2129 //Lstm Peephole params
2130 if(descriptor.m_PeepholeEnabled)
2131 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002132 if(!descriptor.m_CifgEnabled)
2133 {
2134 if(params.m_CellToInputWeights == nullptr)
2135 {
2136 throw InvalidArgumentException("AddLstmLayer: Cell To Input Weights cannot be NULL "
2137 "when Peephole is enabled and CIFG disabled.");
2138 }
2139
2140 layer->m_PeepholeParameters.m_CellToInputWeights =
2141 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
2142 }
2143
telsoa01c577f2c2018-08-31 09:22:23 +01002144 if(params.m_CellToForgetWeights == nullptr)
2145 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002146 throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL "
2147 "when Peephole is enabled.");
telsoa01c577f2c2018-08-31 09:22:23 +01002148 }
2149 if(params.m_CellToOutputWeights == nullptr)
2150 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002151 throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL "
2152 "when Peephole is enabled.");
telsoa01c577f2c2018-08-31 09:22:23 +01002153 }
Jan Eilerse2062cd2020-03-30 15:07:45 +01002154
telsoa01c577f2c2018-08-31 09:22:23 +01002155 layer->m_PeepholeParameters.m_CellToForgetWeights =
2156 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
2157 layer->m_PeepholeParameters.m_CellToOutputWeights =
2158 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
2159 }
Jan Eilersf8c62972019-07-17 11:07:49 +01002160
2161 //Lstm Layer Normalization params
2162 if(descriptor.m_LayerNormEnabled)
2163 {
2164 if(!descriptor.m_CifgEnabled)
2165 {
2166 if(params.m_InputLayerNormWeights == nullptr)
2167 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002168 throw InvalidArgumentException("AddLstmLayer: Input layer normalization weights cannot be NULL "
2169 "when layer normalization is enabled and CIFG disabled.");
Jan Eilersf8c62972019-07-17 11:07:49 +01002170 }
2171 layer->m_LayerNormParameters.m_InputLayerNormWeights =
2172 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
2173 }
2174
2175 if(params.m_ForgetLayerNormWeights == nullptr)
2176 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002177 throw InvalidArgumentException("AddLstmLayer: Forget layer normalization weights cannot be NULL "
2178 "when layer normalization is enabled.");
Jan Eilersf8c62972019-07-17 11:07:49 +01002179 }
2180 if(params.m_CellLayerNormWeights == nullptr)
2181 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002182 throw InvalidArgumentException("AddLstmLayer: Cell layer normalization weights cannot be NULL "
2183 "when layer normalization is enabled.");
Jan Eilersf8c62972019-07-17 11:07:49 +01002184 }
2185 if(params.m_OutputLayerNormWeights == nullptr)
2186 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01002187 throw InvalidArgumentException("AddLstmLayer: Output layer normalization weights cannot be NULL "
2188 "when layer normalization is enabled.");
Jan Eilersf8c62972019-07-17 11:07:49 +01002189 }
2190 layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
2191 std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
2192 layer->m_LayerNormParameters.m_CellLayerNormWeights =
2193 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
2194 layer->m_LayerNormParameters.m_OutputLayerNormWeights =
2195 std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
2196 }
telsoa01c577f2c2018-08-31 09:22:23 +01002197 return layer;
2198}
2199
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002200IConnectableLayer* NetworkImpl::AddDivisionLayer(const char* name)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002201{
2202 return m_Graph->AddLayer<DivisionLayer>(name);
2203}
2204
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002205IConnectableLayer* NetworkImpl::AddSubtractionLayer(const char* name)
David Beck19526222018-09-12 16:00:08 +01002206{
2207 return m_Graph->AddLayer<SubtractionLayer>(name);
2208}
2209
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002210IConnectableLayer* NetworkImpl::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
narpra0132b90462018-09-13 11:07:48 +01002211{
2212 return m_Graph->AddLayer<MeanLayer>(meanDescriptor,name);
2213}
2214
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002215IConnectableLayer* NetworkImpl::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
Mohamed Nour Abouelseoud5662c202018-09-24 13:30:09 +01002216{
2217 return m_Graph->AddLayer<PadLayer>(padDescriptor,name);
2218}
2219
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002220IConnectableLayer *NetworkImpl::AddQuantizeLayer(const char *name)
Derek Lambertia9cca6a2019-03-25 15:41:58 +00002221{
2222 return m_Graph->AddLayer<QuantizeLayer>(name);
2223}
2224
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002225IConnectableLayer* NetworkImpl::AddDequantizeLayer(const char* name)
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00002226{
2227 return m_Graph->AddLayer<DequantizeLayer>(name);
2228}
2229
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002230IConnectableLayer* NetworkImpl::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
Conor Kennedy430b5d82018-11-14 15:28:28 +00002231 const char* name)
2232{
2233 return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
2234}
2235
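// Legacy entry points: Greater and Equal are now expressed as Comparison layers with
// the corresponding ComparisonOperation.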
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002236IConnectableLayer* NetworkImpl::AddGreaterLayer(const char* name)
Matteo Martincigh59a950c2018-12-13 12:48:25 +00002237{
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01002238 return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
Matteo Martincigh59a950c2018-12-13 12:48:25 +00002239}
2240
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002241IConnectableLayer* NetworkImpl::AddEqualLayer(const char* name)
FrancisMurtagh20995952018-12-17 12:11:36 +00002242{
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01002243 return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
FrancisMurtagh20995952018-12-17 12:11:36 +00002244}
2245
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002246IConnectableLayer* NetworkImpl::AddRsqrtLayer(const char * name)
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00002247{
josh minor4a3c6102020-01-06 16:40:46 -06002248 return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00002249}
2250
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002251IConnectableLayer* NetworkImpl::AddGatherLayer(const char* name)
narpra01b89b05f2019-01-16 09:53:09 +00002252{
Teresa Charlin52664732020-06-29 16:27:03 +01002253 GatherDescriptor gatherDescriptor{};
2254 return AddGatherLayer(gatherDescriptor, name);
2255}
2256
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002257IConnectableLayer* NetworkImpl::AddGatherLayer(const GatherDescriptor& gatherDescriptor,
Teresa Charlin52664732020-06-29 16:27:03 +01002258 const char* name)
2259{
2260 return m_Graph->AddLayer<GatherLayer>(gatherDescriptor, name);
narpra01b89b05f2019-01-16 09:53:09 +00002261}
2262
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002263IConnectableLayer* NetworkImpl::AddMergeLayer(const char* name)
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01002264{
2265 return m_Graph->AddLayer<MergeLayer>(name);
2266}
2267
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002268IConnectableLayer* NetworkImpl::AddSwitchLayer(const char* name)
Sadik Armaganeff363d2019-04-05 15:25:46 +01002269{
2270 return m_Graph->AddLayer<SwitchLayer>(name);
2271}
2272
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002273IConnectableLayer* NetworkImpl::AddPreluLayer(const char* name)
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01002274{
2275 return m_Graph->AddLayer<PreluLayer>(name);
2276}
2277
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002278IConnectableLayer* NetworkImpl::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01002279 const ConstTensor& weights,
2280 const Optional<ConstTensor>& biases,
2281 const char* name)
2282{
2283 if (descriptor.m_BiasEnabled && !biases.has_value())
2284 {
2285 throw InvalidArgumentException("AddTransposeConvolution2dLayer: Biases cannot be empty");
2286 }
2287
2288 const auto layer = m_Graph->AddLayer<TransposeConvolution2dLayer>(descriptor, name);
2289
2290 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
2291
2292 if (descriptor.m_BiasEnabled)
2293 {
2294 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
2295 }
2296
2297 return layer;
2298}
2299
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002300IConnectableLayer* NetworkImpl::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
Mike Kellyc9ea45a2020-02-28 18:11:58 +00002301 const char* name)
2302{
2303 return m_Graph->AddLayer<TransposeLayer>(transposeDescriptor, name);
2304}
2305
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002306IConnectableLayer* NetworkImpl::AddStackLayer(const StackDescriptor& stackDescriptor,
Matthew Jackson2b8c1da2019-07-04 14:59:16 +01002307 const char* name)
2308{
2309 return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
2310}
2311
Derek Lamberti013c3902019-10-21 10:46:16 +01002312
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002313IConnectableLayer* NetworkImpl::AddStandInLayer(const StandInDescriptor& desc,
Derek Lamberti013c3902019-10-21 10:46:16 +01002314 const char* name)
2315{
2316 return m_Graph->AddLayer<StandInLayer>(desc, name);
2317}
2318
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002319IConnectableLayer* NetworkImpl::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
James Conroyee18dc82019-07-17 11:27:46 +01002320 const char* name)
2321{
2322 const auto layer = m_Graph->AddLayer<QuantizedLstmLayer>(name);
2323
2324 // InputToX weights
2325 layer->m_QuantizedLstmParameters.m_InputToInputWeights =
Francis Murtaghbb590b42019-08-14 09:51:36 +01002326 std::make_unique<ScopedCpuTensorHandle>(params.GetInputToInputWeights());
James Conroyee18dc82019-07-17 11:27:46 +01002327 layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
Francis Murtaghbb590b42019-08-14 09:51:36 +01002328 std::make_unique<ScopedCpuTensorHandle>(params.GetInputToForgetWeights());
James Conroyee18dc82019-07-17 11:27:46 +01002329 layer->m_QuantizedLstmParameters.m_InputToCellWeights =
Francis Murtaghbb590b42019-08-14 09:51:36 +01002330 std::make_unique<ScopedCpuTensorHandle>(params.GetInputToCellWeights());
James Conroyee18dc82019-07-17 11:27:46 +01002331 layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
Francis Murtaghbb590b42019-08-14 09:51:36 +01002332 std::make_unique<ScopedCpuTensorHandle>(params.GetInputToOutputWeights());
James Conroyee18dc82019-07-17 11:27:46 +01002333
2334 // RecurrentToX weights
2335 layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
Francis Murtaghbb590b42019-08-14 09:51:36 +01002336 std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToInputWeights());
James Conroyee18dc82019-07-17 11:27:46 +01002337 layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
Francis Murtaghbb590b42019-08-14 09:51:36 +01002338 std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToForgetWeights());
James Conroyee18dc82019-07-17 11:27:46 +01002339 layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
Francis Murtaghbb590b42019-08-14 09:51:36 +01002340 std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToCellWeights());
James Conroyee18dc82019-07-17 11:27:46 +01002341 layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
Francis Murtaghbb590b42019-08-14 09:51:36 +01002342 std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToOutputWeights());
James Conroyee18dc82019-07-17 11:27:46 +01002343
2344 // Bias
2345 layer->m_QuantizedLstmParameters.m_InputGateBias =
Francis Murtaghbb590b42019-08-14 09:51:36 +01002346 std::make_unique<ScopedCpuTensorHandle>(params.GetInputGateBias());
James Conroyee18dc82019-07-17 11:27:46 +01002347 layer->m_QuantizedLstmParameters.m_ForgetGateBias =
Francis Murtaghbb590b42019-08-14 09:51:36 +01002348 std::make_unique<ScopedCpuTensorHandle>(params.GetForgetGateBias());
James Conroyee18dc82019-07-17 11:27:46 +01002349 layer->m_QuantizedLstmParameters.m_CellBias =
Francis Murtaghbb590b42019-08-14 09:51:36 +01002350 std::make_unique<ScopedCpuTensorHandle>(params.GetCellBias());
James Conroyee18dc82019-07-17 11:27:46 +01002351 layer->m_QuantizedLstmParameters.m_OutputGateBias =
Francis Murtaghbb590b42019-08-14 09:51:36 +01002352 std::make_unique<ScopedCpuTensorHandle>(params.GetOutputGateBias());
James Conroyee18dc82019-07-17 11:27:46 +01002353
2354 return layer;
2355}
2356
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002357IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor,
James Conroy586a9aa2020-03-20 08:49:33 +00002358 const LstmInputParams& params,
2359 const char* name)
2360{
2361 const auto layer = m_Graph->AddLayer<QLstmLayer>(descriptor, name);
2362
2363 // QLstm Basic Parameters
2364 layer->m_BasicParameters.m_InputToForgetWeights =
2365 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
2366 layer->m_BasicParameters.m_InputToCellWeights =
2367 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
2368 layer->m_BasicParameters.m_InputToOutputWeights =
2369 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
2370 layer->m_BasicParameters.m_RecurrentToForgetWeights =
2371 std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
2372 layer->m_BasicParameters.m_RecurrentToCellWeights =
2373 std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
2374 layer->m_BasicParameters.m_RecurrentToOutputWeights =
2375 std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
2376 layer->m_BasicParameters.m_ForgetGateBias =
2377 std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
2378 layer->m_BasicParameters.m_CellBias =
2379 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
2380 layer->m_BasicParameters.m_OutputGateBias =
2381 std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));
2382
2383 // QLstm Cifg parameters
2384 if(!descriptor.m_CifgEnabled)
2385 {
2386 if(params.m_InputToInputWeights == nullptr)
2387 {
2388 throw InvalidArgumentException("AddQLstmLayer: Input To Input Weights cannot be NULL");
2389 }
2390
2391 if(params.m_RecurrentToInputWeights == nullptr)
2392 {
2393 throw InvalidArgumentException(
2394 "AddQLstmLayer: Recurrent To Input Weights cannot be NULL");
2395 }
2396
2397 if(params.m_InputGateBias == nullptr)
2398 {
2399 throw InvalidArgumentException("AddQLstmLayer: Input Gate Bias cannot be NULL");
2400 }
2401
2402 layer->m_CifgParameters.m_InputToInputWeights =
2403 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
2404 layer->m_CifgParameters.m_RecurrentToInputWeights =
2405 std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
2406 layer->m_CifgParameters.m_InputGateBias =
2407 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
2408 }
2409
2410 // QLstm Projection parameters
2411 if(descriptor.m_ProjectionEnabled)
2412 {
2413 if(params.m_ProjectionWeights == nullptr)
2414 {
2415 throw InvalidArgumentException("AddQLstmLayer: Projection Weights cannot be NULL");
2416 }
2417
James Conroy586a9aa2020-03-20 08:49:33 +00002418 layer->m_ProjectionParameters.m_ProjectionWeights =
2419 std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
James Conroyed324052020-05-18 15:16:42 +01002420
2421 // Projection bias is optional even if projection is enabled
 2422        if(params.m_ProjectionBias != nullptr)
2423 {
2424 layer->m_ProjectionParameters.m_ProjectionBias =
2425 std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
2426 }
2427
James Conroy586a9aa2020-03-20 08:49:33 +00002428 }
2429
2430 // QLstm Peephole params
2431 if(descriptor.m_PeepholeEnabled)
2432 {
2433 if(params.m_CellToForgetWeights == nullptr)
2434 {
2435 throw InvalidArgumentException("AddQLstmLayer: Cell To Forget Weights cannot be NULL");
2436 }
2437
2438 if(params.m_CellToOutputWeights == nullptr)
2439 {
2440 throw InvalidArgumentException("AddQLstmLayer: Cell To Output Weights cannot be NULL");
2441 }
2442
2443 if(!descriptor.m_CifgEnabled)
2444 {
2445 if(params.m_CellToInputWeights == nullptr)
2446 {
2447 throw InvalidArgumentException("AddQLstmLayer: Cell To Input Weights cannot be NULL");
2448 }
2449
2450 layer->m_PeepholeParameters.m_CellToInputWeights =
2451 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
2452 }
2453
2454 layer->m_PeepholeParameters.m_CellToForgetWeights =
2455 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
2456 layer->m_PeepholeParameters.m_CellToOutputWeights =
2457 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
2458 }
2459
2460 // QLstm Layer Normalization params
2461 if(descriptor.m_LayerNormEnabled)
2462 {
2463 if(params.m_ForgetLayerNormWeights == nullptr)
2464 {
2465 throw InvalidArgumentException("AddQLstmLayer: Forget layer normalization weights cannot be NULL");
2466 }
2467
2468 if(params.m_CellLayerNormWeights == nullptr)
2469 {
2470 throw InvalidArgumentException("AddQLstmLayer: Cell layer normalization weights cannot be NULL");
2471 }
2472
2473 if(params.m_OutputLayerNormWeights == nullptr)
2474 {
2475 throw InvalidArgumentException("AddQLstmLayer: Output layer normalization weights cannot be NULL");
2476 }
2477
2478 if(!descriptor.m_CifgEnabled)
2479 {
2480 if(params.m_InputLayerNormWeights == nullptr)
2481 {
2482 throw InvalidArgumentException("AddQLstmLayer: Input layer normalization weights cannot be NULL");
2483 }
2484
2485 layer->m_LayerNormParameters.m_InputLayerNormWeights =
2486 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
2487 }
2488
2489 layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
2490 std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
2491 layer->m_LayerNormParameters.m_CellLayerNormWeights =
2492 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
2493 layer->m_LayerNormParameters.m_OutputLayerNormWeights =
2494 std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
2495 }
2496 return layer;
2497}
2498
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002499IConnectableLayer* NetworkImpl::AddLogicalBinaryLayer(const LogicalBinaryDescriptor& logicalBinaryDescriptor,
James Conroyaba90cd2020-11-06 16:28:18 +00002500 const char* name)
2501{
2502 return m_Graph->AddLayer<LogicalBinaryLayer>(logicalBinaryDescriptor, name);
2503}
2504
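// Graph traversal entry points: Accept applies an ILayerVisitor to every layer in the
// graph, while ExecuteStrategy does the same with the newer IStrategy interface.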
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002505void NetworkImpl::Accept(ILayerVisitor& visitor) const
Mike Kelly8c1701a2019-02-11 17:01:27 +00002506{
2507 for (auto layer : GetGraph())
2508 {
2509 layer->Accept(visitor);
 2510    }
2511}
2512
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002513void NetworkImpl::ExecuteStrategy(IStrategy& strategy) const
Finn Williamsb454c5c2021-02-09 15:56:23 +00002514{
2515 for (auto layer : GetGraph())
2516 {
2517 layer->ExecuteStrategy(strategy);
 2518    }
2519}
2520
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002521OptimizedNetworkImpl::OptimizedNetworkImpl(std::unique_ptr<Graph> graph)
Sadik Armagan3184c902020-03-18 10:57:30 +00002522 : m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid())
telsoa014fcda012018-03-09 14:13:49 +00002523{
2524}
2525
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002526OptimizedNetworkImpl::OptimizedNetworkImpl(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions)
Sadik Armagan045f6be2020-09-10 13:37:32 +01002527 : m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid()), m_ModelOptions(modelOptions)
2528{
2529}
2530
Francis Murtagh3d2b4b22021-02-15 18:23:17 +00002531OptimizedNetworkImpl::~OptimizedNetworkImpl()
telsoa014fcda012018-03-09 14:13:49 +00002532{
2533}
2534
2535} // namespace armnn