//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Network.hpp"
#include "Graph.hpp"
#include "Layer.hpp"
#include "DeviceSpec.hpp"
#include "Optimizer.hpp"
#include "SubgraphViewSelector.hpp"
#include "BackendSettings.hpp"
#include "optimizations/All.hpp"

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/TensorHandleFactoryRegistry.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

#include <ProfilingService.hpp>

#include <fcntl.h>
#include <algorithm>
#include <fstream>
#include <limits>
#include <memory>
#include <vector>

#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/converter_policies.hpp>
#include <boost/cast.hpp>

namespace armnn
{

armnn::INetwork* INetwork::CreateRaw()
{
    return new Network();
}

armnn::INetworkPtr INetwork::Create()
{
    return INetworkPtr(CreateRaw(), &INetwork::Destroy);
}

void INetwork::Destroy(INetwork* network)
{
    delete boost::polymorphic_downcast<Network*>(network);
}
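
// Minimal usage sketch (illustrative, not part of the original file): networks are
// normally created through the smart-pointer factory, which wires Destroy() up as
// the deleter, so it is rarely called by hand.
//
//     armnn::INetworkPtr net = armnn::INetwork::Create();
//     armnn::IConnectableLayer* input = net->AddInputLayer(0, "input");
//     // ... add further layers, connect slots, then pass *net to Optimize().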

void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
{
    delete boost::polymorphic_downcast<OptimizedNetwork*>(network);
}

Status OptimizedNetwork::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
{
    return m_Graph->SerializeToDot(stream);
}

void ReportError(const std::string& errorMessage,
                 Optional<std::vector<std::string>&> errorMessages)
{
    std::stringstream fullErrorMessage;
    fullErrorMessage << "ERROR: " << errorMessage;
    ARMNN_LOG(warning) << fullErrorMessage.str();
    if (errorMessages)
    {
        errorMessages.value().push_back(fullErrorMessage.str());
    }
}

void ReportWarning(const std::string& warningMessage,
                   Optional<std::vector<std::string>&> warningMessages)
{
    std::stringstream fullWarningMessage;
    fullWarningMessage << "WARNING: " << warningMessage;
    ARMNN_LOG(warning) << fullWarningMessage.str();
    if (warningMessages)
    {
        warningMessages.value().push_back(fullWarningMessage.str());
    }
}

OptimizationResult ReturnWithError(OptimizationResult res,
                                   const Layer* layer,
                                   const BackendSettings& backendSettings,
                                   Optional<std::vector<std::string>&> errMessages)
{
    std::stringstream failureMsg;
    failureMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
               << " is not supported on any preferred backend " << backendSettings.m_PreferredBackends;
    ReportError(failureMsg.str(), errMessages);

    res.m_Error = true;
    return res;
}

bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
{
    bool noErrors = true;
    unsigned int numOutputs = layer->GetNumOutputSlots();
    for (unsigned int i = 0; i < numOutputs; i++)
    {
        OutputSlot& outputSlot = layer->GetOutputSlot(i);
        TensorInfo info = outputSlot.GetTensorInfo();
        if (DataType::QAsymmU8 == info.GetDataType())
        {
            if (0.f == info.GetQuantizationScale())
            {
                noErrors = false;
                std::stringstream ss;
                ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
                   << " (" << layer->GetNameStr() << ") is of type"
                   << " Quantized 8 bit but its scale parameter has not been set";
                ReportError(ss.str(), errMessages);
            }
            // Softmax under QAsymmU8 must always have scale (1.0f/256.0f) and offset 0
            if ((info.GetQuantizationScale() != (1.0f / 256.0f) ||
                 info.GetQuantizationOffset() != 0) &&
                 layer->GetType() == armnn::LayerType::Softmax)
            {
                std::stringstream ss;
                ss << "Quantization parameters for Softmax layer (Scale: " <<
                    info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
                    ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
                ARMNN_LOG(warning) << ss.str();
                info.SetQuantizationScale(1.0f / 256.0f);
                info.SetQuantizationOffset(0);
                outputSlot.SetTensorInfo(info);
            }
        }
    }
    return noErrors;
}
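
// Illustrative sketch (not part of the original file): a caller avoids the error
// reported above by giving every quantized output slot a non-zero scale before
// optimization. The shape and values below are hypothetical.
//
//     armnn::TensorInfo info(armnn::TensorShape({ 1, 2, 2, 1 }), armnn::DataType::QAsymmU8);
//     info.SetQuantizationScale(1.0f / 255.0f);
//     info.SetQuantizationOffset(0);
//     layer->GetOutputSlot(0).SetTensorInfo(info);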

OptimizationResult AttemptBackendAssignment(BackendSettings& backendSettings,
                                            Graph& graph,
                                            Layer* layer,
                                            BackendId backend,
                                            DataType dataTypeIn,
                                            DataType dataTypeOut,
                                            const std::vector<BackendId>& availablePreferredBackends,
                                            std::string& reasonIfUnsupported,
                                            Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose a meaningful error message before returning with error
    auto ReturnError = [&](const Layer* layer)
    {
        return ReturnWithError(result, layer, backendSettings, errMessages);
    };

    // Need to set the compute device on the layer
    // before we can check if it is supported
    layer->SetBackendId(backend);
    if (!IWorkloadFactory::IsLayerSupported(*layer, EmptyOptional(), reasonIfUnsupported))
    {
        if (dataTypeIn == DataType::Float16 || dataTypeOut == DataType::Float16)
        {
            if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                && layer->GetType() != LayerType::ConvertFp32ToFp16
                && layer->GetType() != LayerType::ConvertFp16ToFp32)
            {
                // Insert FP16 -> FP32 conversion layer before current layer
                std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers;
                if (dataTypeIn == DataType::Float16)
                {
                    convertFp16ToFp32Layers =
                        InsertConvertFp16ToFp32LayersBefore(graph, *layer);
                }

                // Insert FP32 -> FP16 conversion layer after current layer
                std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers;
                if (dataTypeOut == DataType::Float16)
                {
                    convertFp32ToFp16Layers =
                        InsertConvertFp32ToFp16LayersAfter(graph, *layer);
                }

                // Assign a supported backend to the newly introduced conversion layers
                auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                {
                    bool supportedBackendFound = false;
                    std::string reasonIfUnsupported;

                    // Try preferred backend first
                    layer->SetBackendId(preferredBackend);
                    if (IWorkloadFactory::IsLayerSupported(*layer,
                                                           EmptyOptional(),
                                                           reasonIfUnsupported))
                    {
                        supportedBackendFound = true;
                    }
                    else
                    {
                        for (const auto& backend : availablePreferredBackends)
                        {
                            // Skip preferred backend (we already determined that it is not supported)
                            if (backend == preferredBackend)
                            {
                                continue;
                            }

                            layer->SetBackendId(backend);
                            if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                   EmptyOptional(),
                                                                   reasonIfUnsupported))
                            {
                                supportedBackendFound = true;
                                break;
                            }
                        }
                    }

                    return supportedBackendFound;
                };

                for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                return result;
            }
        }
        std::stringstream warningMsg;
        warningMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                   << " is not supported on requested backend " << layer->GetBackendId().Get()
                   << " for input data type " << GetDataTypeName(dataTypeIn)
                   << " and output data type " << GetDataTypeName(dataTypeOut)
                   << " (reason: " << reasonIfUnsupported
                   << "), falling back to the next backend.";
        ReportWarning(warningMsg.str(), errMessages);

        return OptimizationResult(true, false);
    }
    else
    {
        return result;
    }
}

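// Illustrative note (not from the original source): when a Float16 layer is not
// supported but its Float32 equivalent is, the graph around the layer is rewritten as
//
//     ... -> [ConvertFp16ToFp32] -> [layer, run in FP32] -> [ConvertFp32ToFp16] -> ...
//
// and each inserted conversion layer is then given the first backend that supports
// it, trying the original layer's backend first.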

OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  Graph::Iterator& firstLayer,
                                  Graph::Iterator& lastLayer,
                                  Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose a meaningful error message before returning with error
    auto ReturnError = [&](const Layer* layer)
    {
        return ReturnWithError(result, layer, backendSettings, errMessages);
    };

    auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
    if (availablePreferredBackends.empty())
    {
        std::stringstream failureMsg;
        failureMsg << "No preferred backends are available";
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    }

    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        auto layer = *it;

        DataType dataTypeIn  = layer->GetNumInputSlots() == 0 ? DataType::Float32 :
            layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType();
        DataType dataTypeOut = layer->GetNumOutputSlots() == 0 ? DataType::Float32 :
            layer->GetOutputSlot(0).GetTensorInfo().GetDataType();

        std::string reasonIfUnsupported;
        bool found = false;
        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
        {
            // Don't bomb immediately, find all the quantized outputs
            // which haven't had a scale set and report them all back.
            result.m_Error = true;
        }

        // First, try to assign the layer to its hinted backend
        if (layer->GetBackendHint().has_value() &&
            backendSettings.IsBackendSupported(layer->GetBackendHint().value()) &&
            AttemptBackendAssignment(backendSettings,
                                     optNetObjPtr->GetGraph(),
                                     layer,
                                     layer->GetBackendHint().value(),
                                     dataTypeIn,
                                     dataTypeOut,
                                     availablePreferredBackends,
                                     reasonIfUnsupported,
                                     errMessages).IsOk())
        {
            found = true;
            backendSettings.m_SelectedBackends.insert(layer->GetBackendHint().value());
        }
        else
        {
            // Then try to assign the layer to the preferred list of backends
            for (const auto& backend : availablePreferredBackends)
            {
                if (layer->GetBackendHint().has_value() &&
                    layer->GetBackendHint().value() == backend)
                {
                    continue; // Don't re-test the backend hint
                }

                OptimizationResult res = AttemptBackendAssignment(backendSettings,
                                                                  optNetObjPtr->GetGraph(),
                                                                  layer,
                                                                  backend,
                                                                  dataTypeIn,
                                                                  dataTypeOut,
                                                                  availablePreferredBackends,
                                                                  reasonIfUnsupported,
                                                                  errMessages);

                if (res.IsOk())
                {
                    found = true;
                    backendSettings.m_SelectedBackends.insert(backend);
                    break;
                }
                else if (res.IsError())
                {
                    return res; // Cannot continue.
                                // Note: we don't need to log the error as it would already
                                // be logged in AttemptBackendAssignment().
                }
                else
                {
                    BOOST_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
                }
            }
        }

        // If the layer is unsupported by any backend, report the error and bail out.
        if (!found)
        {
            // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
            // fallback we should set the compute device on the layer to CpuRef (these are not
            // available as accelerated operations, or are only available under certain
            // conditions, currently they comprise MemCopy, Constant, Permute)
            armnn::LayerType layerType = layer->GetType();
            if (!backendSettings.IsCpuRefUsed() && (layerType == armnn::LayerType::MemCopy ||
                                                    layerType == armnn::LayerType::Constant ||
                                                    layerType == armnn::LayerType::Permute))
            {
                BackendId cpuBackendId(armnn::Compute::CpuRef);
                layer->SetBackendId(cpuBackendId);
                backendSettings.m_SelectedBackends.insert(cpuBackendId);
            }
            else
            {
                return ReturnError(layer);
            }
        }
    }

    return result;
}
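
// Illustrative note (not from the original source): this loop is driven by the
// backend preference order the caller passes to Optimize(). For example, with
// hypothetical preferences
//
//     std::vector<armnn::BackendId> preferences = { armnn::Compute::GpuAcc,
//                                                   armnn::Compute::CpuAcc,
//                                                   armnn::Compute::CpuRef };
//
// each layer falls through that list in order, and a per-layer BackendHint, when
// set, is tried before the list.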

OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  SubgraphView& subgraph,
                                  Optional<std::vector<std::string>&> errMessages)
{
    Graph::Iterator firstLayer = subgraph.begin();
    Graph::Iterator lastLayer  = subgraph.end();
    return AssignBackends(optNetObjPtr,
                          backendSettings,
                          firstLayer,
                          lastLayer,
                          errMessages);
}

BackendsMap CreateSupportedBackends(TensorHandleFactoryRegistry& handleFactoryRegistry,
                                    BackendSettings& backendSettings)
{
    BackendsMap backends;
    auto const& backendRegistry = BackendRegistryInstance();
    for (auto&& selectedBackend : backendSettings.m_SupportedBackends)
    {
        auto backendFactory = backendRegistry.GetFactory(selectedBackend);
        auto backendObjPtr  = backendFactory();
        BOOST_ASSERT(backendObjPtr);

        backendObjPtr->RegisterTensorHandleFactories(handleFactoryRegistry);

        backends[backendObjPtr->GetId()] = std::move(backendObjPtr);
    }

    return backends;
}

OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
                                             BackendSettings& backendSettings,
                                             BackendsMap& backends,
                                             Optional<std::vector<std::string>&> errMessages)
{
    BOOST_ASSERT(optNetObjPtr);

    OptimizationResult result;

    // Get the optimized graph
    Graph& optGraph = optNetObjPtr->GetGraph();

    // Run backend specific optimizations
    for (auto&& selectedBackend : backendSettings.m_SelectedBackends)
    {
        auto backendObjPtr = backends.find(selectedBackend)->second.get();
        BOOST_ASSERT(backendObjPtr);

        // Select sub-graphs based on backend
        SubgraphViewSelector::Subgraphs subgraphs =
            SubgraphViewSelector::SelectSubgraphs(optGraph,
                                                  // Select layers assigned to the requested backend
                                                  [&backendObjPtr](const Layer& layer)
                                                  {
                                                      return layer.GetType() != LayerType::Input &&
                                                             layer.GetType() != LayerType::Output &&
                                                             layer.GetBackendId() == backendObjPtr->GetId();
                                                  });
        if (subgraphs.empty())
        {
            // No sub-graphs found, try with next selected backend
            continue;
        }

        // Try to optimize each sub-graph
        for (auto& subgraph : subgraphs)
        {
            // Try to optimize the current sub-graph
            OptimizationViews optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraph);
            BOOST_ASSERT(optimizationViews.Validate(*subgraph));

            // Optimization attempted, check the resulting optimized sub-graph
            for (auto& substitution : optimizationViews.GetSubstitutions())
            {
                // Sub-graph optimized, substitute the sub-graph with the new optimized one in the main optimized graph
                SubgraphView& replacementSubgraph   = substitution.m_ReplacementSubgraph;
                SubgraphView& substitutableSubgraph = substitution.m_SubstitutableSubgraph;
                optGraph.SubstituteSubgraph(substitutableSubgraph, replacementSubgraph);

                // Assign the current backend to the optimized sub-graph
                std::for_each(replacementSubgraph.begin(), replacementSubgraph.end(), [&selectedBackend](Layer* l)
                {
                    BOOST_ASSERT(l);
                    l->SetBackendId(selectedBackend);
                });
            }

            if (!optimizationViews.GetFailedSubgraphs().empty())
            {
                std::stringstream warningMsg;
                warningMsg << "Some sub-graph(s) failed to optimize on " << backendObjPtr->GetId() << " backend.";
                ReportWarning(warningMsg.str(), errMessages);

                // Failed to optimize the given sub-graph, re-assign the sub-graph layers to other available backends
                BackendSettings settingsCopy(backendSettings);
                if (!backendObjPtr->GetId().IsCpuRef())
                {
                    // Add the current backend to the list of backends to ignore
                    settingsCopy.m_IgnoredBackends.insert(backendObjPtr->GetId());
                }

                int count = 0;
                for (auto& failedSubgraph : optimizationViews.GetFailedSubgraphs())
                {
                    // An error occurred: the optimization was attempted but not performed, try different backends
                    std::stringstream subgraphMsg;
                    subgraphMsg << "Re-assigning backends to " << failedSubgraph.GetLayers().size()
                                << " layers inside sub-graph " << count++;
                    ReportWarning(subgraphMsg.str(), errMessages);

                    OptimizationResult reassignmentResult = AssignBackends(optNetObjPtr,
                                                                           settingsCopy,
                                                                           *subgraph,
                                                                           errMessages);
                    if (reassignmentResult.m_Error)
                    {
                        // Failed to re-assign one of the remaining backends to each layer of the sub-graph
                        result.m_Error = true;
                        return result;
                    }
                }
            }
        }
    }

    return result;
}

bool RequiresCopy(ITensorHandleFactory::FactoryId src,
                  ITensorHandleFactory::FactoryId dst,
                  TensorHandleFactoryRegistry& registry)
{
    if (src != dst)
    {
        ITensorHandleFactory* srcFactory = registry.GetFactory(src);
        ITensorHandleFactory* dstFactory = registry.GetFactory(dst);

        if (srcFactory && dstFactory &&
            (srcFactory->GetExportFlags() & dstFactory->GetImportFlags()) != 0)
        {
            return false;
        }
        return true;
    }
    return false;
}
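
// Illustrative note (not from the original source): export/import compatibility is
// a bitmask intersection. If, say, both factories handle malloc-backed memory,
//
//     srcFactory->GetExportFlags() & dstFactory->GetImportFlags()
//
// has the MemorySource::Malloc bit set, so the destination can import the source
// tensor directly and no copy is needed; otherwise a copy is required.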

// Find the handle factory for the input layer which results in the fewest required copies.
ITensorHandleFactory::FactoryId CalculateSlotOptionForInput(BackendsMap& backends,
                                                            OutputSlot& slot,
                                                            TensorHandleFactoryRegistry& registry)
{
    Layer& layer = slot.GetOwningLayer();
    BOOST_ASSERT(layer.GetType() == LayerType::Input);

    // Explicitly select the TensorHandleFactory for InputLayer because the rules for it are slightly different. It
    // doesn't matter which backend it is assigned to because they all use the same implementation, which
    // requires Map/Unmap support. This means that, so long as the handle type supports map/unmap semantics, we can
    // select a factory with maximum compatibility with the layers connected to the InputLayer.

    // First ensure the from backends can support the TensorHandle API
    auto frmBackend = backends.find(layer.GetBackendId());
    if (frmBackend == backends.end() ||
        !frmBackend->second->SupportsTensorAllocatorAPI())
    {
        return ITensorHandleFactory::LegacyFactoryId;
    }

    // Go through all connections to the output slot and determine the TensorHandleFactory which results in the
    // fewest copies.
    std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
    int topScore = 0;
    ITensorHandleFactory::FactoryId topChoice = ITensorHandleFactory::LegacyFactoryId;

    for (auto&& connection : slot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();

        auto toBackend = backends.find(connectedLayer.GetBackendId());
        BOOST_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

        if (!toBackend->second.get()->SupportsTensorAllocatorAPI())
        {
            // The destination backend does not support the tensor allocator API, move to the next one
            continue;
        }

        auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
        for (auto&& dst : dstPrefs)
        {
            // Input layers use the mem copy workload or import, so the selected factory must
            // support either the map/unmap API or Import API
            ITensorHandleFactory* factory = registry.GetFactory(dst);
            if (!factory->SupportsMapUnmap() &&
                !CheckFlag(factory->GetImportFlags(), MemorySource::Malloc)) // Just support cpu mem imports for now
            {
                // The current tensor handle factory does not support the map/unmap or import
                // strategy, move to the next one
                continue;
            }

            auto it = factoryScores.find(dst);
            if (it == factoryScores.end())
            {
                // Add new score to the table
                factoryScores[dst] = 0;
                if (topChoice == ITensorHandleFactory::LegacyFactoryId)
                {
                    topChoice = dst;
                }
            }
            else
            {
                // Increase the score
                factoryScores[dst]++;

                // Track the best option
                if (factoryScores[dst] > topScore)
                {
                    topScore = factoryScores[dst];
                    topChoice = dst;
                }
            }
        }
    }

    return topChoice;
}

// Find the handle factory for the output layer which results in the fewest required copies.
ITensorHandleFactory::FactoryId CalculateSlotOptionForOutput(BackendsMap& backends,
                                                             OutputSlot& slot,
                                                             TensorHandleFactoryRegistry& registry)
{
    IgnoreUnused(backends, slot, registry);
    return ITensorHandleFactory::DeferredFactoryId;
}

// For all handle factories supported on the source backend, we wish to find the one which requires the fewest copies
// when considering all connections.
ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
                                                    OutputSlot& outputSlot,
                                                    TensorHandleFactoryRegistry& registry)
{
    // First ensure the from backends can support the TensorHandle API
    Layer& layer = outputSlot.GetOwningLayer();
    auto frmBackend = backends.find(layer.GetBackendId());
    if (frmBackend == backends.end() ||
        !frmBackend->second->SupportsTensorAllocatorAPI())
    {
        return ITensorHandleFactory::LegacyFactoryId;
    }

    // Connections to Output Layers require support for map/unmap on the TensorHandle.
    bool requiresMapUnmap = false;
    for (auto&& connection : outputSlot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();
        if (connectedLayer.GetType() == LayerType::Output)
        {
            requiresMapUnmap = true;
        }
    }

    IBackendInternal* srcBackend = frmBackend->second.get();
    auto srcPrefs = srcBackend->GetHandleFactoryPreferences();

    // Initialize the scores
    std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
    for (auto&& pref : srcPrefs)
    {
        if (requiresMapUnmap) // Only consider factories that support map/unmap if required
        {
            ITensorHandleFactory* factory = registry.GetFactory(pref);
            if (!factory->SupportsMapUnmap())
            {
                // The current tensor handle factory does not support the map/unmap strategy, move to the next one
                continue;
            }
        }

        auto it = factoryScores.find(pref);
        if (it == factoryScores.end())
        {
            // Add new score to the table
            factoryScores[pref] = 0;
        }
    }

    // Score each handle factory based on how many times it requires copies on the slot connections
    for (auto&& connection : outputSlot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();

        auto toBackend = backends.find(connectedLayer.GetBackendId());
        BOOST_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

        auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
        for (auto&& src : srcPrefs)
        {
            if (factoryScores.find(src) == factoryScores.end()) // Don't consider excluded factories
            {
                continue;
            }

            for (auto&& dst : dstPrefs)
            {
                if (RequiresCopy(src, dst, registry))
                {
                    // Copy required for this connection, increase the score
                    factoryScores[src]++;
                    break;
                }
            }
        }
    }

    // Find the lowest score
    int minScore = std::numeric_limits<int>::max();
    for (auto it : factoryScores)
    {
        minScore = std::min(minScore, it.second);
    }

    // Collect factories matching the best (lowest) score
    std::vector<ITensorHandleFactory::FactoryId> optimalFactories;
    for (auto it : factoryScores)
    {
        if (it.second == minScore)
        {
            optimalFactories.push_back(it.first);
        }
    }

    // For all compatible factories matching the best score, find the preferred one for the current layer.
    for (auto&& srcPref : srcPrefs)
    {
        for (auto&& comp : optimalFactories)
        {
            if (comp == srcPref)
            {
                return comp;
            }
        }
    }

    return ITensorHandleFactory::LegacyFactoryId;
}
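
// Worked example (illustrative, not from the original source): suppose the source
// backend prefers factories { A, B } and both connections' destination backends
// prefer { B } only, with A unable to export into B. Factory A then scores one
// required copy per connection while factory B scores zero, so B wins; ties are
// broken by the source backend's own preference order.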

EdgeStrategy CalculateEdgeStrategy(BackendsMap& backends,
                                   ITensorHandleFactory::FactoryId srcFactoryId,
                                   const Layer& layer,
                                   const Layer& connectedLayer,
                                   TensorHandleFactoryRegistry& registry)
{
    auto toBackend = backends.find(connectedLayer.GetBackendId());
    BOOST_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

    auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();

    // Legacy API check for backward compatibility
    if (srcFactoryId == ITensorHandleFactory::LegacyFactoryId || dstPrefs.empty())
    {
        if (layer.GetBackendId() != connectedLayer.GetBackendId())
        {
            return EdgeStrategy::CopyToTarget;
        }
        else
        {
            return EdgeStrategy::DirectCompatibility;
        }
    }

    // TensorHandleFactory API present, so perform more sophisticated strategies.
    // Dst Output layers don't require copy because they use import or map/unmap
    if (connectedLayer.GetType() == LayerType::Output)
    {
        return EdgeStrategy::DirectCompatibility;
    }

    // Search for direct match in prefs
    for (auto&& pref : dstPrefs)
    {
        if (pref == srcFactoryId)
        {
            return EdgeStrategy::DirectCompatibility;
        }
    }

    // Search for export/import options
    ITensorHandleFactory* srcFactory = registry.GetFactory(srcFactoryId);
    if (srcFactory->GetExportFlags() != 0)
    {
        for (auto&& pref : dstPrefs)
        {
            ITensorHandleFactory* dstFactory = registry.GetFactory(pref);

            // Handles cases when a destPref is not listed in TensorHandleFactoryRegistry
            if (!dstFactory)
            {
                continue;
            }

            if ((dstFactory->GetImportFlags() & srcFactory->GetExportFlags()) != 0)
            {
                return EdgeStrategy::ExportToTarget;
            }
        }
    }

    // Search for copy options via map/unmap
    if (srcFactory->SupportsMapUnmap())
    {
        for (auto&& pref : dstPrefs)
        {
            ITensorHandleFactory* dstFactory = registry.GetFactory(pref);
            if (dstFactory && dstFactory->SupportsMapUnmap())
            {
                return EdgeStrategy::CopyToTarget;
            }
        }
    }

    return EdgeStrategy::Undefined;
}
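
// Illustrative summary (not from the original source) of the precedence applied
// above, cheapest first:
//
//     EdgeStrategy::DirectCompatibility  // same factory on both ends, zero-cost hand-off
//     EdgeStrategy::ExportToTarget       // src exports memory the dst factory can import
//     EdgeStrategy::CopyToTarget         // explicit mem-copy via the map/unmap API
//     EdgeStrategy::Undefined            // no viable strategy; reported as an error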

// Select the TensorHandleFactories and the corresponding memory strategy
OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
                                              BackendsMap& backends,
                                              TensorHandleFactoryRegistry& registry,
                                              Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages](Layer* layer)
    {
        BOOST_ASSERT(layer);

        // Let's make sure the backend is in our list of supported backends. Something went wrong during backend
        // assignment if this check fails
        BOOST_ASSERT(backends.find(layer->GetBackendId()) != backends.end());

        // Check each output separately
        for (unsigned int slotIdx = 0; slotIdx < layer->GetNumOutputSlots(); slotIdx++)
        {
            OutputSlot& outputSlot = layer->GetOutputSlot(slotIdx);

            ITensorHandleFactory::FactoryId slotOption = ITensorHandleFactory::LegacyFactoryId;

            // Calculate the factory to use which results in the fewest copies being made.
            switch (layer->GetType())
            {
                case LayerType::Input:
                    slotOption = CalculateSlotOptionForInput(backends, outputSlot, registry);
                    break;
                case LayerType::Output:
                    slotOption = CalculateSlotOptionForOutput(backends, outputSlot, registry);
                    break;
                default:
                    slotOption = CalculateSlotOption(backends, outputSlot, registry);
                    break;
            }
            outputSlot.SetTensorHandleFactory(slotOption);

            // Now determine the "best" edge strategy for each connection given the slotOption.
            unsigned int connectionIdx = 0;
            for (auto&& connection : outputSlot.GetConnections())
            {
                const Layer& connectedLayer = connection->GetOwningLayer();

                EdgeStrategy strategy = CalculateEdgeStrategy(backends, slotOption, *layer, connectedLayer, registry);

                if (strategy == EdgeStrategy::Undefined)
                {
                    result.m_Error = true;
                    if (errMessages)
                    {
                        errMessages.value().emplace_back("Could not find valid strategy required for compatibility"
                                                         " between backends.");
                    }
                    return;
                }

                outputSlot.SetEdgeStrategy(connectionIdx, strategy);

                connectionIdx++;
            }
        }
    });

    return result;
}

IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                              const std::vector<BackendId>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options,
                              Optional<std::vector<std::string>&> messages)
{
    if (backendPreferences.empty())
    {
        throw armnn::InvalidArgumentException("Invoked Optimize with no backends specified");
    }

    const Network& network = *boost::polymorphic_downcast<const Network*>(&inNetwork);
    std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());

    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);

    OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get());

    // Get the optimized graph
    Graph& optGraph = optNetObjPtr->GetGraph();

    // Perform optimisation passes
    using namespace optimizations;
    Optimizer::Pass(optGraph, MakeOptimizations(SquashEqualPermuteSiblings(),
                                                SquashEqualTransposeSiblings(),
                                                SquashEqualReshapeSiblings(),
                                                OptimizeInversePermutes(),
                                                OptimizeInverseTransposes(),
                                                MovePermuteUp(),
                                                MoveTransposeUp(),
                                                PermuteAsReshape(),
                                                TransposeAsReshape(),
                                                OptimizeConsecutiveReshapes(),
                                                FoldPadIntoConvolution2d(),
                                                PermuteAndBatchToSpaceAsDepthToSpace(),
                                                TransposeAndBatchToSpaceAsDepthToSpace()));

    // Infer the tensor infos for all output slots. Throws an exception on failure
    optGraph.InferTensorInfos();

    // If Fp32 to Fp16 optimization is set convert Fp32 network to Fp16
    if (options.m_ReduceFp32ToFp16)
    {
        Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToFp16Converter()));
        Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
    }

    // Initialize backend settings
    BackendSettings backendSettings(backendPreferences, deviceSpec);
    if (backendSettings.GetAvailablePreferredBackends().empty())
    {
        std::stringstream failureMsg;
        failureMsg << "None of the preferred backends " << backendPreferences
                   << " are supported. Current platform provides " << backendSettings.m_SupportedBackends;
        ReportError(failureMsg.str(), messages);
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // Create a map to temporarily hold initialized backend objects
    TensorHandleFactoryRegistry tensorHandleFactoryRegistry;
    BackendsMap backends = CreateSupportedBackends(tensorHandleFactoryRegistry, backendSettings);

    // Assign an available backend to each layer
    Graph::Iterator firstLayer = optGraph.begin();
    Graph::Iterator lastLayer  = optGraph.end();
    OptimizationResult assignBackendsResult = AssignBackends(optNetObjPtr,
                                                             backendSettings,
                                                             firstLayer,
                                                             lastLayer,
                                                             messages);
    if (assignBackendsResult.m_Error)
    {
        // Failed to assign a backend to each layer
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    Optimizer::Pass(optGraph, MakeOptimizations(OptimizeInverseConversionsFp16(),
                                                OptimizeInverseConversionsFp32()));

    // Apply the backend-specific optimizations
    OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(optNetObjPtr,
                                                                             backendSettings,
                                                                             backends,
                                                                             messages);
    if (backendOptimizationResult.m_Error)
    {
        // Failed to apply the backend-specific optimizations
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // If the debug flag is set, then insert a DebugLayer after each layer
    // Doing this after applying the backend optimizations as they might have changed some layers
    if (options.m_Debug)
    {
        Optimizer::Pass(optGraph, MakeOptimizations(InsertDebugLayer()));
    }

    // Calculate the compatibility strategies for tensor handles
    OptimizationResult strategyResult = SelectTensorHandleStrategy(optGraph,
                                                                   backends,
                                                                   tensorHandleFactoryRegistry,
                                                                   messages);
    if (strategyResult.m_Error)
    {
        // Failed to select tensor handle factories and compatibility strategies
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // Based on the tensor handle strategy determined above, insert copy layers where required.
    optGraph.AddCompatibilityLayers(backends, tensorHandleFactoryRegistry);

    // Convert constants
    Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
    Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsHalfToFloat()));

    // Run backend specific optimizations (deprecated)
    for (auto&& chosenBackend : backendSettings.m_SelectedBackends)
    {
        auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
        auto backendPtr = factoryFun();
        BOOST_ASSERT(backendPtr.get() != nullptr);

        ARMNN_NO_DEPRECATE_WARN_BEGIN
        auto backendSpecificOptimizations = backendPtr->GetOptimizations();
        ARMNN_NO_DEPRECATE_WARN_END

        if (!backendSpecificOptimizations.empty())
        {
            Optimizer::Pass(optNetObjPtr->GetGraph(), backendSpecificOptimizations);
        }
    }

    return optNet;
}
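
// Minimal usage sketch (illustrative, not part of the original file; `net` and
// `runtime` are assumed to exist already):
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
//                                                armnn::Compute::CpuRef };
//     std::vector<std::string> messages;
//     armnn::IOptimizedNetworkPtr optNet =
//         armnn::Optimize(*net, backends, runtime->GetDeviceSpec(),
//                         armnn::OptimizerOptions(),
//                         armnn::Optional<std::vector<std::string>&>(messages));
//     // optNet is null on failure; `messages` then holds the reported errors.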

Network::Network()
: m_Graph(std::make_unique<Graph>())
{
}

Network::~Network()
{
}

Status Network::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<InputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
}

IConnectableLayer* Network::AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
                                               const char* name)
{
    return m_Graph->AddLayer<ComparisonLayer>(comparisonDescriptor, name);
}

IConnectableLayer* Network::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
                                                     const char* name)
{
    return m_Graph->AddLayer<ElementwiseUnaryLayer>(elementwiseUnaryDescriptor, name);
}

IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                       const ConstTensor& weights,
                                                       const Optional<ConstTensor>& biases,
                                                       const char* name)
{
    if (fullyConnectedDescriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (fullyConnectedDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const Optional<ConstTensor>& biases,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const char* name)
{
    Optional<ConstTensor> biases;
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const ConstTensor& biases,
                                                   const char* name)
{
    Optional<ConstTensor> optionalBiases(biases);
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, optionalBiases, name);
}

IConnectableLayer* Network::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
                                           const char* name)
{
    return m_Graph->AddLayer<ConcatLayer>(concatDescriptor, name);
}

IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
                                                      const ConstTensor& weights,
                                                      const Optional<ConstTensor>& biases,
                                                      const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const Optional<ConstTensor>& biases,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const char* name)
{
    Optional<ConstTensor> biases;
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const ConstTensor& biases,
                                                  const char* name)
{
    Optional<ConstTensor> optionalBiases(biases);
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const Optional<ConstTensor>& biases,
    const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<DepthToSpaceLayer>(depthToSpaceDescriptor, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const Optional<ConstTensor>& biases,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const char* name)
{
    Optional<ConstTensor> biases;
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor& biases,
    const char* name)
{
    Optional<ConstTensor> optionalBiases(biases);
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
}

IConnectableLayer* Network::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
                                                         const ConstTensor& anchors, const char* name)
{
    const auto layer = m_Graph->AddLayer<DetectionPostProcessLayer>(descriptor, name);

    layer->m_Anchors = std::make_unique<ScopedCpuTensorHandle>(anchors);

    return layer;
}
1229
IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
}

IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
}

IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
                                               const char* name)
{
    return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
}

IConnectableLayer* Network::AddArgMinMaxLayer(const ArgMinMaxDescriptor& argMinMaxDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<ArgMinMaxLayer>(argMinMaxDescriptor, name);
}

IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
                                                  const char* name)
{
    return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
}

IConnectableLayer* Network::AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name)
{
    return m_Graph->AddLayer<SliceLayer>(sliceDescriptor, name);
}

IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
}

IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
                                             const char* name)
{
    return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
}

IConnectableLayer* Network::AddMaximumLayer(const char* name)
{
    return m_Graph->AddLayer<MaximumLayer>(name);
}

IConnectableLayer* Network::AddMinimumLayer(const char* name)
{
    return m_Graph->AddLayer<MinimumLayer>(name);
}

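// Kept for backward compatibility: "merger" is the legacy name for concat,
// so this simply forwards to AddConcatLayer.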
IConnectableLayer* Network::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
                                           const char* name)
{
    return AddConcatLayer(mergerDescriptor, name);
}

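// Abs is now expressed as an elementwise unary operation; this wrapper is
// retained so that existing callers of AddAbsLayer keep working.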
IConnectableLayer* Network::AddAbsLayer(const char* name)
{
    return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
}

IConnectableLayer* Network::AddAdditionLayer(const char* name)
{
    return m_Graph->AddLayer<AdditionLayer>(name);
}

IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
{
    return m_Graph->AddLayer<MultiplicationLayer>(name);
}

IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<OutputLayer>(id, name);
}

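// Batch normalization stores its statistics and per-channel parameters as
// constant tensor handles owned by the layer. A minimal usage sketch, assuming
// hypothetical Float32 buffers mean/var/beta/gamma of shape {32}
// (illustrative only):
//
//     armnn::BatchNormalizationDescriptor bnDesc;
//     bnDesc.m_Eps        = 1e-5f;
//     bnDesc.m_DataLayout = armnn::DataLayout::NHWC;
//
//     armnn::TensorInfo paramInfo({32}, armnn::DataType::Float32);
//     armnn::IConnectableLayer* bn = network->AddBatchNormalizationLayer(
//         bnDesc,
//         armnn::ConstTensor(paramInfo, mean),
//         armnn::ConstTensor(paramInfo, var),
//         armnn::ConstTensor(paramInfo, beta),
//         armnn::ConstTensor(paramInfo, gamma),
//         "bn1");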
IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                       const ConstTensor& mean,
                                                       const ConstTensor& variance,
                                                       const ConstTensor& beta,
                                                       const ConstTensor& gamma,
                                                       const char* name)
{
    const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);

    layer->m_Mean     = std::make_unique<ScopedCpuTensorHandle>(mean);
    layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
    layer->m_Beta     = std::make_unique<ScopedCpuTensorHandle>(beta);
    layer->m_Gamma    = std::make_unique<ScopedCpuTensorHandle>(gamma);

    return layer;
}

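// Compatibility wrapper: the dedicated bilinear-resize entry point is
// re-expressed as a generic ResizeLayer with ResizeMethod::Bilinear.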
IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
                                                   const char* name)
{
    ResizeDescriptor resizeDescriptor;
    resizeDescriptor.m_Method       = ResizeMethod::Bilinear;
    resizeDescriptor.m_DataLayout   = descriptor.m_DataLayout;
    resizeDescriptor.m_TargetWidth  = descriptor.m_TargetWidth;
    resizeDescriptor.m_TargetHeight = descriptor.m_TargetHeight;

    return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
}

IConnectableLayer* Network::AddResizeLayer(const ResizeDescriptor& resizeDescriptor, const char* name)
{
    return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
}

IConnectableLayer* Network::AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
                                                          const char* name)
{
    return m_Graph->AddLayer<InstanceNormalizationLayer>(desc, name);
}

IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                    const char* name)
{
    return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
}

IConnectableLayer* Network::AddLogSoftmaxLayer(const LogSoftmaxDescriptor& desc,
                                               const char* name)
{
    return m_Graph->AddLayer<LogSoftmaxLayer>(desc, name);
}

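// A ConstantLayer copies the supplied tensor into a ScopedCpuTensorHandle
// that the layer owns. A minimal sketch, assuming a hypothetical bias vector
// (illustrative only):
//
//     std::vector<float> data{0.f, 1.f, 2.f, 3.f};
//     armnn::ConstTensor constant(armnn::TensorInfo({4}, armnn::DataType::Float32),
//                                 data);
//     armnn::IConnectableLayer* constLayer = network->AddConstantLayer(constant, "const");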
IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
{
    auto layer = m_Graph->AddLayer<ConstantLayer>(name);

    layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);

    return layer;
}

IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
}

IConnectableLayer* Network::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
}

IConnectableLayer* Network::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<SpaceToDepthLayer>(spaceToDepthDescriptor, name);
}

IConnectableLayer* Network::AddFloorLayer(const char* name)
{
    return m_Graph->AddLayer<FloorLayer>(name);
}

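// AddLstmLayer validates that every weight and bias required by the chosen
// descriptor flags (CIFG, projection, peephole, layer normalization) is
// present before copying it into the layer. A minimal caller-side sketch of
// the non-CIFG case, assuming caller-owned ConstTensor objects (illustrative
// only):
//
//     armnn::LstmDescriptor lstmDesc;
//     lstmDesc.m_ActivationFunc = 4;     // hypothetical activation selector
//     lstmDesc.m_CifgEnabled    = false; // requires the three input-gate params below
//
//     armnn::LstmInputParams params;
//     params.m_InputToInputWeights     = &inputToInputWeights;     // ConstTensor*
//     params.m_RecurrentToInputWeights = &recurrentToInputWeights; // ConstTensor*
//     params.m_InputGateBias           = &inputGateBias;           // ConstTensor*
//     // ...plus the nine mandatory basic weights/biases...
//     armnn::IConnectableLayer* lstm = network->AddLstmLayer(lstmDesc, params, "lstm");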
IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor& descriptor,
                                         const LstmInputParams& params,
                                         const char* name)
{
    const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);

    // Lstm Basic Parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    // Lstm Cifg parameters
    if (!descriptor.m_CifgEnabled)
    {
        if (params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL");
        }
        if (params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                "AddLstmLayer: Recurrent To Input Weights cannot be NULL");
        }
        if (params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL");
        }
        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        // In the VTS tests, cell-to-input weights may be null, even if the other CIFG params are not.
        if (params.m_CellToInputWeights != nullptr)
        {
            layer->m_CifgParameters.m_CellToInputWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    // Lstm projection parameters
    if (descriptor.m_ProjectionEnabled)
    {
        if (params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL");
        }
        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
        if (params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    // Lstm Peephole params
    if (descriptor.m_PeepholeEnabled)
    {
        if (params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL");
        }
        if (params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL");
        }
        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }

    // Lstm Layer Normalization params
    if (descriptor.m_LayerNormEnabled)
    {
        if (!descriptor.m_CifgEnabled)
        {
            if (params.m_InputLayerNormWeights == nullptr)
            {
                throw InvalidArgumentException("AddLstmLayer: Input layer normalization weights cannot be NULL");
            }
            layer->m_LayerNormParameters.m_InputLayerNormWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
        }

        if (params.m_ForgetLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Forget layer normalization weights cannot be NULL");
        }
        if (params.m_CellLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell layer normalization weights cannot be NULL");
        }
        if (params.m_OutputLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Output layer normalization weights cannot be NULL");
        }
        layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
        layer->m_LayerNormParameters.m_CellLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
        layer->m_LayerNormParameters.m_OutputLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
    }

    return layer;
}

IConnectableLayer* Network::AddDivisionLayer(const char* name)
{
    return m_Graph->AddLayer<DivisionLayer>(name);
}

IConnectableLayer* Network::AddSubtractionLayer(const char* name)
{
    return m_Graph->AddLayer<SubtractionLayer>(name);
}

IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
{
    return m_Graph->AddLayer<MeanLayer>(meanDescriptor, name);
}

IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
{
    return m_Graph->AddLayer<PadLayer>(padDescriptor, name);
}

IConnectableLayer* Network::AddQuantizeLayer(const char* name)
{
    return m_Graph->AddLayer<QuantizeLayer>(name);
}

IConnectableLayer* Network::AddDequantizeLayer(const char* name)
{
    return m_Graph->AddLayer<DequantizeLayer>(name);
}

IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}

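// The next three functions are retained for backward compatibility:
// AddGreaterLayer and AddEqualLayer forward to AddComparisonLayer with the
// corresponding ComparisonOperation, and AddRsqrtLayer forwards to
// AddElementwiseUnaryLayer with UnaryOperation::Rsqrt.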
IConnectableLayer* Network::AddGreaterLayer(const char* name)
{
    return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
}

IConnectableLayer* Network::AddEqualLayer(const char* name)
{
    return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
}

IConnectableLayer* Network::AddRsqrtLayer(const char* name)
{
    return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
}

IConnectableLayer* Network::AddGatherLayer(const char* name)
{
    return m_Graph->AddLayer<GatherLayer>(name);
}

IConnectableLayer* Network::AddMergeLayer(const char* name)
{
    return m_Graph->AddLayer<MergeLayer>(name);
}

IConnectableLayer* Network::AddSwitchLayer(const char* name)
{
    return m_Graph->AddLayer<SwitchLayer>(name);
}

IConnectableLayer* Network::AddPreluLayer(const char* name)
{
    return m_Graph->AddLayer<PreluLayer>(name);
}

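// Unlike the convolution overloads above, transpose convolution rejects a
// missing bias up front when the descriptor requests one. A minimal
// caller-side sketch, assuming hypothetical weight/bias ConstTensors
// (illustrative only):
//
//     armnn::TransposeConvolution2dDescriptor tcDesc;
//     tcDesc.m_StrideX     = 2;
//     tcDesc.m_StrideY     = 2;
//     tcDesc.m_BiasEnabled = true;
//
//     // Throws InvalidArgumentException if biases is an empty Optional here:
//     armnn::IConnectableLayer* deconv = network->AddTransposeConvolution2dLayer(
//         tcDesc, weights, armnn::Optional<armnn::ConstTensor>(bias), "deconv");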
IConnectableLayer* Network::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
                                                           const ConstTensor& weights,
                                                           const Optional<ConstTensor>& biases,
                                                           const char* name)
{
    if (descriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddTransposeConvolution2dLayer: Biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<TransposeConvolution2dLayer>(descriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (descriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<TransposeLayer>(transposeDescriptor, name);
}

IConnectableLayer* Network::AddStackLayer(const StackDescriptor& stackDescriptor,
                                          const char* name)
{
    return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
}

IConnectableLayer* Network::AddStandInLayer(const StandInDescriptor& desc,
                                            const char* name)
{
    return m_Graph->AddLayer<StandInLayer>(desc, name);
}

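// Quantized LSTM receives its parameters through QuantizedLstmInputParams
// getters, which return references, so this function performs no null checks
// of its own; every tensor is copied into a handle owned by the layer.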
IConnectableLayer* Network::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
                                                  const char* name)
{
    const auto layer = m_Graph->AddLayer<QuantizedLstmLayer>(name);

    // InputToX weights
    layer->m_QuantizedLstmParameters.m_InputToInputWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputToInputWeights());
    layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputToForgetWeights());
    layer->m_QuantizedLstmParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputToCellWeights());
    layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputToOutputWeights());

    // RecurrentToX weights
    layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToInputWeights());
    layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToForgetWeights());
    layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToCellWeights());
    layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToOutputWeights());

    // Bias
    layer->m_QuantizedLstmParameters.m_InputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputGateBias());
    layer->m_QuantizedLstmParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(params.GetForgetGateBias());
    layer->m_QuantizedLstmParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(params.GetCellBias());
    layer->m_QuantizedLstmParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(params.GetOutputGateBias());

    return layer;
}

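// QLstm mirrors the float LSTM above but with stricter validation: when
// projection is enabled both the weights and the bias are mandatory, and the
// peephole and layer-normalization blocks each require an additional
// input-gate parameter whenever CIFG is disabled.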
IConnectableLayer* Network::AddQLstmLayer(const QLstmDescriptor& descriptor,
                                          const LstmInputParams& params,
                                          const char* name)
{
    const auto layer = m_Graph->AddLayer<QLstmLayer>(descriptor, name);

    // QLstm Basic Parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    // QLstm Cifg parameters
    if (!descriptor.m_CifgEnabled)
    {
        if (params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Input To Input Weights cannot be NULL");
        }

        if (params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                "AddQLstmLayer: Recurrent To Input Weights cannot be NULL");
        }

        if (params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Input Gate Bias cannot be NULL");
        }

        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    // QLstm Projection parameters
    if (descriptor.m_ProjectionEnabled)
    {
        if (params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Projection Weights cannot be NULL");
        }

        if (params.m_ProjectionBias == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Projection Biases cannot be NULL");
        }

        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
        layer->m_ProjectionParameters.m_ProjectionBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
    }

    // QLstm Peephole params
    if (descriptor.m_PeepholeEnabled)
    {
        if (params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Cell To Forget Weights cannot be NULL");
        }

        if (params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Cell To Output Weights cannot be NULL");
        }

        if (!descriptor.m_CifgEnabled)
        {
            if (params.m_CellToInputWeights == nullptr)
            {
                throw InvalidArgumentException("AddQLstmLayer: Cell To Input Weights cannot be NULL");
            }

            layer->m_PeepholeParameters.m_CellToInputWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }

        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }

    // QLstm Layer Normalization params
    if (descriptor.m_LayerNormEnabled)
    {
        if (params.m_ForgetLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Forget layer normalization weights cannot be NULL");
        }

        if (params.m_CellLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Cell layer normalization weights cannot be NULL");
        }

        if (params.m_OutputLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Output layer normalization weights cannot be NULL");
        }

        if (!descriptor.m_CifgEnabled)
        {
            if (params.m_InputLayerNormWeights == nullptr)
            {
                throw InvalidArgumentException("AddQLstmLayer: Input layer normalization weights cannot be NULL");
            }

            layer->m_LayerNormParameters.m_InputLayerNormWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
        }

        layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
        layer->m_LayerNormParameters.m_CellLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
        layer->m_LayerNormParameters.m_OutputLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
    }

    return layer;
}

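// Accept walks every layer in the graph and dispatches to the visitor. A
// minimal sketch of a counting visitor, assuming it derives from
// armnn::LayerVisitorBase with the no-throw policy (illustrative only; the
// exact base-class spelling may differ between releases):
//
//     class LayerCounter : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
//     {
//     public:
//         void VisitInputLayer(const armnn::IConnectableLayer*, armnn::LayerBindingId,
//                              const char*) override { ++m_Count; }
//         unsigned int m_Count = 0;
//     };
//
//     LayerCounter counter;
//     network.Accept(counter);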
void Network::Accept(ILayerVisitor& visitor) const
{
    for (auto layer : GetGraph())
    {
        layer->Accept(visitor);
    }
}

OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
    : m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid())
{
}

OptimizedNetwork::~OptimizedNetwork()
{
}

} // namespace armnn