//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Network.hpp"
#include "Graph.hpp"
#include "Layer.hpp"
#include "DeviceSpec.hpp"
#include "Optimizer.hpp"
#include "SubgraphViewSelector.hpp"
#include "BackendSettings.hpp"
#include "optimizations/All.hpp"

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/TensorHandleFactoryRegistry.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>

#include <ProfilingService.hpp>

#include <fcntl.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <vector>

#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/converter_policies.hpp>
#include <boost/cast.hpp>

namespace armnn
{

armnn::INetwork* INetwork::CreateRaw()
{
    return new Network();
}

armnn::INetworkPtr INetwork::Create()
{
    return INetworkPtr(CreateRaw(), &INetwork::Destroy);
}

void INetwork::Destroy(INetwork* network)
{
    delete boost::polymorphic_downcast<Network*>(network);
}

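// Illustrative lifecycle (sketch, not part of this translation unit):
//     armnn::INetworkPtr net = armnn::INetwork::Create();
//     net->AddInputLayer(0);
//     // ... add further layers, connect slots, then pass *net to Optimize() ...
// INetworkPtr carries &INetwork::Destroy as its deleter, so CreateRaw() should only be
// used by callers that manage the object's lifetime explicitly.
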
void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
{
    delete boost::polymorphic_downcast<OptimizedNetwork*>(network);
}

Status OptimizedNetwork::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
{
    return m_Graph->SerializeToDot(stream);
}

void ReportError(const std::string& errorMessage,
                 Optional<std::vector<std::string>&> errorMessages)
{
    std::stringstream fullErrorMessage;
    fullErrorMessage << "ERROR: " << errorMessage;
    ARMNN_LOG(warning) << fullErrorMessage.str();
    if (errorMessages)
    {
        errorMessages.value().push_back(fullErrorMessage.str());
    }
}

void ReportWarning(const std::string& warningMessage,
                   Optional<std::vector<std::string>&> warningMessages)
{
    std::stringstream fullWarningMessage;
    fullWarningMessage << "WARNING: " << warningMessage;
    ARMNN_LOG(warning) << fullWarningMessage.str();
    if (warningMessages)
    {
        warningMessages.value().push_back(fullWarningMessage.str());
    }
}

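// Call-site sketch (illustrative): callers that want the messages back as well as the
// log pass a vector through the Optional:
//     std::vector<std::string> errors;
//     ReportError("no backend available", Optional<std::vector<std::string>&>(errors));
// Passing EmptyOptional() logs via ARMNN_LOG(warning) only.
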
OptimizationResult ReturnWithError(OptimizationResult res,
                                   const Layer* layer,
                                   const BackendSettings& backendSettings,
                                   Optional<std::vector<std::string>&> errMessages)
{
    std::stringstream failureMsg;
    failureMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
               << " is not supported on any preferred backend " << backendSettings.m_PreferredBackends;
    ReportError(failureMsg.str(), errMessages);

    res.m_Error = true;
    return res;
}

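// Returns false if any QAsymmU8 output of the given layer still has quantization
// scale 0. As a side effect, Softmax outputs are coerced to the mandatory
// scale = 1/256, offset = 0 (e.g. a Softmax output carrying scale 0.05 and offset 2
// is rewritten in place and a warning is logged).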
bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
{
    bool noErrors = true;
    unsigned int numOutputs = layer->GetNumOutputSlots();
    for (unsigned int i = 0; i < numOutputs; i++) {
        OutputSlot& outputSlot = layer->GetOutputSlot(i);
        TensorInfo info = outputSlot.GetTensorInfo();
        if (DataType::QAsymmU8 == info.GetDataType()) {
            if (0.f == info.GetQuantizationScale()) {
                noErrors = false;
                std::stringstream ss;
                ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
                   << " (" << layer->GetNameStr() << ") is of type"
                   << " Quantized 8 bit but its scale parameter has not been set";
                ReportError(ss.str(), errMessages);
            }
            // Softmax under QuantisedAsymm8 must always be scale (1.0f/256.0f) and offset 0
            if ((info.GetQuantizationScale() != (1.0f / 256.0f) ||
                 info.GetQuantizationOffset() != 0) &&
                 layer->GetType() == armnn::LayerType::Softmax)
            {
                std::stringstream ss;
                ss << "Quantization parameters for Softmax layer (Scale: " <<
                    info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
                    ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
                ARMNN_LOG(warning) << ss.str();
                info.SetQuantizationScale((1.0f / 256.0f));
                info.SetQuantizationOffset(0);
                outputSlot.SetTensorInfo(info);
            }
        }
    }
    return noErrors;
}

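// Tries to assign 'backend' to 'layer'. The returned OptimizationResult is tri-state:
// IsOk() means the assignment succeeded (possibly after inserting FP16<->FP32
// conversion layers around an FP16-unaware layer), IsWarningOnly() means the caller
// should try the next preferred backend, and IsError() means optimization must stop.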
OptimizationResult AttemptBackendAssignment(BackendSettings& backendSettings,
                                            Graph& graph,
                                            Layer* layer,
                                            BackendId backend,
                                            DataType dataTypeIn,
                                            DataType dataTypeOut,
                                            const std::vector<BackendId>& availablePreferredBackends,
                                            std::string& reasonIfUnsupported,
                                            Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose a meaningful error message before returning with error
    auto ReturnError = [&](const Layer* layer)
    {
        return ReturnWithError(result, layer, backendSettings, errMessages);
    };

    // The compute device must be set on the layer
    // before we can check whether it is supported
    layer->SetBackendId(backend);
    if (!IWorkloadFactory::IsLayerSupported(*layer, EmptyOptional(), reasonIfUnsupported))
    {
        if (dataTypeIn == DataType::Float16 || dataTypeOut == DataType::Float16)
        {
            if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                && layer->GetType() != LayerType::ConvertFp32ToFp16
                && layer->GetType() != LayerType::ConvertFp16ToFp32)
            {
                // Insert FP16 -> FP32 conversion layer before current layer
                std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers;
                if (dataTypeIn == DataType::Float16)
                {
                    convertFp16ToFp32Layers =
                        InsertConvertFp16ToFp32LayersBefore(graph, *layer);
                }

                // Insert FP32 -> FP16 conversion layer after current layer
                std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers;
                if (dataTypeOut == DataType::Float16)
                {
                    convertFp32ToFp16Layers =
                        InsertConvertFp32ToFp16LayersAfter(graph, *layer);
                }

                // Assign a supported backend to the newly introduced conversion layers
                auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                {
                    bool supportedBackendFound = false;
                    std::string reasonIfUnsupported;

                    // Try preferred backend first
                    layer->SetBackendId(preferredBackend);
                    if (IWorkloadFactory::IsLayerSupported(*layer,
                                                           EmptyOptional(),
                                                           reasonIfUnsupported))
                    {
                        supportedBackendFound = true;
                    }
                    else
                    {
                        for (const auto& backend : availablePreferredBackends)
                        {
                            // Skip preferred backend (we already determined that it is not supported)
                            if (backend == preferredBackend)
                            {
                                continue;
                            }

                            layer->SetBackendId(backend);
                            if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                   EmptyOptional(),
                                                                   reasonIfUnsupported))
                            {
                                supportedBackendFound = true;
                                break;
                            }
                        }
                    }

                    return supportedBackendFound;
                };

                for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                return result;
            }
        }
        std::stringstream warningMsg;
        warningMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                   << " is not supported on requested backend " << layer->GetBackendId().Get()
                   << " for input data type " << GetDataTypeName(dataTypeIn)
                   << " and output data type " << GetDataTypeName(dataTypeOut)
                   << " (reason: " << reasonIfUnsupported
                   << "), falling back to the next backend.";
        ReportWarning(warningMsg.str(), errMessages);

        return OptimizationResult(true, false);
    }
    else
    {
        return result;
    }
}

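// Walks the layers in [firstLayer, lastLayer) and assigns a backend to each one: the
// layer's backend hint is tried first, then every available preferred backend in
// order. Layers with no accelerated implementation (MemCopy, Constant, Permute) fall
// back to CpuRef even when it is not in the preferred list.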
OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  Graph::Iterator& firstLayer,
                                  Graph::Iterator& lastLayer,
                                  Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose a meaningful error message before returning with error
    auto ReturnError = [&](const Layer* layer)
    {
        return ReturnWithError(result, layer, backendSettings, errMessages);
    };

    auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
    if (availablePreferredBackends.empty())
    {
        std::stringstream failureMsg;
        failureMsg << "No preferred backends are available";
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    }

    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        auto layer = *it;

        DataType dataTypeIn = layer->GetNumInputSlots() == 0 ? DataType::Float32 :
            layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType();
        DataType dataTypeOut = layer->GetNumOutputSlots() == 0 ? DataType::Float32 :
            layer->GetOutputSlot(0).GetTensorInfo().GetDataType();

        std::string reasonIfUnsupported;
        bool found = false;
        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
        {
            // Don't bomb immediately, find all the quantized outputs
            // which haven't had a scale set and report them all back.
            result.m_Error = true;
        }

        // First try to assign the layer to the hinted backend
        if (layer->GetBackendHint().has_value() &&
            backendSettings.IsBackendSupported(layer->GetBackendHint().value()) &&
            AttemptBackendAssignment(backendSettings,
                                     optNetObjPtr->GetGraph(),
                                     layer,
                                     layer->GetBackendHint().value(),
                                     dataTypeIn,
                                     dataTypeOut,
                                     availablePreferredBackends,
                                     reasonIfUnsupported,
                                     errMessages).IsOk())
        {
            found = true;
            backendSettings.m_SelectedBackends.insert(layer->GetBackendHint().value());
        }
        else
        {
            // Try to assign the layer to the preferred list of backends
            for (const auto& backend : availablePreferredBackends)
            {
                if (layer->GetBackendHint().has_value() &&
                    layer->GetBackendHint().value() == backend)
                {
                    continue; // Don't re-test the backend hint
                }

                OptimizationResult res = AttemptBackendAssignment(backendSettings,
                                                                  optNetObjPtr->GetGraph(),
                                                                  layer,
                                                                  backend,
                                                                  dataTypeIn,
                                                                  dataTypeOut,
                                                                  availablePreferredBackends,
                                                                  reasonIfUnsupported,
                                                                  errMessages);

                if (res.IsOk())
                {
                    found = true;
                    backendSettings.m_SelectedBackends.insert(backend);
                    break;
                }
                else if (res.IsError())
                {
                    return res; // Cannot continue.
                                // Note: we don't need to log the error as it would already
                                // be logged in AttemptBackendAssignment().
                }
                else
                {
                    BOOST_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
                }
            }
        }

        // If the layer is unsupported by any devices, log and return a null network.
        if (!found)
        {
            // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
            // fallback we should set the compute device on the layer to CpuRef (these are not
            // available as accelerated operations, or are only available under certain
            // conditions, currently they comprise MemCopy, Constant, Permute)
            armnn::LayerType layerType = layer->GetType();
            if (!backendSettings.IsCpuRefUsed() && (layerType == armnn::LayerType::MemCopy ||
                                                    layerType == armnn::LayerType::Constant ||
                                                    layerType == armnn::LayerType::Permute))
            {
                BackendId cpuBackendId(armnn::Compute::CpuRef);
                layer->SetBackendId(cpuBackendId);
                backendSettings.m_SelectedBackends.insert(cpuBackendId);
            }
            else
            {
                return ReturnError(layer);
            }
        }
    }

    return result;
}

OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  SubgraphView& subgraph,
                                  Optional<std::vector<std::string>&> errMessages)
{
    Graph::Iterator firstLayer = subgraph.begin();
    Graph::Iterator lastLayer  = subgraph.end();
    return AssignBackends(optNetObjPtr,
                          backendSettings,
                          firstLayer,
                          lastLayer,
                          errMessages);
}

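// Instantiates every supported backend through its registered factory and lets each
// backend register its tensor handle factories. The returned map owns the backend
// objects for the rest of the optimization.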
BackendsMap CreateSupportedBackends(TensorHandleFactoryRegistry& handleFactoryRegistry,
                                    BackendSettings& backendSettings)
{
    BackendsMap backends;
    auto const& backendRegistry = BackendRegistryInstance();
    for (auto&& selectedBackend : backendSettings.m_SupportedBackends)
    {
        auto backendFactory = backendRegistry.GetFactory(selectedBackend);
        auto backendObjPtr  = backendFactory();
        BOOST_ASSERT(backendObjPtr);

        backendObjPtr->RegisterTensorHandleFactories(handleFactoryRegistry);

        backends[backendObjPtr->GetId()] = std::move(backendObjPtr);
    }

    return backends;
}

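// For each selected backend: selects the sub-graphs assigned to it, asks the backend
// to optimize them, substitutes the optimized sub-graphs back into the main graph, and
// re-assigns the layers of any failed sub-graph to the remaining backends.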
OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
                                             BackendSettings& backendSettings,
                                             BackendsMap& backends,
                                             Optional<std::vector<std::string>&> errMessages)
{
    BOOST_ASSERT(optNetObjPtr);

    OptimizationResult result;

    // Get the optimized graph
    Graph& optGraph = optNetObjPtr->GetGraph();

    // Run backend specific optimizations
    for (auto&& selectedBackend : backendSettings.m_SelectedBackends)
    {
        auto backendObjPtr = backends.find(selectedBackend)->second.get();
        BOOST_ASSERT(backendObjPtr);

        // Select sub-graphs based on backend
        SubgraphViewSelector::Subgraphs subgraphs =
            SubgraphViewSelector::SelectSubgraphs(optGraph,
                                                  // Select layers assigned to the requested backend
                                                  [&backendObjPtr](const Layer& layer)
                                                  {
                                                      return layer.GetType() != LayerType::Input &&
                                                             layer.GetType() != LayerType::Output &&
                                                             layer.GetBackendId() == backendObjPtr->GetId();
                                                  });
        if (subgraphs.empty())
        {
            // No sub-graphs found, try with next selected backend
            continue;
        }

        // Try to optimize each sub-graph
        for (auto& subgraph : subgraphs)
        {
            // Try to optimize the current sub-graph
            OptimizationViews optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraph);
            BOOST_ASSERT(optimizationViews.Validate(*subgraph));

            // Optimization attempted, check the resulting optimized sub-graph
            for (auto& substitution : optimizationViews.GetSubstitutions())
            {
                // Sub-graph optimized, substitute the sub-graph with the new optimized one in the main graph
                SubgraphView& replacementSubgraph   = substitution.m_ReplacementSubgraph;
                SubgraphView& substitutableSubgraph = substitution.m_SubstitutableSubgraph;
                optGraph.SubstituteSubgraph(substitutableSubgraph, replacementSubgraph);

                // Assign the current backend to the optimized sub-graph
                std::for_each(replacementSubgraph.begin(), replacementSubgraph.end(), [&selectedBackend](Layer* l)
                    {
                        BOOST_ASSERT(l);
                        l->SetBackendId(selectedBackend);
                    });
            }

            if (!optimizationViews.GetFailedSubgraphs().empty())
            {
                std::stringstream warningMsg;
                warningMsg << "Some sub-graph(s) failed to optimize on " << backendObjPtr->GetId() << " backend.";
                ReportWarning(warningMsg.str(), errMessages);

                // Failed to optimize the given sub-graph, re-assign the sub-graph layers to other available backends
                BackendSettings settingsCopy(backendSettings);
                if (!backendObjPtr->GetId().IsCpuRef())
                {
                    // Add the current backend to the list of backends to ignore
                    settingsCopy.m_IgnoredBackends.insert(backendObjPtr->GetId());
                }

                int count = 0;
                for (auto& failedSubgraph : optimizationViews.GetFailedSubgraphs())
                {
                    // An error occurred: the optimization was attempted but not performed, try different backends
                    std::stringstream subgraphMsg;
                    subgraphMsg << "Re-assigning backends to " << failedSubgraph.GetLayers().size()
                                << " layers inside sub-graph " << count++;
                    ReportWarning(subgraphMsg.str(), errMessages);

                    OptimizationResult reassignmentResult = AssignBackends(optNetObjPtr,
                                                                           settingsCopy,
                                                                           *subgraph,
                                                                           errMessages);
                    if (reassignmentResult.m_Error)
                    {
                        // Failed to re-assign one of the remaining backends to each layer of the sub-graph
                        result.m_Error = true;
                        return result;
                    }
                }
            }
        }
    }

    return result;
}

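// A copy between two different tensor handle factories is avoided only when the source
// factory can export memory that the destination factory can import, i.e. when
// (srcFactory->GetExportFlags() & dstFactory->GetImportFlags()) != 0. For example
// (sketch), two factories that both advertise MemorySource::Malloc share buffers
// directly.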
bool RequiresCopy(ITensorHandleFactory::FactoryId src,
                  ITensorHandleFactory::FactoryId dst,
                  TensorHandleFactoryRegistry& registry)
{
    if (src != dst)
    {
        ITensorHandleFactory* srcFactory = registry.GetFactory(src);
        ITensorHandleFactory* dstFactory = registry.GetFactory(dst);

        if (srcFactory && dstFactory &&
            (srcFactory->GetExportFlags() & dstFactory->GetImportFlags()) != 0)
        {
            return false;
        }
        return true;
    }
    return false;
}

// Find the handle factory for the input layer which results in the fewest required copies.
ITensorHandleFactory::FactoryId CalculateSlotOptionForInput(BackendsMap& backends,
                                                            OutputSlot& slot,
                                                            TensorHandleFactoryRegistry& registry)
{
    Layer& layer = slot.GetOwningLayer();
    BOOST_ASSERT(layer.GetType() == LayerType::Input);

    // Explicitly select the tensorhandle factory for InputLayer because the rules for it are slightly different.
    // It doesn't matter which backend it is assigned to because they all use the same implementation, which
    // requires Map/Unmap support. This means that, so long as the handle type supports map/unmap semantics, we
    // can select a factory with maximum compatibility with the layers connected to the InputLayer.

    // First ensure the source backend supports the TensorHandle API
    auto frmBackend = backends.find(layer.GetBackendId());
    if (frmBackend == backends.end() ||
        !frmBackend->second->SupportsTensorAllocatorAPI())
    {
        return ITensorHandleFactory::LegacyFactoryId;
    }

    // Go through all connections to the output slot and determine the TensorHandleFactory which results in the
    // fewest copies.
    std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
    int topScore = 0;
    ITensorHandleFactory::FactoryId topChoice = ITensorHandleFactory::LegacyFactoryId;

    for (auto&& connection : slot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();

        auto toBackend = backends.find(connectedLayer.GetBackendId());
        BOOST_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

        if (!toBackend->second.get()->SupportsTensorAllocatorAPI())
        {
            // The destination backend does not support the tensor allocator API, move to the next one
            continue;
        }

        auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
        for (auto&& dst : dstPrefs)
        {
            // Input layers use the mem copy workload or import, so the selected factory must
            // support either the map/unmap API or the import API
            ITensorHandleFactory* factory = registry.GetFactory(dst);
            if (!factory->SupportsMapUnmap() &&
                !CheckFlag(factory->GetImportFlags(), MemorySource::Malloc)) // Just support cpu mem imports for now
            {
                // The current tensor handle factory does not support the map/unmap or import
                // strategy, move to the next one
                continue;
            }

            auto it = factoryScores.find(dst);
            if (it == factoryScores.end())
            {
                // Add new score to the table
                factoryScores[dst] = 0;
                if (topChoice == ITensorHandleFactory::LegacyFactoryId)
                {
                    topChoice = dst;
                }
            }
            else
            {
                // Increase the score
                factoryScores[dst]++;

                // Track the best option
                if (factoryScores[dst] > topScore)
                {
                    topScore = factoryScores[dst];
                    topChoice = dst;
                }
            }
        }
    }

    return topChoice;
}

// Find the handle factory for the output layer which results in the fewest required copies.
ITensorHandleFactory::FactoryId CalculateSlotOptionForOutput(BackendsMap& backends,
                                                             OutputSlot& slot,
                                                             TensorHandleFactoryRegistry& registry)
{
    boost::ignore_unused(backends, slot, registry);
    return ITensorHandleFactory::DeferredFactoryId;
}

// For all handle factories supported on the source backend, we wish to find the one which requires the fewest copies
// when considering all connections.
ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
                                                    OutputSlot& outputSlot,
                                                    TensorHandleFactoryRegistry& registry)
{
    // First ensure the source backend supports the TensorHandle API
    Layer& layer = outputSlot.GetOwningLayer();
    auto frmBackend = backends.find(layer.GetBackendId());
    if (frmBackend == backends.end() ||
        !frmBackend->second->SupportsTensorAllocatorAPI())
    {
        return ITensorHandleFactory::LegacyFactoryId;
    }

    // Connections to Output Layers require support for map/unmap on the TensorHandle.
    bool requiresMapUnmap = false;
    for (auto&& connection : outputSlot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();
        if (connectedLayer.GetType() == LayerType::Output)
        {
            requiresMapUnmap = true;
        }
    }

    IBackendInternal* srcBackend = frmBackend->second.get();
    auto srcPrefs = srcBackend->GetHandleFactoryPreferences();

    // Initialize the scores
    std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
    for (auto&& pref : srcPrefs)
    {
        if (requiresMapUnmap) // Only consider factories that support map/unmap if required
        {
            ITensorHandleFactory* factory = registry.GetFactory(pref);
            if (!factory->SupportsMapUnmap())
            {
                // The current tensor handle factory does not support the map/unmap strategy, move to the next one
                continue;
            }
        }

        auto it = factoryScores.find(pref);
        if (it == factoryScores.end())
        {
            // Add new score to the table
            factoryScores[pref] = 0;
        }
    }

    // Score each handle factory based on how many times it requires copies on the slot connections
    for (auto&& connection : outputSlot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();

        auto toBackend = backends.find(connectedLayer.GetBackendId());
        BOOST_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

        auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
        for (auto&& src : srcPrefs)
        {
            if (factoryScores.find(src) == factoryScores.end()) // Don't consider excluded factories
            {
                continue;
            }

            for (auto&& dst : dstPrefs)
            {
                if (RequiresCopy(src, dst, registry))
                {
                    // A copy is required for this connection, penalise the source factory's score
                    factoryScores[src]++;
                    break;
                }
            }
        }
    }

    // Find the lowest score
    int minScore = std::numeric_limits<int>::max();
    for (auto it : factoryScores)
    {
        minScore = std::min(minScore, it.second);
    }

    // Collect factories matching the best (lowest) score
    std::vector<ITensorHandleFactory::FactoryId> optimalFactories;
    for (auto it : factoryScores)
    {
        if (it.second == minScore)
        {
            optimalFactories.push_back(it.first);
        }
    }

    // Of all the compatible factories matching the best score, return the one the source backend prefers most.
    for (auto&& srcPref : srcPrefs)
    {
        for (auto&& comp : optimalFactories)
        {
            if (comp == srcPref)
            {
                return comp;
            }
        }
    }

    return ITensorHandleFactory::LegacyFactoryId;
}

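// Decides how a tensor travels along one edge. The checks run in order: legacy
// factories copy across backend boundaries; edges into Output layers are
// DirectCompatibility; a factory shared by both ends is DirectCompatibility; source
// export flags intersecting a destination factory's import flags give ExportToTarget;
// map/unmap support on both sides gives CopyToTarget; otherwise Undefined.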
EdgeStrategy CalculateEdgeStrategy(BackendsMap& backends,
                                   ITensorHandleFactory::FactoryId srcFactoryId,
                                   const Layer& layer,
                                   const Layer& connectedLayer,
                                   TensorHandleFactoryRegistry& registry)
{
    auto toBackend = backends.find(connectedLayer.GetBackendId());
    BOOST_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

    auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();

    // Legacy API check for backward compatibility
    if (srcFactoryId == ITensorHandleFactory::LegacyFactoryId || dstPrefs.empty())
    {
        if (layer.GetBackendId() != connectedLayer.GetBackendId())
        {
            return EdgeStrategy::CopyToTarget;
        }
        else
        {
            return EdgeStrategy::DirectCompatibility;
        }
    }

    // TensorHandleFactory API present, so perform more sophisticated strategies.
    // Dst Output layers don't require copy because they use import or map/unmap
    if (connectedLayer.GetType() == LayerType::Output)
    {
        return EdgeStrategy::DirectCompatibility;
    }

    // Search for direct match in prefs
    for (auto&& pref : dstPrefs)
    {
        if (pref == srcFactoryId)
        {
            return EdgeStrategy::DirectCompatibility;
        }
    }

    // Search for export/import options
    ITensorHandleFactory* srcFactory = registry.GetFactory(srcFactoryId);
    if (srcFactory->GetExportFlags() != 0)
    {
        for (auto&& pref : dstPrefs)
        {
            ITensorHandleFactory* dstFactory = registry.GetFactory(pref);

            // Handles cases when a destPref is not listed in TensorHandleFactoryRegistry
            if (!dstFactory)
            {
                continue;
            }

            if ((dstFactory->GetImportFlags() & srcFactory->GetExportFlags()) != 0)
            {
                return EdgeStrategy::ExportToTarget;
            }
        }
    }

    // Search for copy options via map/unmap
    if (srcFactory->SupportsMapUnmap())
    {
        for (auto&& pref : dstPrefs)
        {
            ITensorHandleFactory* dstFactory = registry.GetFactory(pref);
            if (dstFactory && dstFactory->SupportsMapUnmap())
            {
                return EdgeStrategy::CopyToTarget;
            }
        }
    }

    return EdgeStrategy::Undefined;
}

// Select the TensorHandleFactories and the corresponding memory strategy
OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
                                              BackendsMap& backends,
                                              TensorHandleFactoryRegistry& registry,
                                              Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages](Layer* layer)
    {
        BOOST_ASSERT(layer);

        // Let's make sure the backend is in our list of supported backends. Something went wrong during backend
        // assignment if this check fails
        BOOST_ASSERT(backends.find(layer->GetBackendId()) != backends.end());

        // Check each output separately
        for (unsigned int slotIdx = 0; slotIdx < layer->GetNumOutputSlots(); slotIdx++)
        {
            OutputSlot& outputSlot = layer->GetOutputSlot(slotIdx);

            ITensorHandleFactory::FactoryId slotOption = ITensorHandleFactory::LegacyFactoryId;

            // Calculate the factory to use which results in the fewest copies being made.
            switch (layer->GetType())
            {
                case LayerType::Input:
                    slotOption = CalculateSlotOptionForInput(backends, outputSlot, registry);
                    break;
                case LayerType::Output:
                    slotOption = CalculateSlotOptionForOutput(backends, outputSlot, registry);
                    break;
                default:
                    slotOption = CalculateSlotOption(backends, outputSlot, registry);
                    break;
            }
            outputSlot.SetTensorHandleFactory(slotOption);

            // Now determine the "best" edge strategy for each connection given the slotOption.
            unsigned int connectionIdx = 0;
            for (auto&& connection : outputSlot.GetConnections())
            {
                const Layer& connectedLayer = connection->GetOwningLayer();

                EdgeStrategy strategy = CalculateEdgeStrategy(backends, slotOption, *layer, connectedLayer, registry);

                if (strategy == EdgeStrategy::Undefined)
                {
                    result.m_Error = true;
                    if (errMessages)
                    {
                        errMessages.value().emplace_back("Could not find valid strategy required for compatibility"
                                                         " between backends.");
                    }
                    return;
                }

                outputSlot.SetEdgeStrategy(connectionIdx, strategy);

                connectionIdx++;
            }
        }
    });

    return result;
}

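// Typical call site (illustrative sketch; 'net' is an INetworkPtr built above and
// 'runtime' is assumed to be an IRuntime created elsewhere):
//     std::vector<armnn::BackendId> prefs = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
//     armnn::IOptimizedNetworkPtr optNet =
//         armnn::Optimize(*net, prefs, runtime->GetDeviceSpec(), armnn::OptimizerOptions());
// A null IOptimizedNetworkPtr is returned when backend assignment or any optimization
// stage fails; an empty preference list throws InvalidArgumentException.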
IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                              const std::vector<BackendId>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options,
                              Optional<std::vector<std::string>&> messages)
{
    if (backendPreferences.empty())
    {
        throw armnn::InvalidArgumentException("Invoked Optimize with no backends specified");
    }

    const Network& network = *boost::polymorphic_downcast<const Network*>(&inNetwork);
    std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());

    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);

    OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get());

    // Get the optimized graph
    Graph& optGraph = optNetObjPtr->GetGraph();

    // Perform optimisation passes
    using namespace optimizations;
    Optimizer::Pass(optGraph, MakeOptimizations(SquashEqualPermuteSiblings(),
                                                SquashEqualReshapeSiblings(),
                                                OptimizeInversePermutes(),
                                                MovePermuteUp(),
                                                PermuteAsReshape(),
                                                OptimizeConsecutiveReshapes(),
                                                FoldPadIntoConvolution2d(),
                                                PermuteAndBatchToSpaceAsDepthToSpace()));

    // Infer the tensor infos for all output slots. Throws an exception on failure
    optGraph.InferTensorInfos();

    // If the Fp32-to-Fp16 optimization is set, convert the Fp32 network to Fp16
    if (options.m_ReduceFp32ToFp16)
    {
        Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToFp16Converter()));
        Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
    }

    // Initialize backend settings
    BackendSettings backendSettings(backendPreferences, deviceSpec);
    if (backendSettings.GetAvailablePreferredBackends().empty())
    {
        std::stringstream failureMsg;
        failureMsg << "None of the preferred backends " << backendPreferences
                   << " are supported. Current platform provides " << backendSettings.m_SupportedBackends;
        ReportError(failureMsg.str(), messages);
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // Create a map to temporarily hold initialized backend objects
    TensorHandleFactoryRegistry tensorHandleFactoryRegistry;
    BackendsMap backends = CreateSupportedBackends(tensorHandleFactoryRegistry, backendSettings);

    // Assign an available backend to each layer
    Graph::Iterator firstLayer = optGraph.begin();
    Graph::Iterator lastLayer  = optGraph.end();
    OptimizationResult assignBackendsResult = AssignBackends(optNetObjPtr,
                                                             backendSettings,
                                                             firstLayer,
                                                             lastLayer,
                                                             messages);
    if (assignBackendsResult.m_Error)
    {
        // Failed to assign a backend to each layer
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    Optimizer::Pass(optGraph, MakeOptimizations(OptimizeInverseConversionsFp16(),
                                                OptimizeInverseConversionsFp32()));

    // Apply the backend-specific optimizations
    OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(optNetObjPtr,
                                                                             backendSettings,
                                                                             backends,
                                                                             messages);
    if (backendOptimizationResult.m_Error)
    {
        // Failed to apply the backend-specific optimizations
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // If the debug flag is set, then insert a DebugLayer after each layer
    // Doing this after applying the backend optimizations as they might have changed some layers
    if (options.m_Debug)
    {
        Optimizer::Pass(optGraph, MakeOptimizations(InsertDebugLayer()));
    }

    // Calculate the compatibility strategies for tensor handles
    OptimizationResult strategyResult = SelectTensorHandleStrategy(optGraph,
                                                                   backends,
                                                                   tensorHandleFactoryRegistry,
                                                                   messages);
    if (strategyResult.m_Error)
    {
        // Failed to calculate the tensor handle strategies
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // Based on the tensor handle strategy determined above, insert copy layers where required.
    optGraph.AddCompatibilityLayers(backends, tensorHandleFactoryRegistry);

    // Convert constants
    Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
    Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsHalfToFloat()));

    // Run backend specific optimizations (deprecated)
    for (auto&& chosenBackend : backendSettings.m_SelectedBackends)
    {
        auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
        auto backendPtr = factoryFun();
        BOOST_ASSERT(backendPtr.get() != nullptr);

        ARMNN_NO_DEPRECATE_WARN_BEGIN
        auto backendSpecificOptimizations = backendPtr->GetOptimizations();
        ARMNN_NO_DEPRECATE_WARN_END

        if (!backendSpecificOptimizations.empty())
        {
            Optimizer::Pass(optNetObjPtr->GetGraph(), backendSpecificOptimizations);
        }
    }

    return optNet;
}

Network::Network()
: m_Graph(std::make_unique<Graph>()),
  m_Guid(profiling::ProfilingService::Instance().NextGuid())
{
}

Network::~Network()
{
}

Status Network::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<InputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
}

IConnectableLayer* Network::AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
                                               const char* name)
{
    return m_Graph->AddLayer<ComparisonLayer>(comparisonDescriptor, name);
}

IConnectableLayer* Network::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
                                                     const char* name)
{
    return m_Graph->AddLayer<ElementwiseUnaryLayer>(elementwiseUnaryDescriptor, name);
}

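// Shared implementation behind the AddFullyConnectedLayer overloads below: validates
// that a bias tensor is supplied whenever the descriptor enables one, then stores the
// weights (and optional bias) on the layer as ScopedCpuTensorHandles.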
IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                       const ConstTensor& weights,
                                                       const Optional<ConstTensor>& biases,
                                                       const char* name)
{
    if (fullyConnectedDescriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (fullyConnectedDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const Optional<ConstTensor>& biases,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const char* name)
{
    Optional<ConstTensor> biases;
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const ConstTensor& biases,
                                                   const char* name)
{
    Optional<ConstTensor> optionalBiases(biases);
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, optionalBiases, name);
}

Jim Flynne242f2d2019-05-22 14:24:13 +01001105IConnectableLayer* Network::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
Jim Flynn906f9462019-05-10 13:55:21 +01001106 const char* name)
1107{
Jim Flynne242f2d2019-05-22 14:24:13 +01001108 return m_Graph->AddLayer<ConcatLayer>(concatDescriptor, name);
Jim Flynn906f9462019-05-10 13:55:21 +01001109}
1110
telsoa014fcda012018-03-09 14:13:49 +00001111IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001112 const ConstTensor& weights,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001113 const Optional<ConstTensor>& biases,
telsoa01c577f2c2018-08-31 09:22:23 +01001114 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001115{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001116 if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
telsoa014fcda012018-03-09 14:13:49 +00001117 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001118 throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be empty");
telsoa014fcda012018-03-09 14:13:49 +00001119 }
1120
1121 const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);
1122
1123 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
1124
1125 if (convolution2dDescriptor.m_BiasEnabled)
1126 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001127 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
telsoa014fcda012018-03-09 14:13:49 +00001128 }
1129
1130 return layer;
1131}
1132
1133IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001134 const ConstTensor& weights,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001135 const Optional<ConstTensor>& biases,
telsoa01c577f2c2018-08-31 09:22:23 +01001136 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001137{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001138 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
telsoa014fcda012018-03-09 14:13:49 +00001139}
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001140
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001141IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
1142 const ConstTensor& weights,
1143 const char* name)
1144{
Matteo Martincighfc598e12019-05-14 10:36:13 +01001145 Optional<ConstTensor> biases;
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001146 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
1147}
1148
telsoa014fcda012018-03-09 14:13:49 +00001149IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001150 const ConstTensor& weights,
1151 const ConstTensor& biases,
1152 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001153{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001154 Optional<ConstTensor> optionalBiases(biases);
1155 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
telsoa014fcda012018-03-09 14:13:49 +00001156}
1157
1158IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
1159 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
1160 const ConstTensor& weights,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001161 const Optional<ConstTensor>& biases,
telsoa014fcda012018-03-09 14:13:49 +00001162 const char* name)
1163{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001164 if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
telsoa014fcda012018-03-09 14:13:49 +00001165 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001166 throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be empty");
telsoa014fcda012018-03-09 14:13:49 +00001167 }
1168
Matteo Martincigh3d6898c2019-01-15 16:11:44 +00001169 const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);
telsoa014fcda012018-03-09 14:13:49 +00001170
1171 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
1172
1173 if (convolution2dDescriptor.m_BiasEnabled)
1174 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001175 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
telsoa014fcda012018-03-09 14:13:49 +00001176 }
1177
1178 return layer;
1179}
1180
Aron Virginas-Tardd6247f2019-09-19 14:31:17 +01001181IConnectableLayer* Network::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
1182 const char* name)
1183{
1184 return m_Graph->AddLayer<DepthToSpaceLayer>(depthToSpaceDescriptor, name);
1185}
1186
telsoa014fcda012018-03-09 14:13:49 +00001187IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001188 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
1189 const ConstTensor& weights,
1190 const Optional<ConstTensor>& biases,
1191 const char* name)
1192{
1193 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
1194}
1195
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001196IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
telsoa014fcda012018-03-09 14:13:49 +00001197 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
1198 const ConstTensor& weights,
1199 const char* name)
1200{
Matteo Martincighfc598e12019-05-14 10:36:13 +01001201 Optional<ConstTensor> biases;
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001202 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
telsoa014fcda012018-03-09 14:13:49 +00001203}
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001204
telsoa014fcda012018-03-09 14:13:49 +00001205IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
1206 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
1207 const ConstTensor& weights,
1208 const ConstTensor& biases,
1209 const char* name)
1210{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001211 Optional<ConstTensor> optionalBiases(biases);
1212 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
telsoa014fcda012018-03-09 14:13:49 +00001213}
1214
IConnectableLayer* Network::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
                                                         const ConstTensor& anchors, const char* name)
{
    const auto layer = m_Graph->AddLayer<DetectionPostProcessLayer>(descriptor, name);

    layer->m_Anchors = std::make_unique<ScopedCpuTensorHandle>(anchors);

    return layer;
}

IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
}

IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
}

IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
                                               const char* name)
{
    return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
}

IConnectableLayer* Network::AddArgMinMaxLayer(const ArgMinMaxDescriptor& argMinMaxDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<ArgMinMaxLayer>(argMinMaxDescriptor, name);
}

IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
                                                  const char* name)
{
    return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
}

IConnectableLayer* Network::AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name)
{
    return m_Graph->AddLayer<SliceLayer>(sliceDescriptor, name);
}

IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
}

IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
                                             const char* name)
{
    return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
}

IConnectableLayer* Network::AddMaximumLayer(const char* name)
{
    return m_Graph->AddLayer<MaximumLayer>(name);
}

IConnectableLayer* Network::AddMinimumLayer(const char* name)
{
    return m_Graph->AddLayer<MinimumLayer>(name);
}

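// Merger is the historical name for concatenation; this wrapper simply
// forwards to AddConcatLayer so older callers keep working.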
IConnectableLayer* Network::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
                                           const char* name)
{
    return AddConcatLayer(mergerDescriptor, name);
}

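// Abs is expressed through the generic ElementwiseUnaryLayer with
// UnaryOperation::Abs rather than a dedicated layer type; Rsqrt further down
// follows the same pattern.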
IConnectableLayer* Network::AddAbsLayer(const char* name)
{
    return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
}

IConnectableLayer* Network::AddAdditionLayer(const char* name)
{
    return m_Graph->AddLayer<AdditionLayer>(name);
}

IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
{
    return m_Graph->AddLayer<MultiplicationLayer>(name);
}

IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<OutputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                       const ConstTensor& mean,
                                                       const ConstTensor& variance,
                                                       const ConstTensor& beta,
                                                       const ConstTensor& gamma,
                                                       const char* name)
{
    const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);

    layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
    layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
    layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
    layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);

    return layer;
}

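// Bilinear resize is routed through the generic ResizeLayer: the legacy
// ResizeBilinearDescriptor is translated field by field into a ResizeDescriptor
// with ResizeMethod::Bilinear, so both entry points create the same layer type.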
IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
                                                   const char* name)
{
    ResizeDescriptor resizeDescriptor;
    resizeDescriptor.m_Method = ResizeMethod::Bilinear;
    resizeDescriptor.m_DataLayout = descriptor.m_DataLayout;
    resizeDescriptor.m_TargetWidth = descriptor.m_TargetWidth;
    resizeDescriptor.m_TargetHeight = descriptor.m_TargetHeight;

    return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
}

IConnectableLayer* Network::AddResizeLayer(const ResizeDescriptor& resizeDescriptor, const char* name)
{
    return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
}

IConnectableLayer* Network::AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
                                                          const char* name)
{
    return m_Graph->AddLayer<InstanceNormalizationLayer>(desc, name);
}

IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                    const char* name)
{
    return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
}

IConnectableLayer* Network::AddLogSoftmaxLayer(const LogSoftmaxDescriptor& desc,
                                               const char* name)
{
    return m_Graph->AddLayer<LogSoftmaxLayer>(desc, name);
}

IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
{
    auto layer = m_Graph->AddLayer<ConstantLayer>(name);

    layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);

    return layer;
}

IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
}

IConnectableLayer* Network::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
}

IConnectableLayer* Network::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<SpaceToDepthLayer>(spaceToDepthDescriptor, name);
}

IConnectableLayer* Network::AddFloorLayer(const char* name)
{
    return m_Graph->AddLayer<FloorLayer>(name);
}

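// Adds an LSTM layer and stores owned copies of every weight/bias tensor the
// caller provides. Which members of LstmInputParams must be non-null depends on
// the descriptor flags, as validated below:
//   - m_CifgEnabled == false : input-to-input / recurrent-to-input weights and
//     the input gate bias are required (cell-to-input weights are optional).
//   - m_ProjectionEnabled    : projection weights required, bias optional.
//   - m_PeepholeEnabled      : cell-to-forget and cell-to-output weights required.
//   - m_LayerNormEnabled     : forget/cell/output layer-norm weights required,
//     plus input layer-norm weights when CIFG is disabled.
//
// Minimal caller-side sketch (hypothetical ConstTensor objects and network
// instance; not part of this file):
//
//     LstmDescriptor desc;
//     desc.m_CifgEnabled = true;        // only the nine basic tensors needed
//     LstmInputParams params;
//     params.m_InputToForgetWeights     = &inputToForgetWeights;
//     params.m_InputToCellWeights       = &inputToCellWeights;
//     params.m_InputToOutputWeights     = &inputToOutputWeights;
//     params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
//     params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
//     params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
//     params.m_ForgetGateBias           = &forgetGateBias;
//     params.m_CellBias                 = &cellBias;
//     params.m_OutputGateBias           = &outputGateBias;
//     IConnectableLayer* lstm = network.AddLstmLayer(desc, params, "lstm");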
IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor& descriptor,
                                         const LstmInputParams& params,
                                         const char* name)
{
    const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);

    // Lstm basic parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    // Lstm CIFG parameters
    if (!descriptor.m_CifgEnabled)
    {
        if (params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL");
        }
        if (params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                "AddLstmLayer: Recurrent To Input Weights cannot be NULL");
        }
        if (params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL");
        }
        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        // In the VTS tests, cell-to-input weights may be null, even if the other CIFG params are not.
        if (params.m_CellToInputWeights != nullptr)
        {
            layer->m_CifgParameters.m_CellToInputWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    // Lstm projection parameters
    if (descriptor.m_ProjectionEnabled)
    {
        if (params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL");
        }
        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
        if (params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    // Lstm peephole parameters
    if (descriptor.m_PeepholeEnabled)
    {
        if (params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL");
        }
        if (params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL");
        }
        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }

    // Lstm layer normalization parameters
    if (descriptor.m_LayerNormEnabled)
    {
        if (!descriptor.m_CifgEnabled)
        {
            if (params.m_InputLayerNormWeights == nullptr)
            {
                throw InvalidArgumentException("AddLstmLayer: Input layer normalization weights cannot be NULL");
            }
            layer->m_LayerNormParameters.m_InputLayerNormWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
        }

        if (params.m_ForgetLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Forget layer normalization weights cannot be NULL");
        }
        if (params.m_CellLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell layer normalization weights cannot be NULL");
        }
        if (params.m_OutputLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Output layer normalization weights cannot be NULL");
        }
        layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
        layer->m_LayerNormParameters.m_CellLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
        layer->m_LayerNormParameters.m_OutputLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
    }

    return layer;
}

IConnectableLayer* Network::AddDivisionLayer(const char* name)
{
    return m_Graph->AddLayer<DivisionLayer>(name);
}

IConnectableLayer* Network::AddSubtractionLayer(const char* name)
{
    return m_Graph->AddLayer<SubtractionLayer>(name);
}

IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
{
    return m_Graph->AddLayer<MeanLayer>(meanDescriptor, name);
}

IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
{
    return m_Graph->AddLayer<PadLayer>(padDescriptor, name);
}

IConnectableLayer* Network::AddQuantizeLayer(const char* name)
{
    return m_Graph->AddLayer<QuantizeLayer>(name);
}

IConnectableLayer* Network::AddDequantizeLayer(const char* name)
{
    return m_Graph->AddLayer<DequantizeLayer>(name);
}

IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}

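// Greater and Equal are expressed through the generic ComparisonLayer with the
// corresponding ComparisonOperation, mirroring how Abs and Rsqrt map onto
// ElementwiseUnaryLayer.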
IConnectableLayer* Network::AddGreaterLayer(const char* name)
{
    return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
}

IConnectableLayer* Network::AddEqualLayer(const char* name)
{
    return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
}

IConnectableLayer* Network::AddRsqrtLayer(const char* name)
{
    return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
}

IConnectableLayer* Network::AddGatherLayer(const char* name)
{
    return m_Graph->AddLayer<GatherLayer>(name);
}

IConnectableLayer* Network::AddMergeLayer(const char* name)
{
    return m_Graph->AddLayer<MergeLayer>(name);
}

IConnectableLayer* Network::AddSwitchLayer(const char* name)
{
    return m_Graph->AddLayer<SwitchLayer>(name);
}

IConnectableLayer* Network::AddPreluLayer(const char* name)
{
    return m_Graph->AddLayer<PreluLayer>(name);
}

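// Transpose convolution owns copies of its weights and optional bias. A bias
// tensor must be provided whenever the descriptor sets m_BiasEnabled; this is
// validated up front so the graph never holds a half-configured layer.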
IConnectableLayer* Network::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
                                                           const ConstTensor& weights,
                                                           const Optional<ConstTensor>& biases,
                                                           const char* name)
{
    if (descriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddTransposeConvolution2dLayer: Biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<TransposeConvolution2dLayer>(descriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (descriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<TransposeLayer>(transposeDescriptor, name);
}

IConnectableLayer* Network::AddStackLayer(const StackDescriptor& stackDescriptor,
                                          const char* name)
{
    return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
}

IConnectableLayer* Network::AddStandInLayer(const StandInDescriptor& desc,
                                            const char* name)
{
    return m_Graph->AddLayer<StandInLayer>(desc, name);
}

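// Quantized LSTM takes the full parameter set unconditionally (no CIFG,
// projection or peephole variants): every accessor on QuantizedLstmInputParams
// is dereferenced below, so all twelve tensors must be populated by the caller.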
IConnectableLayer* Network::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
                                                  const char* name)
{
    const auto layer = m_Graph->AddLayer<QuantizedLstmLayer>(name);

    // InputToX weights
    layer->m_QuantizedLstmParameters.m_InputToInputWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputToInputWeights());
    layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputToForgetWeights());
    layer->m_QuantizedLstmParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputToCellWeights());
    layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputToOutputWeights());

    // RecurrentToX weights
    layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToInputWeights());
    layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToForgetWeights());
    layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToCellWeights());
    layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToOutputWeights());

    // Bias
    layer->m_QuantizedLstmParameters.m_InputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputGateBias());
    layer->m_QuantizedLstmParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(params.GetForgetGateBias());
    layer->m_QuantizedLstmParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(params.GetCellBias());
    layer->m_QuantizedLstmParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(params.GetOutputGateBias());

    return layer;
}

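// Accept applies a visitor to every layer in the graph; this is the mechanism
// used by, for example, network serialization. A minimal caller-side sketch,
// assuming a visitor derived from LayerVisitorBase (which stubs out the full
// ILayerVisitor interface) and counting only input layers:
//
//     class InputCounter : public LayerVisitorBase<VisitorNoThrowPolicy>
//     {
//     public:
//         void VisitInputLayer(const IConnectableLayer*, LayerBindingId,
//                              const char*) override { ++m_Count; }
//         unsigned int m_Count = 0;
//     };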
void Network::Accept(ILayerVisitor& visitor) const
{
    for (auto layer : GetGraph())
    {
        layer->Accept(visitor);
    }
}
1675
telsoa014fcda012018-03-09 14:13:49 +00001676OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
Jan Eilers99d9d4a2019-11-06 10:02:16 +00001677 : m_Graph(std::move(graph)),
1678 m_Guid(profiling::ProfilingService::Instance().NextGuid())
telsoa014fcda012018-03-09 14:13:49 +00001679{
1680}
1681
1682OptimizedNetwork::~OptimizedNetwork()
1683{
1684}
1685
1686} // namespace armnn