//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Network.hpp"
#include "Graph.hpp"
#include "Layer.hpp"
#include "DeviceSpec.hpp"
#include "Optimizer.hpp"
#include "SubgraphViewSelector.hpp"
#include "BackendSettings.hpp"
#include "optimizations/All.hpp"

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/BackendRegistry.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/TensorHandleFactoryRegistry.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>

#include <fcntl.h>
#include <algorithm>
#include <fstream>
#include <limits>
#include <memory>
#include <vector>

#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>
#include <boost/numeric/conversion/converter_policies.hpp>
#include <boost/cast.hpp>

namespace armnn
{

armnn::INetwork* INetwork::CreateRaw()
{
    return new Network();
}

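// Typical use of the INetwork factory (illustrative sketch only, not part of this file's API):
//     armnn::INetworkPtr net = armnn::INetwork::Create();
//     armnn::IConnectableLayer* input = net->AddInputLayer(0);
//     ... add and connect further layers, then pass *net to armnn::Optimize() below ...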
armnn::INetworkPtr INetwork::Create()
{
    return INetworkPtr(CreateRaw(), &INetwork::Destroy);
}

void INetwork::Destroy(INetwork* network)
{
    delete boost::polymorphic_downcast<Network*>(network);
}

Status Network::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
{
    delete boost::polymorphic_downcast<OptimizedNetwork*>(network);
}

Status OptimizedNetwork::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
{
    return m_Graph->SerializeToDot(stream);
}

void ReportError(const std::string& errorMessage,
                 Optional<std::vector<std::string>&> errorMessages)
{
    std::stringstream fullErrorMessage;
    fullErrorMessage << "ERROR: " << errorMessage;
    BOOST_LOG_TRIVIAL(warning) << fullErrorMessage.str();
    if (errorMessages)
    {
        errorMessages.value().push_back(fullErrorMessage.str());
    }
}

void ReportWarning(const std::string& warningMessage,
                   Optional<std::vector<std::string>&> warningMessages)
{
    std::stringstream fullWarningMessage;
    fullWarningMessage << "WARNING: " << warningMessage;
    BOOST_LOG_TRIVIAL(warning) << fullWarningMessage.str();
    if (warningMessages)
    {
        warningMessages.value().push_back(fullWarningMessage.str());
    }
}

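// Checks that every QuantisedAsymm8 output of the given layer has a quantization scale set,
// reporting an error for each one that does not, and normalises Softmax outputs to the
// required scale of 1/256 and offset of 0. Returns false if any scale was missing.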
bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
{
    bool noErrors = true;
    unsigned int numOutputs = layer->GetNumOutputSlots();
    for (unsigned int i = 0; i < numOutputs; i++) {
        OutputSlot& outputSlot = layer->GetOutputSlot(i);
        TensorInfo info = outputSlot.GetTensorInfo();
        if (DataType::QuantisedAsymm8 == info.GetDataType()) {
            if (0.f == info.GetQuantizationScale()) {
                noErrors = false;
                std::stringstream ss;
                ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
                   << " (" << layer->GetNameStr() << ") is of type"
                   << " Quantized 8 bit but its scale parameter has not been set";
                ReportError(ss.str(), errMessages);
            }
            // Softmax under QuantisedAsymm8 must always be scale (1.0f/256.0f) and offset 0
            if ((info.GetQuantizationScale() != (1.0f / 256.0f) ||
                 info.GetQuantizationOffset() != 0) &&
                 layer->GetType() == armnn::LayerType::Softmax)
            {
                std::stringstream ss;
                ss << "Quantization parameters for Softmax layer (Scale: " <<
                    info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
                    ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
                BOOST_LOG_TRIVIAL(warning) << ss.str();
                info.SetQuantizationScale((1.0f / 256.0f));
                info.SetQuantizationOffset(0);
                outputSlot.SetTensorInfo(info);
            }
        }
    }
    return noErrors;
}

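// Walks the layers in [firstLayer, lastLayer) and assigns each one the first available
// preferred backend that supports it. For unsupported Float16 layers that are supported in
// Float32, FP16<->FP32 conversion layers are inserted around the layer as a fallback.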
OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  Graph::Iterator& firstLayer,
                                  Graph::Iterator& lastLayer,
                                  Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose meaningful error message before returning with error
    auto ReturnWithError = [&](const Layer* layer)
    {
        std::stringstream failureMsg;
        failureMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                   << " is not supported on any preferred backend " << backendSettings.m_PreferredBackends;
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    };

    auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
    if (availablePreferredBackends.empty())
    {
        std::stringstream failureMsg;
        failureMsg << "No preferred backends are available";
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    }

    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        auto layer = *it;
        DataType dataType = layer->GetDataType();
        std::string reasonIfUnsupported;
        bool found = false;
        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
        {
            // don't bomb immediately, find all the quantized outputs
            // which haven't had a scale set and report them all back.
            result.m_Error = true;
        }

        for (const auto& backend : availablePreferredBackends)
        {
            // need to set the compute device on the layer
            // before we can check if it is supported
            layer->SetBackendId(backend);
            if (!IWorkloadFactory::IsLayerSupported(*layer, dataType, reasonIfUnsupported))
            {
                if (dataType == DataType::Float16)
                {
                    if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                        && layer->GetType() != LayerType::ConvertFp32ToFp16
                        && layer->GetType() != LayerType::ConvertFp16ToFp32)
                    {
                        // Insert FP16 -> FP32 conversion layer before current layer
                        std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers =
                            InsertConvertFp16ToFp32LayersBefore(optNetObjPtr->GetGraph(), *layer);

                        // Insert FP32 -> FP16 conversion layer after current layer
                        std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers =
                            InsertConvertFp32ToFp16LayersAfter(optNetObjPtr->GetGraph(), *layer);

                        // Assign a supported backend to the newly introduced conversion layers
                        auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                        {
                            bool supportedBackendFound = false;
                            std::string reasonIfUnsupported;

                            // Try preferred backend first
                            layer->SetBackendId(preferredBackend);
                            if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                   EmptyOptional(),
                                                                   reasonIfUnsupported))
                            {
                                supportedBackendFound = true;
                            }
                            else
                            {
                                for (const auto& backend : availablePreferredBackends)
                                {
                                    // Skip preferred backend (we already determined that it is not supported)
                                    if (backend == preferredBackend)
                                    {
                                        continue;
                                    }

                                    layer->SetBackendId(backend);
                                    if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                           EmptyOptional(),
                                                                           reasonIfUnsupported))
                                    {
                                        supportedBackendFound = true;
                                        break;
                                    }
                                }
                            }

                            return supportedBackendFound;
                        };

                        for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        found = true;
                        break;
                    }
                }
                std::stringstream warningMsg;
                warningMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                           << " is not supported on requested backend " << layer->GetBackendId().Get()
                           << " for data type " << GetDataTypeName(dataType)
                           << " (reason: " << reasonIfUnsupported
                           << "), falling back to the next backend.";
                ReportWarning(warningMsg.str(), errMessages);
            }
            else
            {
                found = true;
                backendSettings.m_SelectedBackends.insert(backend);
                break;
            }
        }

        // If the layer is unsupported by any devices, log and return a null network.
        if (!found)
        {
            // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
            // fallback we should set the compute device on the layer to CpuRef (these are not
            // available as accelerated operations, or are only available under certain
            // conditions, currently they comprise MemCopy, Constant, Permute)
            armnn::LayerType layerType = layer->GetType();
            if (!backendSettings.IsCpuRefUsed() && (layerType == armnn::LayerType::MemCopy ||
                                                    layerType == armnn::LayerType::Constant ||
                                                    layerType == armnn::LayerType::Permute))
            {
                BackendId cpuBackendId(armnn::Compute::CpuRef);
                layer->SetBackendId(cpuBackendId);
                backendSettings.m_SelectedBackends.insert(cpuBackendId);
            }
            else
            {
                return ReturnWithError(layer);
            }
        }
    }

    return result;
}

OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  SubgraphView& subgraph,
                                  Optional<std::vector<std::string>&> errMessages)
{
    Graph::Iterator firstLayer = subgraph.begin();
    Graph::Iterator lastLayer  = subgraph.end();
    return AssignBackends(optNetObjPtr,
                          backendSettings,
                          firstLayer,
                          lastLayer,
                          errMessages);
}

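// Instantiates every supported backend listed in the settings via the backend registry,
// registers its tensor handle factories and returns the backend objects keyed by id.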
BackendsMap CreateSupportedBackends(TensorHandleFactoryRegistry& handleFactoryRegistry,
                                    BackendSettings& backendSettings)
{
    BackendsMap backends;
    auto const& backendRegistry = BackendRegistryInstance();
    for (auto&& selectedBackend : backendSettings.m_SupportedBackends)
    {
        auto backendFactory = backendRegistry.GetFactory(selectedBackend);
        auto backendObjPtr = backendFactory();
        BOOST_ASSERT(backendObjPtr);

        backendObjPtr->RegisterTensorHandleFactories(handleFactoryRegistry);

        backends[backendObjPtr->GetId()] = std::move(backendObjPtr);
    }

    return backends;
}

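// Gives each selected backend the chance to optimize the sub-graphs assigned to it,
// substituting optimized sub-graphs into the main graph and re-assigning the layers of
// any sub-graph the backend could not optimize to the remaining backends.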
OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
                                             BackendSettings& backendSettings,
                                             BackendsMap& backends,
                                             Optional<std::vector<std::string>&> errMessages)
{
    BOOST_ASSERT(optNetObjPtr);

    OptimizationResult result;

    // Get the optimized graph
    Graph& optGraph = optNetObjPtr->GetGraph();

    // Run backend specific optimizations
    for (auto&& selectedBackend : backendSettings.m_SelectedBackends)
    {
        auto backendObjPtr = backends.find(selectedBackend)->second.get();
        BOOST_ASSERT(backendObjPtr);

        // Select sub-graphs based on backend
        SubgraphViewSelector::Subgraphs subgraphs =
            SubgraphViewSelector::SelectSubgraphs(optGraph,
                                                  // Select layers assigned to the requested backend
                                                  [&backendObjPtr](const Layer& layer)
                                                  {
                                                      return layer.GetType() != LayerType::Input &&
                                                             layer.GetType() != LayerType::Output &&
                                                             layer.GetBackendId() == backendObjPtr->GetId();
                                                  });
        if (subgraphs.empty())
        {
            // No sub-graphs found, try with next selected backend
            continue;
        }

        // Try to optimize each sub-graph
        for (auto& subgraph : subgraphs)
        {
            // Try to optimize the current sub-graph
            OptimizationViews optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraph);
            BOOST_ASSERT(optimizationViews.Validate(*subgraph));

            // Optimization attempted, check the resulting optimized sub-graph
            for (auto& substitution : optimizationViews.GetSubstitutions())
            {
                // Sub-graph optimized, substitute the sub-graph with the new optimized one in the main optimized graph
                SubgraphView& replacementSubgraph   = substitution.m_ReplacementSubgraph;
                SubgraphView& substitutableSubgraph = substitution.m_SubstitutableSubgraph;
                optGraph.SubstituteSubgraph(substitutableSubgraph, replacementSubgraph);

                // Assign the current backend to the optimized sub-graph
                std::for_each(replacementSubgraph.begin(), replacementSubgraph.end(), [&selectedBackend](Layer* l)
                    {
                        BOOST_ASSERT(l);
                        l->SetBackendId(selectedBackend);
                    });
            }

            if (!optimizationViews.GetFailedSubgraphs().empty())
            {
                std::stringstream warningMsg;
                warningMsg << "Some sub-graph(s) failed to optimize on " << backendObjPtr->GetId() << " backend.";
                ReportWarning(warningMsg.str(), errMessages);

                // Failed to optimize the given sub-graph, re-assign the sub-graph layers to other available backends
                BackendSettings settingsCopy(backendSettings);
                if (!backendObjPtr->GetId().IsCpuRef())
                {
                    // Add the current backend to the list of backends to ignore
                    settingsCopy.m_IgnoredBackends.insert(backendObjPtr->GetId());
                }

                int count = 0;
                for (auto& failedSubgraph : optimizationViews.GetFailedSubgraphs())
                {
                    // An error occurred: the optimization was attempted but not performed, try different backends
                    std::stringstream subgraphMsg;
                    subgraphMsg << "Re-assigning backends to " << failedSubgraph.GetLayers().size()
                                << " layers inside sub-graph " << count++;
                    ReportWarning(subgraphMsg.str(), errMessages);

                    OptimizationResult reassignmentResult = AssignBackends(optNetObjPtr,
                                                                           settingsCopy,
                                                                           *subgraph,
                                                                           errMessages);
                    if (reassignmentResult.m_Error)
                    {
                        // Failed to re-assign one of the remaining backends to each layer of the sub-graph
                        result.m_Error = true;
                        return result;
                    }
                }
            }
        }
    }

    return result;
}

bool RequiresCopy(ITensorHandleFactory::FactoryId src,
                  ITensorHandleFactory::FactoryId dst,
                  TensorHandleFactoryRegistry& registry)
{
    if (src != dst)
    {
        ITensorHandleFactory* srcFactory = registry.GetFactory(src);
        ITensorHandleFactory* dstFactory = registry.GetFactory(dst);

        if (srcFactory->SupportsExport() && dstFactory->SupportsImport())
        {
            return false;
        }
        return true;
    }
    return false;
}

// Find the handle factory for the input layer which results in fewest required copies.
ITensorHandleFactory::FactoryId CalculateSlotOptionForInput(BackendsMap& backends,
                                                            OutputSlot& slot,
                                                            TensorHandleFactoryRegistry& registry)
{
    Layer& layer = slot.GetOwningLayer();
    BOOST_ASSERT(layer.GetType() == LayerType::Input);

    // Explicitly select the tensorhandle factory for InputLayer because the rules for it are slightly different. It
    // doesn't matter which backend it is assigned to because they all use the same implementation, which
    // requires Map/Unmap support. This means that, so long as the handle type supports map/unmap semantics, we can
    // select a factory with maximum compatibility with the layers connected to the InputLayer.

    // First ensure the source backend supports the tensor handle API
    auto frmBackend = backends.find(layer.GetBackendId());
    if (frmBackend == backends.end() ||
        !frmBackend->second->SupportsTensorAllocatorAPI())
    {
        return ITensorHandleFactory::LegacyFactoryId;
    }

    // Go through all connections to the output slot and determine the TensorHandleFactory which results in the
    // fewest copies.
    std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
    int topScore = 0;
    ITensorHandleFactory::FactoryId topChoice = ITensorHandleFactory::LegacyFactoryId;

    for (auto&& connection : slot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();

        auto toBackend = backends.find(connectedLayer.GetBackendId());
        BOOST_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

        if (!toBackend->second.get()->SupportsTensorAllocatorAPI())
        {
            // The destination backend does not support the tensor allocator API, move to the next one
            continue;
        }

        auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
        for (auto&& dst : dstPrefs)
        {
            // Input layers use the mem copy workload, so the selected factory must support map/unmap API
            ITensorHandleFactory* factory = registry.GetFactory(dst);
            if (!factory->SupportsMapUnmap())
            {
                // The current tensor handle factory does not support the map/unmap strategy, move to the next one
                continue;
            }

            auto it = factoryScores.find(dst);
            if (it == factoryScores.end())
            {
                // Add new score to the table
                factoryScores[dst] = 0;
                if (topChoice == ITensorHandleFactory::LegacyFactoryId)
                {
                    topChoice = dst;
                }
            }
            else
            {
                // Increase the score
                factoryScores[dst]++;

                // Track the best option
                if (factoryScores[dst] > topScore)
                {
                    topScore = factoryScores[dst];
                    topChoice = dst;
                }
            }
        }
    }

    return topChoice;
}

// Find the handle factory for the output layer which results in fewest required copies.
ITensorHandleFactory::FactoryId CalculateSlotOptionForOutput(BackendsMap& backends,
                                                             OutputSlot& slot,
                                                             TensorHandleFactoryRegistry& registry)
{
    return ITensorHandleFactory::DeferredFactoryId;
}

// For all handle factories supported on the source backend, we wish to find the one which requires the fewest copies
// when considering all connections.
ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
                                                    OutputSlot& outputSlot,
                                                    TensorHandleFactoryRegistry& registry)
{
    // First ensure the source backend supports the tensor handle API
    Layer& layer = outputSlot.GetOwningLayer();
    auto frmBackend = backends.find(layer.GetBackendId());
    if (frmBackend == backends.end() ||
        !frmBackend->second->SupportsTensorAllocatorAPI())
    {
        return ITensorHandleFactory::LegacyFactoryId;
    }

    // Connections to Output Layers require support for map/unmap on the TensorHandle.
    bool requiresMapUnmap = false;
    for (auto&& connection : outputSlot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();
        if (connectedLayer.GetType() == LayerType::Output)
        {
            requiresMapUnmap = true;
        }
    }

    IBackendInternal* srcBackend = frmBackend->second.get();
    auto srcPrefs = srcBackend->GetHandleFactoryPreferences();

    // Initialize the scores
    std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
    for (auto&& pref : srcPrefs)
    {
        if (requiresMapUnmap) // Only consider factories that support map/unmap if required
        {
            ITensorHandleFactory* factory = registry.GetFactory(pref);
            if (!factory->SupportsMapUnmap())
            {
                // The current tensor handle factory does not support the map/unmap strategy, move to the next one
                continue;
            }
        }

        auto it = factoryScores.find(pref);
        if (it == factoryScores.end())
        {
            // Add new score to the table
            factoryScores[pref] = 0;
        }
    }

    // Score each handle factory based on how many times it requires copies on the slot connections
    for (auto&& connection : outputSlot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();

        auto toBackend = backends.find(connectedLayer.GetBackendId());
        BOOST_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

        auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
        for (auto&& src : srcPrefs)
        {
            if (factoryScores.find(src) == factoryScores.end()) // Don't consider excluded factories
            {
                continue;
            }

            for (auto&& dst : dstPrefs)
            {
                if (RequiresCopy(src, dst, registry))
                {
                    // A copy would be required for this connection, increase the score
                    factoryScores[src]++;
                    break;
                }
            }
        }
    }

    // Find the lowest score
    int minScore = std::numeric_limits<int>::max();
    for (auto it : factoryScores)
    {
        minScore = std::min(minScore, it.second);
    }

    // Collect factories matching the best (lowest) score
    std::vector<ITensorHandleFactory::FactoryId> optimalFactories;
    for (auto it : factoryScores)
    {
        if (it.second == minScore)
        {
            optimalFactories.push_back(it.first);
        }
    }

    // For all compatible Factories matching the best score, find the preferred one for the current layer.
    for (auto&& srcPref : srcPrefs)
    {
        for (auto&& comp : optimalFactories)
        {
            if (comp == srcPref)
            {
                return comp;
            }
        }
    }

    return ITensorHandleFactory::LegacyFactoryId;
}

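// Chooses how data should flow across a single connection: direct sharing when the
// destination accepts the source factory, export/import when both sides support it,
// an explicit copy via map/unmap otherwise, or Undefined if no option is viable.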
MemoryStrategy CalculateStrategy(BackendsMap& backends,
                                 ITensorHandleFactory::FactoryId srcFactoryId,
                                 const Layer& layer,
                                 const Layer& connectedLayer,
                                 TensorHandleFactoryRegistry& registry)
{
    auto toBackend = backends.find(connectedLayer.GetBackendId());
    BOOST_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

    auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();

    // Legacy API check for backward compatibility
    if (srcFactoryId == ITensorHandleFactory::LegacyFactoryId || dstPrefs.empty())
    {
        if (layer.GetBackendId() != connectedLayer.GetBackendId())
        {
            return MemoryStrategy::CopyToTarget;
        }
        else
        {
            return MemoryStrategy::DirectCompatibility;
        }
    }

    // TensorHandleFactory API present, so perform more sophisticated strategies.
    // Dst Output layers don't require copy because they use map/unmap
    if (connectedLayer.GetType() == LayerType::Output)
    {
        return MemoryStrategy::DirectCompatibility;
    }

    // Search for direct match in prefs
    for (auto&& pref : dstPrefs)
    {
        if (pref == srcFactoryId)
        {
            return MemoryStrategy::DirectCompatibility;
        }
    }

    // Search for export/import options
    ITensorHandleFactory* srcFactory = registry.GetFactory(srcFactoryId);
    if (srcFactory->SupportsExport())
    {
        for (auto&& pref : dstPrefs)
        {
            ITensorHandleFactory* dstFactory = registry.GetFactory(pref);
            if (dstFactory->SupportsImport())
            {
                return MemoryStrategy::ExportToTarget;
            }
        }
    }

    // Search for copy options via map/unmap
    if (srcFactory->SupportsMapUnmap())
    {
        for (auto&& pref : dstPrefs)
        {
            ITensorHandleFactory* dstFactory = registry.GetFactory(pref);
            if (dstFactory->SupportsMapUnmap())
            {
                return MemoryStrategy::CopyToTarget;
            }
        }
    }

    return MemoryStrategy::Undefined;
}

// Select the TensorHandleFactories and the corresponding memory strategy
OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
                                              BackendsMap& backends,
                                              TensorHandleFactoryRegistry& registry,
                                              Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages](Layer* layer)
    {
        BOOST_ASSERT(layer);

        // Let's make sure the backend is in our list of supported backends. Something went wrong during backend
        // assignment if this check fails
        BOOST_ASSERT(backends.find(layer->GetBackendId()) != backends.end());

        // Check each output separately
        for (unsigned int slotIdx = 0; slotIdx < layer->GetNumOutputSlots(); slotIdx++)
        {
            OutputSlot& outputSlot = layer->GetOutputSlot(slotIdx);

            ITensorHandleFactory::FactoryId slotOption = ITensorHandleFactory::LegacyFactoryId;

            // Calculate the factory to use which results in the fewest copies being made.
            switch (layer->GetType())
            {
                case LayerType::Input:
                    slotOption = CalculateSlotOptionForInput(backends, outputSlot, registry);
                    break;
                case LayerType::Output:
                    slotOption = CalculateSlotOptionForOutput(backends, outputSlot, registry);
                    break;
                default:
                    slotOption = CalculateSlotOption(backends, outputSlot, registry);
                    break;
            }
            outputSlot.SetTensorHandleFactory(slotOption);

            // Now determine the "best" memory strategy for each connection given the slotOption.
            unsigned int connectionIdx = 0;
            for (auto&& connection : outputSlot.GetConnections())
            {
                const Layer& connectedLayer = connection->GetOwningLayer();

                MemoryStrategy strategy = CalculateStrategy(backends, slotOption, *layer, connectedLayer, registry);

                if (strategy == MemoryStrategy::Undefined)
                {
                    result.m_Error = true;
                    if (errMessages)
                    {
                        errMessages.value().emplace_back("Could not find valid strategy required for compatibility"
                                                         " between backends.");
                    }
                    return;
                }

                outputSlot.SetMemoryStrategy(connectionIdx, strategy);

                connectionIdx++;
            }
        }
    });

    return result;
}

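// Optimizes the given network for the preferred backends: runs the graph-level
// optimization passes, optionally reduces FP32 to FP16, assigns a backend to every
// layer, applies backend-specific optimizations, selects tensor handle factories and
// memory strategies, and finally inserts any copy layers that are required.
//
// Typical call site (illustrative sketch only; assumes a runtime created elsewhere):
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
//     armnn::IOptimizedNetworkPtr optNet =
//         armnn::Optimize(*net, backends, runtime->GetDeviceSpec());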
IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                              const std::vector<BackendId>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options,
                              Optional<std::vector<std::string>&> errMessages)
{
    if (backendPreferences.empty())
    {
        throw armnn::InvalidArgumentException("Invoked Optimize with no backends specified");
    }

    const Network& network = *boost::polymorphic_downcast<const Network*>(&inNetwork);
    std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());

    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);

    OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get());

    // Get the optimized graph
    Graph& optGraph = optNetObjPtr->GetGraph();

    // Perform optimisation passes
    using namespace optimizations;
    Optimizer::Pass(optGraph, MakeOptimizations(SquashEqualPermuteSiblings(),
                                                SquashEqualReshapeSiblings(),
                                                OptimizeInversePermutes(),
                                                MovePermuteUp(),
                                                PermuteAsReshape(),
                                                OptimizeConsecutiveReshapes(),
                                                FoldPadIntoConvolution2d()));

    // Infer the tensor infos for all output slots. Throws an exception on failure
    optGraph.InferTensorInfos();

    // If the Fp32-to-Fp16 optimization is enabled, convert the Fp32 network to Fp16
    if (options.m_ReduceFp32ToFp16)
    {
        Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToFp16Converter()));
    }

    // Initialize backend settings
    BackendSettings backendSettings(backendPreferences, deviceSpec);
    if (backendSettings.GetAvailablePreferredBackends().empty())
    {
        std::stringstream failureMsg;
        failureMsg << "None of the preferred backends " << backendPreferences
                   << " are supported. Current platform provides " << backendSettings.m_SupportedBackends;
        ReportError(failureMsg.str(), errMessages);
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // Create a map to temporarily hold initialized backend objects
    TensorHandleFactoryRegistry tensorHandleFactoryRegistry;
    BackendsMap backends = CreateSupportedBackends(tensorHandleFactoryRegistry, backendSettings);

    // Assign an available backend to each layer
    Graph::Iterator firstLayer = optGraph.begin();
    Graph::Iterator lastLayer  = optGraph.end();
    OptimizationResult assignBackendsResult = AssignBackends(optNetObjPtr,
                                                             backendSettings,
                                                             firstLayer,
                                                             lastLayer,
                                                             errMessages);
    if (assignBackendsResult.m_Error)
    {
        // Failed to assign a backend to each layer
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    Optimizer::Pass(optGraph, MakeOptimizations(OptimizeInverseConversionsFp16(),
                                                OptimizeInverseConversionsFp32()));

    // Apply the backend-specific optimizations
    OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(optNetObjPtr,
                                                                             backendSettings,
                                                                             backends,
                                                                             errMessages);
    if (backendOptimizationResult.m_Error)
    {
        // Failed to apply the backend-specific optimizations
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // If the debug flag is set, then insert a DebugLayer after each layer
    // Doing this after applying the backend optimizations as they might have changed some layers
    if (options.m_Debug)
    {
        Optimizer::Pass(optGraph, MakeOptimizations(InsertDebugLayer()));
    }

    // Calculate the compatibility strategies for tensor handles
    OptimizationResult strategyResult = SelectTensorHandleStrategy(optGraph,
                                                                   backends,
                                                                   tensorHandleFactoryRegistry,
                                                                   errMessages);
    if (strategyResult.m_Error)
    {
        // Failed to select tensor handle factories and memory strategies for the backends
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // Based on the tensor handle strategy determined above, insert copy layers where required.
    optGraph.AddCopyLayers(backends, tensorHandleFactoryRegistry);

    // Convert constants
    Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
    Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsHalfToFloat()));

    // Run backend specific optimizations (deprecated)
    for (auto&& chosenBackend : backendSettings.m_SelectedBackends)
    {
        auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
        auto backendPtr = factoryFun();
        BOOST_ASSERT(backendPtr.get() != nullptr);

        ARMNN_NO_DEPRECATE_WARN_BEGIN
        auto backendSpecificOptimizations = backendPtr->GetOptimizations();
        ARMNN_NO_DEPRECATE_WARN_END

        if (!backendSpecificOptimizations.empty())
        {
            Optimizer::Pass(optNetObjPtr->GetGraph(), backendSpecificOptimizations);
        }
    }

    return optNet;
}

Network::Network()
: m_Graph(std::make_unique<Graph>())
{
}

Network::~Network()
{
}

IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<InputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
}

IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                       const ConstTensor& weights,
                                                       const Optional<ConstTensor>& biases,
                                                       const char* name)
{
    if (fullyConnectedDescriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (fullyConnectedDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const Optional<ConstTensor>& biases,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const char* name)
{
    Optional<ConstTensor> biases;
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const ConstTensor& biases,
                                                   const char* name)
{
    Optional<ConstTensor> optionalBiases(biases);
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, optionalBiases, name);
}

IConnectableLayer* Network::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
                                           const char* name)
{
    return m_Graph->AddLayer<ConcatLayer>(concatDescriptor, name);
}

IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
                                                      const ConstTensor& weights,
                                                      const Optional<ConstTensor>& biases,
                                                      const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const Optional<ConstTensor>& biases,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const char* name)
{
    Optional<ConstTensor> biases;
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const ConstTensor& biases,
                                                  const char* name)
{
    Optional<ConstTensor> optionalBiases(biases);
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const Optional<ConstTensor>& biases,
    const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const Optional<ConstTensor>& biases,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const char* name)
{
    Optional<ConstTensor> biases;
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor& biases,
    const char* name)
{
    Optional<ConstTensor> optionalBiases(biases);
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
}

IConnectableLayer* Network::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
                                                         const ConstTensor& anchors, const char* name)
{
    const auto layer = m_Graph->AddLayer<DetectionPostProcessLayer>(descriptor, name);

    layer->m_Anchors = std::make_unique<ScopedCpuTensorHandle>(anchors);

    return layer;
}

IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
}

IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
}

IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
                                               const char* name)
{
    return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
}

IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
                                                  const char* name)
{
    return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
}

IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
}

IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
                                             const char* name)
{
    return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
}

IConnectableLayer* Network::AddMaximumLayer(const char* name)
{
    return m_Graph->AddLayer<MaximumLayer>(name);
}

IConnectableLayer* Network::AddMinimumLayer(const char* name)
{
    return m_Graph->AddLayer<MinimumLayer>(name);
}

IConnectableLayer* Network::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
                                           const char* name)
{
    return AddConcatLayer(mergerDescriptor, name);
}

IConnectableLayer* Network::AddAdditionLayer(const char* name)
{
    return m_Graph->AddLayer<AdditionLayer>(name);
}

IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
{
    return m_Graph->AddLayer<MultiplicationLayer>(name);
}

IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<OutputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                       const ConstTensor& mean,
                                                       const ConstTensor& variance,
                                                       const ConstTensor& beta,
                                                       const ConstTensor& gamma,
                                                       const char* name)
{
    const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);

    layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
    layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
    layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
    layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);

    return layer;
}

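// The bilinear-specific descriptor is translated onto the generic ResizeDescriptor so
// that both resize entry points below create the same ResizeLayer.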
IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
                                                   const char* name)
{
    ResizeDescriptor resizeDescriptor;
    resizeDescriptor.m_Method       = ResizeMethod::Bilinear;
    resizeDescriptor.m_DataLayout   = descriptor.m_DataLayout;
    resizeDescriptor.m_TargetWidth  = descriptor.m_TargetWidth;
    resizeDescriptor.m_TargetHeight = descriptor.m_TargetHeight;

    return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
}

IConnectableLayer* Network::AddResizeLayer(const ResizeDescriptor& resizeDescriptor, const char* name)
{
    return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
}

IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                    const char* name)
{
    return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
}

IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
{
    auto layer = m_Graph->AddLayer<ConstantLayer>(name);

    layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);

    return layer;
}

IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
}

IConnectableLayer* Network::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
}

IConnectableLayer* Network::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<SpaceToDepthLayer>(spaceToDepthDescriptor, name);
}

IConnectableLayer* Network::AddFloorLayer(const char* name)
{
    return m_Graph->AddLayer<FloorLayer>(name);
}

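// Adds an LSTM layer, copying the supplied weights and biases into ScopedCpuTensorHandles.
// The optional parameter groups (CIFG, projection, peephole and layer normalization) are
// only read when the corresponding descriptor flag enables them, and missing mandatory
// tensors in an enabled group raise InvalidArgumentException.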
IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor& descriptor,
                                         const LstmInputParams& params,
                                         const char* name)
{
    const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);

    //Lstm Basic Parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    //Lstm Cifg parameters
    if(!descriptor.m_CifgEnabled)
    {
        if(params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL");
        }
        if(params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                "AddLstmLayer: Recurrent To Input Weights cannot be NULL");
        }
        if(params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL");
        }
        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        // In the VTS tests, cell-to-input weights may be null, even if the other CIFG params are not.
        if(params.m_CellToInputWeights != nullptr)
        {
            layer->m_CifgParameters.m_CellToInputWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    //Lstm projection parameters
    if(descriptor.m_ProjectionEnabled)
    {
        if(params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL");
        }
        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
        if(params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    //Lstm Peephole params
    if(descriptor.m_PeepholeEnabled)
    {
        if(params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL");
        }
        if(params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL");
        }
        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }

    //Lstm Layer Normalization params
    if(descriptor.m_LayerNormEnabled)
    {
        if(!descriptor.m_CifgEnabled)
        {
            if(params.m_InputLayerNormWeights == nullptr)
            {
                throw InvalidArgumentException("AddLstmLayer: Input layer normalization weights cannot be NULL");
            }
            layer->m_LayerNormParameters.m_InputLayerNormWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
        }

        if(params.m_ForgetLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Forget layer normalization weights cannot be NULL");
        }
        if(params.m_CellLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell layer normalization weights cannot be NULL");
        }
        if(params.m_OutputLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Output layer normalization weights cannot be NULL");
        }
        layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
        layer->m_LayerNormParameters.m_CellLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
        layer->m_LayerNormParameters.m_OutputLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
    }
    return layer;
}

IConnectableLayer* Network::AddDivisionLayer(const char* name)
{
    return m_Graph->AddLayer<DivisionLayer>(name);
}

IConnectableLayer* Network::AddSubtractionLayer(const char* name)
{
    return m_Graph->AddLayer<SubtractionLayer>(name);
}

IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
{
    return m_Graph->AddLayer<MeanLayer>(meanDescriptor, name);
}

IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
{
    return m_Graph->AddLayer<PadLayer>(padDescriptor, name);
}

IConnectableLayer* Network::AddQuantizeLayer(const char* name)
{
    return m_Graph->AddLayer<QuantizeLayer>(name);
}

IConnectableLayer* Network::AddDequantizeLayer(const char* name)
{
    return m_Graph->AddLayer<DequantizeLayer>(name);
}

IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}

IConnectableLayer* Network::AddGreaterLayer(const char* name)
{
    return m_Graph->AddLayer<GreaterLayer>(name);
}

IConnectableLayer* Network::AddEqualLayer(const char* name)
{
    return m_Graph->AddLayer<EqualLayer>(name);
}

IConnectableLayer* Network::AddRsqrtLayer(const char* name)
{
    return m_Graph->AddLayer<RsqrtLayer>(name);
}

IConnectableLayer* Network::AddGatherLayer(const char* name)
{
    return m_Graph->AddLayer<GatherLayer>(name);
}

IConnectableLayer* Network::AddMergeLayer(const char* name)
{
    return m_Graph->AddLayer<MergeLayer>(name);
}

IConnectableLayer* Network::AddSwitchLayer(const char* name)
{
    return m_Graph->AddLayer<SwitchLayer>(name);
}

IConnectableLayer* Network::AddPreluLayer(const char* name)
{
    return m_Graph->AddLayer<PreluLayer>(name);
}

IConnectableLayer* Network::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
                                                           const ConstTensor& weights,
                                                           const Optional<ConstTensor>& biases,
                                                           const char* name)
{
    if (descriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddTransposeConvolution2dLayer: Biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<TransposeConvolution2dLayer>(descriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (descriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddStackLayer(const StackDescriptor& stackDescriptor,
                                          const char* name)
{
    return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
}

void Network::Accept(ILayerVisitor& visitor) const
{
    for (auto layer : GetGraph())
    {
        layer->Accept(visitor);
    }
}

OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
    : m_Graph(std::move(graph))
{
}

OptimizedNetwork::~OptimizedNetwork()
{
}

} // namespace armnn
1478} // namespace armnn