//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Network.hpp"
#include "Graph.hpp"
#include "Layer.hpp"
#include "DeviceSpec.hpp"
#include "Optimizer.hpp"
#include "SubgraphViewSelector.hpp"
#include "BackendSettings.hpp"
#include "optimizations/All.hpp"

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/BackendRegistry.hpp>
#include <backendsCommon/IBackendInternal.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>

#include <fcntl.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <vector>

#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>
#include <boost/numeric/conversion/converter_policies.hpp>
#include <boost/cast.hpp>

namespace armnn
{

armnn::INetwork* INetwork::CreateRaw()
{
    return new Network();
}

armnn::INetworkPtr INetwork::Create()
{
    return INetworkPtr(CreateRaw(), &INetwork::Destroy);
}

void INetwork::Destroy(INetwork* network)
{
    delete boost::polymorphic_downcast<Network*>(network);
}

Status Network::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
{
    delete boost::polymorphic_downcast<OptimizedNetwork*>(network);
}

Status OptimizedNetwork::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
{
    return m_Graph->SerializeToDot(stream);
}

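// Aggregate result of an optimization pass: records whether any warnings or
// errors were raised while the pass ran.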
struct OptimizationResult
{
    bool m_Warning;
    bool m_Error;

    OptimizationResult()
        : m_Warning(false)
        , m_Error(false)
    {}
};

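// Logging helpers: emit the message through the Boost log and, if the caller
// supplied a message vector, append the full message to it as well.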
void ReportError(const std::string& errorMessage,
                 Optional<std::vector<std::string>&> errorMessages)
{
    std::stringstream fullErrorMessage;
    fullErrorMessage << "ERROR: " << errorMessage;
    BOOST_LOG_TRIVIAL(error) << fullErrorMessage.str();
    if (errorMessages)
    {
        errorMessages.value().push_back(fullErrorMessage.str());
    }
}

void ReportWarning(const std::string& warningMessage,
                   Optional<std::vector<std::string>&> warningMessages)
{
    std::stringstream fullWarningMessage;
    fullWarningMessage << "WARNING: " << warningMessage;
    BOOST_LOG_TRIVIAL(warning) << fullWarningMessage.str();
    if (warningMessages)
    {
        warningMessages.value().push_back(fullWarningMessage.str());
    }
}

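// Returns false if any QuantisedAsymm8 output of the layer is missing its
// quantization scale. Also normalizes the quantization parameters of Softmax
// outputs to the required scale 1/256 and offset 0, warning when it does so.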
bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
{
    bool noErrors = true;
    unsigned int numOutputs = layer->GetNumOutputSlots();
    for (unsigned int i = 0; i < numOutputs; i++)
    {
        OutputSlot& outputSlot = layer->GetOutputSlot(i);
        TensorInfo info = outputSlot.GetTensorInfo();
        if (DataType::QuantisedAsymm8 == info.GetDataType())
        {
            if (0.f == info.GetQuantizationScale())
            {
                noErrors = false;
                std::stringstream ss;
                ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
                   << " (" << layer->GetNameStr() << ") is of type"
                   << " Quantized 8 bit but its scale parameter has not been set";
                ReportError(ss.str(), errMessages);
            }
            // Softmax under QuantisedAsymm8 must always have scale (1.0f/256.0f) and offset 0
            if ((info.GetQuantizationScale() != (1.0f / 256.0f) ||
                 info.GetQuantizationOffset() != 0) &&
                 layer->GetType() == armnn::LayerType::Softmax)
            {
                std::stringstream ss;
                ss << "Quantization parameters for Softmax layer (Scale: " <<
                    info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
                    ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
                BOOST_LOG_TRIVIAL(warning) << ss.str();
                info.SetQuantizationScale((1.0f / 256.0f));
                info.SetQuantizationOffset(0);
                outputSlot.SetTensorInfo(info);
            }
        }
    }
    return noErrors;
}

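// Attempts to assign one of the available preferred backends to every layer in
// [firstLayer, lastLayer). The preferred backends are tried in order for each
// layer; if a layer is unsupported only in Float16, FP16<->FP32 conversion
// layers are inserted around it and it is retried in Float32. Layers that no
// backend supports fall back to CpuRef where permitted (MemCopy, Constant,
// Permute); otherwise the returned result carries an error.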
OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  Graph::Iterator& firstLayer,
                                  Graph::Iterator& lastLayer,
                                  Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose a meaningful error message before returning with error
    auto ReturnWithError = [&](const Layer* layer)
    {
        std::stringstream failureMsg;
        failureMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                   << " is not supported on any preferred backend " << backendSettings.m_PreferredBackends;
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    };

    auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
    if (availablePreferredBackends.empty())
    {
        std::stringstream failureMsg;
        failureMsg << "No preferred backends are available";
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    }

    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        auto layer = *it;
        DataType dataType = layer->GetDataType();
        std::string reasonIfUnsupported;
        bool found = false;
        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
        {
            // Don't bail out immediately; find all the quantized outputs
            // which haven't had a scale set and report them all back.
            result.m_Error = true;
        }

        for (const auto& backend : availablePreferredBackends)
        {
            // Need to set the compute device on the layer
            // before we can check if it is supported
            layer->SetBackendId(backend);
            if (!IWorkloadFactory::IsLayerSupported(*layer, dataType, reasonIfUnsupported))
            {
                if (dataType == DataType::Float16)
                {
                    if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                        && layer->GetType() != LayerType::ConvertFp32ToFp16
                        && layer->GetType() != LayerType::ConvertFp16ToFp32)
                    {
                        // Insert FP16 -> FP32 conversion layer before current layer
                        std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers =
                            InsertConvertFp16ToFp32LayersBefore(optNetObjPtr->GetGraph(), *layer);

                        // Insert FP32 -> FP16 conversion layer after current layer
                        std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers =
                            InsertConvertFp32ToFp16LayersAfter(optNetObjPtr->GetGraph(), *layer);

                        // Assign a supported backend to the newly introduced conversion layers
                        auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                        {
                            bool supportedBackendFound = false;
                            std::string reasonIfUnsupported;

                            // Try preferred backend first
                            layer->SetBackendId(preferredBackend);
                            if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                   EmptyOptional(),
                                                                   reasonIfUnsupported))
                            {
                                supportedBackendFound = true;
                            }
                            else
                            {
                                for (const auto& backend : availablePreferredBackends)
                                {
                                    // Skip preferred backend (we already determined that it is not supported)
                                    if (backend == preferredBackend)
                                    {
                                        continue;
                                    }

                                    layer->SetBackendId(backend);
                                    if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                           EmptyOptional(),
                                                                           reasonIfUnsupported))
                                    {
                                        supportedBackendFound = true;
                                        break;
                                    }
                                }
                            }

                            return supportedBackendFound;
                        };

                        for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        found = true;
                        break;
                    }
                }
                std::stringstream warningMsg;
                warningMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                           << " is not supported on requested backend " << layer->GetBackendId().Get()
                           << " for data type " << GetDataTypeName(dataType)
                           << " (reason: " << reasonIfUnsupported
                           << "), falling back to the next backend.";
                ReportWarning(warningMsg.str(), errMessages);
            }
            else
            {
                found = true;
                backendSettings.m_SelectedBackends.insert(backend);
                break;
            }
        }

        // If the layer is unsupported by any devices, log and return with error
        if (!found)
        {
            // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
            // fallback, we should set the compute device on the layer to CpuRef (these are not
            // available as accelerated operations, or are only available under certain
            // conditions; currently they comprise MemCopy, Constant, Permute)
            armnn::LayerType layerType = layer->GetType();
            if (!backendSettings.IsCpuRefUsed() && (layerType == armnn::LayerType::MemCopy ||
                                                    layerType == armnn::LayerType::Constant ||
                                                    layerType == armnn::LayerType::Permute))
            {
                BackendId cpuBackendId(armnn::Compute::CpuRef);
                layer->SetBackendId(cpuBackendId);
                backendSettings.m_SelectedBackends.insert(cpuBackendId);
            }
            else
            {
                return ReturnWithError(layer);
            }
        }
    }

    return result;
}

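// Convenience overload that assigns backends to all layers of a sub-graph view.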
OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  SubgraphView& subgraph,
                                  Optional<std::vector<std::string>&> errMessages)
{
    Graph::Iterator firstLayer = subgraph.begin();
    Graph::Iterator lastLayer = subgraph.end();
    return AssignBackends(optNetObjPtr,
                          backendSettings,
                          firstLayer,
                          lastLayer,
                          errMessages);
}

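// Gives each selected backend the chance to rewrite the sub-graphs assigned to
// it. If a backend returns an optimized replacement, it is substituted into the
// main graph; if an attempted optimization fails, the affected layers are
// re-assigned to the remaining backends (the failing backend is ignored from
// then on, unless it is CpuRef).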
OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
                                             BackendSettings& backendSettings,
                                             Optional<std::vector<std::string>&> errMessages)
{
    BOOST_ASSERT(optNetObjPtr);

    OptimizationResult result;

    // Get the optimized graph
    Graph& optGraph = optNetObjPtr->GetGraph();

    // Get the entire graph as a sub-graph
    SubgraphView mainSubgraph(optGraph);

    // Run backend specific optimizations
    auto const& backendRegistry = BackendRegistryInstance();
    for (auto&& selectedBackend : backendSettings.m_SelectedBackends)
    {
        auto backendFactory = backendRegistry.GetFactory(selectedBackend);
        auto backendObjPtr = backendFactory();
        BOOST_ASSERT(backendObjPtr);

        // Select sub-graphs based on backend
        SubgraphViewSelector::Subgraphs subgraphs =
            SubgraphViewSelector::SelectSubgraphs(mainSubgraph,
                                                  // Select layers assigned to the requested backend
                                                  [&backendObjPtr](const Layer& layer)
                                                  {
                                                      return layer.GetType() != LayerType::Input &&
                                                             layer.GetType() != LayerType::Output &&
                                                             layer.GetBackendId() == backendObjPtr->GetId();
                                                  });
        if (subgraphs.empty())
        {
            // No sub-graphs found, try with next selected backend
            continue;
        }

        // Try to optimize each sub-graph
        for (auto& subgraph : subgraphs)
        {
            // Try to optimize the current sub-graph
            bool optimizationAttempted = false;
            SubgraphView::SubgraphViewPtr optSubgraph = backendObjPtr->OptimizeSubgraphView(*subgraph,
                                                                                            optimizationAttempted);

            // Check if the optimization has been attempted
            if (!optimizationAttempted)
            {
                // No optimization attempted, keep the current sub-graph as it is and move to the next one
                continue;
            }

            // Optimization attempted, check the resulting optimized sub-graph
            if (optSubgraph)
            {
                // Sub-graph optimized, substitute the sub-graph with the new optimized one in the main optimized graph
                optGraph.SubstituteSubgraph(std::move(subgraph), *optSubgraph);

                // Assign the current backend to the optimized sub-graph
                std::for_each(optSubgraph->begin(), optSubgraph->end(), [&selectedBackend](Layer* l)
                {
                    BOOST_ASSERT(l);
                    l->SetBackendId(selectedBackend);
                });

                // Recreate the sub-graph representing the entire graph
                mainSubgraph.Update(optGraph);
            }
            else
            {
                // An error occurred: the optimization was attempted but not performed, try different backends
                std::stringstream warningMsg;
                warningMsg << "Sub-graph failed to get optimized on " << backendObjPtr->GetId() << ". "
                           << "Re-assigning backends to " << subgraph->GetLayers().size() << " layers inside sub-graph";
                ReportWarning(warningMsg.str(), errMessages);

                // Failed to optimize the given sub-graph, re-assign the sub-graph layers to other available backends
                if (!backendObjPtr->GetId().IsCpuRef())
                {
                    // Add the current backend to the list of backends to ignore
                    backendSettings.m_IgnoredBackends.insert(backendObjPtr->GetId());
                }
                OptimizationResult reassignmentResult = AssignBackends(optNetObjPtr,
                                                                       backendSettings,
                                                                       *subgraph,
                                                                       errMessages);
                if (reassignmentResult.m_Error)
                {
                    // Failed to re-assign one of the remaining backends to each layer of the sub-graph
                    result.m_Error = true;
                    return result;
                }
            }
        }
    }

    return result;
}

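// Top-level entry point for network optimization. The pipeline below copies the
// input network's graph, runs graph-level optimization passes (permute/reshape
// folding etc.), infers tensor infos, optionally reduces FP32 to FP16, assigns
// a backend to every layer, applies backend-specific sub-graph optimizations,
// and finally inserts copy layers and converts constants.
//
// A minimal sketch of a call site (assuming a populated network 'net' and an
// IRuntime 'runtime' created elsewhere):
//
//     std::vector<std::string> messages;
//     armnn::IOptimizedNetworkPtr optNet =
//         armnn::Optimize(*net,
//                         {armnn::Compute::CpuAcc, armnn::Compute::CpuRef},
//                         runtime->GetDeviceSpec(),
//                         armnn::OptimizerOptions(),
//                         armnn::Optional<std::vector<std::string>&>(messages));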
IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                              const std::vector<BackendId>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options,
                              Optional<std::vector<std::string>&> errMessages)
{
    if (backendPreferences.empty())
    {
        throw armnn::InvalidArgumentException("Invoked Optimize with no backends specified");
    }

    const Network& network = *boost::polymorphic_downcast<const Network*>(&inNetwork);
    std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());

    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);

    OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get());

    // Get the optimized graph
    Graph& optGraph = optNetObjPtr->GetGraph();

    // Perform optimization passes
    using namespace optimizations;
    Optimizer::Pass(optGraph, MakeOptimizations(SquashEqualPermuteSiblings(),
                                                SquashEqualReshapeSiblings(),
                                                OptimizeInversePermutes(),
                                                MovePermuteUp(),
                                                PermuteAsReshape(),
                                                OptimizeConsecutiveReshapes(),
                                                FoldPadIntoConvolution2d()));

    // Infer the tensor infos for all output slots. Throws an exception on failure
    optGraph.InferTensorInfos();

    // If FP32-to-FP16 optimization is set, convert the FP32 network to FP16
    if (options.m_ReduceFp32ToFp16)
    {
        Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToFp16Converter()));
    }

    // Initialize backend settings
    BackendSettings backendSettings(backendPreferences, deviceSpec);
    if (backendSettings.GetAvailablePreferredBackends().empty())
    {
        std::stringstream failureMsg;
        failureMsg << "None of the preferred backends " << backendPreferences
                   << " are supported. Current platform provides " << backendSettings.m_SupportedBackends;
        ReportError(failureMsg.str(), errMessages);
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // Assign an available backend to each layer
    Graph::Iterator firstLayer = optGraph.begin();
    Graph::Iterator lastLayer = optGraph.end();
    OptimizationResult assignBackendsResult = AssignBackends(optNetObjPtr,
                                                             backendSettings,
                                                             firstLayer,
                                                             lastLayer,
                                                             errMessages);
    if (assignBackendsResult.m_Error)
    {
        // Failed to assign a backend to each layer
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    Optimizer::Pass(optGraph, MakeOptimizations(OptimizeInverseConversionsFp16(),
                                                OptimizeInverseConversionsFp32()));

    // Apply the backend-specific optimizations
    OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(optNetObjPtr,
                                                                             backendSettings,
                                                                             errMessages);
    if (backendOptimizationResult.m_Error)
    {
        // Failed to apply the backend-specific optimizations
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // If the debug flag is set, then insert a DebugLayer after each layer.
    // Doing this after applying the backend optimizations as they might have changed some layers
    if (options.m_Debug)
    {
        Optimizer::Pass(optGraph, MakeOptimizations(InsertDebugLayer()));
    }

    optGraph.AddCopyLayers();

    // Convert constants
    Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
    Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsHalfToFloat()));

    // Run backend-specific optimizations
    for (auto&& chosenBackend : backendSettings.m_SelectedBackends)
    {
        auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
        auto backendPtr = factoryFun();
        BOOST_ASSERT(backendPtr.get() != nullptr);

        auto backendSpecificOptimizations = backendPtr->GetOptimizations();
        if (!backendSpecificOptimizations.empty())
        {
            Optimizer::Pass(optNetObjPtr->GetGraph(), backendSpecificOptimizations);
        }
    }

    return optNet;
}

Network::Network()
: m_Graph(std::make_unique<Graph>())
{
}

Network::~Network()
{
}

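// The Add*Layer methods below create the corresponding layer in the network's
// graph. Layers that own constant data (weights, biases, anchors, ...) copy it
// into ScopedCpuTensorHandles held by the layer.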
IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<InputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
}

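// Shared implementation behind the AddFullyConnectedLayer overloads: validates
// that biases are present when the descriptor enables them, then stores the
// weights (and biases) in the layer. The convolution Impl helpers below follow
// the same pattern.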
IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                       const ConstTensor& weights,
                                                       const Optional<ConstTensor>& biases,
                                                       const char* name)
{
    if (fullyConnectedDescriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (fullyConnectedDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const Optional<ConstTensor>& biases,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
}

/// @deprecated
IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const char* name)
{
    Optional<ConstTensor> biases = EmptyOptional();
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
}

/// @deprecated
IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const ConstTensor& biases,
                                                   const char* name)
{
    Optional<ConstTensor> optionalBiases(biases);
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, optionalBiases, name);
}

IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
                                                      const ConstTensor& weights,
                                                      const Optional<ConstTensor>& biases,
                                                      const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const Optional<ConstTensor>& biases,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

/// @deprecated
IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const char* name)
{
    Optional<ConstTensor> biases = EmptyOptional();
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

/// @deprecated
IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const ConstTensor& biases,
                                                  const char* name)
{
    Optional<ConstTensor> optionalBiases(biases);
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const Optional<ConstTensor>& biases,
    const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const Optional<ConstTensor>& biases,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

/// @deprecated
IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const char* name)
{
    Optional<ConstTensor> biases = EmptyOptional();
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

/// @deprecated
IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor& biases,
    const char* name)
{
    Optional<ConstTensor> optionalBiases(biases);
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
}

IConnectableLayer* Network::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
                                                         const ConstTensor& anchors, const char* name)
{
    const auto layer = m_Graph->AddLayer<DetectionPostProcessLayer>(descriptor, name);

    layer->m_Anchors = std::make_unique<ScopedCpuTensorHandle>(anchors);

    return layer;
}

IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
}

IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
}

IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
                                               const char* name)
{
    return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
}

IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
                                                  const char* name)
{
    return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
}

IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
}

IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
                                             const char* name)
{
    return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
}

IConnectableLayer* Network::AddMaximumLayer(const char* name)
{
    return m_Graph->AddLayer<MaximumLayer>(name);
}

IConnectableLayer* Network::AddMinimumLayer(const char* name)
{
    return m_Graph->AddLayer<MinimumLayer>(name);
}

IConnectableLayer* Network::AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
                                           const char* name)
{
    return m_Graph->AddLayer<MergerLayer>(mergerDescriptor, name);
}

IConnectableLayer* Network::AddAdditionLayer(const char* name)
{
    return m_Graph->AddLayer<AdditionLayer>(name);
}

IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
{
    return m_Graph->AddLayer<MultiplicationLayer>(name);
}

IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<OutputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                       const ConstTensor& mean,
                                                       const ConstTensor& variance,
                                                       const ConstTensor& beta,
                                                       const ConstTensor& gamma,
                                                       const char* name)
{
    const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);

    layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
    layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
    layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
    layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);

    return layer;
}

IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor, name);
}

IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                    const char* name)
{
    return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
}

IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
{
    auto layer = m_Graph->AddLayer<ConstantLayer>(name);

    layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);

    return layer;
}

IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
}

IConnectableLayer* Network::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
}

IConnectableLayer* Network::AddFloorLayer(const char* name)
{
    return m_Graph->AddLayer<FloorLayer>(name);
}

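// Creates an LSTM layer and copies the supplied weights and biases into it.
// The basic parameters are always required; the CIFG parameters are required
// when CIFG is disabled, and the projection and peephole parameter groups are
// validated and stored when the descriptor enables them.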
IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor& descriptor,
                                         const LstmInputParams& params,
                                         const char* name)
{
    const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);

    // Lstm basic parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    // Lstm CIFG parameters
    if (!descriptor.m_CifgEnabled)
    {
        if (params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL");
        }
        if (params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                "AddLstmLayer: Recurrent To Input Weights cannot be NULL");
        }
        if (params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL");
        }
        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        // In the VTS tests, cell-to-input weights may be null, even if the other CIFG params are not.
        if (params.m_CellToInputWeights != nullptr)
        {
            layer->m_CifgParameters.m_CellToInputWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    // Lstm projection parameters
    if (descriptor.m_ProjectionEnabled)
    {
        if (params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL");
        }
        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
        if (params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    // Lstm peephole parameters
    if (descriptor.m_PeepholeEnabled)
    {
        if (params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL");
        }
        if (params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL");
        }
        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }
    return layer;
}

IConnectableLayer* Network::AddDivisionLayer(const char* name)
{
    return m_Graph->AddLayer<DivisionLayer>(name);
}

IConnectableLayer* Network::AddSubtractionLayer(const char* name)
{
    return m_Graph->AddLayer<SubtractionLayer>(name);
}

IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
{
    return m_Graph->AddLayer<MeanLayer>(meanDescriptor, name);
}

IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
{
    return m_Graph->AddLayer<PadLayer>(padDescriptor, name);
}

IConnectableLayer* Network::AddQuantizeLayer(const char* name)
{
    return m_Graph->AddLayer<QuantizeLayer>(name);
}

IConnectableLayer* Network::AddDequantizeLayer(const char* name)
{
    return m_Graph->AddLayer<DequantizeLayer>(name);
}

IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}

IConnectableLayer* Network::AddGreaterLayer(const char* name)
{
    return m_Graph->AddLayer<GreaterLayer>(name);
}

IConnectableLayer* Network::AddEqualLayer(const char* name)
{
    return m_Graph->AddLayer<EqualLayer>(name);
}

IConnectableLayer* Network::AddRsqrtLayer(const char* name)
{
    return m_Graph->AddLayer<RsqrtLayer>(name);
}

IConnectableLayer* Network::AddGatherLayer(const char* name)
{
    return m_Graph->AddLayer<GatherLayer>(name);
}

IConnectableLayer* Network::AddMergeLayer(const char* name)
{
    return m_Graph->AddLayer<MergeLayer>(name);
}

IConnectableLayer* Network::AddSwitchLayer(const char* name)
{
    return m_Graph->AddLayer<SwitchLayer>(name);
}

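// Applies the given visitor to each layer in the graph.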
void Network::Accept(ILayerVisitor& visitor) const
{
    for (auto layer : GetGraph())
    {
        layer->Accept(visitor);
    }
}

OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
    : m_Graph(std::move(graph))
{
}

OptimizedNetwork::~OptimizedNetwork()
{
}

} // namespace armnn