//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Network.hpp"
#include "Graph.hpp"
#include "Layer.hpp"
#include "DeviceSpec.hpp"
#include "Optimizer.hpp"
#include "SubGraphSelector.hpp"
#include "BackendSettings.hpp"
#include "optimizations/All.hpp"

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/BackendRegistry.hpp>
#include <backendsCommon/IBackendInternal.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>

#include <fcntl.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <vector>

#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>
#include <boost/numeric/conversion/converter_policies.hpp>
#include <boost/cast.hpp>

namespace armnn
{

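// Networks are created through the factory functions below; Create() wraps the
// raw pointer in an INetworkPtr so that INetwork::Destroy is invoked
// automatically. A minimal usage sketch (illustrative only):
//
//     armnn::INetworkPtr net = armnn::INetwork::Create();
//     armnn::IConnectableLayer* input = net->AddInputLayer(0, "input");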
armnn::INetwork* INetwork::CreateRaw()
{
    return new Network();
}

armnn::INetworkPtr INetwork::Create()
{
    return INetworkPtr(CreateRaw(), &INetwork::Destroy);
}

void INetwork::Destroy(INetwork* network)
{
    delete boost::polymorphic_downcast<Network*>(network);
}

Status Network::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
{
    delete boost::polymorphic_downcast<OptimizedNetwork*>(network);
}

Status OptimizedNetwork::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
{
    return m_Graph->SerializeToDot(stream);
}

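// Collects the outcome of an optimization step; both flags start false and are
// set by the passes below.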
struct OptimizationResult
{
    bool m_Warning;
    bool m_Error;

    OptimizationResult()
        : m_Warning(false)
        , m_Error(false)
    {}
};

void ReportError(const std::string& errorMessage,
                 Optional<std::vector<std::string>&> errorMessages)
{
    std::stringstream fullErrorMessage;
    fullErrorMessage << "ERROR: " << errorMessage;
    BOOST_LOG_TRIVIAL(error) << fullErrorMessage.str();
    if (errorMessages)
    {
        errorMessages.value().push_back(fullErrorMessage.str());
    }
}

void ReportWarning(const std::string& warningMessage,
                   Optional<std::vector<std::string>&> warningMessages)
{
    std::stringstream fullWarningMessage;
    fullWarningMessage << "WARNING: " << warningMessage;
    BOOST_LOG_TRIVIAL(warning) << fullWarningMessage.str();
    if (warningMessages)
    {
        warningMessages.value().push_back(fullWarningMessage.str());
    }
}

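// Returns false if any QuantisedAsymm8 output slot of the given layer still has
// its quantization scale left at 0.0f, reporting every offending output via
// ReportError rather than stopping at the first one.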
bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
{
    bool noErrors = true;
    unsigned int numOutputs = layer->GetNumOutputSlots();
    for (unsigned int i = 0; i < numOutputs; i++)
    {
        const OutputSlot& outputSlot = layer->GetOutputSlot(i);
        const TensorInfo& info = outputSlot.GetTensorInfo();
        if (DataType::QuantisedAsymm8 == info.GetDataType())
        {
            if (0.f == info.GetQuantizationScale())
            {
                noErrors = false;
                std::stringstream ss;
                ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
                   << " (" << layer->GetNameStr() << ") is of type"
                   << " Quantized 8 bit but its scale parameter has not been set";
                ReportError(ss.str(), errMessages);
            }
        }
    }
    return noErrors;
}

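// Walks the layers in [firstLayer, lastLayer) and assigns the first available
// preferred backend that supports each layer. Float16 layers that are only
// supported in Float32 get ConvertFp16ToFp32/ConvertFp32ToFp16 layers inserted
// around them; MemCopy, Constant and Permute layers may silently fall back to
// CpuRef. Any failure is recorded in the returned OptimizationResult.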
OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  Graph::Iterator& firstLayer,
                                  Graph::Iterator& lastLayer,
                                  Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose a meaningful error message before returning with error
    auto ReturnWithError = [&](const Layer* layer)
    {
        std::stringstream failureMsg;
        failureMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                   << " is not supported on any preferred backend " << backendSettings.m_PreferredBackends;
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    };

    auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
    if (availablePreferredBackends.empty())
    {
        std::stringstream failureMsg;
        failureMsg << "No preferred backends are available";
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    }

    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        auto layer = *it;
        DataType dataType = layer->GetDataType();
        std::string reasonIfUnsupported;
        bool found = false;
        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
        {
            // Don't bomb immediately: find all the quantized outputs
            // which haven't had a scale set and report them all back.
            result.m_Error = true;
        }

        for (const auto& backend : availablePreferredBackends)
        {
            // Need to set the compute device on the layer
            // before we can check if it is supported.
            layer->SetBackendId(backend);
            if (!IWorkloadFactory::IsLayerSupported(*layer, dataType, reasonIfUnsupported))
            {
                if (dataType == DataType::Float16)
                {
                    if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                        && layer->GetType() != LayerType::ConvertFp32ToFp16
                        && layer->GetType() != LayerType::ConvertFp16ToFp32)
                    {
                        // Insert FP16 -> FP32 conversion layer before current layer
                        std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers =
                            InsertConvertFp16ToFp32LayersBefore(optNetObjPtr->GetGraph(), *layer);

                        // Insert FP32 -> FP16 conversion layer after current layer
                        std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers =
                            InsertConvertFp32ToFp16LayersAfter(optNetObjPtr->GetGraph(), *layer);

                        // Assign a supported backend to the newly introduced conversion layers
                        auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                        {
                            bool supportedBackendFound = false;
                            std::string reasonIfUnsupported;

                            // Try preferred backend first
                            layer->SetBackendId(preferredBackend);
                            if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                   EmptyOptional(),
                                                                   reasonIfUnsupported))
                            {
                                supportedBackendFound = true;
                            }
                            else
                            {
                                for (const auto& backend : availablePreferredBackends)
                                {
                                    // Skip preferred backend (we already determined that it is not supported)
                                    if (backend == preferredBackend)
                                    {
                                        continue;
                                    }

                                    layer->SetBackendId(backend);
                                    if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                           EmptyOptional(),
                                                                           reasonIfUnsupported))
                                    {
                                        supportedBackendFound = true;
                                        break;
                                    }
                                }
                            }

                            return supportedBackendFound;
                        };

                        for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        found = true;
                        break;
                    }
                }
                std::stringstream warningMsg;
                warningMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                           << " is not supported on requested backend " << layer->GetBackendId().Get()
                           << " for data type " << GetDataTypeName(dataType)
                           << " (reason: " << reasonIfUnsupported
                           << "), falling back to the next backend.";
                ReportWarning(warningMsg.str(), errMessages);
            }
            else
            {
                found = true;
                backendSettings.m_SelectedBackends.insert(backend);
                break;
            }
        }

        // If the layer is unsupported by any backend, report the failure and
        // return with error (the caller then discards the network).
        if (!found)
        {
            // NOTE: MemCopy, Constant and Permute layers are not accelerated
            // operations (or are only accelerated under certain conditions), so if
            // CpuRef is not already among the preferred backends we assign these
            // layer types to CpuRef here rather than failing.
            armnn::LayerType layerType = layer->GetType();
            if (!backendSettings.IsCpuRefUsed() && (layerType == armnn::LayerType::MemCopy ||
                                                    layerType == armnn::LayerType::Constant ||
                                                    layerType == armnn::LayerType::Permute))
            {
                BackendId cpuBackendId(armnn::Compute::CpuRef);
                layer->SetBackendId(cpuBackendId);
                backendSettings.m_SelectedBackends.insert(cpuBackendId);
            }
            else
            {
                return ReturnWithError(layer);
            }
        }
    }

    return result;
}

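// Selects the sub-graphs made up of layers assigned to the given backend and
// substitutes each one with a single pre-compiled layer. Sub-graphs that fail
// to compile have their layers re-assigned to the remaining backends via
// AssignBackends.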
OptimizationResult InsertPreCompiledLayers(OptimizedNetwork* optNetObjPtr,
                                           const IBackendInternalUniquePtr& backendObjPtr,
                                           BackendSettings& backendSettings,
                                           Optional<std::vector<std::string>&> errMessages)
{
    BOOST_ASSERT(backendObjPtr);

    OptimizationResult result;

    // Select sub-graphs based on backend
    SubGraphSelector::SubGraphs subGraphs =
        SubGraphSelector::SelectSubGraphs(optNetObjPtr->GetGraph(),
                                          // select layers assigned to requested backend
                                          [&](const Layer& layer)
                                          {
                                              return layer.GetType() != LayerType::Input &&
                                                     layer.GetType() != LayerType::Output &&
                                                     layer.GetBackendId() == backendObjPtr->GetId();
                                          });

    if (subGraphs.empty())
    {
        // No sub-graphs found -> return with no error
        return result;
    }

    // Convert sub-graphs and substitute them with pre-compiled layers
    unsigned int index = 0u;
    for (auto& subGraph : subGraphs)
    {
        // Create a pre-compiled layer
        PreCompiledLayer* preCompiledLayer = CreatePreCompiledLayer(optNetObjPtr->GetGraph(),
                                                                    *subGraph,
                                                                    index++,
                                                                    backendObjPtr);
        if (preCompiledLayer)
        {
            // Substitute sub-graph with pre-compiled layer in graph
            optNetObjPtr->GetGraph().SubstituteSubGraph(std::move(subGraph), preCompiledLayer);
        }
        else
        {
            // Failed to create a pre-compiled layer from the sub-graph ->
            // re-assign its layers to the other available backends
            std::stringstream warningMsg;
            warningMsg << "Sub-graph #" << index << " failed to compile on "
                       << backendObjPtr->GetId() << ". Re-assigning backends to "
                       << subGraph->GetLayers().size() << " layers inside sub-graph";
            ReportWarning(warningMsg.str(), errMessages);

            backendSettings.m_IgnoredBackends = { backendObjPtr->GetId() };

            Graph::Iterator firstLayer = subGraph->begin();
            Graph::Iterator lastLayer  = subGraph->end();
            OptimizationResult reassignmentResult = AssignBackends(optNetObjPtr,
                                                                   backendSettings,
                                                                   firstLayer,
                                                                   lastLayer,
                                                                   errMessages);

            if (reassignmentResult.m_Error)
            {
                result.m_Error = true;
                return result;
            }
        }
    }

    return result;
}

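// Optimizes the given network: runs the graph-level optimization passes,
// assigns a backend to every layer, then applies backend-specific
// optimizations. Returns a null IOptimizedNetworkPtr (with details appended to
// errMessages) if no usable backend is found. A typical call, assuming an
// existing IRuntime instance (illustrative only):
//
//     std::vector<armnn::BackendId> prefs = { armnn::Compute::GpuAcc, armnn::Compute::CpuRef };
//     armnn::IOptimizedNetworkPtr optNet =
//         armnn::Optimize(*net, prefs, runtime->GetDeviceSpec());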
IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                              const std::vector<BackendId>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options,
                              Optional<std::vector<std::string>&> errMessages)
{
    if (backendPreferences.empty())
    {
        throw armnn::InvalidArgumentException("Invoked Optimize with no backends specified");
    }

    const Network& network = *boost::polymorphic_downcast<const Network*>(&inNetwork);
    std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());

    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);

    OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get());

    // Perform optimisation passes
    using namespace optimizations;
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(SquashEqualPermuteSiblings(),
                                                                SquashEqualReshapeSiblings(),
                                                                OptimizeInversePermutes(),
                                                                MovePermuteUp(),
                                                                PermuteAsReshape(),
                                                                OptimizeConsecutiveReshapes()));

    // Infer the tensor infos for all output slots. Throws an exception on failure.
    optNetObjPtr->GetGraph().InferTensorInfos();

    // If the Fp32-to-Fp16 optimization is requested, convert the Fp32 network to Fp16
    if (options.m_ReduceFp32ToFp16)
    {
        Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(Fp32NetworkToFp16Converter()));
    }

    // Initialize backend settings
    BackendSettings backendSettings(backendPreferences, deviceSpec);
    if (backendSettings.GetAvailablePreferredBackends().empty())
    {
        std::stringstream failureMsg;
        failureMsg << "None of the preferred backends " << backendPreferences
                   << " are supported. Current platform provides " << backendSettings.m_SupportedBackends;
        ReportError(failureMsg.str(), errMessages);
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // Assign an available backend to each layer
    Graph::Iterator firstLayer = optNetObjPtr->GetGraph().begin();
    Graph::Iterator lastLayer  = optNetObjPtr->GetGraph().end();
    OptimizationResult assignBackendsResult = AssignBackends(optNetObjPtr,
                                                             backendSettings,
                                                             firstLayer,
                                                             lastLayer,
                                                             errMessages);
    if (assignBackendsResult.m_Error)
    {
        // Failed to assign a backend to each layer
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(OptimizeInverseConversionsFp16(),
                                                                OptimizeInverseConversionsFp32()));

    // If the debug flag is set, then insert a DebugLayer after each layer.
    // NOTE: This optimization can only happen strictly after the PreCompiled layers have
    // already been inserted.
    if (options.m_Debug)
    {
        Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(InsertDebugLayer()));
    }

    optNetObjPtr->GetGraph().AddCopyLayers();

    // Convert constants
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsFloatToHalf()));
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsHalfToFloat()));

    // Run backend-specific optimizations
    for (auto&& chosenBackend : backendSettings.m_SelectedBackends)
    {
        auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
        auto backendPtr = factoryFun();
        BOOST_ASSERT(backendPtr.get() != nullptr);

        auto backendSpecificOptimizations = backendPtr->GetOptimizations();
        if (!backendSpecificOptimizations.empty())
        {
            Optimizer::Pass(optNetObjPtr->GetGraph(), backendSpecificOptimizations);
        }
    }

    return optNet;
}

Network::Network()
: m_Graph(std::make_unique<Graph>())
{
}

Network::~Network()
{
}

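// Layers are added through the factory methods below and wired up through
// their input/output slots. A minimal graph sketch (illustrative only):
//
//     Network net;
//     IConnectableLayer* input   = net.AddInputLayer(0);
//     IConnectableLayer* softmax = net.AddSoftmaxLayer(SoftmaxDescriptor());
//     IConnectableLayer* output  = net.AddOutputLayer(0);
//     input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
//     softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));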
IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<InputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
}

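// Shared implementation for the AddFullyConnectedLayer overloads: biases are
// required exactly when m_BiasEnabled is set in the descriptor, and both
// weights and biases are copied into ScopedCpuTensorHandles owned by the layer.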
IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                       const ConstTensor& weights,
                                                       const ConstTensor* biases,
                                                       const char* name)
{
    if (fullyConnectedDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (fullyConnectedDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const ConstTensor& biases,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, &biases, name);
}

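// Shared implementation for the AddConvolution2dLayer overloads; the bias
// handling mirrors AddFullyConnectedLayerImpl above.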
IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
                                                      const ConstTensor& weights,
                                                      const ConstTensor* biases,
                                                      const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const ConstTensor& biases,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor* biases,
    const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor& biases,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
}

IConnectableLayer* Network::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
                                                         const char* name)
{
    return m_Graph->AddLayer<DetectionPostProcessLayer>(descriptor, name);
}

IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
}

IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
}

IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
                                               const char* name)
{
    return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
}

IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
                                                  const char* name)
{
    return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
}

IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
}

IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
                                             const char* name)
{
    return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
}

IConnectableLayer* Network::AddMaximumLayer(const char* name)
{
    return m_Graph->AddLayer<MaximumLayer>(name);
}

IConnectableLayer* Network::AddMinimumLayer(const char* name)
{
    return m_Graph->AddLayer<MinimumLayer>(name);
}

IConnectableLayer* Network::AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
                                           const char* name)
{
    return m_Graph->AddLayer<MergerLayer>(mergerDescriptor, name);
}

IConnectableLayer* Network::AddAdditionLayer(const char* name)
{
    return m_Graph->AddLayer<AdditionLayer>(name);
}

IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
{
    return m_Graph->AddLayer<MultiplicationLayer>(name);
}

IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<OutputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                       const ConstTensor& mean,
                                                       const ConstTensor& variance,
                                                       const ConstTensor& beta,
                                                       const ConstTensor& gamma,
                                                       const char* name)
{
    const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);

    layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
    layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
    layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
    layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);

    return layer;
}

IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor, name);
}

IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                    const char* name)
{
    return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
}

IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
{
    auto layer = m_Graph->AddLayer<ConstantLayer>(name);

    layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);

    return layer;
}

IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
}

IConnectableLayer* Network::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
}

IConnectableLayer* Network::AddFloorLayer(const char* name)
{
    return m_Graph->AddLayer<FloorLayer>(name);
}

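// Creates an LSTM layer and copies the supplied weights and biases into it.
// The basic parameters are always required. The input-gate ("CIFG") parameters
// are required only when m_CifgEnabled is false, while the projection and
// peephole groups are required only when their descriptor flags are set.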
IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor& descriptor,
                                         const LstmInputParams& params,
                                         const char* name)
{
    const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);

    // Lstm basic parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    // Lstm CIFG parameters
    if (!descriptor.m_CifgEnabled)
    {
        if (params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL");
        }
        if (params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                "AddLstmLayer: Recurrent To Input Weights cannot be NULL");
        }
        if (params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL");
        }
        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        // In the VTS tests, cell-to-input weights may be null, even if the other CIFG params are not.
        if (params.m_CellToInputWeights != nullptr)
        {
            layer->m_CifgParameters.m_CellToInputWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    // Lstm projection parameters
    if (descriptor.m_ProjectionEnabled)
    {
        if (params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL");
        }
        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
        if (params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    // Lstm peephole parameters
    if (descriptor.m_PeepholeEnabled)
    {
        if (params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL");
        }
        if (params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL");
        }
        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }
    return layer;
}

IConnectableLayer* Network::AddDivisionLayer(const char* name)
{
    return m_Graph->AddLayer<DivisionLayer>(name);
}

IConnectableLayer* Network::AddSubtractionLayer(const char* name)
{
    return m_Graph->AddLayer<SubtractionLayer>(name);
}

IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
{
    return m_Graph->AddLayer<MeanLayer>(meanDescriptor, name);
}

IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
{
    return m_Graph->AddLayer<PadLayer>(padDescriptor, name);
}

IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}

IConnectableLayer* Network::AddGreaterLayer(const char* name)
{
    return m_Graph->AddLayer<GreaterLayer>(name);
}

IConnectableLayer* Network::AddEqualLayer(const char* name)
{
    return m_Graph->AddLayer<EqualLayer>(name);
}

IConnectableLayer* Network::AddRsqrtLayer(const char* name)
{
    return m_Graph->AddLayer<RsqrtLayer>(name);
}

IConnectableLayer* Network::AddGatherLayer(const char* name)
{
    return m_Graph->AddLayer<GatherLayer>(name);
}

OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
    : m_Graph(std::move(graph))
{
}

OptimizedNetwork::~OptimizedNetwork()
{
}

} // namespace armnn