//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Network.hpp"
#include "Graph.hpp"
#include "Layer.hpp"
#include "DeviceSpec.hpp"
#include "Optimizer.hpp"
#include "SubGraphSelector.hpp"
#include "BackendSettings.hpp"
#include "optimizations/All.hpp"

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/BackendRegistry.hpp>
#include <backendsCommon/IBackendInternal.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>

#include <fcntl.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <sstream>
#include <vector>

#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>
#include <boost/numeric/conversion/converter_policies.hpp>
#include <boost/cast.hpp>

namespace armnn
{

armnn::INetwork* INetwork::CreateRaw()
{
    return new Network();
}

armnn::INetworkPtr INetwork::Create()
{
    return INetworkPtr(CreateRaw(), &INetwork::Destroy);
}

void INetwork::Destroy(INetwork* network)
{
    delete boost::polymorphic_downcast<Network*>(network);
}
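
// Typical usage of the factory functions above (a minimal sketch; the layer
// names and binding ids are illustrative, not taken from this file):
//
//     armnn::INetworkPtr net = armnn::INetwork::Create();
//     armnn::IConnectableLayer* input  = net->AddInputLayer(0, "input");
//     armnn::IConnectableLayer* output = net->AddOutputLayer(0, "output");
//     input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
//
// The INetworkPtr returned by Create() owns the network and invokes
// INetwork::Destroy() automatically, so no manual delete is needed.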

Status Network::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
{
    delete boost::polymorphic_downcast<OptimizedNetwork*>(network);
}

Status OptimizedNetwork::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
{
    return m_Graph->SerializeToDot(stream);
}

struct OptimizationResult
{
    bool m_Warning;
    bool m_Error;

    OptimizationResult()
        : m_Warning(false)
        , m_Error(false)
    {}
};

void ReportError(const std::string& errorMessage,
                 Optional<std::vector<std::string>&> errorMessages)
{
    std::stringstream fullErrorMessage;
    fullErrorMessage << "ERROR: " << errorMessage;
    BOOST_LOG_TRIVIAL(error) << fullErrorMessage.str();
    if (errorMessages)
    {
        errorMessages.value().push_back(fullErrorMessage.str());
    }
}

void ReportWarning(const std::string& warningMessage,
                   Optional<std::vector<std::string>&> warningMessages)
{
    std::stringstream fullWarningMessage;
    fullWarningMessage << "WARNING: " << warningMessage;
    BOOST_LOG_TRIVIAL(warning) << fullWarningMessage.str();
    if (warningMessages)
    {
        warningMessages.value().push_back(fullWarningMessage.str());
    }
}

bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
{
    bool noErrors = true;
    unsigned int numOutputs = layer->GetNumOutputSlots();
    for (unsigned int i = 0; i < numOutputs; i++)
    {
        const OutputSlot& outputSlot = layer->GetOutputSlot(i);
        const TensorInfo& info = outputSlot.GetTensorInfo();
        if (DataType::QuantisedAsymm8 == info.GetDataType() && 0.f == info.GetQuantizationScale())
        {
            noErrors = false;
            std::stringstream ss;
            ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
               << " (" << layer->GetNameStr() << ") is of type"
               << " Quantized 8 bit but its scale parameter has not been set";
            ReportError(ss.str(), errMessages);
        }
    }
    return noErrors;
}
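
// For reference, a quantized tensor passes the check above when its scale is
// non-zero (a sketch; the shape and quantization values are illustrative):
//
//     armnn::TensorInfo info({ 1, 2, 2, 1 }, armnn::DataType::QuantisedAsymm8);
//     info.SetQuantizationScale(0.1f);
//     info.SetQuantizationOffset(128);
//     layer->GetOutputSlot(0).SetTensorInfo(info);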

OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  Graph::Iterator& firstLayer,
                                  Graph::Iterator& lastLayer,
                                  Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose a meaningful error message before returning with error
    auto ReturnWithError = [&](const Layer* layer)
    {
        std::stringstream failureMsg;
        failureMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                   << " is not supported on any preferred backend " << backendSettings.m_PreferredBackends;
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    };

    auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
    if (availablePreferredBackends.empty())
    {
        std::stringstream failureMsg;
        failureMsg << "No preferred backends are available";
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    }

    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        auto layer = *it;
        DataType dataType = layer->GetDataType();
        std::string reasonIfUnsupported;
        bool found = false;
        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
        {
            // Don't bomb immediately, find all the quantized outputs
            // which haven't had a scale set and report them all back.
            result.m_Error = true;
        }

        for (const auto& backend : availablePreferredBackends)
        {
            // Need to set the compute device on the layer
            // before we can check if it is supported
            layer->SetBackendId(backend);
            if (!IWorkloadFactory::IsLayerSupported(*layer, dataType, reasonIfUnsupported))
            {
                if (dataType == DataType::Float16)
                {
                    if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                        && layer->GetType() != LayerType::ConvertFp32ToFp16
                        && layer->GetType() != LayerType::ConvertFp16ToFp32)
                    {
                        // Insert FP16 -> FP32 conversion layer before current layer
                        std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers =
                            InsertConvertFp16ToFp32LayersBefore(optNetObjPtr->GetGraph(), *layer);

                        // Insert FP32 -> FP16 conversion layer after current layer
                        std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers =
                            InsertConvertFp32ToFp16LayersAfter(optNetObjPtr->GetGraph(), *layer);

                        // Assign a supported backend to the newly introduced conversion layers
                        auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                        {
                            bool supportedBackendFound = false;
                            std::string reasonIfUnsupported;

                            // Try preferred backend first
                            layer->SetBackendId(preferredBackend);
                            if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                   EmptyOptional(),
                                                                   reasonIfUnsupported))
                            {
                                supportedBackendFound = true;
                            }
                            else
                            {
                                for (const auto& backend : availablePreferredBackends)
                                {
                                    // Skip preferred backend (we already determined that it is not supported)
                                    if (backend == preferredBackend)
                                    {
                                        continue;
                                    }

                                    layer->SetBackendId(backend);
                                    if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                           EmptyOptional(),
                                                                           reasonIfUnsupported))
                                    {
                                        supportedBackendFound = true;
                                        break;
                                    }
                                }
                            }

                            return supportedBackendFound;
                        };

                        for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        found = true;
                        break;
                    }
                }
                std::stringstream warningMsg;
                warningMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                           << " is not supported on requested backend " << layer->GetBackendId().Get()
                           << " for data type " << GetDataTypeName(dataType)
                           << " (reason: " << reasonIfUnsupported
                           << "), falling back to the next backend.";
                ReportWarning(warningMsg.str(), errMessages);
            }
            else
            {
                found = true;
                backendSettings.m_SelectedBackends.insert(backend);
                break;
            }
        }

        // If the layer is unsupported by any devices, log it and return with error
        if (!found)
        {
            // NOTE: if we have not got CpuRef as a fallback and the layer is one of the types
            // that are not available as accelerated operations, or are only available under
            // certain conditions (currently MemCopy, Constant and Permute), set the compute
            // device on the layer to CpuRef anyway
            armnn::LayerType layerType = layer->GetType();
            if (!backendSettings.IsCpuRefUsed() && (layerType == armnn::LayerType::MemCopy ||
                                                    layerType == armnn::LayerType::Constant ||
                                                    layerType == armnn::LayerType::Permute))
            {
                BackendId cpuBackendId(armnn::Compute::CpuRef);
                layer->SetBackendId(cpuBackendId);
                backendSettings.m_SelectedBackends.insert(cpuBackendId);
            }
            else
            {
                return ReturnWithError(layer);
            }
        }
    }

    return result;
}

OptimizationResult InsertPreCompiledLayers(OptimizedNetwork* optNetObjPtr,
                                           const IBackendInternalUniquePtr& backendObjPtr,
                                           BackendSettings& backendSettings,
                                           Optional<std::vector<std::string>&> errMessages)
{
    BOOST_ASSERT(backendObjPtr);

    OptimizationResult result;

    // Select sub-graphs based on backend
    SubGraphSelector::SubGraphs subGraphs =
        SubGraphSelector::SelectSubGraphs(optNetObjPtr->GetGraph(),
                                          // Select layers assigned to the requested backend
                                          [&](const Layer& layer)
                                          {
                                              return layer.GetType() != LayerType::Input &&
                                                     layer.GetType() != LayerType::Output &&
                                                     layer.GetBackendId() == backendObjPtr->GetId();
                                          });

    if (subGraphs.empty())
    {
        // No sub-graphs found -> return with no error
        return result;
    }

    // Convert sub-graphs and substitute them with pre-compiled layers
    unsigned int index = 0u;
    for (auto& subGraph : subGraphs)
    {
        // Create a pre-compiled layer
        PreCompiledLayer* preCompiledLayer = CreatePreCompiledLayer(optNetObjPtr->GetGraph(),
                                                                    *subGraph,
                                                                    index++,
                                                                    backendObjPtr);
        if (preCompiledLayer)
        {
            // Substitute sub-graph with pre-compiled layer in graph
            optNetObjPtr->GetGraph().SubstituteSubGraph(std::move(subGraph), preCompiledLayer);
        }
        else
        {
            // Failed to create a pre-compiled layer from the sub-graph ->
            // re-assign the sub-graph's layers to other available backends
            std::stringstream warningMsg;
            warningMsg << "Sub-graph #" << index << " failed to compile on "
                       << backendObjPtr->GetId() << ". Re-assigning backends to "
                       << subGraph->GetLayers().size() << " layers inside sub-graph";
            ReportWarning(warningMsg.str(), errMessages);

            backendSettings.m_IgnoredBackends = { backendObjPtr->GetId() };

            Graph::Iterator firstLayer = subGraph->begin();
            Graph::Iterator lastLayer = subGraph->end();
            OptimizationResult reassignmentResult = AssignBackends(optNetObjPtr,
                                                                   backendSettings,
                                                                   firstLayer,
                                                                   lastLayer,
                                                                   errMessages);

            if (reassignmentResult.m_Error)
            {
                result.m_Error = true;
                return result;
            }
        }
    }

    return result;
}

IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                              const std::vector<BackendId>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options,
                              Optional<std::vector<std::string>&> errMessages)
{
    if (backendPreferences.empty())
    {
        throw armnn::InvalidArgumentException("Invoked Optimize with no backends specified");
    }

    const Network& network = *boost::polymorphic_downcast<const Network*>(&inNetwork);
    std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());

    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);

    OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get());

    // Perform optimisation passes
    using namespace optimizations;
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(SquashEqualPermuteSiblings(),
                                                                SquashEqualReshapeSiblings(),
                                                                OptimizeInversePermutes(),
                                                                MovePermuteUp(),
                                                                PermuteAsReshape(),
                                                                OptimizeConsecutiveReshapes()));

    // Infer the tensor infos for all output slots. Throws an exception on failure.
    optNetObjPtr->GetGraph().InferTensorInfos();

    // If Fp32 to Fp16 optimization is set convert Fp32 network to Fp16
    if (options.m_ReduceFp32ToFp16)
    {
        Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(Fp32NetworkToFp16Converter()));
    }

    // Initialize backend settings
    BackendSettings backendSettings(backendPreferences, deviceSpec);
    if (backendSettings.GetAvailablePreferredBackends().empty())
    {
        std::stringstream failureMsg;
        failureMsg << "None of the preferred backends " << backendPreferences
                   << " are supported. Current platform provides " << backendSettings.m_SupportedBackends;
        ReportError(failureMsg.str(), errMessages);
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // Assign an available backend to each layer
    Graph::Iterator firstLayer = optNetObjPtr->GetGraph().begin();
    Graph::Iterator lastLayer = optNetObjPtr->GetGraph().end();
    OptimizationResult assignBackendsResult = AssignBackends(optNetObjPtr,
                                                             backendSettings,
                                                             firstLayer,
                                                             lastLayer,
                                                             errMessages);
    if (assignBackendsResult.m_Error)
    {
        // Failed to assign a backend to each layer
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(OptimizeInverseConversionsFp16(),
                                                                OptimizeInverseConversionsFp32()));

    // Insert pre-compiled layers where required by the backend
    // TODO: This is a dummy/default backend id used for making the code build until
    //       we've properly refactored the optimizer
    const BackendId backendId(Compute::Undefined);
    auto const& backendRegistry = BackendRegistryInstance();
    if (backendRegistry.IsBackendRegistered(backendId))
    {
        // Obtain a backend object using the registered factory
        auto backendFactory = backendRegistry.GetFactory(backendId);
        auto backendObjPtr = backendFactory();

        OptimizationResult insertPreCompiledLayersResult = InsertPreCompiledLayers(optNetObjPtr,
                                                                                   backendObjPtr,
                                                                                   backendSettings,
                                                                                   errMessages);
        if (insertPreCompiledLayersResult.m_Error)
        {
            // Failed to insert pre-compiled layers
            return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
        }
    }

    // If the debug flag is set, then insert a DebugLayer after each layer.
    // NOTE: This optimization can only happen strictly after the PreCompiled layers have
    //       already been inserted
    if (options.m_Debug)
    {
        Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(InsertDebugLayer()));
    }

    optNetObjPtr->GetGraph().AddCopyLayers();

    // Convert constants
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsFloatToHalf()));
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsHalfToFloat()));

    // Run backend-specific optimizations
    for (auto&& chosenBackend : backendSettings.m_SelectedBackends)
    {
        auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
        auto backendPtr = factoryFun();
        BOOST_ASSERT(backendPtr.get() != nullptr);

        auto backendSpecificOptimizations = backendPtr->GetOptimizations();
        if (!backendSpecificOptimizations.empty())
        {
            Optimizer::Pass(optNetObjPtr->GetGraph(), backendSpecificOptimizations);
        }
    }

    return optNet;
}
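
// Example invocation (a sketch; the backend list is illustrative and the
// variables `net` and `runtime` are assumed to exist in the caller):
//
//     std::vector<std::string> errMessages;
//     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net,
//                                                          { armnn::Compute::CpuAcc, armnn::Compute::CpuRef },
//                                                          runtime->GetDeviceSpec(),
//                                                          armnn::OptimizerOptions(),
//                                                          errMessages);
//
// A null IOptimizedNetworkPtr signals failure; the reasons are appended to errMessages.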

Network::Network()
: m_Graph(std::make_unique<Graph>())
{
}

Network::~Network()
{
}

IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<InputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
}

IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                       const ConstTensor& weights,
                                                       const ConstTensor* biases,
                                                       const char* name)
{
    if (fullyConnectedDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (fullyConnectedDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const ConstTensor& biases,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, &biases, name);
}

IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
                                                      const ConstTensor& weights,
                                                      const ConstTensor* biases,
                                                      const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const ConstTensor& biases,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
}
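
// Example: adding a bias-enabled 3x3 convolution (a sketch; the shapes,
// descriptor values and the weightsData/biasData buffers are illustrative):
//
//     armnn::Convolution2dDescriptor convDesc;
//     convDesc.m_StrideX     = 1;
//     convDesc.m_StrideY     = 1;
//     convDesc.m_BiasEnabled = true;
//
//     armnn::ConstTensor weights(armnn::TensorInfo({ 8, 3, 3, 3 }, armnn::DataType::Float32), weightsData);
//     armnn::ConstTensor biases(armnn::TensorInfo({ 8 }, armnn::DataType::Float32), biasData);
//
//     armnn::IConnectableLayer* conv = network.AddConvolution2dLayer(convDesc, weights, biases, "conv1");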

IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor* biases,
    const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor,
                                                                      name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor& biases,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
}

IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
}

IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
}

IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
                                               const char* name)
{
    return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
}

IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
                                                  const char* name)
{
    return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
}

IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
}

IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
                                             const char* name)
{
    return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
}

IConnectableLayer* Network::AddMaximumLayer(const char* name)
{
    return m_Graph->AddLayer<MaximumLayer>(name);
}

IConnectableLayer* Network::AddMinimumLayer(const char* name)
{
    return m_Graph->AddLayer<MinimumLayer>(name);
}

IConnectableLayer* Network::AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
                                           const char* name)
{
    return m_Graph->AddLayer<MergerLayer>(mergerDescriptor, name);
}

IConnectableLayer* Network::AddAdditionLayer(const char* name)
{
    return m_Graph->AddLayer<AdditionLayer>(name);
}

IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
{
    return m_Graph->AddLayer<MultiplicationLayer>(name);
}

IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<OutputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                       const ConstTensor& mean,
                                                       const ConstTensor& variance,
                                                       const ConstTensor& beta,
                                                       const ConstTensor& gamma,
                                                       const char* name)
{
    const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);

    layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
    layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
    layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
    layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);

    return layer;
}

IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor, name);
}

IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                    const char* name)
{
    return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
}

IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
{
    auto layer = m_Graph->AddLayer<ConstantLayer>(name);

    layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);

    return layer;
}

IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
}

IConnectableLayer* Network::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
}

IConnectableLayer* Network::AddFloorLayer(const char* name)
{
    return m_Graph->AddLayer<FloorLayer>(name);
}

IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor& descriptor,
                                         const LstmInputParams& params,
                                         const char* name)
{
    const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);

    // LSTM basic parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    // LSTM CIFG parameters
    if (!descriptor.m_CifgEnabled)
    {
        if (params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL");
        }
        if (params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                "AddLstmLayer: Recurrent To Input Weights cannot be NULL");
        }
        if (params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL");
        }
        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        // In the VTS tests, cell-to-input weights may be null, even if the other CIFG params are not.
        if (params.m_CellToInputWeights != nullptr)
        {
            layer->m_CifgParameters.m_CellToInputWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    // LSTM projection parameters
    if (descriptor.m_ProjectionEnabled)
    {
        if (params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL");
        }
        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
        if (params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    // LSTM peephole parameters
    if (descriptor.m_PeepholeEnabled)
    {
        if (params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL");
        }
        if (params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL");
        }
        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }
    return layer;
}
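
// The optional parameter groups required above are driven by the descriptor
// flags. A minimal sketch, assuming LstmDescriptor defaults to CIFG enabled
// with peephole and projection disabled (the ConstTensor variables are
// illustrative):
//
//     armnn::LstmDescriptor lstmDesc;   // m_CifgEnabled defaults to true
//     armnn::LstmInputParams params;
//     params.m_InputToForgetWeights     = &inputToForgetWeights;
//     params.m_InputToCellWeights       = &inputToCellWeights;
//     params.m_InputToOutputWeights     = &inputToOutputWeights;
//     params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
//     params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
//     params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
//     params.m_ForgetGateBias           = &forgetGateBias;
//     params.m_CellBias                 = &cellBias;
//     params.m_OutputGateBias           = &outputGateBias;
//     armnn::IConnectableLayer* lstm = network.AddLstmLayer(lstmDesc, params, "lstm");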

IConnectableLayer* Network::AddDivisionLayer(const char* name)
{
    return m_Graph->AddLayer<DivisionLayer>(name);
}

IConnectableLayer* Network::AddSubtractionLayer(const char* name)
{
    return m_Graph->AddLayer<SubtractionLayer>(name);
}

IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
{
    return m_Graph->AddLayer<MeanLayer>(meanDescriptor, name);
}

IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
{
    return m_Graph->AddLayer<PadLayer>(padDescriptor, name);
}

IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}

IConnectableLayer* Network::AddGreaterLayer(const char* name)
{
    return m_Graph->AddLayer<GreaterLayer>(name);
}

IConnectableLayer* Network::AddEqualLayer(const char* name)
{
    return m_Graph->AddLayer<EqualLayer>(name);
}

IConnectableLayer* Network::AddRsqrtLayer(const char* name)
{
    return m_Graph->AddLayer<RsqrtLayer>(name);
}

IConnectableLayer* Network::AddGatherLayer(const char* name)
{
    return m_Graph->AddLayer<GatherLayer>(name);
}

OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
    : m_Graph(std::move(graph))
{
}

OptimizedNetwork::~OptimizedNetwork()
{
}

} // namespace armnn