//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5#include "Network.hpp"
6#include "Graph.hpp"
7#include "Layer.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +01008#include "DeviceSpec.hpp"
telsoa014fcda012018-03-09 14:13:49 +00009#include "Optimizer.hpp"
David Beckac42efd2018-09-26 17:41:13 +010010#include "optimizations/All.hpp"
telsoa014fcda012018-03-09 14:13:49 +000011
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <backendsCommon/CpuTensorHandle.hpp>
13#include <backendsCommon/WorkloadFactory.hpp>
David Beck263e3492018-11-09 14:46:40 +000014#include <backendsCommon/BackendRegistry.hpp>
15#include <backendsCommon/IBackendInternal.hpp>
David Beckac42efd2018-09-26 17:41:13 +010016
17#include <armnn/Exceptions.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018#include <armnn/Utils.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010019#include <armnn/TypesUtils.hpp>
telsoa014fcda012018-03-09 14:13:49 +000020
21#include <fcntl.h>
22#include <algorithm>
23#include <fstream>
24#include <memory>
telsoa01c577f2c2018-08-31 09:22:23 +010025#include <vector>
26#include <algorithm>
telsoa014fcda012018-03-09 14:13:49 +000027
28#include <boost/assert.hpp>
29#include <boost/format.hpp>
30#include <boost/log/trivial.hpp>
31#include <boost/numeric/conversion/converter_policies.hpp>
32#include <boost/cast.hpp>
33
namespace armnn
{

armnn::INetwork* INetwork::CreateRaw()
{
    return new Network();
}

armnn::INetworkPtr INetwork::Create()
{
    return INetworkPtr(CreateRaw(), &INetwork::Destroy);
}
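
// A minimal usage sketch for the factory functions above (layer ids, names and
// the omitted TensorInfo setup are illustrative assumptions, not a full example):
//
//     armnn::INetworkPtr net = armnn::INetwork::Create();
//     armnn::IConnectableLayer* input  = net->AddInputLayer(0, "input");
//     armnn::IConnectableLayer* output = net->AddOutputLayer(0, "output");
//     input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
//
// Prefer Create() over CreateRaw(): the returned INetworkPtr calls
// INetwork::Destroy() automatically when it goes out of scope.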

void INetwork::Destroy(INetwork* network)
{
    delete boost::polymorphic_downcast<Network*>(network);
}

Status Network::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
{
    delete boost::polymorphic_downcast<OptimizedNetwork*>(network);
}

Status OptimizedNetwork::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
{
    return m_Graph->SerializeToDot(stream);
}

bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
{
    bool noErrors = true;
    unsigned int numOutputs = layer->GetNumOutputSlots();
    for (unsigned int i = 0; i < numOutputs; i++) {
        const OutputSlot& outputSlot = layer->GetOutputSlot(i);
        const TensorInfo& info = outputSlot.GetTensorInfo();
        if (DataType::QuantisedAsymm8 == info.GetDataType()) {
            if (0.f == info.GetQuantizationScale()) {
                noErrors = false;
                std::stringstream ss;
                ss << "ERROR: output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
                   << " (" << layer->GetNameStr() << ") is of type"
                   << " Quantized 8 bit but its scale parameter has not been set";
                BOOST_LOG_TRIVIAL(warning) << ss.str();
                if (errMessages) {
                    errMessages.value().push_back(ss.str());
                }
            }
        }
    }
    return noErrors;
}
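
// Callers can avoid the error reported above by setting a quantization scale on
// every QuantisedAsymm8 output slot before optimizing. A minimal sketch, with
// purely illustrative shape and quantization values:
//
//     armnn::TensorInfo info({1, 16}, armnn::DataType::QuantisedAsymm8);
//     info.SetQuantizationScale(1.0f / 255.0f);
//     info.SetQuantizationOffset(0);
//     layer->GetOutputSlot(0).SetTensorInfo(info);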
97
telsoa01c577f2c2018-08-31 09:22:23 +010098IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
David Beckf0b48452018-10-19 15:20:56 +010099 const std::vector<BackendId>& backendPreferences,
telsoa01c577f2c2018-08-31 09:22:23 +0100100 const IDeviceSpec& deviceSpec,
jimfly016b0b53d2018-10-08 14:43:01 +0100101 const OptimizerOptions& options,
102 Optional<std::vector<std::string>&> errMessages)
telsoa014fcda012018-03-09 14:13:49 +0000103{
telsoa01c577f2c2018-08-31 09:22:23 +0100104 if (backendPreferences.empty()) {
105 throw armnn::InvalidArgumentException("Invoked Optimize with no backends specified");
106 }
telsoa014fcda012018-03-09 14:13:49 +0000107 const Network& network = *boost::polymorphic_downcast<const Network*>(&inNetwork);
108 std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());
109
telsoa01c577f2c2018-08-31 09:22:23 +0100110 auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);
telsoa014fcda012018-03-09 14:13:49 +0000111
telsoa01c577f2c2018-08-31 09:22:23 +0100112 OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get());
113
114 // Perform optimisation passes
115 using namespace optimizations;
116 Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(SquashEqualPermuteSiblings(),
117 SquashEqualReshapeSiblings(),
118 OptimizeInversePermutes(),
119 MovePermuteUp(),
120 PermuteAsReshape(),
121 OptimizeConsecutiveReshapes()));
telsoa014fcda012018-03-09 14:13:49 +0000122
123 // Infer the tensor infos for all output slots. Throws an exception on failure.
telsoa01c577f2c2018-08-31 09:22:23 +0100124 optNetObjPtr->GetGraph().InferTensorInfos();
telsoa014fcda012018-03-09 14:13:49 +0000125
telsoa01c577f2c2018-08-31 09:22:23 +0100126 // if Fp32 to Fp16 optimization is set convert Fp32 network to Fp16
127 if (options.m_ReduceFp32ToFp16)
telsoa014fcda012018-03-09 14:13:49 +0000128 {
telsoa01c577f2c2018-08-31 09:22:23 +0100129 Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(Fp32NetworkToFp16Converter()));
telsoa014fcda012018-03-09 14:13:49 +0000130 }
131
keidav01738c2e62018-12-11 16:14:20 +0000132 // if debug optimization is set, then print out data after each layer
133 if (options.m_Debug)
134 {
135 Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(InsertDebugLayer()));
136 }
137
telsoa01c577f2c2018-08-31 09:22:23 +0100138 // We know that DeviceSpec should be the only implementation of IDeviceSpec.
139 const DeviceSpec& spec = *boost::polymorphic_downcast<const DeviceSpec*>(&deviceSpec);
David Beck056be3c2018-10-22 13:16:00 +0100140 auto const& supportedBackends = spec.GetSupportedBackends();
telsoa014fcda012018-03-09 14:13:49 +0000141
telsoa01c577f2c2018-08-31 09:22:23 +0100142 // determine which of the preferred backends we have available for use
143 // and whether we have specified CpuRef as one of those backends.
144 bool cpuRefUsed = false;
David Beckf0b48452018-10-19 15:20:56 +0100145 std::vector<BackendId> availablePreferredBackends;
146 for (const auto& backend : backendPreferences)
telsoa01c577f2c2018-08-31 09:22:23 +0100147 {
148 // Check if the backend is in the available backend devices.
David Beck056be3c2018-10-22 13:16:00 +0100149 if (supportedBackends.count(backend) > 0)
telsoa01c577f2c2018-08-31 09:22:23 +0100150 {
151 availablePreferredBackends.push_back(backend);
David Beckf0b48452018-10-19 15:20:56 +0100152 if (backend == armnn::Compute::CpuRef) {
telsoa01c577f2c2018-08-31 09:22:23 +0100153 cpuRefUsed = true;
154 }
155 }
156 }
157 if (availablePreferredBackends.empty()) {
jimfly016b0b53d2018-10-08 14:43:01 +0100158 std::stringstream failureMsg;
159 failureMsg << "ERROR: None of the preferred backends " << backendPreferences
David Beck056be3c2018-10-22 13:16:00 +0100160 << " are supported. Current platform provides " << supportedBackends;
jimfly016b0b53d2018-10-08 14:43:01 +0100161 BOOST_LOG_TRIVIAL(warning) << failureMsg.str();
162 if (errMessages) {
163 errMessages.value().push_back(failureMsg.str());
164 }
165 return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
telsoa01c577f2c2018-08-31 09:22:23 +0100166 }
167
168 auto ReturnWithError = [&](Layer* layer)
169 {
jimfly016b0b53d2018-10-08 14:43:01 +0100170 std::stringstream failureMsg;
171 failureMsg << "ERROR: Layer of type " << GetLayerTypeAsCString(layer->GetType())
172 << " is not supported on any preferred backend " << backendPreferences;
173 BOOST_LOG_TRIVIAL(warning) << failureMsg.str();
174 if (errMessages) {
175 errMessages.value().push_back(failureMsg.str());
176 }
telsoa01c577f2c2018-08-31 09:22:23 +0100177 return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
178 };
179
David Beck263e3492018-11-09 14:46:40 +0000180 // The backends that we choose to run layers on
181 std::unordered_set<BackendId> chosenBackends;
182
telsoa01c577f2c2018-08-31 09:22:23 +0100183 // Assign a compute device for all nodes
jimfly016b0b53d2018-10-08 14:43:01 +0100184 bool bErrorFound = false;
telsoa01c577f2c2018-08-31 09:22:23 +0100185 for (auto&& layer : optNetObjPtr->GetGraph())
186 {
187 DataType dataType = layer->GetDataType();
188 std::string reasonIfUnsupported;
189 bool found = false;
jimfly016b0b53d2018-10-08 14:43:01 +0100190 if (!CheckScaleSetOnQuantizedType(layer, errMessages))
191 {
192 // don't bomb immediately, find all the quantized outputs
193 // which haven't had a scale set and report them all back.
194 bErrorFound = true;
195 }
David Beckf0b48452018-10-19 15:20:56 +0100196 for (const auto& backend : availablePreferredBackends)
telsoa01c577f2c2018-08-31 09:22:23 +0100197 {
198 // need to set the compute device on the layer
199 // before we can check if it is supported
David Beck33f0ae02018-10-18 15:13:56 +0100200 layer->SetBackendId(backend);
telsoa01c577f2c2018-08-31 09:22:23 +0100201 if (!IWorkloadFactory::IsLayerSupported(*layer, dataType, reasonIfUnsupported))
202 {
203 if (dataType == DataType::Float16)
204 {
205 if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
206 && layer->GetType() != LayerType::ConvertFp32ToFp16
207 && layer->GetType() != LayerType::ConvertFp16ToFp32)
208 {
209 // Insert FP16 -> FP32 conversion layer before current layer
210 std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers =
211 InsertConvertFp16ToFp32LayersBefore(optNetObjPtr->GetGraph(), *layer);
212
213 // Insert FP32 -> FP16 conversion layer after current layer
214 std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers =
215 InsertConvertFp32ToFp16LayersAfter(optNetObjPtr->GetGraph(), *layer);
216
217 // Assign a supported backend to the newly introduced conversion layers
David Beckf0b48452018-10-19 15:20:56 +0100218 auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
telsoa01c577f2c2018-08-31 09:22:23 +0100219 {
220 bool supportedBackendFound = false;
221 std::string reasonIfUnsupported;
222
223 // Try preferred backend first
David Beck33f0ae02018-10-18 15:13:56 +0100224 layer->SetBackendId(preferredBackend);
David Beck29c75de2018-10-23 13:35:58 +0100225 if (IWorkloadFactory::IsLayerSupported(*layer,
226 EmptyOptional(),
227 reasonIfUnsupported))
telsoa01c577f2c2018-08-31 09:22:23 +0100228 {
229 supportedBackendFound = true;
230 }
231 else
232 {
David Beckf0b48452018-10-19 15:20:56 +0100233 for (const auto& backend : availablePreferredBackends)
telsoa01c577f2c2018-08-31 09:22:23 +0100234 {
235 // Skip preferred backend (we already determined that it is not supported)
236 if (backend == preferredBackend)
237 {
238 continue;
239 }
240
David Beck33f0ae02018-10-18 15:13:56 +0100241 layer->SetBackendId(backend);
David Beck29c75de2018-10-23 13:35:58 +0100242 if (IWorkloadFactory::IsLayerSupported(*layer,
243 EmptyOptional(),
244 reasonIfUnsupported))
telsoa01c577f2c2018-08-31 09:22:23 +0100245 {
246 supportedBackendFound = true;
247 break;
248 }
249 }
250 }
251
252 return supportedBackendFound;
253 };
254
255 for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
256 {
257 if (!AssignFirstSupportedBackend(convertLayer, backend))
258 {
259 return ReturnWithError(convertLayer);
260 }
261 }
262
263 for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
264 {
265 if (!AssignFirstSupportedBackend(convertLayer, backend))
266 {
267 return ReturnWithError(convertLayer);
268 }
269 }
270
271 found = true;
272 break;
273 }
274 }
jimfly016b0b53d2018-10-08 14:43:01 +0100275 std::stringstream warningMsg;
276 warningMsg << "WARNING: Layer of type " << GetLayerTypeAsCString(layer->GetType())
David Beck33f0ae02018-10-18 15:13:56 +0100277 << " is not supported on requested backend " << layer->GetBackendId().Get()
jimfly016b0b53d2018-10-08 14:43:01 +0100278 << " for data type " << GetDataTypeName(dataType)
279 << " (reason: " << reasonIfUnsupported
280 << "), falling back to the next backend.";
jimfly01f6ba7472018-12-04 10:09:52 +0000281 std::string wMsg = warningMsg.str();
282 BOOST_LOG_TRIVIAL(warning) << wMsg;
jimfly016b0b53d2018-10-08 14:43:01 +0100283 if (errMessages) {
jimfly01f6ba7472018-12-04 10:09:52 +0000284 errMessages.value().push_back(wMsg);
jimfly016b0b53d2018-10-08 14:43:01 +0100285 }
telsoa01c577f2c2018-08-31 09:22:23 +0100286 }
287 else
288 {
289 found = true;
David Beck263e3492018-11-09 14:46:40 +0000290 chosenBackends.insert(backend);
telsoa01c577f2c2018-08-31 09:22:23 +0100291 break;
292 }
293 }
294
295 // If the layer is unsupported by any devices, log and return a null network.
296 if (!found) {
297 // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
298 // fallback we should set the compute device on the layer to CpuRef (these are not
299 // available as accelerated operations, or are only available under certain
300 // conditions, currently they comprise MemCopy, Constant, Permute)
301 armnn::LayerType layerType = layer->GetType();
302 if (!cpuRefUsed && (layerType == armnn::LayerType::MemCopy ||
303 layerType == armnn::LayerType::Constant ||
304 layerType == armnn::LayerType::Permute))
305 {
David Beck33f0ae02018-10-18 15:13:56 +0100306 layer->SetBackendId(armnn::Compute::CpuRef);
David Beck263e3492018-11-09 14:46:40 +0000307 chosenBackends.insert(armnn::Compute::CpuRef);
telsoa01c577f2c2018-08-31 09:22:23 +0100308 }
309 else
310 {
311 return ReturnWithError(layer);
312 }
313 }
314 }
jimfly016b0b53d2018-10-08 14:43:01 +0100315 if (bErrorFound)
316 {
317 return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
318 }
telsoa01c577f2c2018-08-31 09:22:23 +0100319
320 Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(OptimizeInverseConversionsFp16(),
321 OptimizeInverseConversionsFp32()));
322
323 optNetObjPtr->GetGraph().AddCopyLayers();
324
325 // Convert constants
326 Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsFloatToHalf()));
327 Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsHalfToFloat()));
328
David Beck263e3492018-11-09 14:46:40 +0000329 // Run backend specific optimizations
330 for (auto&& chosenBackend : chosenBackends)
331 {
332 auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
333 auto backendPtr = factoryFun();
334 BOOST_ASSERT(backendPtr.get() != nullptr);
335
336 auto backendSpecificOptimizations = backendPtr->GetOptimizations();
337 if (!backendSpecificOptimizations.empty())
338 {
339 Optimizer::Pass(optNetObjPtr->GetGraph(), backendSpecificOptimizations);
340 }
341 }
342
telsoa01c577f2c2018-08-31 09:22:23 +0100343 return optNet;
telsoa014fcda012018-03-09 14:13:49 +0000344}
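
// A sketch of a typical call to Optimize(), assuming a network "net" built via
// INetwork::Create(); the backend order is an illustrative preference, not a
// requirement:
//
//     armnn::IRuntimePtr runtime = armnn::IRuntime::Create(armnn::IRuntime::CreationOptions());
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
//     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
//     if (!optNet) { /* none of the preferred backends could run the network */ }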

Network::Network()
: m_Graph(std::make_unique<Graph>())
{
}

Network::~Network()
{
}

IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<InputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
}

IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                       const ConstTensor& weights,
                                                       const ConstTensor* biases,
                                                       const char* name)
{
    if (fullyConnectedDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (fullyConnectedDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const ConstTensor& biases,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, &biases, name);
}
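
// A sketch of adding a fully connected layer with bias; all shapes and values
// below are hypothetical, and "net" stands for any INetworkPtr:
//
//     armnn::FullyConnectedDescriptor fcDesc;
//     fcDesc.m_BiasEnabled = true;
//     std::vector<float> weightValues(10 * 20, 0.5f);
//     armnn::ConstTensor weights(armnn::TensorInfo({10, 20}, armnn::DataType::Float32), weightValues);
//     std::vector<float> biasValues(20, 0.0f);
//     armnn::ConstTensor bias(armnn::TensorInfo({1, 20}, armnn::DataType::Float32), biasValues);
//     armnn::IConnectableLayer* fc = net->AddFullyConnectedLayer(fcDesc, weights, bias, "fc1");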

IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
                                                      const ConstTensor& weights,
                                                      const ConstTensor* biases,
                                                      const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const ConstTensor& biases,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor* biases,
    const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor& biases,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
}

IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
}

IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
}

IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
                                               const char* name)
{
    return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
}

IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
                                                  const char* name)
{
    return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
}

IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
}

IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
                                             const char* name)
{
    return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
}

IConnectableLayer* Network::AddMaximumLayer(const char* name)
{
    return m_Graph->AddLayer<MaximumLayer>(name);
}

IConnectableLayer* Network::AddMinimumLayer(const char* name)
{
    return m_Graph->AddLayer<MinimumLayer>(name);
}

IConnectableLayer* Network::AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
                                           const char* name)
{
    return m_Graph->AddLayer<MergerLayer>(mergerDescriptor, name);
}

IConnectableLayer* Network::AddAdditionLayer(const char* name)
{
    return m_Graph->AddLayer<AdditionLayer>(name);
}

IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
{
    return m_Graph->AddLayer<MultiplicationLayer>(name);
}

IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<OutputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                       const ConstTensor& mean,
                                                       const ConstTensor& variance,
                                                       const ConstTensor& beta,
                                                       const ConstTensor& gamma,
                                                       const char* name)
{
    const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);

    layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
    layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
    layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
    layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);

    return layer;
}
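
// The mean/variance/beta/gamma tensors above are per-channel statistics copied
// into the layer. A sketch of a call, with hypothetical channel count and data:
//
//     armnn::TensorInfo statsInfo({16}, armnn::DataType::Float32);
//     std::vector<float> zeros(16, 0.0f), ones(16, 1.0f);
//     armnn::BatchNormalizationDescriptor bnDesc;   // default m_Eps
//     armnn::IConnectableLayer* bn = net->AddBatchNormalizationLayer(
//         bnDesc,
//         armnn::ConstTensor(statsInfo, zeros),     // mean
//         armnn::ConstTensor(statsInfo, ones),      // variance
//         armnn::ConstTensor(statsInfo, zeros),     // beta
//         armnn::ConstTensor(statsInfo, ones),      // gamma
//         "batchNorm");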

IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor, name);
}

IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                    const char* name)
{
    return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
}

IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
{
    auto layer = m_Graph->AddLayer<ConstantLayer>(name);

    layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);

    return layer;
}

IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
}

IConnectableLayer* Network::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
}

IConnectableLayer* Network::AddFloorLayer(const char* name)
{
    return m_Graph->AddLayer<FloorLayer>(name);
}

IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor& descriptor,
                                         const LstmInputParams& params,
                                         const char* name)
{
    const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);

    // Lstm basic parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    // Lstm CIFG parameters
    if (!descriptor.m_CifgEnabled)
    {
        if (params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL");
        }
        if (params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                "AddLstmLayer: Recurrent To Input Weights cannot be NULL");
        }
        if (params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL");
        }
        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        // In the VTS tests, cell-to-input weights may be null, even if the other CIFG params are not.
        if (params.m_CellToInputWeights != nullptr)
        {
            layer->m_CifgParameters.m_CellToInputWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    // Lstm projection parameters
    if (descriptor.m_ProjectionEnabled)
    {
        if (params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL");
        }
        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
        if (params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    // Lstm peephole parameters
    if (descriptor.m_PeepholeEnabled)
    {
        if (params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL");
        }
        if (params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL");
        }
        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }
    return layer;
}
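
// Summary of the nullability contract enforced above: the nine basic weight and
// bias members of LstmInputParams are always dereferenced and so must be set;
// m_InputToInputWeights, m_RecurrentToInputWeights and m_InputGateBias are
// required only when CIFG is disabled; m_ProjectionWeights only when projection
// is enabled; m_CellToForgetWeights and m_CellToOutputWeights only when the
// peephole connections are enabled.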

IConnectableLayer* Network::AddDivisionLayer(const char* name)
{
    return m_Graph->AddLayer<DivisionLayer>(name);
}

IConnectableLayer* Network::AddSubtractionLayer(const char* name)
{
    return m_Graph->AddLayer<SubtractionLayer>(name);
}

IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
{
    return m_Graph->AddLayer<MeanLayer>(meanDescriptor, name);
}

IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
{
    return m_Graph->AddLayer<PadLayer>(padDescriptor, name);
}

IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}

IConnectableLayer* Network::AddGreaterLayer(const char* name)
{
    return m_Graph->AddLayer<GreaterLayer>(name);
}

IConnectableLayer* Network::AddEqualLayer(const char* name)
{
    return m_Graph->AddLayer<EqualLayer>(name);
}

OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
    : m_Graph(std::move(graph))
{
}

OptimizedNetwork::~OptimizedNetwork()
{
}

} // namespace armnn