blob: 20c67160c02c09416f5abb8eaf5e9f338e2a6cd7 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "Network.hpp"
6#include "Graph.hpp"
7#include "Layer.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +01008#include "DeviceSpec.hpp"
telsoa014fcda012018-03-09 14:13:49 +00009#include "Optimizer.hpp"
David Beckac42efd2018-09-26 17:41:13 +010010#include "optimizations/All.hpp"
telsoa014fcda012018-03-09 14:13:49 +000011
David Beckac42efd2018-09-26 17:41:13 +010012#include <backends/CpuTensorHandle.hpp>
13#include <backends/WorkloadFactory.hpp>
14
15#include <armnn/Exceptions.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016#include <armnn/Utils.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010017#include <armnn/TypesUtils.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
19#include <fcntl.h>
20#include <algorithm>
21#include <fstream>
22#include <memory>
telsoa01c577f2c2018-08-31 09:22:23 +010023#include <vector>
24#include <algorithm>
telsoa014fcda012018-03-09 14:13:49 +000025
26#include <boost/assert.hpp>
27#include <boost/format.hpp>
28#include <boost/log/trivial.hpp>
29#include <boost/numeric/conversion/converter_policies.hpp>
30#include <boost/cast.hpp>
31
32namespace armnn
33{
34
35armnn::INetwork* INetwork::CreateRaw()
36{
37 return new Network();
38}
39
40armnn::INetworkPtr INetwork::Create()
41{
42 return INetworkPtr(CreateRaw(), &INetwork::Destroy);
43}
44
45void INetwork::Destroy(INetwork* network)
46{
47 delete boost::polymorphic_downcast<Network*>(network);
48}
49
50Status Network::PrintGraph()
51{
52 m_Graph->Print();
53 return Status::Success;
54}
55
56void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
57{
58 delete boost::polymorphic_downcast<OptimizedNetwork*>(network);
59}
60
61Status OptimizedNetwork::PrintGraph()
62{
63 m_Graph->Print();
64 return Status::Success;
65}
66
surmeh01bceff2f2018-03-29 16:29:27 +010067Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
68{
69 return m_Graph->SerializeToDot(stream);
70}
71
jimfly016b0b53d2018-10-08 14:43:01 +010072bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
73{
74 bool noErrors = true;
75 unsigned int numOutputs = layer->GetNumOutputSlots();
76 for (unsigned int i = 0; i < numOutputs; i++) {
77 const OutputSlot &outputSlot = layer->GetOutputSlot(i);
78 const TensorInfo &info = outputSlot.GetTensorInfo();
79 if (DataType::QuantisedAsymm8 == info.GetDataType()) {
80 if (0.f == info.GetQuantizationScale()) {
81 noErrors = false;
82 std::stringstream ss;
83 ss << "ERROR: output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
84 << " (" << layer->GetNameStr() << ") is of type"
85 << " Quantized 8 bit but its scale parameter has not been set";
86 BOOST_LOG_TRIVIAL(warning) << ss.str() ;
87 if (errMessages) {
88 errMessages.value().push_back(ss.str());
89 }
90 }
91 }
92 }
93 return noErrors;
94}
95
/// Creates an optimized copy of the input network.
///
/// Copies the graph, runs structural optimization passes, optionally converts
/// the network to FP16, then assigns each layer to the first preferred backend
/// that supports it (inserting FP16<->FP32 conversion layers as a fallback for
/// FP16 layers that are only supported in FP32).
///
/// @param inNetwork          Network to optimize (must be a concrete Network).
/// @param backendPreferences Backends to try, in order of preference; must be non-empty.
/// @param deviceSpec         Device capabilities (must be a concrete DeviceSpec).
/// @param options            Optimizer options (e.g. m_ReduceFp32ToFp16).
/// @param errMessages        Optional sink that collects error/warning strings.
/// @return Owning pointer to the optimized network, or a null pointer (with the
///         IOptimizedNetwork deleter attached) if no usable backend was found
///         or a quantized output had no scale set.
/// @throws InvalidArgumentException if backendPreferences is empty.
IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                              const std::vector<BackendId>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options,
                              Optional<std::vector<std::string>&> errMessages)
{
    if (backendPreferences.empty()) {
        throw armnn::InvalidArgumentException("Invoked Optimize with no backends specified");
    }
    // Network is the only concrete INetwork implementation, so this downcast is safe.
    const Network& network = *boost::polymorphic_downcast<const Network*>(&inNetwork);
    // Work on a deep copy so the caller's network is left untouched.
    std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());

    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);

    OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get());

    // Perform optimisation passes
    using namespace optimizations;
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(SquashEqualPermuteSiblings(),
                                                                SquashEqualReshapeSiblings(),
                                                                OptimizeInversePermutes(),
                                                                MovePermuteUp(),
                                                                PermuteAsReshape(),
                                                                OptimizeConsecutiveReshapes()));

    // Infer the tensor infos for all output slots. Throws an exception on failure.
    optNetObjPtr->GetGraph().InferTensorInfos();

    // if Fp32 to Fp16 optimization is set convert Fp32 network to Fp16
    if (options.m_ReduceFp32ToFp16)
    {
        Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(Fp32NetworkToFp16Converter()));
    }

    // We know that DeviceSpec should be the only implementation of IDeviceSpec.
    const DeviceSpec& spec = *boost::polymorphic_downcast<const DeviceSpec*>(&deviceSpec);
    auto const& supportedBackends = spec.GetSupportedBackends();

    // determine which of the preferred backends we have available for use
    // and whether we have specified CpuRef as one of those backends.
    bool cpuRefUsed = false;
    std::vector<BackendId> availablePreferredBackends;
    for (const auto& backend : backendPreferences)
    {
        // Check if the backend is in the available backend devices.
        if (supportedBackends.count(backend) > 0)
        {
            availablePreferredBackends.push_back(backend);
            if (backend == armnn::Compute::CpuRef) {
                cpuRefUsed = true;
            }
        }
    }
    if (availablePreferredBackends.empty()) {
        // None of the requested backends exist on this platform: report and bail out.
        std::stringstream failureMsg;
        failureMsg << "ERROR: None of the preferred backends " << backendPreferences
                   << " are supported. Current platform provides " << supportedBackends;
        BOOST_LOG_TRIVIAL(warning) << failureMsg.str();
        if (errMessages) {
            errMessages.value().push_back(failureMsg.str());
        }
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // Shared failure path: log the unsupported layer and return a null network.
    auto ReturnWithError = [&](Layer* layer)
    {
        std::stringstream failureMsg;
        failureMsg << "ERROR: Layer of type " << GetLayerTypeAsCString(layer->GetType())
                   << " is not supported on any preferred backend " << backendPreferences;
        BOOST_LOG_TRIVIAL(warning) << failureMsg.str();
        if (errMessages) {
            errMessages.value().push_back(failureMsg.str());
        }
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    };

    // Assign a compute device for all nodes
    bool bErrorFound = false;
    for (auto&& layer : optNetObjPtr->GetGraph())
    {
        DataType dataType = layer->GetDataType();
        std::string reasonIfUnsupported;
        bool found = false;
        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
        {
            // don't bomb immediately, find all the quantized outputs
            // which haven't had a scale set and report them all back.
            bErrorFound = true;
        }
        for (const auto& backend : availablePreferredBackends)
        {
            // need to set the compute device on the layer
            // before we can check if it is supported
            layer->SetBackendId(backend);
            if (!IWorkloadFactory::IsLayerSupported(*layer, dataType, reasonIfUnsupported))
            {
                if (dataType == DataType::Float16)
                {
                    // FP16 layer unsupported: if the backend can run it in FP32, wrap it in
                    // conversion layers instead of falling back to another backend.
                    if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                        && layer->GetType() != LayerType::ConvertFp32ToFp16
                        && layer->GetType() != LayerType::ConvertFp16ToFp32)
                    {
                        // Insert FP16 -> FP32 conversion layer before current layer
                        std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers =
                            InsertConvertFp16ToFp32LayersBefore(optNetObjPtr->GetGraph(), *layer);

                        // Insert FP32 -> FP16 conversion layer after current layer
                        std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers =
                            InsertConvertFp32ToFp16LayersAfter(optNetObjPtr->GetGraph(), *layer);

                        // Assign a supported backend to the newly introduced conversion layers
                        // NOTE(review): the lambda's `layer` parameter shadows the loop variable above.
                        auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                        {
                            bool supportedBackendFound = false;
                            std::string reasonIfUnsupported;

                            // Try preferred backend first
                            layer->SetBackendId(preferredBackend);
                            if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                   EmptyOptional(),
                                                                   reasonIfUnsupported))
                            {
                                supportedBackendFound = true;
                            }
                            else
                            {
                                for (const auto& backend : availablePreferredBackends)
                                {
                                    // Skip preferred backend (we already determined that it is not supported)
                                    if (backend == preferredBackend)
                                    {
                                        continue;
                                    }

                                    layer->SetBackendId(backend);
                                    if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                           EmptyOptional(),
                                                                           reasonIfUnsupported))
                                    {
                                        supportedBackendFound = true;
                                        break;
                                    }
                                }
                            }

                            return supportedBackendFound;
                        };

                        for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        found = true;
                        break;
                    }
                }
                // Layer not supported on this backend; warn and try the next preference.
                std::stringstream warningMsg;
                warningMsg << "WARNING: Layer of type " << GetLayerTypeAsCString(layer->GetType())
                           << " is not supported on requested backend " << layer->GetBackendId().Get()
                           << " for data type " << GetDataTypeName(dataType)
                           << " (reason: " << reasonIfUnsupported
                           << "), falling back to the next backend.";
                BOOST_LOG_TRIVIAL(warning) << warningMsg.str();
                if (errMessages) {
                    errMessages.value().push_back(warningMsg.str());
                }
            }
            else
            {
                found = true;
                break;
            }
        }

        // If the layer is unsupported by any devices, log and return a null network.
        if (!found) {
            // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
            // fallback we should set the compute device on the layer to CpuRef (these are not
            // available as accelerated operations, or are only available under certain
            // conditions, currently they comprise MemCopy, Constant, Permute)
            armnn::LayerType layerType = layer->GetType();
            if (!cpuRefUsed && (layerType == armnn::LayerType::MemCopy ||
                                layerType == armnn::LayerType::Constant ||
                                layerType == armnn::LayerType::Permute))
            {
                layer->SetBackendId(armnn::Compute::CpuRef);
            }
            else
            {
                return ReturnWithError(layer);
            }
        }
    }
    if (bErrorFound)
    {
        // At least one quantized output had no scale set; all such errors were
        // already collected above, so fail now with a null network.
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // Clean up redundant FP16<->FP32 conversion pairs introduced above.
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(OptimizeInverseConversionsFp16(),
                                                                OptimizeInverseConversionsFp32()));

    // Insert copy layers where data crosses backend boundaries.
    optNetObjPtr->GetGraph().AddCopyLayers();

    // Convert constants
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsFloatToHalf()));
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsHalfToFloat()));

    return optNet;
}
317
jimfly016b0b53d2018-10-08 14:43:01 +0100318
// Constructs an empty network backed by a freshly allocated graph.
Network::Network()
: m_Graph(std::make_unique<Graph>())
{
}
323
// Out-of-line destructor. NOTE(review): presumably defined here (rather than
// defaulted in the header) so unique_ptr<Graph> destroys a complete type —
// confirm against the header.
Network::~Network()
{
}
327
328IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
329{
330 return m_Graph->AddLayer<InputLayer>(id, name);
331}
332
333IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +0100334 const ConstTensor& weights,
335 const ConstTensor* biases,
336 const char* name)
telsoa014fcda012018-03-09 14:13:49 +0000337{
338 if (fullyConnectedDescriptor.m_BiasEnabled && (biases == nullptr))
339 {
340 throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be NULL");
341 }
342
343 const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);
344
345 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
346
347 if (fullyConnectedDescriptor.m_BiasEnabled)
348 {
349 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
350 }
351
352 return layer;
353}
354
355IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +0100356 const ConstTensor& weights,
357 const char* name)
telsoa014fcda012018-03-09 14:13:49 +0000358{
359 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, nullptr, name);
360}
361
362IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +0100363 const ConstTensor& weights,
364 const ConstTensor& biases,
365 const char* name)
telsoa014fcda012018-03-09 14:13:49 +0000366{
367 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, &biases, name);
368}
369
370IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +0100371 const ConstTensor& weights,
372 const ConstTensor* biases,
373 const char* name)
telsoa014fcda012018-03-09 14:13:49 +0000374{
375 if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
376 {
377 throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be NULL");
378 }
379
380 const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);
381
382 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
383
384 if (convolution2dDescriptor.m_BiasEnabled)
385 {
386 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
387 }
388
389 return layer;
390}
391
392IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +0100393 const ConstTensor& weights,
394 const char* name)
telsoa014fcda012018-03-09 14:13:49 +0000395{
396 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
397}
398IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +0100399 const ConstTensor& weights,
400 const ConstTensor& biases,
401 const char* name)
telsoa014fcda012018-03-09 14:13:49 +0000402{
403 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
404}
405
406IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
407 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
408 const ConstTensor& weights,
409 const ConstTensor* biases,
410 const char* name)
411{
412 if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
413 {
414 throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be NULL");
415 }
416
telsoa01c577f2c2018-08-31 09:22:23 +0100417 const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor,
418 name);
telsoa014fcda012018-03-09 14:13:49 +0000419
420 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
421
422 if (convolution2dDescriptor.m_BiasEnabled)
423 {
424 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
425 }
426
427 return layer;
428}
429
430IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
431 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
432 const ConstTensor& weights,
433 const char* name)
434{
435 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
436}
437IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
438 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
439 const ConstTensor& weights,
440 const ConstTensor& biases,
441 const char* name)
442{
443 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
444}
445
446IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
447 const char* name)
448{
449 return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
450}
451
452IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
453 const char* name)
454{
455 return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
456}
457
458IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
459 const char* name)
460{
461 return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
462}
463
telsoa01c577f2c2018-08-31 09:22:23 +0100464IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor&
465normalizationDescriptor,
telsoa014fcda012018-03-09 14:13:49 +0000466 const char* name)
467{
468 return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
469}
470
471IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
472 const char* name)
473{
474 return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
475}
476
477IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
478 const char* name)
479{
480 return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
481}
482
483IConnectableLayer* Network::AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
484 const char* name)
485{
486 return m_Graph->AddLayer<MergerLayer>(mergerDescriptor, name);
487}
488
489IConnectableLayer* Network::AddAdditionLayer(const char* name)
490{
491 return m_Graph->AddLayer<AdditionLayer>(name);
492}
493
494IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
495{
496 return m_Graph->AddLayer<MultiplicationLayer>(name);
497}
498
499IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
500{
501 return m_Graph->AddLayer<OutputLayer>(id, name);
502}
503
504IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
505 const ConstTensor& mean,
506 const ConstTensor& variance,
507 const ConstTensor& beta,
508 const ConstTensor& gamma,
509 const char* name)
510{
511 const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);
512
513 layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
514 layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
515 layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
516 layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);
517
518 return layer;
519}
520
telsoa01c577f2c2018-08-31 09:22:23 +0100521IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor&
522resizeDescriptor, const char* name)
telsoa014fcda012018-03-09 14:13:49 +0000523{
524 return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor,name);
525}
526
Matteo Martincighbcd3c852018-09-28 14:14:12 +0100527IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
528 const char* name)
telsoa014fcda012018-03-09 14:13:49 +0000529{
Matteo Martincighbcd3c852018-09-28 14:14:12 +0100530 return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
telsoa014fcda012018-03-09 14:13:49 +0000531}
532
533IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
534{
telsoa01c577f2c2018-08-31 09:22:23 +0100535 auto layer = m_Graph->AddLayer<ConstantLayer>(name);
536
537 layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);
538
539 return layer;
telsoa014fcda012018-03-09 14:13:49 +0000540}
541
telsoa01c577f2c2018-08-31 09:22:23 +0100542IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
543 const char* name)
telsoa014fcda012018-03-09 14:13:49 +0000544{
545 return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
546}
547
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000548IConnectableLayer* Network::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
549 const char* name)
550{
551 return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
552}
553
telsoa014fcda012018-03-09 14:13:49 +0000554IConnectableLayer* Network::AddFloorLayer(const char* name)
555{
556 return m_Graph->AddLayer<FloorLayer>(name);
557}
558
/// Adds an LSTM layer and copies the supplied parameter tensors into
/// CPU tensor handles owned by the layer.
///
/// Required basic parameters are always copied. Depending on the descriptor
/// flags, CIFG, projection, and peephole parameter groups are validated and
/// copied as well.
///
/// @param descriptor Controls which optional parameter groups are used
///                   (m_CifgEnabled, m_ProjectionEnabled, m_PeepholeEnabled).
/// @param params     Pointers to the weight/bias tensors; members required by
///                   the enabled groups must be non-null.
/// @param name       Optional layer name.
/// @return The new layer (owned by the graph).
/// @throws InvalidArgumentException if a tensor required by an enabled
///         parameter group is null.
IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor&  descriptor,
                                         const LstmInputParams& params,
                                         const char* name)
{
    const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);

    //Lstm Basic Parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    //Lstm Cifg parameters
    // (CIFG *disabled* means the input gate is present, so its weights/bias are required.)
    if(!descriptor.m_CifgEnabled)
    {
        if(params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL");
        }
        if(params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                    "AddLstmLayer: Recurrent To Input Weights cannot be NULL");
        }
        if(params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL");
        }
        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        // In the VTS tests, cell-to-input weights may be null, even if the other CIFG params are not.
        if(params.m_CellToInputWeights != nullptr)
        {
            layer->m_CifgParameters.m_CellToInputWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    //Lstm projection parameters
    if(descriptor.m_ProjectionEnabled)
    {
        if(params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL");
        }
        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
        // Projection bias is optional even when projection is enabled.
        if(params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    //Lstm Peephole params
    if(descriptor.m_PeepholeEnabled)
    {
        if(params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL");
        }
        if(params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL");
        }
        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }
    return layer;
}
649
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100650IConnectableLayer* Network::AddDivisionLayer(const char* name)
651{
652 return m_Graph->AddLayer<DivisionLayer>(name);
653}
654
David Beck19526222018-09-12 16:00:08 +0100655IConnectableLayer* Network::AddSubtractionLayer(const char* name)
656{
657 return m_Graph->AddLayer<SubtractionLayer>(name);
658}
659
narpra0132b90462018-09-13 11:07:48 +0100660IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
661{
662 return m_Graph->AddLayer<MeanLayer>(meanDescriptor,name);
663}
664
Mohamed Nour Abouelseoud5662c202018-09-24 13:30:09 +0100665IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
666{
667 return m_Graph->AddLayer<PadLayer>(padDescriptor,name);
668}
669
// Takes ownership of the (already optimized) graph produced by Optimize().
OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
    : m_Graph(std::move(graph))
{
}
674
// Out-of-line destructor. NOTE(review): presumably defined here (rather than
// defaulted in the header) so unique_ptr<Graph> destroys a complete type —
// confirm against the header.
OptimizedNetwork::~OptimizedNetwork()
{
}
678
679} // namespace armnn