blob: 76c33ba2e69282e31e7f6385436e7b7e017d0864 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "Network.hpp"
6#include "Graph.hpp"
7#include "Layer.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +01008#include "DeviceSpec.hpp"
telsoa014fcda012018-03-09 14:13:49 +00009#include "Optimizer.hpp"
David Beckac42efd2018-09-26 17:41:13 +010010#include "optimizations/All.hpp"
telsoa014fcda012018-03-09 14:13:49 +000011
David Beckac42efd2018-09-26 17:41:13 +010012#include <backends/CpuTensorHandle.hpp>
13#include <backends/WorkloadFactory.hpp>
14
15#include <armnn/Exceptions.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016#include <armnn/Utils.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010017#include <armnn/TypesUtils.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
19#include <fcntl.h>
20#include <algorithm>
21#include <fstream>
22#include <memory>
telsoa01c577f2c2018-08-31 09:22:23 +010023#include <vector>
24#include <algorithm>
telsoa014fcda012018-03-09 14:13:49 +000025
26#include <boost/assert.hpp>
27#include <boost/format.hpp>
28#include <boost/log/trivial.hpp>
29#include <boost/numeric/conversion/converter_policies.hpp>
30#include <boost/cast.hpp>
31
32namespace armnn
33{
34
35armnn::INetwork* INetwork::CreateRaw()
36{
37 return new Network();
38}
39
40armnn::INetworkPtr INetwork::Create()
41{
42 return INetworkPtr(CreateRaw(), &INetwork::Destroy);
43}
44
45void INetwork::Destroy(INetwork* network)
46{
47 delete boost::polymorphic_downcast<Network*>(network);
48}
49
50Status Network::PrintGraph()
51{
52 m_Graph->Print();
53 return Status::Success;
54}
55
56void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
57{
58 delete boost::polymorphic_downcast<OptimizedNetwork*>(network);
59}
60
61Status OptimizedNetwork::PrintGraph()
62{
63 m_Graph->Print();
64 return Status::Success;
65}
66
surmeh01bceff2f2018-03-29 16:29:27 +010067Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
68{
69 return m_Graph->SerializeToDot(stream);
70}
71
// Produces an optimized copy of 'inNetwork' ready for loading onto a runtime:
// runs graph-level optimization passes, optionally converts the network from
// FP32 to FP16, then assigns each layer to the first backend (in preference
// order) that supports it, inserting FP16<->FP32 conversion layers where that
// enables an FP32-only backend to run an FP16 layer.
//
// @param inNetwork           The network to optimize (must be a Network; downcast below).
// @param backendPreferences  Backends to try, in decreasing order of preference.
// @param deviceSpec          Platform capabilities (must be a DeviceSpec; downcast below).
// @param options             Optimization switches (e.g. m_ReduceFp32ToFp16).
// @return An IOptimizedNetworkPtr, or a null pointer when no preferred backend
//         is available on this platform or some layer is unsupported everywhere.
// @throws InvalidArgumentException when 'backendPreferences' is empty.
IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                              const std::vector<armnn::Compute>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options)
{
    if (backendPreferences.empty()) {
        throw armnn::InvalidArgumentException("Invoked Optimize with no backends specified");
    }
    // Network is the only expected implementation of INetwork, hence the downcast.
    const Network& network = *boost::polymorphic_downcast<const Network*>(&inNetwork);
    // Work on a deep copy of the graph so the caller's network is left untouched.
    std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());

    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);

    // Keep a concrete-typed pointer for internal use; 'optNet' retains ownership.
    OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get());

    // Perform optimisation passes
    using namespace optimizations;
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(SquashEqualPermuteSiblings(),
                                                                SquashEqualReshapeSiblings(),
                                                                OptimizeInversePermutes(),
                                                                MovePermuteUp(),
                                                                PermuteAsReshape(),
                                                                OptimizeConsecutiveReshapes()));

    // Infer the tensor infos for all output slots. Throws an exception on failure.
    optNetObjPtr->GetGraph().InferTensorInfos();

    // If the Fp32-to-Fp16 optimization is enabled, convert the whole Fp32 network to Fp16.
    if (options.m_ReduceFp32ToFp16)
    {
        Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(Fp32NetworkToFp16Converter()));
    }

    // We know that DeviceSpec should be the only implementation of IDeviceSpec.
    const DeviceSpec& spec = *boost::polymorphic_downcast<const DeviceSpec*>(&deviceSpec);

    // Determine which of the preferred backends are available on this platform,
    // and whether CpuRef was among the requested backends (it is the fallback
    // of last resort for a few special layer types below).
    bool cpuRefUsed = false;
    std::vector<armnn::Compute> availablePreferredBackends;
    for (const armnn::Compute& backend : backendPreferences)
    {
        // Check if the backend is in the available backend devices.
        if (std::find(spec.m_SupportedComputeDevices.begin(),
                      spec.m_SupportedComputeDevices.end(), backend) !=
                      spec.m_SupportedComputeDevices.end())
        {
            availablePreferredBackends.push_back(backend);
            if (armnn::Compute::CpuRef == backend) {
                cpuRefUsed = true;
            }
        }
    }
    if (availablePreferredBackends.empty()) {
        BOOST_LOG_TRIVIAL(warning) << "None of the preferred backends " << backendPreferences
                                   << " are supported. Current platform provides " << spec.m_SupportedComputeDevices;
        return {nullptr, &IOptimizedNetwork::Destroy};
    }

    // Shared failure path: log the offending layer and yield a null network.
    auto ReturnWithError = [&](Layer* layer)
    {
        BOOST_LOG_TRIVIAL(warning) << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                                   << " is not supported on any preferred backend " << backendPreferences;
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    };

    // Assign a compute device for all nodes
    for (auto&& layer : optNetObjPtr->GetGraph())
    {
        DataType dataType = layer->GetDataType();
        std::string reasonIfUnsupported;
        bool found = false;
        // Try each available backend in preference order until one supports the layer.
        for (const armnn::Compute& backend : availablePreferredBackends)
        {
            // The compute device must be set on the layer
            // before we can check if it is supported.
            layer->SetComputeDevice(backend);
            if (!IWorkloadFactory::IsLayerSupported(*layer, dataType, reasonIfUnsupported))
            {
                // Special case: an FP16 layer that the backend can run in FP32
                // gets sandwiched between FP16->FP32 and FP32->FP16 conversions
                // (the conversion layers themselves are excluded to avoid recursion).
                if (dataType == DataType::Float16)
                {
                    if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                        && layer->GetType() != LayerType::ConvertFp32ToFp16
                        && layer->GetType() != LayerType::ConvertFp16ToFp32)
                    {
                        // Insert FP16 -> FP32 conversion layer before current layer
                        std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers =
                            InsertConvertFp16ToFp32LayersBefore(optNetObjPtr->GetGraph(), *layer);

                        // Insert FP32 -> FP16 conversion layer after current layer
                        std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers =
                            InsertConvertFp32ToFp16LayersAfter(optNetObjPtr->GetGraph(), *layer);

                        // Assign a supported backend to the newly introduced conversion layers
                        // (NOTE: this lambda intentionally shadows 'layer' and 'reasonIfUnsupported').
                        auto AssignFirstSupportedBackend = [&](Layer* layer, Compute preferredBackend)
                        {
                            bool supportedBackendFound = false;
                            std::string reasonIfUnsupported;

                            // Try preferred backend first
                            layer->SetComputeDevice(preferredBackend);
                            if (IWorkloadFactory::IsLayerSupported(*layer, boost::none, reasonIfUnsupported))
                            {
                                supportedBackendFound = true;
                            }
                            else
                            {
                                for (const Compute& backend : availablePreferredBackends)
                                {
                                    // Skip preferred backend (we already determined that it is not supported)
                                    if (backend == preferredBackend)
                                    {
                                        continue;
                                    }

                                    layer->SetComputeDevice(backend);
                                    if (IWorkloadFactory::IsLayerSupported(*layer, boost::none, reasonIfUnsupported))
                                    {
                                        supportedBackendFound = true;
                                        break;
                                    }
                                }
                            }

                            return supportedBackendFound;
                        };

                        for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        found = true;
                        break;
                    }
                }
                BOOST_LOG_TRIVIAL(warning) << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                                           << " is not supported on requested backend " << layer->GetComputeDevice()
                                           << " (reason: " << reasonIfUnsupported
                                           << "), falling back to the next backend.";
            }
            else
            {
                found = true;
                break;
            }
        }

        // If the layer is unsupported by any devices, log and return a null network.
        if (!found) {
            // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
            // fallback we should set the compute device on the layer to CpuRef (these are not
            // available as accelerated operations, or are only available under certain
            // conditions, currently they comprise MemCopy, Constant, Permute)
            armnn::LayerType layerType = layer->GetType();
            if (!cpuRefUsed && (layerType == armnn::LayerType::MemCopy ||
                                layerType == armnn::LayerType::Constant ||
                                layerType == armnn::LayerType::Permute))
            {
                layer->SetComputeDevice(armnn::Compute::CpuRef);
            }
            else
            {
                return ReturnWithError(layer);
            }
        }
    }

    // Remove back-to-back conversion pairs that cancel each other out.
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(OptimizeInverseConversionsFp16(),
                                                                OptimizeInverseConversionsFp32()));

    // Insert copy layers where tensors cross between compute devices.
    optNetObjPtr->GetGraph().AddCopyLayers();

    // Convert constants
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsFloatToHalf()));
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsHalfToFloat()));

    return optNet;
}
262
263Network::Network()
264: m_Graph(std::make_unique<Graph>())
265{
266}
267
268Network::~Network()
269{
270}
271
272IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
273{
274 return m_Graph->AddLayer<InputLayer>(id, name);
275}
276
277IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +0100278 const ConstTensor& weights,
279 const ConstTensor* biases,
280 const char* name)
telsoa014fcda012018-03-09 14:13:49 +0000281{
282 if (fullyConnectedDescriptor.m_BiasEnabled && (biases == nullptr))
283 {
284 throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be NULL");
285 }
286
287 const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);
288
289 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
290
291 if (fullyConnectedDescriptor.m_BiasEnabled)
292 {
293 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
294 }
295
296 return layer;
297}
298
299IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +0100300 const ConstTensor& weights,
301 const char* name)
telsoa014fcda012018-03-09 14:13:49 +0000302{
303 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, nullptr, name);
304}
305
306IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +0100307 const ConstTensor& weights,
308 const ConstTensor& biases,
309 const char* name)
telsoa014fcda012018-03-09 14:13:49 +0000310{
311 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, &biases, name);
312}
313
314IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +0100315 const ConstTensor& weights,
316 const ConstTensor* biases,
317 const char* name)
telsoa014fcda012018-03-09 14:13:49 +0000318{
319 if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
320 {
321 throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be NULL");
322 }
323
324 const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);
325
326 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
327
328 if (convolution2dDescriptor.m_BiasEnabled)
329 {
330 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
331 }
332
333 return layer;
334}
335
336IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +0100337 const ConstTensor& weights,
338 const char* name)
telsoa014fcda012018-03-09 14:13:49 +0000339{
340 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
341}
342IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +0100343 const ConstTensor& weights,
344 const ConstTensor& biases,
345 const char* name)
telsoa014fcda012018-03-09 14:13:49 +0000346{
347 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
348}
349
350IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
351 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
352 const ConstTensor& weights,
353 const ConstTensor* biases,
354 const char* name)
355{
356 if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
357 {
358 throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be NULL");
359 }
360
telsoa01c577f2c2018-08-31 09:22:23 +0100361 const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor,
362 name);
telsoa014fcda012018-03-09 14:13:49 +0000363
364 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
365
366 if (convolution2dDescriptor.m_BiasEnabled)
367 {
368 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
369 }
370
371 return layer;
372}
373
374IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
375 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
376 const ConstTensor& weights,
377 const char* name)
378{
379 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
380}
381IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
382 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
383 const ConstTensor& weights,
384 const ConstTensor& biases,
385 const char* name)
386{
387 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
388}
389
390IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
391 const char* name)
392{
393 return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
394}
395
396IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
397 const char* name)
398{
399 return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
400}
401
402IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
403 const char* name)
404{
405 return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
406}
407
telsoa01c577f2c2018-08-31 09:22:23 +0100408IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor&
409normalizationDescriptor,
telsoa014fcda012018-03-09 14:13:49 +0000410 const char* name)
411{
412 return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
413}
414
415IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
416 const char* name)
417{
418 return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
419}
420
421IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
422 const char* name)
423{
424 return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
425}
426
427IConnectableLayer* Network::AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
428 const char* name)
429{
430 return m_Graph->AddLayer<MergerLayer>(mergerDescriptor, name);
431}
432
433IConnectableLayer* Network::AddAdditionLayer(const char* name)
434{
435 return m_Graph->AddLayer<AdditionLayer>(name);
436}
437
438IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
439{
440 return m_Graph->AddLayer<MultiplicationLayer>(name);
441}
442
443IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
444{
445 return m_Graph->AddLayer<OutputLayer>(id, name);
446}
447
448IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
449 const ConstTensor& mean,
450 const ConstTensor& variance,
451 const ConstTensor& beta,
452 const ConstTensor& gamma,
453 const char* name)
454{
455 const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);
456
457 layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
458 layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
459 layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
460 layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);
461
462 return layer;
463}
464
telsoa01c577f2c2018-08-31 09:22:23 +0100465IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor&
466resizeDescriptor, const char* name)
telsoa014fcda012018-03-09 14:13:49 +0000467{
468 return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor,name);
469}
470
Matteo Martincighbcd3c852018-09-28 14:14:12 +0100471IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
472 const char* name)
telsoa014fcda012018-03-09 14:13:49 +0000473{
Matteo Martincighbcd3c852018-09-28 14:14:12 +0100474 return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
telsoa014fcda012018-03-09 14:13:49 +0000475}
476
477IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
478{
telsoa01c577f2c2018-08-31 09:22:23 +0100479 auto layer = m_Graph->AddLayer<ConstantLayer>(name);
480
481 layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);
482
483 return layer;
telsoa014fcda012018-03-09 14:13:49 +0000484}
485
telsoa01c577f2c2018-08-31 09:22:23 +0100486IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
487 const char* name)
telsoa014fcda012018-03-09 14:13:49 +0000488{
489 return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
490}
491
492IConnectableLayer* Network::AddFloorLayer(const char* name)
493{
494 return m_Graph->AddLayer<FloorLayer>(name);
495}
496
// Adds an LSTM layer, copying every supplied parameter tensor into
// network-owned handles. The basic (non-gated) parameters are mandatory;
// CIFG, projection and peephole parameter groups are validated and copied
// only when the corresponding flag in 'descriptor' enables them.
//
// @param descriptor  Enables/disables the CIFG, projection and peephole paths.
// @param params      Pointers to the weight/bias tensors; mandatory members of
//                    an enabled group must be non-null.
// @param name        Optional layer name.
// @return The created layer (owned by the graph).
// @throws InvalidArgumentException when a tensor required by an enabled
//         feature is null.
IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor&  descriptor,
                                         const LstmInputParams& params,
                                         const char* name)
{
    const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);

    // LSTM basic parameters (always required; assumed non-null here —
    // NOTE(review): unlike the optional groups below these are dereferenced
    // without a null check).
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    // LSTM CIFG parameters: required when CIFG (coupled input-forget gate)
    // is NOT enabled, i.e. the input gate exists.
    if(!descriptor.m_CifgEnabled)
    {
        if(params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL");
        }
        if(params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                    "AddLstmLayer: Recurrent To Input Weights cannot be NULL");
        }
        if(params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL");
        }
        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        // In the VTS tests, cell-to-input weights may be null, even if the other CIFG params are not.
        if(params.m_CellToInputWeights != nullptr)
        {
            layer->m_CifgParameters.m_CellToInputWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    // LSTM projection parameters: weights mandatory, bias optional.
    if(descriptor.m_ProjectionEnabled)
    {
        if(params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL");
        }
        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
        if(params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    // LSTM peephole parameters: both cell-to-gate weight tensors mandatory.
    if(descriptor.m_PeepholeEnabled)
    {
        if(params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL");
        }
        if(params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL");
        }
        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }
    return layer;
}
587
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100588IConnectableLayer* Network::AddDivisionLayer(const char* name)
589{
590 return m_Graph->AddLayer<DivisionLayer>(name);
591}
592
David Beck19526222018-09-12 16:00:08 +0100593IConnectableLayer* Network::AddSubtractionLayer(const char* name)
594{
595 return m_Graph->AddLayer<SubtractionLayer>(name);
596}
597
narpra0132b90462018-09-13 11:07:48 +0100598IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
599{
600 return m_Graph->AddLayer<MeanLayer>(meanDescriptor,name);
601}
602
Mohamed Nour Abouelseoud5662c202018-09-24 13:30:09 +0100603IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
604{
605 return m_Graph->AddLayer<PadLayer>(padDescriptor,name);
606}
607
telsoa014fcda012018-03-09 14:13:49 +0000608OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
609 : m_Graph(std::move(graph))
610{
611}
612
613OptimizedNetwork::~OptimizedNetwork()
614{
615}
616
617} // namespace armnn