//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Network.hpp"
#include "Graph.hpp"
#include "Layer.hpp"
#include "DeviceSpec.hpp"
#include "Optimizer.hpp"
#include "optimizations/All.hpp"

#include <backends/CpuTensorHandle.hpp>
#include <backends/WorkloadFactory.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>

#include <fcntl.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <vector>

#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>
#include <boost/numeric/conversion/converter_policies.hpp>
#include <boost/cast.hpp>

namespace armnn
{

armnn::INetwork* INetwork::CreateRaw()
{
    return new Network();
}

armnn::INetworkPtr INetwork::Create()
{
    return INetworkPtr(CreateRaw(), &INetwork::Destroy);
}

void INetwork::Destroy(INetwork* network)
{
    delete boost::polymorphic_downcast<Network*>(network);
}
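
// Usage sketch (illustrative only; `network` is an assumed name): the factory
// pair above lets client code hold the network in the RAII INetworkPtr and
// never call delete directly.
//
//     armnn::INetworkPtr network = armnn::INetwork::Create();
//     // ... add layers via the IConnectableLayer interface ...
//     // INetwork::Destroy runs automatically when `network` leaves scope.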

Status Network::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
{
    delete boost::polymorphic_downcast<OptimizedNetwork*>(network);
}

Status OptimizedNetwork::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
{
    return m_Graph->SerializeToDot(stream);
}

bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
{
    bool noErrors = true;
    unsigned int numOutputs = layer->GetNumOutputSlots();
    for (unsigned int i = 0; i < numOutputs; i++) {
        const OutputSlot& outputSlot = layer->GetOutputSlot(i);
        const TensorInfo& info = outputSlot.GetTensorInfo();
        if (DataType::QuantisedAsymm8 == info.GetDataType()) {
            if (0.f == info.GetQuantizationScale()) {
                noErrors = false;
                std::stringstream ss;
                ss << "ERROR: output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
                   << " (" << layer->GetNameStr() << ") is of type"
                   << " Quantized 8 bit but its scale parameter has not been set";
                BOOST_LOG_TRIVIAL(warning) << ss.str();
                if (errMessages) {
                    errMessages.value().push_back(ss.str());
                }
            }
        }
    }
    return noErrors;
}
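
// A minimal sketch of how a caller avoids the error above, assuming a
// hypothetical layer pointer `layer`: give every QuantisedAsymm8 output a
// non-zero scale before optimization.
//
//     armnn::TensorInfo info({1, 16}, armnn::DataType::QuantisedAsymm8,
//                            0.0039f /* scale */, 0 /* zero point */);
//     layer->GetOutputSlot(0).SetTensorInfo(info);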

IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                              const std::vector<armnn::Compute>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options,
                              Optional<std::vector<std::string>&> errMessages)
{
    if (backendPreferences.empty()) {
        throw armnn::InvalidArgumentException("Invoked Optimize with no backends specified");
    }
    const Network& network = *boost::polymorphic_downcast<const Network*>(&inNetwork);
    std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());

    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);

    OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get());

    // Perform optimisation passes.
    using namespace optimizations;
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(SquashEqualPermuteSiblings(),
                                                                SquashEqualReshapeSiblings(),
                                                                OptimizeInversePermutes(),
                                                                MovePermuteUp(),
                                                                PermuteAsReshape(),
                                                                OptimizeConsecutiveReshapes()));

    // Infer the tensor infos for all output slots. Throws an exception on failure.
    optNetObjPtr->GetGraph().InferTensorInfos();

    // If the Fp32-to-Fp16 optimization is enabled, convert the Fp32 network to Fp16.
    if (options.m_ReduceFp32ToFp16)
    {
        Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(Fp32NetworkToFp16Converter()));
    }

    // We know that DeviceSpec should be the only implementation of IDeviceSpec.
    const DeviceSpec& spec = *boost::polymorphic_downcast<const DeviceSpec*>(&deviceSpec);

    // Determine which of the preferred backends are available for use,
    // and whether CpuRef was specified as one of them.
    bool cpuRefUsed = false;
    std::vector<armnn::Compute> availablePreferredBackends;
    for (const armnn::Compute& backend : backendPreferences)
    {
        // Check if the backend is in the available backend devices.
        if (std::find(spec.m_SupportedComputeDevices.begin(),
                      spec.m_SupportedComputeDevices.end(), backend) !=
            spec.m_SupportedComputeDevices.end())
        {
            availablePreferredBackends.push_back(backend);
            if (armnn::Compute::CpuRef == backend) {
                cpuRefUsed = true;
            }
        }
    }
    if (availablePreferredBackends.empty()) {
        std::stringstream failureMsg;
        failureMsg << "ERROR: None of the preferred backends " << backendPreferences
                   << " are supported. Current platform provides " << spec.m_SupportedComputeDevices;
        BOOST_LOG_TRIVIAL(warning) << failureMsg.str();
        if (errMessages) {
            errMessages.value().push_back(failureMsg.str());
        }
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    auto ReturnWithError = [&](Layer* layer)
    {
        std::stringstream failureMsg;
        failureMsg << "ERROR: Layer of type " << GetLayerTypeAsCString(layer->GetType())
                   << " is not supported on any preferred backend " << backendPreferences;
        BOOST_LOG_TRIVIAL(warning) << failureMsg.str();
        if (errMessages) {
            errMessages.value().push_back(failureMsg.str());
        }
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    };

    // Assign a compute device to every layer.
    bool bErrorFound = false;
    for (auto&& layer : optNetObjPtr->GetGraph())
    {
        DataType dataType = layer->GetDataType();
        std::string reasonIfUnsupported;
        bool found = false;
        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
        {
            // Don't bomb immediately: find all the quantized outputs
            // which haven't had a scale set, and report them all back.
            bErrorFound = true;
        }
        for (const armnn::Compute& backend : availablePreferredBackends)
        {
            // The compute device needs to be set on the layer
            // before we can check whether it is supported.
            layer->SetComputeDevice(backend);
            if (!IWorkloadFactory::IsLayerSupported(*layer, dataType, reasonIfUnsupported))
            {
                if (dataType == DataType::Float16)
                {
                    if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                        && layer->GetType() != LayerType::ConvertFp32ToFp16
                        && layer->GetType() != LayerType::ConvertFp16ToFp32)
                    {
                        // Insert FP16 -> FP32 conversion layer before current layer.
                        std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers =
                            InsertConvertFp16ToFp32LayersBefore(optNetObjPtr->GetGraph(), *layer);

                        // Insert FP32 -> FP16 conversion layer after current layer.
                        std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers =
                            InsertConvertFp32ToFp16LayersAfter(optNetObjPtr->GetGraph(), *layer);

                        // Assign a supported backend to the newly introduced conversion layers.
                        auto AssignFirstSupportedBackend = [&](Layer* layer, Compute preferredBackend)
                        {
                            bool supportedBackendFound = false;
                            std::string reasonIfUnsupported;

                            // Try the preferred backend first.
                            layer->SetComputeDevice(preferredBackend);
                            if (IWorkloadFactory::IsLayerSupported(*layer, boost::none, reasonIfUnsupported))
                            {
                                supportedBackendFound = true;
                            }
                            else
                            {
                                for (const Compute& backend : availablePreferredBackends)
                                {
                                    // Skip the preferred backend (we already determined that it is not supported).
                                    if (backend == preferredBackend)
                                    {
                                        continue;
                                    }

                                    layer->SetComputeDevice(backend);
                                    if (IWorkloadFactory::IsLayerSupported(*layer, boost::none, reasonIfUnsupported))
                                    {
                                        supportedBackendFound = true;
                                        break;
                                    }
                                }
                            }

                            return supportedBackendFound;
                        };

                        for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        found = true;
                        break;
                    }
                }
                std::stringstream warningMsg;
                warningMsg << "WARNING: Layer of type " << GetLayerTypeAsCString(layer->GetType())
                           << " is not supported on requested backend " << layer->GetComputeDevice()
                           << " for data type " << GetDataTypeName(dataType)
                           << " (reason: " << reasonIfUnsupported
                           << "), falling back to the next backend.";
                BOOST_LOG_TRIVIAL(warning) << warningMsg.str();
                if (errMessages) {
                    errMessages.value().push_back(warningMsg.str());
                }
            }
            else
            {
                found = true;
                break;
            }
        }

        // If the layer is unsupported by any devices, log and return a null network.
        if (!found) {
            // NOTE: if the layer is not an operation queue type AND we have not got CpuRef
            // as a fallback, we should set the compute device on the layer to CpuRef.
            // These layers are not available as accelerated operations, or are only
            // available under certain conditions; currently they comprise MemCopy,
            // Constant and Permute.
            armnn::LayerType layerType = layer->GetType();
            if (!cpuRefUsed && (layerType == armnn::LayerType::MemCopy ||
                                layerType == armnn::LayerType::Constant ||
                                layerType == armnn::LayerType::Permute))
            {
                layer->SetComputeDevice(armnn::Compute::CpuRef);
            }
            else
            {
                return ReturnWithError(layer);
            }
        }
    }
    if (bErrorFound)
    {
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(OptimizeInverseConversionsFp16(),
                                                                OptimizeInverseConversionsFp32()));

    optNetObjPtr->GetGraph().AddCopyLayers();

    // Convert constants.
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsFloatToHalf()));
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsHalfToFloat()));

    return optNet;
}
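
// A sketch of a typical call site, assuming an already-built INetwork held in
// `network` and an IRuntime in `runtime` (both names illustrative): the backend
// list is ordered by preference, and errMessages is optional.
//
//     std::vector<std::string> errors;
//     armnn::IOptimizedNetworkPtr optNet =
//         armnn::Optimize(*network,
//                         {armnn::Compute::CpuAcc, armnn::Compute::CpuRef},
//                         runtime->GetDeviceSpec(),
//                         armnn::OptimizerOptions(),
//                         armnn::Optional<std::vector<std::string>&>(errors));
//     if (!optNet) { /* inspect `errors` for the reasons */ }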

Network::Network()
: m_Graph(std::make_unique<Graph>())
{
}

Network::~Network()
{
}

IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<InputLayer>(id, name);
}

IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                       const ConstTensor& weights,
                                                       const ConstTensor* biases,
                                                       const char* name)
{
    if (fullyConnectedDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (fullyConnectedDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const ConstTensor& biases,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, &biases, name);
}
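
// A hedged usage sketch (`network` and the shape are illustrative, not from
// this file): the weights are copied into a ScopedCpuTensorHandle above, so
// the caller's buffer only needs to outlive the Add* call. The convolution
// overloads below follow the same pattern.
//
//     armnn::FullyConnectedDescriptor fcDesc;
//     fcDesc.m_BiasEnabled = false;
//     std::vector<float> weightData(10 * 20, 0.5f);
//     armnn::TensorInfo weightsInfo({10, 20}, armnn::DataType::Float32);
//     armnn::IConnectableLayer* fc = network->AddFullyConnectedLayer(
//         fcDesc, armnn::ConstTensor(weightsInfo, weightData.data()), "fc1");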

IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
                                                      const ConstTensor& weights,
                                                      const ConstTensor* biases,
                                                      const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const ConstTensor& biases,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor* biases,
    const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor,
                                                                      name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor& biases,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
}

IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
}

IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
}

IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
                                               const char* name)
{
    return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
}

IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
                                                  const char* name)
{
    return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
}

IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
}

IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
                                             const char* name)
{
    return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
}

IConnectableLayer* Network::AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
                                           const char* name)
{
    return m_Graph->AddLayer<MergerLayer>(mergerDescriptor, name);
}

IConnectableLayer* Network::AddAdditionLayer(const char* name)
{
    return m_Graph->AddLayer<AdditionLayer>(name);
}

IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
{
    return m_Graph->AddLayer<MultiplicationLayer>(name);
}

IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<OutputLayer>(id, name);
}
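
// End-to-end sketch (assumed identifiers; shapes chosen only for illustration):
// layers are wired together through the public IConnectableLayer slot API.
//
//     armnn::INetworkPtr net = armnn::INetwork::Create();
//     armnn::IConnectableLayer* input = net->AddInputLayer(0, "input");
//     armnn::ActivationDescriptor reluDesc;
//     reluDesc.m_Function = armnn::ActivationFunction::ReLu;
//     armnn::IConnectableLayer* relu = net->AddActivationLayer(reluDesc, "relu");
//     armnn::IConnectableLayer* output = net->AddOutputLayer(0, "output");
//
//     input->GetOutputSlot(0).Connect(relu->GetInputSlot(0));
//     relu->GetOutputSlot(0).Connect(output->GetInputSlot(0));
//     input->GetOutputSlot(0).SetTensorInfo(
//         armnn::TensorInfo({1, 8}, armnn::DataType::Float32));
//     relu->GetOutputSlot(0).SetTensorInfo(
//         armnn::TensorInfo({1, 8}, armnn::DataType::Float32));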

IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                       const ConstTensor& mean,
                                                       const ConstTensor& variance,
                                                       const ConstTensor& beta,
                                                       const ConstTensor& gamma,
                                                       const char* name)
{
    const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);

    layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
    layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
    layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
    layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);

    return layer;
}
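
// Sketch of the expected inputs (`network` and the channel count are
// illustrative): one element per channel for each of the four statistics
// tensors.
//
//     armnn::BatchNormalizationDescriptor bnDesc;
//     bnDesc.m_Eps = 1e-5f;
//     armnn::TensorInfo statsInfo({3}, armnn::DataType::Float32);
//     std::vector<float> mean(3, 0.f), var(3, 1.f), beta(3, 0.f), gamma(3, 1.f);
//     armnn::IConnectableLayer* bn = network->AddBatchNormalizationLayer(
//         bnDesc,
//         armnn::ConstTensor(statsInfo, mean.data()),
//         armnn::ConstTensor(statsInfo, var.data()),
//         armnn::ConstTensor(statsInfo, beta.data()),
//         armnn::ConstTensor(statsInfo, gamma.data()),
//         "batchnorm");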

IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor, name);
}

IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                    const char* name)
{
    return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
}

IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
{
    auto layer = m_Graph->AddLayer<ConstantLayer>(name);

    layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);

    return layer;
}

IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
}

IConnectableLayer* Network::AddFloorLayer(const char* name)
{
    return m_Graph->AddLayer<FloorLayer>(name);
}

IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor& descriptor,
                                         const LstmInputParams& params,
                                         const char* name)
{
    const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);

    // Lstm basic parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    // Lstm CIFG parameters
    if (!descriptor.m_CifgEnabled)
    {
        if (params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL");
        }
        if (params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                "AddLstmLayer: Recurrent To Input Weights cannot be NULL");
        }
        if (params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL");
        }
        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        // In the VTS tests, cell-to-input weights may be null, even if the other CIFG params are not.
        if (params.m_CellToInputWeights != nullptr)
        {
            layer->m_CifgParameters.m_CellToInputWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    // Lstm projection parameters
    if (descriptor.m_ProjectionEnabled)
    {
        if (params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL");
        }
        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
        if (params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    // Lstm peephole parameters
    if (descriptor.m_PeepholeEnabled)
    {
        if (params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL");
        }
        if (params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL");
        }
        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }
    return layer;
}
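
// Sketch of the caller's obligations (identifiers assumed for illustration):
// the nine basic weight/bias pointers in LstmInputParams must always be
// non-null, while the CIFG, projection and peephole groups are only required
// when the corresponding descriptor flag demands them, as enforced above.
//
//     armnn::LstmDescriptor lstmDesc;
//     lstmDesc.m_CifgEnabled = true;        // input-gate params may stay null
//     lstmDesc.m_ProjectionEnabled = false;
//     lstmDesc.m_PeepholeEnabled = false;
//     armnn::LstmInputParams params;
//     params.m_InputToForgetWeights = &inputToForgetWeights; // a ConstTensor
//     // ... set the remaining eight basic parameters likewise ...
//     armnn::IConnectableLayer* lstm = network->AddLstmLayer(lstmDesc, params, "lstm");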

IConnectableLayer* Network::AddDivisionLayer(const char* name)
{
    return m_Graph->AddLayer<DivisionLayer>(name);
}

IConnectableLayer* Network::AddSubtractionLayer(const char* name)
{
    return m_Graph->AddLayer<SubtractionLayer>(name);
}

IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
{
    return m_Graph->AddLayer<MeanLayer>(meanDescriptor, name);
}

IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
{
    return m_Graph->AddLayer<PadLayer>(padDescriptor, name);
}

OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
    : m_Graph(std::move(graph))
{
}

OptimizedNetwork::~OptimizedNetwork()
{
}

} // namespace armnn