//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Network.hpp"
#include "Graph.hpp"
#include "Layer.hpp"
#include "DeviceSpec.hpp"
#include "Optimizer.hpp"
#include "SubgraphViewSelector.hpp"
#include "BackendSettings.hpp"
#include "optimizations/All.hpp"

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/TensorHandleFactoryRegistry.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <ProfilingService.hpp>

#include <fcntl.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <vector>

#include <boost/format.hpp>
#include <boost/numeric/conversion/converter_policies.hpp>

namespace armnn
{

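// INetwork is constructed through these factory functions rather than a public
// constructor, so the concrete Network type never leaks into the public API.
// A minimal usage sketch (assuming the usual public armnn headers and the
// default NetworkOptions):
//
//     armnn::INetworkPtr net = armnn::INetwork::Create();
//     armnn::IConnectableLayer* input = net->AddInputLayer(0);
//     // ... add further layers and connect their output slots ...
//
// INetworkPtr carries &INetwork::Destroy as its deleter, so the downcasting
// delete below runs automatically when the smart pointer goes out of scope.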
armnn::INetwork* INetwork::CreateRaw(NetworkOptions networkOptions)
{
    return new Network(networkOptions);
}

armnn::INetworkPtr INetwork::Create(NetworkOptions networkOptions)
{
    return INetworkPtr(CreateRaw(networkOptions), &INetwork::Destroy);
}

void INetwork::Destroy(INetwork* network)
{
    delete PolymorphicDowncast<Network*>(network);
}

void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
{
    delete PolymorphicDowncast<OptimizedNetwork*>(network);
}

Status OptimizedNetwork::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
{
    return m_Graph->SerializeToDot(stream);
}

void ReportError(const std::string& errorMessage,
                 Optional<std::vector<std::string>&> errorMessages)
{
    std::stringstream fullErrorMessage;
    fullErrorMessage << "ERROR: " << errorMessage;
    ARMNN_LOG(warning) << fullErrorMessage.str();
    if (errorMessages)
    {
        errorMessages.value().push_back(fullErrorMessage.str());
    }
}

void ReportWarning(const std::string& warningMessage,
                   Optional<std::vector<std::string>&> warningMessages)
{
    std::stringstream fullWarningMessage;
    fullWarningMessage << "WARNING: " << warningMessage;
    ARMNN_LOG(warning) << fullWarningMessage.str();
    if (warningMessages)
    {
        warningMessages.value().push_back(fullWarningMessage.str());
    }
}

OptimizationResult ReturnWithError(OptimizationResult res,
                                   const Layer* layer,
                                   const BackendSettings& backendSettings,
                                   Optional<std::vector<std::string>&> errMessages)
{
    std::stringstream failureMsg;
    failureMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
               << " is not supported on any preferred backend " << backendSettings.m_PreferredBackends;
    ReportError(failureMsg.str(), errMessages);

    res.m_Error = true;
    return res;
}

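// Verifies that every QAsymmU8 output slot of the given layer has a non-zero
// quantization scale, and normalises Softmax outputs to the scale/offset the
// backends expect. Returns false if any scale was left unset; the caller keeps
// iterating so that every offending output is reported in a single pass.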
bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
{
    bool noErrors = true;
    unsigned int numOutputs = layer->GetNumOutputSlots();
    for (unsigned int i = 0; i < numOutputs; i++) {
        OutputSlot& outputSlot = layer->GetOutputSlot(i);
        TensorInfo info = outputSlot.GetTensorInfo();
        if (DataType::QAsymmU8 == info.GetDataType()) {
            if (0.f == info.GetQuantizationScale()) {
                noErrors = false;
                std::stringstream ss;
                ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
                   << " (" << layer->GetNameStr() << ") is of type"
                   << " Quantized 8 bit but its scale parameter has not been set";
                ReportError(ss.str(), errMessages);
            }
            // Softmax under QuantisedAsymm8 must always be scale (1.0f/256.0f) and offset 0
            if ((info.GetQuantizationScale() != (1.0f / 256.0f) ||
                 info.GetQuantizationOffset() != 0) &&
                 layer->GetType() == armnn::LayerType::Softmax)
            {
                std::stringstream ss;
                ss << "Quantization parameters for Softmax layer (Scale: " <<
                    info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
                    ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
                ARMNN_LOG(warning) << ss.str();
                info.SetQuantizationScale((1.0f / 256.0f));
                info.SetQuantizationOffset(0);
                outputSlot.SetTensorInfo(info);
            }
        }
    }
    return noErrors;
}

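// Converts the constant weight tensor of a Convolution2d or FullyConnected
// layer from BFloat16 to Float32 in place. This is needed when a BF16 layer
// falls back to an FP32 implementation, so that the weights match the data
// type the inserted conversion layers produce.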
template <typename LayerT>
LayerT* ConvertBf16ToFp32Weight(Layer* l)
{
    LayerT* layer = PolymorphicDowncast<LayerT*>(l);
    if ((layer->GetType() == LayerType::Convolution2d || layer->GetType() == LayerType::FullyConnected)
         && layer->m_Weight)
    {
        const TensorInfo& info = layer->m_Weight->GetTensorInfo();

        if (info.GetDataType() == DataType::BFloat16)
        {
            std::vector<float> newValues(info.GetNumElements());

            armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(
                layer->m_Weight->template GetTensor<armnn::BFloat16>(), info.GetNumElements(), newValues.data());

            TensorInfo newInfo(info.GetShape(), DataType::Float32);
            ConstTensor newInput(newInfo, newValues);
            layer->m_Weight.reset(new ScopedCpuTensorHandle(newInput));
        }
    }
    return layer;
}

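// Tries to assign the given backend to a single layer. If the backend rejects
// the layer but supports an FP32 equivalent, FP16<->FP32 (or BF16<->FP32)
// conversion layers are inserted around it so the layer can run in FP32.
// The returned OptimizationResult distinguishes three outcomes: success
// (IsOk), a recoverable rejection that the caller should treat as "try the
// next backend" (IsWarningOnly), and a hard failure (IsError).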
OptimizationResult AttemptBackendAssignment(BackendSettings& backendSettings,
                                            Graph& graph,
                                            Layer* layer,
                                            BackendId backend,
                                            DataType dataTypeIn,
                                            DataType dataTypeOut,
                                            const std::vector<BackendId>& availablePreferredBackends,
                                            std::string& reasonIfUnsupported,
                                            Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose meaningful error message before returning with error
    auto ReturnError = [&](const Layer* layer)
        {
            return ReturnWithError(result, layer, backendSettings, errMessages);
        };

    // need to set the compute device on the layer
    // before we can check if it is supported
    layer->SetBackendId(backend);
    if (!IWorkloadFactory::IsLayerSupported(*layer, EmptyOptional(), reasonIfUnsupported))
    {
        if (dataTypeIn == DataType::Float16 || dataTypeOut == DataType::Float16)
        {
            if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                && layer->GetType() != LayerType::ConvertFp32ToFp16
                && layer->GetType() != LayerType::ConvertFp16ToFp32)
            {
                // Insert FP16 -> FP32 conversion layer before current layer
                std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers;
                if (dataTypeIn == DataType::Float16)
                {
                    convertFp16ToFp32Layers =
                        InsertConvertFp16ToFp32LayersBefore(graph, *layer);
                }

                // Insert FP32 -> FP16 conversion layer after current layer
                std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers;
                if (dataTypeOut == DataType::Float16)
                {
                    convertFp32ToFp16Layers =
                        InsertConvertFp32ToFp16LayersAfter(graph, *layer);
                }

                // Assign a supported backend to the newly introduced conversion layers
                auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                    {
                        bool supportedBackendFound = false;
                        std::string reasonIfUnsupported;

                        // Try preferred backend first
                        layer->SetBackendId(preferredBackend);
                        if (IWorkloadFactory::IsLayerSupported(*layer,
                                                               EmptyOptional(),
                                                               reasonIfUnsupported))
                        {
                            supportedBackendFound = true;
                        }
                        else
                        {
                            for (const auto& backend : availablePreferredBackends)
                            {
                                // Skip preferred backend (we already determined that it is not supported)
                                if (backend == preferredBackend)
                                {
                                    continue;
                                }

                                layer->SetBackendId(backend);
                                if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                       EmptyOptional(),
                                                                       reasonIfUnsupported))
                                {
                                    supportedBackendFound = true;
                                    break;
                                }
                            }
                        }

                        return supportedBackendFound;
                    };

                for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                return result;
            }
        }
        else if (dataTypeIn == DataType::BFloat16 || dataTypeOut == DataType::BFloat16)
        {
            if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                && layer->GetType() != LayerType::ConvertFp32ToBf16
                && layer->GetType() != LayerType::ConvertBf16ToFp32)
            {
                // Insert BF16 -> FP32 conversion layer before current layer
                std::vector<ConvertBf16ToFp32Layer*> convertBf16ToFp32Layers;
                if (dataTypeIn == DataType::BFloat16)
                {
                    convertBf16ToFp32Layers =
                        InsertConvertBf16ToFp32LayersBefore(graph, *layer);
                    if (layer->GetType() == LayerType::Convolution2d)
                    {
                        ConvertBf16ToFp32Weight<Convolution2dLayer>(layer);
                    }
                    else if (layer->GetType() == LayerType::FullyConnected)
                    {
                        ConvertBf16ToFp32Weight<FullyConnectedLayer>(layer);
                    }
                }

                // Insert FP32 -> BF16 conversion layer after current layer
                std::vector<ConvertFp32ToBf16Layer*> convertFp32ToBf16Layers;
                if (dataTypeOut == DataType::BFloat16)
                {
                    convertFp32ToBf16Layers =
                        InsertConvertFp32ToBf16LayersAfter(graph, *layer);
                }

                // Assign a supported backend to the newly introduced conversion layers
                auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                    {
                        bool supportedBackendFound = false;
                        std::string reasonIfUnsupported;

                        // Try preferred backend first
                        layer->SetBackendId(preferredBackend);
                        if (IWorkloadFactory::IsLayerSupported(*layer,
                                                               EmptyOptional(),
                                                               reasonIfUnsupported))
                        {
                            supportedBackendFound = true;
                        }
                        else
                        {
                            for (const auto& backend : availablePreferredBackends)
                            {
                                // Skip preferred backend (we already determined that it is not supported)
                                if (backend == preferredBackend)
                                {
                                    continue;
                                }

                                layer->SetBackendId(backend);
                                if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                       EmptyOptional(),
                                                                       reasonIfUnsupported))
                                {
                                    supportedBackendFound = true;
                                    break;
                                }
                            }
                        }

                        return supportedBackendFound;
                    };

                for (ConvertBf16ToFp32Layer* convertLayer : convertBf16ToFp32Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                for (ConvertFp32ToBf16Layer* convertLayer : convertFp32ToBf16Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                return result;
            }
        }

        std::stringstream warningMsg;
        warningMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                   << " is not supported on requested backend " << layer->GetBackendId().Get()
                   << " for input data type " << GetDataTypeName(dataTypeIn)
                   << " and output data type " << GetDataTypeName(dataTypeOut)
                   << " (reason: " << reasonIfUnsupported
                   << "), falling back to the next backend.";
        ReportWarning(warningMsg.str(), errMessages);

        return OptimizationResult(true, false);
    }
    else
    {
        return result;
    }
}

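// Walks the layers in [firstLayer, lastLayer) and assigns a backend to each,
// trying the layer's backend hint first (if any) and then the preferred
// backends in order. As a last resort, MemCopy, Constant and Permute layers
// are pinned to CpuRef even when CpuRef was not requested, because they may
// have no accelerated implementation elsewhere.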
OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  Graph::Iterator& firstLayer,
                                  Graph::Iterator& lastLayer,
                                  Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose meaningful error message before returning with error
    auto ReturnError = [&](const Layer* layer)
        {
            return ReturnWithError(result, layer, backendSettings, errMessages);
        };

    auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
    if (availablePreferredBackends.empty())
    {
        std::stringstream failureMsg;
        failureMsg << "No preferred backends are available";
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    }

    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        auto layer = *it;

        DataType dataTypeIn  = layer->GetNumInputSlots() == 0 ? DataType::Float32 :
            layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType();
        DataType dataTypeOut = layer->GetNumOutputSlots() == 0 ? DataType::Float32 :
            layer->GetOutputSlot(0).GetTensorInfo().GetDataType();

        std::string reasonIfUnsupported;
        bool found = false;
        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
        {
            // Don't bomb immediately, find all the quantized outputs
            // which haven't had a scale set and report them all back.
            result.m_Error = true;
        }

        // First try to assign the layer to its hinted backend
        if (layer->GetBackendHint().has_value() &&
            backendSettings.IsBackendSupported(layer->GetBackendHint().value()) &&
            AttemptBackendAssignment(backendSettings,
                                     optNetObjPtr->GetGraph(),
                                     layer,
                                     layer->GetBackendHint().value(),
                                     dataTypeIn,
                                     dataTypeOut,
                                     availablePreferredBackends,
                                     reasonIfUnsupported,
                                     errMessages).IsOk())
        {
            found = true;
            backendSettings.m_SelectedBackends.insert(layer->GetBackendHint().value());
        }
        else
        {
            // Try to assign the layer to the preferred list of backends
            for (const auto& backend : availablePreferredBackends)
            {
                if (layer->GetBackendHint().has_value() &&
                    layer->GetBackendHint().value() == backend)
                {
                    continue; // Don't re-test the backend hint
                }

                OptimizationResult res = AttemptBackendAssignment(backendSettings,
                                                                  optNetObjPtr->GetGraph(),
                                                                  layer,
                                                                  backend,
                                                                  dataTypeIn,
                                                                  dataTypeOut,
                                                                  availablePreferredBackends,
                                                                  reasonIfUnsupported,
                                                                  errMessages);

                if (res.IsOk())
                {
                    found = true;
                    backendSettings.m_SelectedBackends.insert(backend);
                    break;
                }
                else if (res.IsError())
                {
                    return res; // Cannot continue.
                                // Note: we don't need to log the error as it would already
                                // be logged in AttemptBackendAssignment().
                }
                else
                {
                    ARMNN_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
                }
            }
        }

        // If the layer is unsupported by any devices, log an error and return.
        if (!found)
        {
            // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
            // fallback we should set the compute device on the layer to CpuRef (these are not
            // available as accelerated operations, or are only available under certain
            // conditions, currently they comprise MemCopy, Constant, Permute)
            armnn::LayerType layerType = layer->GetType();
            if (!backendSettings.IsCpuRefUsed() && (layerType == armnn::LayerType::MemCopy ||
                                                    layerType == armnn::LayerType::Constant ||
                                                    layerType == armnn::LayerType::Permute))
            {
                BackendId cpuBackendId(armnn::Compute::CpuRef);
                layer->SetBackendId(cpuBackendId);
                backendSettings.m_SelectedBackends.insert(cpuBackendId);
            }
            else
            {
                return ReturnError(layer);
            }
        }
    }

    return result;
}

OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  SubgraphView& subgraph,
                                  Optional<std::vector<std::string>&> errMessages)
{
    Graph::Iterator firstLayer = subgraph.begin();
    Graph::Iterator lastLayer  = subgraph.end();
    return AssignBackends(optNetObjPtr,
                          backendSettings,
                          firstLayer,
                          lastLayer,
                          errMessages);
}

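// Instantiates a backend object for every supported backend in the settings,
// lets each backend register its tensor handle factories, and returns the
// objects keyed by backend id so later passes can query them.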
BackendsMap CreateSupportedBackends(TensorHandleFactoryRegistry& handleFactoryRegistry,
                                    BackendSettings& backendSettings)
{
    BackendsMap backends;
    auto const& backendRegistry = BackendRegistryInstance();
    for (auto&& selectedBackend : backendSettings.m_SupportedBackends)
    {
        auto backendFactory = backendRegistry.GetFactory(selectedBackend);
        auto backendObjPtr = backendFactory();
        ARMNN_ASSERT(backendObjPtr);

        backendObjPtr->RegisterTensorHandleFactories(handleFactoryRegistry);

        backends[backendObjPtr->GetId()] = std::move(backendObjPtr);
    }

    return backends;
}

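// Gives each selected backend a chance to rewrite the sub-graphs assigned to
// it (for example by fusing layers). Successful substitutions are spliced
// back into the main graph; sub-graphs the backend could not optimize are
// re-assigned to the remaining backends.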
OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
                                             BackendSettings& backendSettings,
                                             BackendsMap& backends,
                                             Optional<std::vector<std::string>&> errMessages)
{
    ARMNN_ASSERT(optNetObjPtr);

    OptimizationResult result;

    // Get the optimized graph
    Graph& optGraph = optNetObjPtr->GetGraph();

    // Run backend specific optimizations
    for (auto&& selectedBackend : backendSettings.m_SelectedBackends)
    {
        auto backendObjPtr = backends.find(selectedBackend)->second.get();
        ARMNN_ASSERT(backendObjPtr);

        // Select sub-graphs based on backend
        SubgraphViewSelector::Subgraphs subgraphs =
            SubgraphViewSelector::SelectSubgraphs(optGraph,
                                                  // Select layers assigned to the requested backend
                                                  [&backendObjPtr](const Layer& layer)
                                                  {
                                                      return layer.GetType() != LayerType::Input &&
                                                             layer.GetType() != LayerType::Output &&
                                                             layer.GetBackendId() == backendObjPtr->GetId();
                                                  });
        if (subgraphs.empty())
        {
            // No sub-graphs found, try with next selected backend
            continue;
        }

        // Try to optimize each sub-graph
        for (auto& subgraph : subgraphs)
        {
            // Try to optimize the current sub-graph
            OptimizationViews optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraph);
            ARMNN_ASSERT(optimizationViews.Validate(*subgraph));

            // Optimization attempted, check the resulting optimized sub-graph
            for (auto& substitution : optimizationViews.GetSubstitutions())
            {
                // Sub-graph optimized, substitute the sub-graph with the new optimized one in the main optimized graph
                SubgraphView& replacementSubgraph   = substitution.m_ReplacementSubgraph;
                SubgraphView& substitutableSubgraph = substitution.m_SubstitutableSubgraph;
                optGraph.SubstituteSubgraph(substitutableSubgraph, replacementSubgraph);

                // Assign the current backend to the optimized sub-graph
                std::for_each(replacementSubgraph.begin(), replacementSubgraph.end(), [&selectedBackend](Layer* l)
                    {
                        ARMNN_ASSERT(l);
                        l->SetBackendId(selectedBackend);
                    });
            }

            if (!optimizationViews.GetFailedSubgraphs().empty())
            {
                std::stringstream warningMsg;
                warningMsg << "Some sub-graph(s) failed to be optimized on " << backendObjPtr->GetId() << " backend.";
                ReportWarning(warningMsg.str(), errMessages);

                // Failed to optimize the given sub-graph, re-assign the sub-graph layers to other available backends
                BackendSettings settingsCopy(backendSettings);
                if (!backendObjPtr->GetId().IsCpuRef())
                {
                    // Add the current backend to the list of backends to ignore
                    settingsCopy.m_IgnoredBackends.insert(backendObjPtr->GetId());
                }

                int count = 0;
                for (auto& failedSubgraph : optimizationViews.GetFailedSubgraphs())
                {
                    // An error occurred: the optimization was attempted but not performed, try different backends
                    std::stringstream subgraphMsg;
                    subgraphMsg << "Re-assigning backends to " << failedSubgraph.GetLayers().size()
                                << " layers inside sub-graph " << count++;
                    ReportWarning(subgraphMsg.str(), errMessages);

                    OptimizationResult reassignmentResult = AssignBackends(optNetObjPtr,
                                                                           settingsCopy,
                                                                           *subgraph,
                                                                           errMessages);
                    if (reassignmentResult.m_Error)
                    {
                        // Failed to re-assign one of the remaining backends to each layer of the sub-graph
                        result.m_Error = true;
                        return result;
                    }
                }
            }
        }
    }

    return result;
}

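// Two tensor handle factories can share a buffer without a copy only when the
// source factory can export memory that the destination factory is able to
// import; otherwise the edge needs an explicit copy.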
bool RequiresCopy(ITensorHandleFactory::FactoryId src,
                  ITensorHandleFactory::FactoryId dst,
                  TensorHandleFactoryRegistry& registry)
{
    if (src != dst)
    {
        ITensorHandleFactory* srcFactory = registry.GetFactory(src);
        ITensorHandleFactory* dstFactory = registry.GetFactory(dst);

        if (srcFactory && dstFactory &&
            (srcFactory->GetExportFlags() & dstFactory->GetImportFlags()) != 0)
        {
            return false;
        }
        return true;
    }
    return false;
}

// Find the handle factory for the input layer which results in fewest required copies.
ITensorHandleFactory::FactoryId CalculateSlotOptionForInput(BackendsMap& backends,
                                                            OutputSlot& slot,
                                                            TensorHandleFactoryRegistry& registry)
{
    Layer& layer = slot.GetOwningLayer();
    ARMNN_ASSERT(layer.GetType() == LayerType::Input);

    // Explicitly select the tensor handle factory for InputLayer because the rules for it are slightly different. It
    // doesn't matter which backend it is assigned to because they all use the same implementation, which
    // requires Map/Unmap support. This means that, so long as the handle type supports map/unmap semantics, we can
    // select a factory with maximum compatibility with the layers connected to the InputLayer.

    // First ensure the source backend supports the TensorHandle API
    auto frmBackend = backends.find(layer.GetBackendId());
    if (frmBackend == backends.end() ||
        !frmBackend->second->SupportsTensorAllocatorAPI())
    {
        return ITensorHandleFactory::LegacyFactoryId;
    }

    // Go through all connections to the output slot and determine the TensorHandleFactory which results in the
    // fewest copies.
    std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
    int topScore = 0;
    ITensorHandleFactory::FactoryId topChoice = ITensorHandleFactory::LegacyFactoryId;

    for (auto&& connection : slot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();

        auto toBackend = backends.find(connectedLayer.GetBackendId());
        ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

        if (!toBackend->second.get()->SupportsTensorAllocatorAPI())
        {
            // The destination backend does not support the tensor allocator API, move to the next one
            continue;
        }

        auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
        for (auto&& dst : dstPrefs)
        {
            // Input layers use the mem copy workload or import, so the selected factory must
            // support either the map/unmap API or the import API
            ITensorHandleFactory* factory = registry.GetFactory(dst);
            if (!factory->SupportsMapUnmap() &&
                !CheckFlag(factory->GetImportFlags(), MemorySource::Malloc)) // Just support cpu mem imports for now
            {
                // The current tensor handle factory does not support the map/unmap or import
                // strategy, move to the next one
                continue;
            }

            auto it = factoryScores.find(dst);
            if (it == factoryScores.end())
            {
                // Add new score to the table
                factoryScores[dst] = 0;
                if (topChoice == ITensorHandleFactory::LegacyFactoryId)
                {
                    topChoice = dst;
                }
            }
            else
            {
                // Increase the score
                factoryScores[dst]++;

                // Track the best option
                if (factoryScores[dst] > topScore)
                {
                    topScore = factoryScores[dst];
                    topChoice = dst;
                }
            }
        }
    }

    return topChoice;
}

// Find the handle factory for the output layer which results in fewest required copies.
ITensorHandleFactory::FactoryId CalculateSlotOptionForOutput(BackendsMap& backends,
                                                             OutputSlot& slot,
                                                             TensorHandleFactoryRegistry& registry)
{
    IgnoreUnused(backends, slot, registry);
    return ITensorHandleFactory::DeferredFactoryId;
}

// For all handle factories supported on the source backend, we wish to find the one which requires the fewest copies
// when considering all connections.
ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
                                                    OutputSlot& outputSlot,
                                                    TensorHandleFactoryRegistry& registry)
{
    // First ensure the source backend supports the TensorHandle API
    Layer& layer = outputSlot.GetOwningLayer();
    auto frmBackend = backends.find(layer.GetBackendId());
    if (frmBackend == backends.end() ||
        !frmBackend->second->SupportsTensorAllocatorAPI())
    {
        return ITensorHandleFactory::LegacyFactoryId;
    }

    // Connections to Output layers require support for map/unmap on the TensorHandle.
    bool requiresMapUnmap = false;
    for (auto&& connection : outputSlot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();
        if (connectedLayer.GetType() == LayerType::Output)
        {
            requiresMapUnmap = true;
        }
    }

    IBackendInternal* srcBackend = frmBackend->second.get();
    auto srcPrefs = srcBackend->GetHandleFactoryPreferences();

    // Initialize the scores
    std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
    for (auto&& pref : srcPrefs)
    {
        if (requiresMapUnmap) // Only consider factories that support map/unmap if required
        {
            ITensorHandleFactory* factory = registry.GetFactory(pref);
            if (!factory->SupportsMapUnmap())
            {
                // The current tensor handle factory does not support the map/unmap strategy, move to the next one
                continue;
            }
        }

        auto it = factoryScores.find(pref);
        if (it == factoryScores.end())
        {
            // Add new score to the table
            factoryScores[pref] = 0;
        }
    }

    // Score each handle factory based on how many times it requires copies on the slot connections
    for (auto&& connection : outputSlot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();

        auto toBackend = backends.find(connectedLayer.GetBackendId());
        ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

        auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
        for (auto&& src : srcPrefs)
        {
            if (factoryScores.find(src) == factoryScores.end()) // Don't consider excluded factories
            {
                continue;
            }

            for (auto&& dst : dstPrefs)
            {
                if (RequiresCopy(src, dst, registry))
                {
                    // A copy is required on this connection, so penalise the factory's score
                    factoryScores[src]++;
                    break;
                }
            }
        }
    }

    // Find the lowest score
    int minScore = std::numeric_limits<int>::max();
    for (auto it : factoryScores)
    {
        minScore = std::min(minScore, it.second);
    }

    // Collect factories matching the best (lowest) score
    std::vector<ITensorHandleFactory::FactoryId> optimalFactories;
    for (auto it : factoryScores)
    {
        if (it.second == minScore)
        {
            optimalFactories.push_back(it.first);
        }
    }

    // For all compatible factories matching the best score, find the preferred one for the current layer.
    for (auto&& srcPref : srcPrefs)
    {
        for (auto&& comp : optimalFactories)
        {
            if (comp == srcPref)
            {
                return comp;
            }
        }
    }

    return ITensorHandleFactory::LegacyFactoryId;
}

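// Decides how a tensor travels across one edge of the graph, in order of
// preference: DirectCompatibility (the destination accepts the source factory
// as-is), ExportToTarget (zero-copy via export/import, only when import is
// enabled and neither side requires padding), CopyToTarget (fall back to a
// map/unmap memcpy), or Undefined if no compatible strategy exists.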
EdgeStrategy CalculateEdgeStrategy(BackendsMap& backends,
                                   ITensorHandleFactory::FactoryId srcFactoryId,
                                   const Layer& layer,
                                   const Layer& connectedLayer,
                                   TensorHandleFactoryRegistry& registry,
                                   bool importEnabled)
{
    auto toBackend = backends.find(connectedLayer.GetBackendId());
    ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

    auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();

    // Legacy API check for backward compatibility
    if (srcFactoryId == ITensorHandleFactory::LegacyFactoryId || dstPrefs.empty())
    {
        if (layer.GetBackendId() != connectedLayer.GetBackendId())
        {
            return EdgeStrategy::CopyToTarget;
        }
        else
        {
            return EdgeStrategy::DirectCompatibility;
        }
    }

    // TensorHandleFactory API present, so perform more sophisticated strategies.
    // Dst Output layers don't require copy because they use import or map/unmap
    if (connectedLayer.GetType() == LayerType::Output)
    {
        return EdgeStrategy::DirectCompatibility;
    }

    // Search for direct match in prefs
    for (auto&& pref : dstPrefs)
    {
        if (pref == srcFactoryId)
        {
            return EdgeStrategy::DirectCompatibility;
        }
    }

    // Search for export/import options
    ITensorHandleFactory* srcFactory = registry.GetFactory(srcFactoryId);
    if (srcFactory->GetExportFlags() != 0 && importEnabled)
    {
        for (auto&& pref : dstPrefs)
        {
            ITensorHandleFactory* dstFactory = registry.GetFactory(pref);

            // Handles cases when a destPref is not listed in TensorHandleFactoryRegistry
            if (!dstFactory)
            {
                continue;
            }

            if ((dstFactory->GetImportFlags() & srcFactory->GetExportFlags()) != 0)
            {
                auto srcCapability = srcFactory->GetCapabilities(&layer, &layer, CapabilityClass::PaddingRequired);
                auto dstCapability = dstFactory->GetCapabilities(&connectedLayer,
                                                                 &connectedLayer,
                                                                 CapabilityClass::PaddingRequired);
                // Do not require memory copy if the source and destination do not require padding.
                if (srcCapability.empty() && dstCapability.empty())
                {
                    return EdgeStrategy::ExportToTarget;
                }
            }
        }
    }

    // Search for copy options via map/unmap
    if (srcFactory->SupportsMapUnmap())
    {
        for (auto&& pref : dstPrefs)
        {
            ITensorHandleFactory* dstFactory = registry.GetFactory(pref);
            if (dstFactory && dstFactory->SupportsMapUnmap())
            {
                return EdgeStrategy::CopyToTarget;
            }
        }
    }

    return EdgeStrategy::Undefined;
}

// Select the TensorHandleFactories and the corresponding memory strategy
OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
                                              BackendsMap& backends,
                                              TensorHandleFactoryRegistry& registry,
                                              bool importEnabled,
                                              Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled](Layer* layer)
    {
        ARMNN_ASSERT(layer);

        // Let's make sure the backend is in our list of supported backends. Something went wrong during backend
        // assignment if this check fails
        ARMNN_ASSERT(backends.find(layer->GetBackendId()) != backends.end());

        // Check each output separately
        for (unsigned int slotIdx = 0; slotIdx < layer->GetNumOutputSlots(); slotIdx++)
        {
            OutputSlot& outputSlot = layer->GetOutputSlot(slotIdx);

            ITensorHandleFactory::FactoryId slotOption = ITensorHandleFactory::LegacyFactoryId;

            // Calculate the factory to use which results in the fewest copies being made.
            switch (layer->GetType())
            {
                case LayerType::Input:
                    slotOption = CalculateSlotOptionForInput(backends, outputSlot, registry);
                    break;
                case LayerType::Output:
                    slotOption = CalculateSlotOptionForOutput(backends, outputSlot, registry);
                    break;
                default:
                    slotOption = CalculateSlotOption(backends, outputSlot, registry);
                    break;
            }
            outputSlot.SetTensorHandleFactory(slotOption);

            // Now determine the "best" edge strategy for each connection given the slotOption.
            unsigned int connectionIdx = 0;
            for (auto&& connection : outputSlot.GetConnections())
            {
                const Layer& connectedLayer = connection->GetOwningLayer();

                EdgeStrategy strategy = CalculateEdgeStrategy(backends, slotOption, *layer, connectedLayer,
                                                              registry, importEnabled);

                if (strategy == EdgeStrategy::Undefined)
                {
                    result.m_Error = true;
                    if (errMessages)
                    {
                        errMessages.value().emplace_back("Could not find valid strategy required for compatibility"
                                                         " between backends.");
                    }
                    return;
                }

                outputSlot.SetEdgeStrategy(connectionIdx, strategy);

                connectionIdx++;
            }
        }
    });

    return result;
}

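// Typical call site, as exercised by most ArmNN applications (a minimal
// sketch; network construction and error handling elided, and the default
// OptimizerOptions assumed):
//
//     armnn::IRuntimePtr runtime = armnn::IRuntime::Create(armnn::IRuntime::CreationOptions());
//     armnn::IOptimizedNetworkPtr optNet =
//         armnn::Optimize(*net, {armnn::Compute::CpuAcc, armnn::Compute::CpuRef},
//                         runtime->GetDeviceSpec());
//
// The preferred backends are tried in the order given for every layer, so
// listing CpuRef last makes it the final fallback.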
IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                              const std::vector<BackendId>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options,
                              Optional<std::vector<std::string>&> messages)
{
    if (backendPreferences.empty())
    {
        throw InvalidArgumentException("Invoked Optimize with no backends specified");
    }

    if (options.m_ReduceFp32ToFp16 && options.m_ReduceFp32ToBf16)
    {
        throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
    }

    const Network& network = *PolymorphicDowncast<const Network*>(&inNetwork);
    std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());

    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph), options.m_ModelOptions),
                                       &IOptimizedNetwork::Destroy);

    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());

    // Get the optimized graph
    Graph& optGraph = optNetObjPtr->GetGraph();

    // Perform AddBroadcastReshapeLayer optimisation
    using namespace optimizations;
    Optimizer::Pass(optGraph, MakeOptimizations(AddBroadcastReshapeLayer()));

    // Infer the tensor infos for all output slots. Throws an exception on failure
    optGraph.InferTensorInfos();

    // Perform optimisation passes
    Optimizer::Pass(optGraph, MakeOptimizations(SquashEqualPermuteSiblings(),
                                                SquashEqualTransposeSiblings(),
                                                SquashEqualReshapeSiblings(),
                                                OptimizeInversePermutes(),
                                                OptimizeInverseTransposes(),
                                                MovePermuteUp(),
                                                MoveTransposeUp(),
                                                PermuteAsReshape(),
                                                TransposeAsReshape(),
                                                OptimizeConsecutiveReshapes(),
                                                FoldPadIntoConvolution2d(),
                                                PermuteAndBatchToSpaceAsDepthToSpace(),
                                                TransposeAndBatchToSpaceAsDepthToSpace()));

    // If Fp32 to Fp16 optimization is set convert Fp32 network to Fp16
    if (options.m_ReduceFp32ToFp16)
    {
        Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToFp16Converter()));
        Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
    }

    // If Fp32 to Bf16 optimization is set convert Fp32 network to Bf16
    // Convert input of Convolution2d and FullyConnected from Fp32 to Bf16
    // Only Constant weight of Convolution2d and FullyConnected are converted from Fp32 to Bf16
    if (options.m_ReduceFp32ToBf16)
    {
        Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToBf16Converter()));
    }

    // Initialize backend settings
    BackendSettings backendSettings(backendPreferences, deviceSpec);
    if (backendSettings.GetAvailablePreferredBackends().empty())
    {
        std::stringstream failureMsg;
        failureMsg << "None of the preferred backends " << backendPreferences
                   << " are supported. Current platform provides " << backendSettings.m_SupportedBackends;
        ReportError(failureMsg.str(), messages);
        throw InvalidArgumentException(failureMsg.str());
    }

    // Create a map to temporarily hold initialized backend objects
    TensorHandleFactoryRegistry tensorHandleFactoryRegistry;
    BackendsMap backends = CreateSupportedBackends(tensorHandleFactoryRegistry, backendSettings);

    // Assign an available backend to each layer
    Graph::Iterator firstLayer = optGraph.begin();
    Graph::Iterator lastLayer  = optGraph.end();
    OptimizationResult assignBackendsResult = AssignBackends(optNetObjPtr,
                                                             backendSettings,
                                                             firstLayer,
                                                             lastLayer,
                                                             messages);
    if (assignBackendsResult.m_Error)
    {
        // Failed to assign a backend to each layer
        throw InvalidArgumentException("Failed to assign a backend to each layer");
    }

    Optimizer::Pass(optGraph, MakeOptimizations(OptimizeInverseConversionsFp16(),
                                                OptimizeInverseConversionsFp32()));

    // Apply the backend-specific optimizations
    OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(optNetObjPtr,
                                                                             backendSettings,
                                                                             backends,
                                                                             messages);
    if (backendOptimizationResult.m_Error)
    {
        // Failed to apply the backend-specific optimizations
        throw InvalidArgumentException("Failed to apply the backend-specific optimizations");
    }

    // If the debug flag is set, then insert a DebugLayer after each layer
    // Doing this after applying the backend optimizations as they might have changed some layers
    if (options.m_Debug)
    {
        Optimizer::Pass(optGraph, MakeOptimizations(InsertDebugLayer()));
    }

    // Calculate the compatibility strategies for tensor handles
    OptimizationResult strategyResult = SelectTensorHandleStrategy(optGraph,
                                                                   backends,
                                                                   tensorHandleFactoryRegistry,
                                                                   options.m_ImportEnabled,
                                                                   messages);
    if (strategyResult.m_Error)
    {
        // Failed to select tensor handle strategies for all edges
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // Based on the tensor handle strategy determined above, insert copy layers where required.
    optGraph.AddCompatibilityLayers(backends, tensorHandleFactoryRegistry);

    // Convert constants
    Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
    Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsHalfToFloat()));

    // Run backend specific optimizations (deprecated)
    for (auto&& chosenBackend : backendSettings.m_SelectedBackends)
    {
        auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
        auto backendPtr = factoryFun();
        ARMNN_ASSERT(backendPtr.get() != nullptr);

        ARMNN_NO_DEPRECATE_WARN_BEGIN
        auto backendSpecificOptimizations = backendPtr->GetOptimizations();
        ARMNN_NO_DEPRECATE_WARN_END

        if (!backendSpecificOptimizations.empty())
        {
            Optimizer::Pass(optNetObjPtr->GetGraph(), backendSpecificOptimizations);
        }
    }

    return optNet;
}

bool Network::GetShapeInferenceMethod()
{
    if (m_NetworkOptions.size() > 0 && m_NetworkOptions[0].GetBackendId().Get() == "ShapeInferenceMethod")
    {
        return m_NetworkOptions[0].GetOption(0).GetValue().AsBool();
    }

    return false;
}

Network::Network(NetworkOptions networkOptions)
: m_NetworkOptions(networkOptions),
  m_Graph(std::make_unique<Graph>(GetShapeInferenceMethod()))
{}

Network::~Network()
{
}

Status Network::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

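// The Add*Layer methods below all follow the same pattern: forward the
// descriptor (plus any constant tensors) to Graph::AddLayer, which constructs
// and owns the layer, and return it through the IConnectableLayer interface.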
IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<InputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
}

IConnectableLayer* Network::AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
                                               const char* name)
{
    return m_Graph->AddLayer<ComparisonLayer>(comparisonDescriptor, name);
}

IConnectableLayer* Network::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
                                                     const char* name)
{
    return m_Graph->AddLayer<ElementwiseUnaryLayer>(elementwiseUnaryDescriptor, name);
}

IConnectableLayer* Network::AddFillLayer(const FillDescriptor& fillDescriptor,
                                         const char* name)
{
    return m_Graph->AddLayer<FillLayer>(fillDescriptor, name);
}

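// Shared implementation behind the public AddFullyConnectedLayer overloads.
// Biases arrive as an Optional so the bias-less overload and the bias-taking
// overloads can funnel through a single validation path.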
telsoa014fcda012018-03-09 14:13:49 +00001218IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001219 const ConstTensor& weights,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001220 const Optional<ConstTensor>& biases,
telsoa01c577f2c2018-08-31 09:22:23 +01001221 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001222{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001223 if (fullyConnectedDescriptor.m_BiasEnabled && !biases.has_value())
telsoa014fcda012018-03-09 14:13:49 +00001224 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001225 throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be empty");
telsoa014fcda012018-03-09 14:13:49 +00001226 }
1227
1228 const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);
1229
1230 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
1231
1232 if (fullyConnectedDescriptor.m_BiasEnabled)
1233 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001234 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
telsoa014fcda012018-03-09 14:13:49 +00001235 }
1236
1237 return layer;
1238}
1239
1240IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001241 const ConstTensor& weights,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001242 const Optional<ConstTensor>& biases,
telsoa01c577f2c2018-08-31 09:22:23 +01001243 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001244{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001245 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
telsoa014fcda012018-03-09 14:13:49 +00001246}
1247
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001248IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
1249 const ConstTensor& weights,
1250 const char* name)
1251{
Matteo Martincighfc598e12019-05-14 10:36:13 +01001252 Optional<ConstTensor> biases;
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001253 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
1254}
1255
telsoa014fcda012018-03-09 14:13:49 +00001256IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001257 const ConstTensor& weights,
1258 const ConstTensor& biases,
1259 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001260{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001261 Optional<ConstTensor> optionalBiases(biases);
1262 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, optionalBiases, name);
telsoa014fcda012018-03-09 14:13:49 +00001263}
1264
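// Sketch of the bias contract enforced by AddFullyConnectedLayerImpl above
// (shapes, values and the "network" object are assumptions for the example):
// when m_BiasEnabled is set a bias tensor must be supplied, and both weights
// and biases are copied into the layer, so the caller's buffers need not
// outlive this call.
//
//     armnn::FullyConnectedDescriptor fcDesc;
//     fcDesc.m_BiasEnabled = true;
//     std::vector<float> w(16 * 8, 0.5f);
//     std::vector<float> b(16, 0.0f);
//     armnn::ConstTensor weights(armnn::TensorInfo({16, 8}, armnn::DataType::Float32), w);
//     armnn::ConstTensor biases(armnn::TensorInfo({16}, armnn::DataType::Float32), b);
//     network.AddFullyConnectedLayer(fcDesc, weights, biases, "fc1");
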
IConnectableLayer* Network::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
                                           const char* name)
{
    return m_Graph->AddLayer<ConcatLayer>(concatDescriptor, name);
}

IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
                                                      const ConstTensor& weights,
                                                      const Optional<ConstTensor>& biases,
                                                      const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const Optional<ConstTensor>& biases,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const char* name)
{
    Optional<ConstTensor> biases;
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const ConstTensor& biases,
                                                  const char* name)
{
    Optional<ConstTensor> optionalBiases(biases);
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
}

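// Sketch for the Convolution2d overloads above (kernel shape, strides and
// layout are assumptions): the descriptor carries stride/padding/layout,
// while the kernel data is captured by value in the layer.
//
//     armnn::Convolution2dDescriptor convDesc;
//     convDesc.m_StrideX    = 1;
//     convDesc.m_StrideY    = 1;
//     convDesc.m_DataLayout = armnn::DataLayout::NCHW;
//     std::vector<float> kernel(8 * 3 * 3 * 3, 0.1f);
//     armnn::ConstTensor weights(
//         armnn::TensorInfo({8, 3, 3, 3}, armnn::DataType::Float32), kernel);
//     network.AddConvolution2dLayer(convDesc, weights, "conv1");
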
IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const Optional<ConstTensor>& biases,
    const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<DepthToSpaceLayer>(depthToSpaceDescriptor, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const Optional<ConstTensor>& biases,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const char* name)
{
    Optional<ConstTensor> biases;
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor& biases,
    const char* name)
{
    Optional<ConstTensor> optionalBiases(biases);
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
}

IConnectableLayer* Network::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
                                                         const ConstTensor& anchors, const char* name)
{
    const auto layer = m_Graph->AddLayer<DetectionPostProcessLayer>(descriptor, name);

    layer->m_Anchors = std::make_unique<ScopedCpuTensorHandle>(anchors);

    return layer;
}

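// Note on AddDetectionPostProcessLayer above: unlike most layers, it takes an
// extra constant input (the anchor boxes), which is copied into the layer
// here rather than connected through a graph input slot.
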
IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
}

IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
}

IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
                                               const char* name)
{
    return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
}

IConnectableLayer* Network::AddArgMinMaxLayer(const ArgMinMaxDescriptor& argMinMaxDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<ArgMinMaxLayer>(argMinMaxDescriptor, name);
}

IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
                                                  const char* name)
{
    return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
}

IConnectableLayer* Network::AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name)
{
    return m_Graph->AddLayer<SliceLayer>(sliceDescriptor, name);
}

IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
}

IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
                                             const char* name)
{
    return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
}

IConnectableLayer* Network::AddMaximumLayer(const char* name)
{
    return m_Graph->AddLayer<MaximumLayer>(name);
}

IConnectableLayer* Network::AddMinimumLayer(const char* name)
{
    return m_Graph->AddLayer<MinimumLayer>(name);
}

IConnectableLayer* Network::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
                                           const char* name)
{
    return AddConcatLayer(mergerDescriptor, name);
}

IConnectableLayer* Network::AddAbsLayer(const char* name)
{
    return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
}

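// Note: AddMergerLayer and AddAbsLayer above (and AddGreaterLayer,
// AddEqualLayer and AddRsqrtLayer further down) are thin compatibility
// wrappers that forward to the descriptor-based Concat, ElementwiseUnary and
// Comparison entry points; new code can call those directly.
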
IConnectableLayer* Network::AddAdditionLayer(const char* name)
{
    return m_Graph->AddLayer<AdditionLayer>(name);
}

IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
{
    return m_Graph->AddLayer<MultiplicationLayer>(name);
}

IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<OutputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                       const ConstTensor& mean,
                                                       const ConstTensor& variance,
                                                       const ConstTensor& beta,
                                                       const ConstTensor& gamma,
                                                       const char* name)
{
    const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);

    layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
    layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
    layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
    layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);

    return layer;
}

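// Sketch for AddBatchNormalizationLayer above (channel count and epsilon are
// assumptions): all four statistics tensors are per-channel and are copied
// into the layer.
//
//     armnn::BatchNormalizationDescriptor bnDesc;
//     bnDesc.m_Eps = 1e-5f;
//     armnn::TensorInfo statsInfo({16}, armnn::DataType::Float32);
//     std::vector<float> mean(16, 0.0f), var(16, 1.0f), beta(16, 0.0f), gamma(16, 1.0f);
//     network.AddBatchNormalizationLayer(bnDesc,
//                                        armnn::ConstTensor(statsInfo, mean),
//                                        armnn::ConstTensor(statsInfo, var),
//                                        armnn::ConstTensor(statsInfo, beta),
//                                        armnn::ConstTensor(statsInfo, gamma),
//                                        "bn1");
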
IConnectableLayer* Network::AddRankLayer(const char* name)
{
    return m_Graph->AddLayer<RankLayer>(name);
}

IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
                                                   const char* name)
{
    ResizeDescriptor resizeDescriptor;
    resizeDescriptor.m_Method           = ResizeMethod::Bilinear;
    resizeDescriptor.m_DataLayout       = descriptor.m_DataLayout;
    resizeDescriptor.m_TargetWidth      = descriptor.m_TargetWidth;
    resizeDescriptor.m_TargetHeight     = descriptor.m_TargetHeight;
    resizeDescriptor.m_AlignCorners     = descriptor.m_AlignCorners;
    resizeDescriptor.m_HalfPixelCenters = descriptor.m_HalfPixelCenters;

    return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
}

IConnectableLayer* Network::AddResizeLayer(const ResizeDescriptor& resizeDescriptor, const char* name)
{
    return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
}

IConnectableLayer* Network::AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
                                                          const char* name)
{
    return m_Graph->AddLayer<InstanceNormalizationLayer>(desc, name);
}

IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                    const char* name)
{
    return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
}

IConnectableLayer* Network::AddLogSoftmaxLayer(const LogSoftmaxDescriptor& desc,
                                               const char* name)
{
    return m_Graph->AddLayer<LogSoftmaxLayer>(desc, name);
}

IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
{
    auto layer = m_Graph->AddLayer<ConstantLayer>(name);

    layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);

    return layer;
}

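// Sketch for AddConstantLayer above (shape and values are assumptions): the
// payload is copied into the layer and becomes its sole output.
//
//     std::vector<float> values(4, 1.0f);
//     armnn::ConstTensor constant(
//         armnn::TensorInfo({4}, armnn::DataType::Float32), values);
//     network.AddConstantLayer(constant, "ones");
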
IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
}

IConnectableLayer* Network::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
}

IConnectableLayer* Network::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<SpaceToDepthLayer>(spaceToDepthDescriptor, name);
}

IConnectableLayer* Network::AddFloorLayer(const char* name)
{
    return m_Graph->AddLayer<FloorLayer>(name);
}

IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor& descriptor,
                                         const LstmInputParams& params,
                                         const char* name)
{
    const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);

    //Lstm Basic Parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    //Lstm Cifg parameters
    if(!descriptor.m_CifgEnabled)
    {
        if(params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL "
                                           "when CIFG is disabled.");
        }
        if(params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                "AddLstmLayer: Recurrent To Input Weights cannot be NULL "
                "when CIFG is disabled.");
        }
        if(params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL "
                                           "when CIFG is disabled.");
        }
        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    //Lstm projection parameters
    if(descriptor.m_ProjectionEnabled)
    {
        if(params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL "
                                           "when projection is enabled.");
        }
        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
        if(params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    //Lstm Peephole params
    if(descriptor.m_PeepholeEnabled)
    {
        if(!descriptor.m_CifgEnabled)
        {
            if(params.m_CellToInputWeights == nullptr)
            {
                throw InvalidArgumentException("AddLstmLayer: Cell To Input Weights cannot be NULL "
                                               "when Peephole is enabled and CIFG disabled.");
            }

            layer->m_PeepholeParameters.m_CellToInputWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }

        if(params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL "
                                           "when Peephole is enabled.");
        }
        if(params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL "
                                           "when Peephole is enabled.");
        }

        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }

    //Lstm Layer Normalization params
    if(descriptor.m_LayerNormEnabled)
    {
        if(!descriptor.m_CifgEnabled)
        {
            if(params.m_InputLayerNormWeights == nullptr)
            {
                throw InvalidArgumentException("AddLstmLayer: Input layer normalization weights cannot be NULL "
                                               "when layer normalization is enabled and CIFG disabled.");
            }
            layer->m_LayerNormParameters.m_InputLayerNormWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
        }

        if(params.m_ForgetLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Forget layer normalization weights cannot be NULL "
                                           "when layer normalization is enabled.");
        }
        if(params.m_CellLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell layer normalization weights cannot be NULL "
                                           "when layer normalization is enabled.");
        }
        if(params.m_OutputLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Output layer normalization weights cannot be NULL "
                                           "when layer normalization is enabled.");
        }
        layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
        layer->m_LayerNormParameters.m_CellLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
        layer->m_LayerNormParameters.m_OutputLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
    }
    return layer;
}

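// Sketch of supplying LstmInputParams to AddLstmLayer above (the descriptor
// and ConstTensor objects are assumed to exist): the struct holds non-owning
// ConstTensor pointers and each referenced tensor is copied, so the
// ConstTensors only need to stay alive for the duration of the call.
//
//     armnn::LstmInputParams params;
//     params.m_InputToForgetWeights     = &inputToForgetWeights;
//     params.m_InputToCellWeights       = &inputToCellWeights;
//     params.m_InputToOutputWeights     = &inputToOutputWeights;
//     params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
//     params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
//     params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
//     params.m_ForgetGateBias           = &forgetGateBias;
//     params.m_CellBias                 = &cellBias;
//     params.m_OutputGateBias           = &outputGateBias;
//     network.AddLstmLayer(lstmDesc, params, "lstm");
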
IConnectableLayer* Network::AddDivisionLayer(const char* name)
{
    return m_Graph->AddLayer<DivisionLayer>(name);
}

IConnectableLayer* Network::AddSubtractionLayer(const char* name)
{
    return m_Graph->AddLayer<SubtractionLayer>(name);
}

IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
{
    return m_Graph->AddLayer<MeanLayer>(meanDescriptor, name);
}

IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
{
    return m_Graph->AddLayer<PadLayer>(padDescriptor, name);
}

IConnectableLayer* Network::AddQuantizeLayer(const char* name)
{
    return m_Graph->AddLayer<QuantizeLayer>(name);
}

IConnectableLayer* Network::AddDequantizeLayer(const char* name)
{
    return m_Graph->AddLayer<DequantizeLayer>(name);
}

IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}

IConnectableLayer* Network::AddGreaterLayer(const char* name)
{
    return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
}

IConnectableLayer* Network::AddEqualLayer(const char* name)
{
    return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
}

IConnectableLayer* Network::AddRsqrtLayer(const char* name)
{
    return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
}

IConnectableLayer* Network::AddGatherLayer(const char* name)
{
    GatherDescriptor gatherDescriptor{};
    return AddGatherLayer(gatherDescriptor, name);
}

IConnectableLayer* Network::AddGatherLayer(const GatherDescriptor& gatherDescriptor,
                                           const char* name)
{
    return m_Graph->AddLayer<GatherLayer>(gatherDescriptor, name);
}

IConnectableLayer* Network::AddMergeLayer(const char* name)
{
    return m_Graph->AddLayer<MergeLayer>(name);
}

IConnectableLayer* Network::AddSwitchLayer(const char* name)
{
    return m_Graph->AddLayer<SwitchLayer>(name);
}

IConnectableLayer* Network::AddPreluLayer(const char* name)
{
    return m_Graph->AddLayer<PreluLayer>(name);
}

IConnectableLayer* Network::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
                                                           const ConstTensor& weights,
                                                           const Optional<ConstTensor>& biases,
                                                           const char* name)
{
    if (descriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddTransposeConvolution2dLayer: Biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<TransposeConvolution2dLayer>(descriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (descriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<TransposeLayer>(transposeDescriptor, name);
}

IConnectableLayer* Network::AddStackLayer(const StackDescriptor& stackDescriptor,
                                          const char* name)
{
    return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
}

IConnectableLayer* Network::AddStandInLayer(const StandInDescriptor& desc,
                                            const char* name)
{
    return m_Graph->AddLayer<StandInLayer>(desc, name);
}

IConnectableLayer* Network::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
                                                  const char* name)
{
    const auto layer = m_Graph->AddLayer<QuantizedLstmLayer>(name);

    // InputToX weights
    layer->m_QuantizedLstmParameters.m_InputToInputWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputToInputWeights());
    layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputToForgetWeights());
    layer->m_QuantizedLstmParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputToCellWeights());
    layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputToOutputWeights());

    // RecurrentToX weights
    layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToInputWeights());
    layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToForgetWeights());
    layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToCellWeights());
    layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToOutputWeights());

    // Bias
    layer->m_QuantizedLstmParameters.m_InputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputGateBias());
    layer->m_QuantizedLstmParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(params.GetForgetGateBias());
    layer->m_QuantizedLstmParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(params.GetCellBias());
    layer->m_QuantizedLstmParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(params.GetOutputGateBias());

    return layer;
}

IConnectableLayer* Network::AddQLstmLayer(const QLstmDescriptor& descriptor,
                                          const LstmInputParams& params,
                                          const char* name)
{
    const auto layer = m_Graph->AddLayer<QLstmLayer>(descriptor, name);

    // QLstm Basic Parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    // QLstm Cifg parameters
    if(!descriptor.m_CifgEnabled)
    {
        if(params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Input To Input Weights cannot be NULL");
        }

        if(params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                "AddQLstmLayer: Recurrent To Input Weights cannot be NULL");
        }

        if(params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Input Gate Bias cannot be NULL");
        }

        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    // QLstm Projection parameters
    if(descriptor.m_ProjectionEnabled)
    {
        if(params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Projection Weights cannot be NULL");
        }

        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));

        // Projection bias is optional even if projection is enabled, so the
        // bias pointer itself must be checked before it is dereferenced.
        if(params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    // QLstm Peephole params
    if(descriptor.m_PeepholeEnabled)
    {
        if(params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Cell To Forget Weights cannot be NULL");
        }

        if(params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Cell To Output Weights cannot be NULL");
        }

        if(!descriptor.m_CifgEnabled)
        {
            if(params.m_CellToInputWeights == nullptr)
            {
                throw InvalidArgumentException("AddQLstmLayer: Cell To Input Weights cannot be NULL");
            }

            layer->m_PeepholeParameters.m_CellToInputWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }

        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }

    // QLstm Layer Normalization params
    if(descriptor.m_LayerNormEnabled)
    {
        if(params.m_ForgetLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Forget layer normalization weights cannot be NULL");
        }

        if(params.m_CellLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Cell layer normalization weights cannot be NULL");
        }

        if(params.m_OutputLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Output layer normalization weights cannot be NULL");
        }

        if(!descriptor.m_CifgEnabled)
        {
            if(params.m_InputLayerNormWeights == nullptr)
            {
                throw InvalidArgumentException("AddQLstmLayer: Input layer normalization weights cannot be NULL");
            }

            layer->m_LayerNormParameters.m_InputLayerNormWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
        }

        layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
        layer->m_LayerNormParameters.m_CellLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
        layer->m_LayerNormParameters.m_OutputLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
    }
    return layer;
}

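// Sketch for AddQLstmLayer above (field values are assumptions, and the field
// names should be checked against the QLstmDescriptor definition in use):
// QLstm reuses LstmInputParams, while the descriptor additionally carries
// quantization parameters such as those for the hidden state.
//
//     armnn::QLstmDescriptor qDesc;
//     qDesc.m_CifgEnabled          = true;
//     qDesc.m_HiddenStateScale     = 0.007f;
//     qDesc.m_HiddenStateZeroPoint = 0;
//     network.AddQLstmLayer(qDesc, params, "qlstm");
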
void Network::Accept(ILayerVisitor& visitor) const
{
    for (auto layer : GetGraph())
    {
        layer->Accept(visitor);
    }
}

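// Sketch for Network::Accept above (LayerCounter is a hypothetical client
// class; this assumes LayerVisitorBase from armnn/LayerVisitorBase.hpp, which
// provides default no-op callbacks): a client overrides only the Visit*
// methods it cares about.
//
//     class LayerCounter : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
//     {
//     public:
//         void VisitInputLayer(const armnn::IConnectableLayer*,
//                              armnn::LayerBindingId,
//                              const char*) override { ++m_NumInputs; }
//         unsigned int m_NumInputs = 0;
//     };
//
//     LayerCounter counter;
//     network.Accept(counter);
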
OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
    : m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid())
{
}

OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions)
    : m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid()), m_ModelOptions(modelOptions)
{
}

OptimizedNetwork::~OptimizedNetwork()
{
}

} // namespace armnn