//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Network.hpp"
#include "Graph.hpp"
#include "Layer.hpp"
#include "DeviceSpec.hpp"
#include "Optimizer.hpp"
#include "SubgraphViewSelector.hpp"
#include "BackendSettings.hpp"
#include "optimizations/All.hpp"

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/TensorHandleFactoryRegistry.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <ProfilingService.hpp>

#include <fcntl.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <vector>

namespace armnn
{

armnn::INetwork* INetwork::CreateRaw(NetworkOptions networkOptions)
{
    return new Network(networkOptions);
}

armnn::INetworkPtr INetwork::Create(NetworkOptions networkOptions)
{
    return INetworkPtr(CreateRaw(networkOptions), &INetwork::Destroy);
}

void INetwork::Destroy(INetwork* network)
{
    delete PolymorphicDowncast<Network*>(network);
}

void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
{
    delete PolymorphicDowncast<OptimizedNetwork*>(network);
}

Status OptimizedNetwork::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
{
    return m_Graph->SerializeToDot(stream);
}

void ReportError(const std::string& errorMessage,
                 Optional<std::vector<std::string>&> errorMessages)
{
    std::stringstream fullErrorMessage;
    fullErrorMessage << "ERROR: " << errorMessage;
    ARMNN_LOG(warning) << fullErrorMessage.str();
    if (errorMessages)
    {
        errorMessages.value().push_back(fullErrorMessage.str());
    }
}

void ReportWarning(const std::string& warningMessage,
                   Optional<std::vector<std::string>&> warningMessages)
{
    std::stringstream fullWarningMessage;
    fullWarningMessage << "WARNING: " << warningMessage;
    ARMNN_LOG(warning) << fullWarningMessage.str();
    if (warningMessages)
    {
        warningMessages.value().push_back(fullWarningMessage.str());
    }
}

OptimizationResult ReturnWithError(OptimizationResult res,
                                   const Layer* layer,
                                   const BackendSettings& backendSettings,
                                   Optional<std::vector<std::string>&> errMessages)
{
    std::stringstream failureMsg;
    failureMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
               << " is not supported on any preferred backend " << backendSettings.m_PreferredBackends;
    ReportError(failureMsg.str(), errMessages);

    res.m_Error = true;
    return res;
}

bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
{
    bool noErrors = true;
    unsigned int numOutputs = layer->GetNumOutputSlots();
    for (unsigned int i = 0; i < numOutputs; i++) {
        OutputSlot& outputSlot = layer->GetOutputSlot(i);
        TensorInfo info = outputSlot.GetTensorInfo();
        if (DataType::QAsymmU8 == info.GetDataType()) {
            if (0.f == info.GetQuantizationScale()) {
                noErrors = false;
                std::stringstream ss;
                ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
                   << " (" << layer->GetNameStr() << ") is of type"
                   << " Quantized 8 bit but its scale parameter has not been set";
                ReportError(ss.str(), errMessages);
            }
            // Softmax under QuantisedAsymm8 must always be scale (1.0f/256.0f) and offset 0
            if ((info.GetQuantizationScale() != (1.0f / 256.0f) ||
                 info.GetQuantizationOffset() != 0) &&
                 layer->GetType() == armnn::LayerType::Softmax)
            {
                std::stringstream ss;
                ss << "Quantization parameters for Softmax layer (Scale: " <<
                      info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
                      ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
                ARMNN_LOG(warning) << ss.str();
                info.SetQuantizationScale((1.0f / 256.0f));
                info.SetQuantizationOffset(0);
                outputSlot.SetTensorInfo(info);
            }
        }
    }
    return noErrors;
}

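// Helper used when lowering BFloat16 networks: converts the BFloat16 weights of a Convolution2d or
// FullyConnected layer to Float32, so the layer can execute in Float32 once a Bf16 -> Fp32
// conversion layer has been inserted in front of it.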
template <typename LayerT>
LayerT* ConvertBf16ToFp32Weight(Layer* l)
{
    LayerT* layer = PolymorphicDowncast<LayerT*>(l);
    if ((layer->GetType() == LayerType::Convolution2d || layer->GetType() == LayerType::FullyConnected)
         && layer->m_Weight)
    {
        const TensorInfo& info = layer->m_Weight->GetTensorInfo();

        if (info.GetDataType() == DataType::BFloat16)
        {
            std::vector<float> newValues(info.GetNumElements());

            armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(
                layer->m_Weight->template GetTensor<armnn::BFloat16>(), info.GetNumElements(), newValues.data());

            TensorInfo newInfo(info.GetShape(), DataType::Float32);
            ConstTensor newInput(newInfo, newValues);
            layer->m_Weight.reset(new ScopedCpuTensorHandle(newInput));
        }
    }
    return layer;
}

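// Tries to assign the given backend to a single layer. If the layer is unsupported with its current
// input/output data types but is supported in Float32, the appropriate Fp16 <-> Fp32 (or Bf16 <-> Fp32)
// conversion layers are inserted around it and are themselves assigned a supported backend. The result
// is either success, a warning (the caller should try the next preferred backend) or an error.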
OptimizationResult AttemptBackendAssignment(BackendSettings& backendSettings,
                                            Graph& graph,
                                            Layer* layer,
                                            BackendId backend,
                                            DataType dataTypeIn,
                                            DataType dataTypeOut,
                                            const std::vector<BackendId>& availablePreferredBackends,
                                            std::string& reasonIfUnsupported,
                                            Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose meaningful error message before returning with error
    auto ReturnError = [&](const Layer* layer)
        {
            return ReturnWithError(result, layer, backendSettings, errMessages);
        };

    // need to set the compute device on the layer
    // before we can check if it is supported
    layer->SetBackendId(backend);
    if (!IWorkloadFactory::IsLayerSupported(*layer, EmptyOptional(), reasonIfUnsupported))
    {
        if (dataTypeIn == DataType::Float16 || dataTypeOut == DataType::Float16)
        {
            if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                && layer->GetType() != LayerType::ConvertFp32ToFp16
                && layer->GetType() != LayerType::ConvertFp16ToFp32)
            {
                // Insert FP16 -> FP32 conversion layer before current layer
                std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers;
                if (dataTypeIn == DataType::Float16)
                {
                    convertFp16ToFp32Layers =
                        InsertConvertFp16ToFp32LayersBefore(graph, *layer);
                }

                // Insert FP32 -> FP16 conversion layer after current layer
                std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers;
                if (dataTypeOut == DataType::Float16)
                {
                    convertFp32ToFp16Layers =
                        InsertConvertFp32ToFp16LayersAfter(graph, *layer);
                }

                // Assign a supported backend to the newly introduced conversion layers
                auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                    {
                        bool supportedBackendFound = false;
                        std::string reasonIfUnsupported;

                        // Try preferred backend first
                        layer->SetBackendId(preferredBackend);
                        if (IWorkloadFactory::IsLayerSupported(*layer,
                                                               EmptyOptional(),
                                                               reasonIfUnsupported))
                        {
                            supportedBackendFound = true;
                        }
                        else
                        {
                            for (const auto& backend : availablePreferredBackends)
                            {
                                // Skip preferred backend (we already determined that it is not supported)
                                if (backend == preferredBackend)
                                {
                                    continue;
                                }

                                layer->SetBackendId(backend);
                                if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                       EmptyOptional(),
                                                                       reasonIfUnsupported))
                                {
                                    supportedBackendFound = true;
                                    break;
                                }
                            }
                        }

                        return supportedBackendFound;
                    };

                for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                return result;
            }
        }
        else if (dataTypeIn == DataType::BFloat16 || dataTypeOut == DataType::BFloat16)
        {
            if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                && layer->GetType() != LayerType::ConvertFp32ToBf16
                && layer->GetType() != LayerType::ConvertBf16ToFp32)
            {
                // Insert BF16 -> FP32 conversion layer before current layer
                std::vector<ConvertBf16ToFp32Layer*> convertBf16ToFp32Layers;
                if (dataTypeIn == DataType::BFloat16)
                {
                    convertBf16ToFp32Layers =
                        InsertConvertBf16ToFp32LayersBefore(graph, *layer);
                    if (layer->GetType() == LayerType::Convolution2d)
                    {
                        ConvertBf16ToFp32Weight<Convolution2dLayer>(layer);
                    }
                    else if (layer->GetType() == LayerType::FullyConnected)
                    {
                        ConvertBf16ToFp32Weight<FullyConnectedLayer>(layer);
                    }
                }

                // Insert FP32 -> BF16 conversion layer after current layer
                std::vector<ConvertFp32ToBf16Layer*> convertFp32ToBf16Layers;
                if (dataTypeOut == DataType::BFloat16)
                {
                    convertFp32ToBf16Layers =
                        InsertConvertFp32ToBf16LayersAfter(graph, *layer);
                }

                // Assign a supported backend to the newly introduced conversion layers
                auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                    {
                        bool supportedBackendFound = false;
                        std::string reasonIfUnsupported;

                        // Try preferred backend first
                        layer->SetBackendId(preferredBackend);
                        if (IWorkloadFactory::IsLayerSupported(*layer,
                                                               EmptyOptional(),
                                                               reasonIfUnsupported))
                        {
                            supportedBackendFound = true;
                        }
                        else
                        {
                            for (const auto& backend : availablePreferredBackends)
                            {
                                // Skip preferred backend (we already determined that it is not supported)
                                if (backend == preferredBackend)
                                {
                                    continue;
                                }

                                layer->SetBackendId(backend);
                                if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                       EmptyOptional(),
                                                                       reasonIfUnsupported))
                                {
                                    supportedBackendFound = true;
                                    break;
                                }
                            }
                        }

                        return supportedBackendFound;
                    };

                for (ConvertBf16ToFp32Layer* convertLayer : convertBf16ToFp32Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                for (ConvertFp32ToBf16Layer* convertLayer : convertFp32ToBf16Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                return result;
            }
        }

        std::stringstream warningMsg;
        warningMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                   << " is not supported on requested backend " << layer->GetBackendId().Get()
                   << " for input data type " << GetDataTypeName(dataTypeIn)
                   << " and output data type " << GetDataTypeName(dataTypeOut)
                   << " (reason: " << reasonIfUnsupported
                   << "), falling back to the next backend.";
        ReportWarning(warningMsg.str(), errMessages);

        return OptimizationResult(true, false);
    }
    else
    {
        return result;
    }
}

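// Walks the layers in [firstLayer, lastLayer) and assigns each one a backend: the layer's backend
// hint is tried first, then the available preferred backends in order. MemCopy, Constant and Permute
// layers that no preferred backend accepts are placed on CpuRef as a last resort.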
OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  Graph::Iterator& firstLayer,
                                  Graph::Iterator& lastLayer,
                                  Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose meaningful error message before returning with error
    auto ReturnError = [&](const Layer* layer)
        {
            return ReturnWithError(result, layer, backendSettings, errMessages);
        };

    auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
    if (availablePreferredBackends.empty())
    {
        std::stringstream failureMsg;
        failureMsg << "No preferred backends are available";
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    }

    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        auto layer = *it;

        DataType dataTypeIn = layer->GetNumInputSlots() == 0 ? DataType::Float32 :
            layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType();
        DataType dataTypeOut = layer->GetNumOutputSlots() == 0 ? DataType::Float32 :
            layer->GetOutputSlot(0).GetTensorInfo().GetDataType();

        std::string reasonIfUnsupported;
        bool found = false;
        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
        {
            // don't bomb immediately, find all the quantized outputs
            // which haven't had a scale set and report them all back.
            result.m_Error = true;
        }

        // First try to assign the layer to its hinted backend
        if (layer->GetBackendHint().has_value() &&
            backendSettings.IsBackendSupported(layer->GetBackendHint().value()) &&
            AttemptBackendAssignment(backendSettings,
                                     optNetObjPtr->GetGraph(),
                                     layer,
                                     layer->GetBackendHint().value(),
                                     dataTypeIn,
                                     dataTypeOut,
                                     availablePreferredBackends,
                                     reasonIfUnsupported,
                                     errMessages).IsOk())
        {
            found = true;
            backendSettings.m_SelectedBackends.insert(layer->GetBackendHint().value());
        }
        else
        {
            // Try to assign the layer to the preferred list of backends
            for (const auto& backend : availablePreferredBackends)
            {
                if (layer->GetBackendHint().has_value() &&
                    layer->GetBackendHint().value() == backend)
                {
                    continue; // Don't re-test the backend hint
                }

                OptimizationResult res = AttemptBackendAssignment(backendSettings,
                                                                  optNetObjPtr->GetGraph(),
                                                                  layer,
                                                                  backend,
                                                                  dataTypeIn,
                                                                  dataTypeOut,
                                                                  availablePreferredBackends,
                                                                  reasonIfUnsupported,
                                                                  errMessages);

                if (res.IsOk())
                {
                    found = true;
                    backendSettings.m_SelectedBackends.insert(backend);
                    break;
                }
                else if (res.IsError())
                {
                    return res; // Cannot continue.
                                // Note: we don't need to log the error as it would already
                                // be logged in AttemptBackendAssignment().
                }
                else
                {
                    ARMNN_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
                }
            }
        }

        // If the layer is unsupported by any devices, log and return an error.
        if (!found)
        {
            // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
            // fallback we should set the compute device on the layer to CpuRef (these are not
            // available as accelerated operations, or are only available under certain
            // conditions, currently they comprise MemCopy, Constant, Permute)
            armnn::LayerType layerType = layer->GetType();
            if (!backendSettings.IsCpuRefUsed() && (layerType == armnn::LayerType::MemCopy ||
                                                    layerType == armnn::LayerType::Constant ||
                                                    layerType == armnn::LayerType::Permute))
            {
                BackendId cpuBackendId(armnn::Compute::CpuRef);
                layer->SetBackendId(cpuBackendId);
                backendSettings.m_SelectedBackends.insert(cpuBackendId);
            }
            else
            {
                return ReturnError(layer);
            }
        }
    }

    return result;
}

OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  SubgraphView& subgraph,
                                  Optional<std::vector<std::string>&> errMessages)
{
    Graph::Iterator firstLayer = subgraph.begin();
    Graph::Iterator lastLayer  = subgraph.end();
    return AssignBackends(optNetObjPtr,
                          backendSettings,
                          firstLayer,
                          lastLayer,
                          errMessages);
}

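// Creates a backend object for every supported backend via the BackendRegistry, registers each
// backend's tensor handle factories with the given registry and returns the objects keyed by id.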
BackendsMap CreateSupportedBackends(TensorHandleFactoryRegistry& handleFactoryRegistry,
                                    BackendSettings& backendSettings)
{
    BackendsMap backends;
    auto const& backendRegistry = BackendRegistryInstance();
    for (auto&& selectedBackend : backendSettings.m_SupportedBackends)
    {
        auto backendFactory = backendRegistry.GetFactory(selectedBackend);
        auto backendObjPtr = backendFactory();
        ARMNN_ASSERT(backendObjPtr);

        backendObjPtr->RegisterTensorHandleFactories(handleFactoryRegistry);

        backends[backendObjPtr->GetId()] = std::move(backendObjPtr);
    }

    return backends;
}

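// Runs the backend-specific optimizations: the layers assigned to each selected backend are grouped
// into sub-graphs, each sub-graph is offered to the backend's OptimizeSubgraphView() and accepted
// substitutions are spliced back into the main graph. Sub-graphs the backend rejects are re-assigned
// to the remaining backends.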
OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
                                             BackendSettings& backendSettings,
                                             BackendsMap& backends,
                                             const ModelOptions& modelOptions,
                                             Optional<std::vector<std::string>&> errMessages)
{
    ARMNN_ASSERT(optNetObjPtr);

    OptimizationResult result;

    // Get the optimized graph
    Graph& optGraph = optNetObjPtr->GetGraph();

    // Run backend specific optimizations
    for (auto&& selectedBackend : backendSettings.m_SelectedBackends)
    {
        auto backendObjPtr = backends.find(selectedBackend)->second.get();
        ARMNN_ASSERT(backendObjPtr);

        // Select sub-graphs based on backend
        SubgraphViewSelector::Subgraphs subgraphs =
            SubgraphViewSelector::SelectSubgraphs(optGraph,
                                                  // Select layers assigned to the requested backend
                                                  [&backendObjPtr](const Layer& layer)
                                                  {
                                                      return layer.GetType() != LayerType::Input &&
                                                             layer.GetType() != LayerType::Output &&
                                                             layer.GetBackendId() == backendObjPtr->GetId();
                                                  });
        if (subgraphs.empty())
        {
            // No sub-graphs found, try with next selected backend
            continue;
        }

        // Try to optimize each sub-graph
        for (auto& subgraph : subgraphs)
        {
            // Try to optimize the current sub-graph
            OptimizationViews optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraph, modelOptions);
            ARMNN_ASSERT(optimizationViews.Validate(*subgraph));

            // Optimization attempted, check the resulting optimized sub-graph
            for (auto& substitution : optimizationViews.GetSubstitutions())
            {
                // Sub-graph optimized, substitute the sub-graph with the new optimized one in the main optimized graph
                SubgraphView& replacementSubgraph   = substitution.m_ReplacementSubgraph;
                SubgraphView& substitutableSubgraph = substitution.m_SubstitutableSubgraph;
                optGraph.SubstituteSubgraph(substitutableSubgraph, replacementSubgraph);

                // Assign the current backend to the optimized sub-graph
                std::for_each(replacementSubgraph.begin(), replacementSubgraph.end(), [&selectedBackend](Layer* l)
                    {
                        ARMNN_ASSERT(l);
                        l->SetBackendId(selectedBackend);
                    });
            }

            if (!optimizationViews.GetFailedSubgraphs().empty())
            {
                std::stringstream warningMsg;
                warningMsg << "Some sub-graph(s) failed to optimize on " << backendObjPtr->GetId() << " backend.";
                ReportWarning(warningMsg.str(), errMessages);

                // Failed to optimize the given sub-graph, re-assign the sub-graph layers to other available backends
                BackendSettings settingsCopy(backendSettings);
                if (!backendObjPtr->GetId().IsCpuRef())
                {
                    // Add the current backend to the list of backends to ignore
                    settingsCopy.m_IgnoredBackends.insert(backendObjPtr->GetId());
                }

                int count = 0;
                for (auto& failedSubgraph : optimizationViews.GetFailedSubgraphs())
                {
                    // An error occurred: the optimization was attempted but not performed, try different backends
                    std::stringstream subgraphMsg;
                    subgraphMsg << "Re-assigning backends to " << failedSubgraph.GetLayers().size()
                                << " layers inside sub-graph " << count++;
                    ReportWarning(subgraphMsg.str(), errMessages);

                    OptimizationResult reassignmentResult = AssignBackends(optNetObjPtr,
                                                                           settingsCopy,
                                                                           *subgraph,
                                                                           errMessages);
                    if (reassignmentResult.m_Error)
                    {
                        // Failed to re-assign one of the remaining backends to each layer of the sub-graph
                        result.m_Error = true;
                        return result;
                    }
                }
            }
        }
    }

    return result;
}

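// Returns true when moving a tensor between the two handle factories needs a memory copy, i.e. the
// factories differ and the source's export flags do not overlap the destination's import flags.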
bool RequiresCopy(ITensorHandleFactory::FactoryId src,
                  ITensorHandleFactory::FactoryId dst,
                  TensorHandleFactoryRegistry& registry)
{
    if (src != dst)
    {
        ITensorHandleFactory* srcFactory = registry.GetFactory(src);
        ITensorHandleFactory* dstFactory = registry.GetFactory(dst);

        if (srcFactory && dstFactory &&
            (srcFactory->GetExportFlags() & dstFactory->GetImportFlags()) != 0)
        {
            return false;
        }
        return true;
    }
    return false;
}

// Find the handle factory for the input layer which results in fewest required copies.
ITensorHandleFactory::FactoryId CalculateSlotOptionForInput(BackendsMap& backends,
                                                            OutputSlot& slot,
                                                            TensorHandleFactoryRegistry& registry)
{
    Layer& layer = slot.GetOwningLayer();
    ARMNN_ASSERT(layer.GetType() == LayerType::Input);

    // Explicitly select the tensor handle factory for InputLayer because the rules for it are slightly different. It
    // doesn't matter which backend it is assigned to because they all use the same implementation, which
    // requires Map/Unmap support. This means that, so long as the handle type supports map/unmap semantics, we can
    // select a factory with maximum compatibility with the layers connected to the InputLayer.

    // First ensure the source backend supports the tensor handle API
    auto frmBackend = backends.find(layer.GetBackendId());
    if (frmBackend == backends.end() ||
        !frmBackend->second->SupportsTensorAllocatorAPI())
    {
        return ITensorHandleFactory::LegacyFactoryId;
    }

    // Go through all connections to the output slot and determine the TensorHandleFactory which results in the
    // fewest copies.
    std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
    int topScore = 0;
    ITensorHandleFactory::FactoryId topChoice = ITensorHandleFactory::LegacyFactoryId;

    for (auto&& connection : slot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();

        auto toBackend = backends.find(connectedLayer.GetBackendId());
        ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

        if (!toBackend->second.get()->SupportsTensorAllocatorAPI())
        {
            // The destination backend does not support the tensor allocator API, move to the next one
            continue;
        }

        auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
        for (auto&& dst : dstPrefs)
        {
            // Input layers use the mem copy workload or import, so the selected factory must
            // support either the map/unmap API or Import API
            ITensorHandleFactory* factory = registry.GetFactory(dst);
            if (!factory->SupportsMapUnmap() &&
                !CheckFlag(factory->GetImportFlags(), MemorySource::Malloc)) // Just support cpu mem imports for now
            {
                // The current tensor handle factory does not support the map/unmap or import
                // strategy, move to the next one
                continue;
            }

            auto it = factoryScores.find(dst);
            if (it == factoryScores.end())
            {
                // Add new score to the table
                factoryScores[dst] = 0;
                if (topChoice == ITensorHandleFactory::LegacyFactoryId)
                {
                    topChoice = dst;
                }
            }
            else
            {
                // Increase the score
                factoryScores[dst]++;

                // Track the best option
                if (factoryScores[dst] > topScore)
                {
                    topScore = factoryScores[dst];
                    topChoice = dst;
                }
            }
        }
    }

    return topChoice;
}

// Find the handle factory for the output layer which results in fewest required copies.
ITensorHandleFactory::FactoryId CalculateSlotOptionForOutput(BackendsMap& backends,
                                                             OutputSlot& slot,
                                                             TensorHandleFactoryRegistry& registry)
{
    IgnoreUnused(backends, slot, registry);
    return ITensorHandleFactory::DeferredFactoryId;
}

// For all handle factories supported on the source backend, we wish to find the one which requires the fewest copies
// when considering all connections.
ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
                                                    OutputSlot& outputSlot,
                                                    TensorHandleFactoryRegistry& registry)
{
    // First ensure the source backend supports the tensor handle API
    Layer& layer = outputSlot.GetOwningLayer();
    auto frmBackend = backends.find(layer.GetBackendId());
    if (frmBackend == backends.end() ||
        !frmBackend->second->SupportsTensorAllocatorAPI())
    {
        return ITensorHandleFactory::LegacyFactoryId;
    }

    // Connections to Output Layers require support for map/unmap on the TensorHandle.
    bool requiresMapUnmap = false;
    for (auto&& connection : outputSlot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();
        if (connectedLayer.GetType() == LayerType::Output)
        {
            requiresMapUnmap = true;
        }
    }

    IBackendInternal* srcBackend = frmBackend->second.get();
    auto srcPrefs = srcBackend->GetHandleFactoryPreferences();

    // Initialize the scores
    std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
    for (auto&& pref : srcPrefs)
    {
        if (requiresMapUnmap) // Only consider factories that support map/unmap if required
        {
            ITensorHandleFactory* factory = registry.GetFactory(pref);
            if (!factory->SupportsMapUnmap())
            {
                // The current tensor handle factory does not support the map/unmap strategy, move to the next one
                continue;
            }
        }

        auto it = factoryScores.find(pref);
        if (it == factoryScores.end())
        {
            // Add new score to the table
            factoryScores[pref] = 0;
        }
    }

    // Score each handle factory based on how many times it requires copies on the slot connections
    for (auto&& connection : outputSlot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();

        auto toBackend = backends.find(connectedLayer.GetBackendId());
        ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

        auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
        for (auto&& src : srcPrefs)
        {
            if (factoryScores.find(src) == factoryScores.end()) // Don't consider excluded factories
            {
                continue;
            }

            for (auto&& dst : dstPrefs)
            {
                if (RequiresCopy(src, dst, registry))
                {
                    // A copy is required for this connection, increase the score
                    factoryScores[src]++;
                    break;
                }
            }
        }
    }

    // Find the lowest score
    int minScore = std::numeric_limits<int>::max();
    for (auto it : factoryScores)
    {
        minScore = std::min(minScore, it.second);
    }

    // Collect factories matching the best (lowest) score
    std::vector<ITensorHandleFactory::FactoryId> optimalFactories;
    for (auto it : factoryScores)
    {
        if (it.second == minScore)
        {
            optimalFactories.push_back(it.first);
        }
    }

    // For all compatible Factories matching the best score, find the preferred one for the current layer.
    for (auto&& srcPref : srcPrefs)
    {
        for (auto&& comp : optimalFactories)
        {
            if (comp == srcPref)
            {
                return comp;
            }
        }
    }

    return ITensorHandleFactory::LegacyFactoryId;
}

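// Decides how a tensor travels over the edge between 'layer' and 'connectedLayer': direct
// compatibility (matching factories, the legacy handle path on the same backend, or a destination
// Output layer), export/import when the flags are compatible, import is enabled and neither side
// requires padding, or a map/unmap based copy as the fallback.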
EdgeStrategy CalculateEdgeStrategy(BackendsMap& backends,
                                   ITensorHandleFactory::FactoryId srcFactoryId,
                                   const Layer& layer,
                                   const Layer& connectedLayer,
                                   TensorHandleFactoryRegistry& registry,
                                   bool importEnabled)
{
    auto toBackend = backends.find(connectedLayer.GetBackendId());
    ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

    auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();

    // Legacy API check for backward compatibility
    if (srcFactoryId == ITensorHandleFactory::LegacyFactoryId || dstPrefs.empty())
    {
        if (layer.GetBackendId() != connectedLayer.GetBackendId())
        {
            return EdgeStrategy::CopyToTarget;
        }
        else
        {
            return EdgeStrategy::DirectCompatibility;
        }
    }

    // TensorHandleFactory API present, so perform more sophisticated strategies.
    // Dst Output layers don't require copy because they use import or map/unmap
    if (connectedLayer.GetType() == LayerType::Output)
    {
        return EdgeStrategy::DirectCompatibility;
    }

    // Search for direct match in prefs
    for (auto&& pref : dstPrefs)
    {
        if (pref == srcFactoryId)
        {
            return EdgeStrategy::DirectCompatibility;
        }
    }

    // Search for export/import options
    ITensorHandleFactory* srcFactory = registry.GetFactory(srcFactoryId);
    if (srcFactory->GetExportFlags() != 0 && importEnabled)
    {
        for (auto&& pref : dstPrefs)
        {
            ITensorHandleFactory* dstFactory = registry.GetFactory(pref);

            // Handles cases when a destPref is not listed in TensorHandleFactoryRegistry
            if (!dstFactory) {
                continue;
            }

            if ((dstFactory->GetImportFlags() & srcFactory->GetExportFlags()) != 0)
            {
                auto srcCapability = srcFactory->GetCapabilities(&layer, &layer, CapabilityClass::PaddingRequired);
                auto dstCapability = dstFactory->GetCapabilities(&connectedLayer,
                                                                 &connectedLayer,
                                                                 CapabilityClass::PaddingRequired);
                // Do not require memory copy if the source and destination do not require padding.
                if (srcCapability.empty() && dstCapability.empty())
                {
                    return EdgeStrategy::ExportToTarget;
                }
            }
        }
    }

    // Search for copy options via map/unmap
    if (srcFactory->SupportsMapUnmap())
    {
        for (auto&& pref : dstPrefs)
        {
            ITensorHandleFactory* dstFactory = registry.GetFactory(pref);
            if (dstFactory && dstFactory->SupportsMapUnmap())
            {
                return EdgeStrategy::CopyToTarget;
            }
        }
    }

    return EdgeStrategy::Undefined;
}

// Select the TensorHandleFactories and the corresponding memory strategy
OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
                                              BackendsMap& backends,
                                              TensorHandleFactoryRegistry& registry,
                                              bool importEnabled,
                                              Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled](Layer* layer)
    {
        ARMNN_ASSERT(layer);

        // Let's make sure the backend is in our list of supported backends. Something went wrong during backend
        // assignment if this check fails
        ARMNN_ASSERT(backends.find(layer->GetBackendId()) != backends.end());

        // Check each output separately
        for (unsigned int slotIdx = 0; slotIdx < layer->GetNumOutputSlots(); slotIdx++)
        {
            OutputSlot& outputSlot = layer->GetOutputSlot(slotIdx);

            ITensorHandleFactory::FactoryId slotOption = ITensorHandleFactory::LegacyFactoryId;

            // Calculate the factory to use which results in the fewest copies being made.
            switch(layer->GetType())
            {
                case LayerType::Input:
                    slotOption = CalculateSlotOptionForInput(backends, outputSlot, registry);
                    break;
                case LayerType::Output:
                    slotOption = CalculateSlotOptionForOutput(backends, outputSlot, registry);
                    break;
                default:
                    slotOption = CalculateSlotOption(backends, outputSlot, registry);
                    break;
            }
            outputSlot.SetTensorHandleFactory(slotOption);

            // Now determine the "best" edge strategy for each connection given the slotOption.
            unsigned int connectionIdx = 0;
            for (auto&& connection : outputSlot.GetConnections())
            {
                const Layer& connectedLayer = connection->GetOwningLayer();

                EdgeStrategy strategy = CalculateEdgeStrategy(backends, slotOption, *layer, connectedLayer,
                                                              registry, importEnabled);

                if (strategy == EdgeStrategy::Undefined)
                {
                    result.m_Error = true;
                    if (errMessages)
                    {
                        errMessages.value().emplace_back("Could not find valid strategy required for compatibility"
                                                         " between backends.");
                    }
                    return;
                }

                outputSlot.SetEdgeStrategy(connectionIdx, strategy);

                connectionIdx++;
            }
        }
    });

    return result;
}

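// Entry point for network optimization. Applies the graph-level optimization passes, assigns a
// backend to every layer, runs the backend-specific optimizations, then selects tensor handle
// factories and edge strategies and inserts the compatibility (copy) layers this requires.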
IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                              const std::vector<BackendId>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options,
                              Optional<std::vector<std::string>&> messages)
{
    if (backendPreferences.empty())
    {
        throw InvalidArgumentException("Invoked Optimize with no backends specified");
    }

    if (options.m_ReduceFp32ToFp16 && options.m_ReduceFp32ToBf16)
    {
        throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
    }

    const Network& network = *PolymorphicDowncast<const Network*>(&inNetwork);
    std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());

    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph), options.m_ModelOptions),
                                       &IOptimizedNetwork::Destroy);

    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());

    // Get the optimized graph
    Graph& optGraph = optNetObjPtr->GetGraph();

    // Perform AddBroadcastReshapeLayer optimisation
    using namespace optimizations;
    Optimizer::Pass(optGraph, MakeOptimizations(AddBroadcastReshapeLayer()));

    // Infer the tensor infos for all output slots. Throws an exception on failure
    optGraph.InferTensorInfos();

    // Perform optimisation passes
    Optimizer::Pass(optGraph, MakeOptimizations(SquashEqualPermuteSiblings(),
                                                SquashEqualTransposeSiblings(),
                                                SquashEqualReshapeSiblings(),
                                                OptimizeInversePermutes(),
                                                OptimizeInverseTransposes(),
                                                MovePermuteUp(),
                                                MoveTransposeUp(),
                                                PermuteAsReshape(),
                                                TransposeAsReshape(),
                                                OptimizeConsecutiveReshapes(),
                                                FoldPadIntoConvolution2d(),
                                                PermuteAndBatchToSpaceAsDepthToSpace(),
                                                TransposeAndBatchToSpaceAsDepthToSpace(),
                                                FuseBatchNormIntoConvolution2DFloat32(),
                                                FuseBatchNormIntoConvolution2DFloat16(),
                                                FuseBatchNormIntoDepthwiseConvolution2DFloat32(),
                                                FuseBatchNormIntoDepthwiseConvolution2DFloat16()));

    // If Fp32 to Fp16 optimization is set convert Fp32 network to Fp16
    if (options.m_ReduceFp32ToFp16)
    {
        Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToFp16Converter()));
        Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
    }

    // If Fp32 to Bf16 optimization is set convert Fp32 network to Bf16
    // Convert input of Convolution2d and FullyConnected from Fp32 to Bf16
    // Only Constant weight of Convolution2d and FullyConnected are converted from Fp32 to Bf16
    if (options.m_ReduceFp32ToBf16)
    {
        Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToBf16Converter()));
    }

    // Initialize backend settings
    BackendSettings backendSettings(backendPreferences, deviceSpec);
    if (backendSettings.GetAvailablePreferredBackends().empty())
    {
        std::stringstream failureMsg;
        failureMsg << "None of the preferred backends " << backendPreferences
                   << " are supported. Current platform provides " << backendSettings.m_SupportedBackends;
        ReportError(failureMsg.str(), messages);
        throw InvalidArgumentException(failureMsg.str());
    }

    // Create a map to temporarily hold initialized backend objects
    TensorHandleFactoryRegistry tensorHandleFactoryRegistry;
    BackendsMap backends = CreateSupportedBackends(tensorHandleFactoryRegistry, backendSettings);

    // Assign an available backend to each layer
    Graph::Iterator firstLayer = optGraph.begin();
    Graph::Iterator lastLayer  = optGraph.end();
    OptimizationResult assignBackendsResult = AssignBackends(optNetObjPtr,
                                                             backendSettings,
                                                             firstLayer,
                                                             lastLayer,
                                                             messages);
    if (assignBackendsResult.m_Error)
    {
        // Failed to assign a backend to each layer
        throw InvalidArgumentException("Failed to assign a backend to each layer");
    }

    Optimizer::Pass(optGraph, MakeOptimizations(OptimizeInverseConversionsFp16(),
                                                OptimizeInverseConversionsFp32()));

    // Apply the backend-specific optimizations
    OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(optNetObjPtr,
                                                                             backendSettings,
                                                                             backends,
                                                                             options.m_ModelOptions,
                                                                             messages);
    if (backendOptimizationResult.m_Error)
    {
        // Failed to apply the backend-specific optimizations
        throw InvalidArgumentException("Failed to apply the backend-specific optimizations");
    }

    // If the debug flag is set, then insert a DebugLayer after each layer
    // Doing this after applying the backend optimizations as they might have changed some layers
    if (options.m_Debug)
    {
        Optimizer::Pass(optGraph, MakeOptimizations(InsertDebugLayer()));
    }

    // Calculate the compatibility strategies for tensor handles
    OptimizationResult strategyResult = SelectTensorHandleStrategy(optGraph,
                                                                   backends,
                                                                   tensorHandleFactoryRegistry,
                                                                   options.m_ImportEnabled,
                                                                   messages);
    if (strategyResult.m_Error)
    {
        // Failed to select the tensor handle strategies
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // Based on the tensor handle strategy determined above, insert copy layers where required.
    optGraph.AddCompatibilityLayers(backends, tensorHandleFactoryRegistry);

    // Convert constants
    Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
    Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsHalfToFloat()));

    // Run backend specific optimizations (deprecated)
    for (auto&& chosenBackend : backendSettings.m_SelectedBackends)
    {
        auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
        auto backendPtr = factoryFun();
        ARMNN_ASSERT(backendPtr.get() != nullptr);

        ARMNN_NO_DEPRECATE_WARN_BEGIN
        auto backendSpecificOptimizations = backendPtr->GetOptimizations();
        ARMNN_NO_DEPRECATE_WARN_END

        if (!backendSpecificOptimizations.empty())
        {
            Optimizer::Pass(optNetObjPtr->GetGraph(), backendSpecificOptimizations);
        }
    }

    return optNet;
}
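
// Returns true only when the first network option is a "ShapeInferenceMethod" option whose first
// value evaluates to true; otherwise returns false.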
Finn Williamsf24effa2020-07-03 10:12:03 +01001168bool Network::GetShapeInferenceMethod()
telsoa014fcda012018-03-09 14:13:49 +00001169{
Finn Williamsf24effa2020-07-03 10:12:03 +01001170 if (m_NetworkOptions.size() > 0 && m_NetworkOptions[0].GetBackendId().Get() == "ShapeInferenceMethod")
1171 {
1172 return m_NetworkOptions[0].GetOption(0).GetValue().AsBool();
1173 }
1174
1175 return false;
telsoa014fcda012018-03-09 14:13:49 +00001176}
Finn Williamsf24effa2020-07-03 10:12:03 +01001177Network::Network(NetworkOptions networkOptions)
1178: m_NetworkOptions(networkOptions),
1179 m_Graph(std::make_unique<Graph>(GetShapeInferenceMethod()))
1180{}
telsoa014fcda012018-03-09 14:13:49 +00001181
1182Network::~Network()
1183{
1184}
1185
Jan Eilers99d9d4a2019-11-06 10:02:16 +00001186Status Network::PrintGraph()
1187{
1188 m_Graph->Print();
1189 return Status::Success;
1190}
1191
telsoa014fcda012018-03-09 14:13:49 +00001192IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
1193{
1194 return m_Graph->AddLayer<InputLayer>(id, name);
1195}
1196
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00001197IConnectableLayer* Network::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
1198 const char* name)
1199{
1200 return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
1201}
1202
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001203IConnectableLayer* Network::AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
1204 const char* name)
1205{
1206 return m_Graph->AddLayer<ComparisonLayer>(comparisonDescriptor, name);
1207}
1208
josh minor4a3c6102020-01-06 16:40:46 -06001209IConnectableLayer* Network::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
1210 const char* name)
1211{
1212 return m_Graph->AddLayer<ElementwiseUnaryLayer>(elementwiseUnaryDescriptor, name);
1213}
1214
Ryan OSheaec6c6802020-06-05 17:17:06 +01001215IConnectableLayer* Network::AddFillLayer(const FillDescriptor& fillDescriptor,
1216 const char* name)
1217{
1218 return m_Graph->AddLayer<FillLayer>(fillDescriptor, name);
1219}
1220
IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                       const ConstTensor& weights,
                                                       const Optional<ConstTensor>& biases,
                                                       const char* name)
{
    if (fullyConnectedDescriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (fullyConnectedDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const Optional<ConstTensor>& biases,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const char* name)
{
    Optional<ConstTensor> biases;
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const ConstTensor& biases,
                                                   const char* name)
{
    Optional<ConstTensor> optionalBiases(biases);
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, optionalBiases, name);
}

IConnectableLayer* Network::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
                                           const char* name)
{
    return m_Graph->AddLayer<ConcatLayer>(concatDescriptor, name);
}
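// Shared implementation for the AddConvolution2dLayer() overloads below; as with the fully
// connected layer, a bias is required exactly when m_BiasEnabled is set in the descriptor.
//
// Illustrative sketch only ('network' and 'weights' are assumed to come from the caller):
//
//     Convolution2dDescriptor convDesc;
//     convDesc.m_BiasEnabled = false;   // use the no-bias overload below
//     IConnectableLayer* conv = network->AddConvolution2dLayer(convDesc, weights, "conv1");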
IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
                                                      const ConstTensor& weights,
                                                      const Optional<ConstTensor>& biases,
                                                      const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const Optional<ConstTensor>& biases,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const char* name)
{
    Optional<ConstTensor> biases;
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const ConstTensor& biases,
                                                  const char* name)
{
    Optional<ConstTensor> optionalBiases(biases);
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
}
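// Shared implementation for the AddDepthwiseConvolution2dLayer() overloads below: it validates
// the optional bias against the descriptor and stores weights/bias as constant tensor handles,
// mirroring the regular convolution path above.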
IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const Optional<ConstTensor>& biases,
    const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<DepthToSpaceLayer>(depthToSpaceDescriptor, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const Optional<ConstTensor>& biases,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const char* name)
{
    Optional<ConstTensor> biases;
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor& biases,
    const char* name)
{
    Optional<ConstTensor> optionalBiases(biases);
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
}

IConnectableLayer* Network::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
                                                         const ConstTensor& anchors, const char* name)
{
    const auto layer = m_Graph->AddLayer<DetectionPostProcessLayer>(descriptor, name);

    layer->m_Anchors = std::make_unique<ScopedCpuTensorHandle>(anchors);

    return layer;
}

IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
}

IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
}

IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
                                               const char* name)
{
    return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
}

IConnectableLayer* Network::AddArgMinMaxLayer(const ArgMinMaxDescriptor& argMinMaxDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<ArgMinMaxLayer>(argMinMaxDescriptor, name);
}

IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
                                                  const char* name)
{
    return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
}

IConnectableLayer* Network::AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name)
{
    return m_Graph->AddLayer<SliceLayer>(sliceDescriptor, name);
}

IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
}

IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
                                             const char* name)
{
    return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
}

IConnectableLayer* Network::AddMaximumLayer(const char* name)
{
    return m_Graph->AddLayer<MaximumLayer>(name);
}

IConnectableLayer* Network::AddMinimumLayer(const char* name)
{
    return m_Graph->AddLayer<MinimumLayer>(name);
}

IConnectableLayer* Network::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
                                           const char* name)
{
    return AddConcatLayer(mergerDescriptor, name);
}
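// AddAbsLayer() (and AddRsqrtLayer()/AddGreaterLayer()/AddEqualLayer() further down) are kept
// for backwards compatibility; they simply forward to the generic ElementwiseUnary and
// Comparison layers with the appropriate operation set in the descriptor.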
IConnectableLayer* Network::AddAbsLayer(const char* name)
{
    return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
}

IConnectableLayer* Network::AddAdditionLayer(const char* name)
{
    return m_Graph->AddLayer<AdditionLayer>(name);
}

IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
{
    return m_Graph->AddLayer<MultiplicationLayer>(name);
}

IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<OutputLayer>(id, name);
}
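// Batch normalization takes its four statistics tensors (mean, variance, beta, gamma) as
// constants; each is copied into a ScopedCpuTensorHandle owned by the layer.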
IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                       const ConstTensor& mean,
                                                       const ConstTensor& variance,
                                                       const ConstTensor& beta,
                                                       const ConstTensor& gamma,
                                                       const char* name)
{
    const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);

    layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
    layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
    layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
    layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);

    return layer;
}

IConnectableLayer* Network::AddRankLayer(const char* name)
{
    return m_Graph->AddLayer<RankLayer>(name);
}

IConnectableLayer* Network::AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
                                           const char* name)
{
    return m_Graph->AddLayer<ReduceLayer>(reduceDescriptor, name);
}
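// AddResizeBilinearLayer() is a convenience wrapper: it translates the old
// ResizeBilinearDescriptor into the generic ResizeDescriptor (with ResizeMethod::Bilinear)
// and adds a ResizeLayer, so both entry points produce the same layer type in the graph.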
IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
                                                   const char* name)
{
    ResizeDescriptor resizeDescriptor;
    resizeDescriptor.m_Method = ResizeMethod::Bilinear;
    resizeDescriptor.m_DataLayout = descriptor.m_DataLayout;
    resizeDescriptor.m_TargetWidth = descriptor.m_TargetWidth;
    resizeDescriptor.m_TargetHeight = descriptor.m_TargetHeight;
    resizeDescriptor.m_AlignCorners = descriptor.m_AlignCorners;
    resizeDescriptor.m_HalfPixelCenters = descriptor.m_HalfPixelCenters;

    return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
}

IConnectableLayer* Network::AddResizeLayer(const ResizeDescriptor& resizeDescriptor, const char* name)
{
    return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
}

IConnectableLayer* Network::AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
                                                          const char* name)
{
    return m_Graph->AddLayer<InstanceNormalizationLayer>(desc, name);
}

IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                    const char* name)
{
    return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
}

IConnectableLayer* Network::AddLogSoftmaxLayer(const LogSoftmaxDescriptor& desc,
                                               const char* name)
{
    return m_Graph->AddLayer<LogSoftmaxLayer>(desc, name);
}

IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
{
    auto layer = m_Graph->AddLayer<ConstantLayer>(name);

    layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);

    return layer;
}

IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
}

IConnectableLayer* Network::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
}

IConnectableLayer* Network::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<SpaceToDepthLayer>(spaceToDepthDescriptor, name);
}

IConnectableLayer* Network::AddFloorLayer(const char* name)
{
    return m_Graph->AddLayer<FloorLayer>(name);
}
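// AddLstmLayer() copies the LstmInputParams weight/bias tensors onto the layer. The nine basic
// parameters are always required; the CIFG, projection, peephole and layer-normalization
// parameter groups are validated only when the corresponding descriptor flag enables them,
// with a descriptive InvalidArgumentException thrown for any missing tensor.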
IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor& descriptor,
                                         const LstmInputParams& params,
                                         const char* name)
{
    const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);

    //Lstm Basic Parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    //Lstm Cifg parameters
    if(!descriptor.m_CifgEnabled)
    {
        if(params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL "
                                           "when CIFG is disabled.");
        }
        if(params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                "AddLstmLayer: Recurrent To Input Weights cannot be NULL "
                "when CIFG is disabled.");
        }
        if(params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL "
                                           "when CIFG is disabled.");
        }
        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    //Lstm projection parameters
    if(descriptor.m_ProjectionEnabled)
    {
        if(params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL "
                                           "when projection is enabled.");
        }
        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
        if(params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    //Lstm Peephole params
    if(descriptor.m_PeepholeEnabled)
    {
        if(!descriptor.m_CifgEnabled)
        {
            if(params.m_CellToInputWeights == nullptr)
            {
                throw InvalidArgumentException("AddLstmLayer: Cell To Input Weights cannot be NULL "
                                               "when Peephole is enabled and CIFG disabled.");
            }

            layer->m_PeepholeParameters.m_CellToInputWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }

        if(params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL "
                                           "when Peephole is enabled.");
        }
        if(params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL "
                                           "when Peephole is enabled.");
        }

        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }

    //Lstm Layer Normalization params
    if(descriptor.m_LayerNormEnabled)
    {
        if(!descriptor.m_CifgEnabled)
        {
            if(params.m_InputLayerNormWeights == nullptr)
            {
                throw InvalidArgumentException("AddLstmLayer: Input layer normalization weights cannot be NULL "
                                               "when layer normalization is enabled and CIFG disabled.");
            }
            layer->m_LayerNormParameters.m_InputLayerNormWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
        }

        if(params.m_ForgetLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Forget layer normalization weights cannot be NULL "
                                           "when layer normalization is enabled.");
        }
        if(params.m_CellLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell layer normalization weights cannot be NULL "
                                           "when layer normalization is enabled.");
        }
        if(params.m_OutputLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Output layer normalization weights cannot be NULL "
                                           "when layer normalization is enabled.");
        }
        layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
        layer->m_LayerNormParameters.m_CellLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
        layer->m_LayerNormParameters.m_OutputLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
    }
    return layer;
}

IConnectableLayer* Network::AddDivisionLayer(const char* name)
{
    return m_Graph->AddLayer<DivisionLayer>(name);
}

IConnectableLayer* Network::AddSubtractionLayer(const char* name)
{
    return m_Graph->AddLayer<SubtractionLayer>(name);
}

IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
{
    return m_Graph->AddLayer<MeanLayer>(meanDescriptor, name);
}

IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
{
    return m_Graph->AddLayer<PadLayer>(padDescriptor, name);
}

IConnectableLayer* Network::AddQuantizeLayer(const char* name)
{
    return m_Graph->AddLayer<QuantizeLayer>(name);
}

IConnectableLayer* Network::AddDequantizeLayer(const char* name)
{
    return m_Graph->AddLayer<DequantizeLayer>(name);
}

IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}

IConnectableLayer* Network::AddGreaterLayer(const char* name)
{
    return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
}

IConnectableLayer* Network::AddEqualLayer(const char* name)
{
    return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
}

IConnectableLayer* Network::AddRsqrtLayer(const char* name)
{
    return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
}
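// The parameterless AddGatherLayer() overload is kept for backwards compatibility and simply
// forwards a default-constructed GatherDescriptor to the descriptor-based overload below.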
IConnectableLayer* Network::AddGatherLayer(const char* name)
{
    GatherDescriptor gatherDescriptor{};
    return AddGatherLayer(gatherDescriptor, name);
}

IConnectableLayer* Network::AddGatherLayer(const GatherDescriptor& gatherDescriptor,
                                           const char* name)
{
    return m_Graph->AddLayer<GatherLayer>(gatherDescriptor, name);
}

IConnectableLayer* Network::AddMergeLayer(const char* name)
{
    return m_Graph->AddLayer<MergeLayer>(name);
}

IConnectableLayer* Network::AddSwitchLayer(const char* name)
{
    return m_Graph->AddLayer<SwitchLayer>(name);
}

IConnectableLayer* Network::AddPreluLayer(const char* name)
{
    return m_Graph->AddLayer<PreluLayer>(name);
}
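// Transpose convolution follows the same pattern as the convolution layers above: the bias is
// mandatory when m_BiasEnabled is set in the descriptor, and weights/bias are stored on the
// layer as constant tensor handles.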
IConnectableLayer* Network::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
                                                           const ConstTensor& weights,
                                                           const Optional<ConstTensor>& biases,
                                                           const char* name)
{
    if (descriptor.m_BiasEnabled && !biases.has_value())
    {
        throw InvalidArgumentException("AddTransposeConvolution2dLayer: Biases cannot be empty");
    }

    const auto layer = m_Graph->AddLayer<TransposeConvolution2dLayer>(descriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (descriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
    }

    return layer;
}

IConnectableLayer* Network::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<TransposeLayer>(transposeDescriptor, name);
}

IConnectableLayer* Network::AddStackLayer(const StackDescriptor& stackDescriptor,
                                          const char* name)
{
    return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
}

IConnectableLayer* Network::AddStandInLayer(const StandInDescriptor& desc,
                                            const char* name)
{
    return m_Graph->AddLayer<StandInLayer>(desc, name);
}
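// AddQuantizedLstmLayer() copies every tensor of the QuantizedLstmInputParams (input/recurrent
// weights and gate biases) onto the layer; unlike the float LSTM above there are no optional
// parameter groups to validate.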
IConnectableLayer* Network::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
                                                  const char* name)
{
    const auto layer = m_Graph->AddLayer<QuantizedLstmLayer>(name);

    // InputToX weights
    layer->m_QuantizedLstmParameters.m_InputToInputWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputToInputWeights());
    layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputToForgetWeights());
    layer->m_QuantizedLstmParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputToCellWeights());
    layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputToOutputWeights());

    // RecurrentToX weights
    layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToInputWeights());
    layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToForgetWeights());
    layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToCellWeights());
    layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToOutputWeights());

    // Bias
    layer->m_QuantizedLstmParameters.m_InputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(params.GetInputGateBias());
    layer->m_QuantizedLstmParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(params.GetForgetGateBias());
    layer->m_QuantizedLstmParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(params.GetCellBias());
    layer->m_QuantizedLstmParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(params.GetOutputGateBias());

    return layer;
}
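// AddQLstmLayer() mirrors AddLstmLayer() for the quantized QLSTM variant: the nine basic
// tensors are always copied, while the CIFG, projection, peephole and layer-normalization
// groups are validated and copied only when enabled in the QLstmDescriptor.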
IConnectableLayer* Network::AddQLstmLayer(const QLstmDescriptor& descriptor,
                                          const LstmInputParams& params,
                                          const char* name)
{
    const auto layer = m_Graph->AddLayer<QLstmLayer>(descriptor, name);

    // QLstm Basic Parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    // QLstm Cifg parameters
    if(!descriptor.m_CifgEnabled)
    {
        if(params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Input To Input Weights cannot be NULL");
        }

        if(params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                "AddQLstmLayer: Recurrent To Input Weights cannot be NULL");
        }

        if(params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Input Gate Bias cannot be NULL");
        }

        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    // QLstm Projection parameters
    if(descriptor.m_ProjectionEnabled)
    {
        if(params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Projection Weights cannot be NULL");
        }

        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
        // Projection bias is optional even if projection is enabled
        if(params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    // QLstm Peephole params
    if(descriptor.m_PeepholeEnabled)
    {
        if(params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Cell To Forget Weights cannot be NULL");
        }

        if(params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Cell To Output Weights cannot be NULL");
        }

        if(!descriptor.m_CifgEnabled)
        {
            if(params.m_CellToInputWeights == nullptr)
            {
                throw InvalidArgumentException("AddQLstmLayer: Cell To Input Weights cannot be NULL");
            }

            layer->m_PeepholeParameters.m_CellToInputWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }

        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }

    // QLstm Layer Normalization params
    if(descriptor.m_LayerNormEnabled)
    {
        if(params.m_ForgetLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Forget layer normalization weights cannot be NULL");
        }

        if(params.m_CellLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Cell layer normalization weights cannot be NULL");
        }

        if(params.m_OutputLayerNormWeights == nullptr)
        {
            throw InvalidArgumentException("AddQLstmLayer: Output layer normalization weights cannot be NULL");
        }

        if(!descriptor.m_CifgEnabled)
        {
            if(params.m_InputLayerNormWeights == nullptr)
            {
                throw InvalidArgumentException("AddQLstmLayer: Input layer normalization weights cannot be NULL");
            }

            layer->m_LayerNormParameters.m_InputLayerNormWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
        }

        layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
        layer->m_LayerNormParameters.m_CellLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
        layer->m_LayerNormParameters.m_OutputLayerNormWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
    }
    return layer;
}

IConnectableLayer* Network::AddLogicalBinaryLayer(const LogicalBinaryDescriptor& logicalBinaryDescriptor,
                                                  const char* name)
{
    return m_Graph->AddLayer<LogicalBinaryLayer>(logicalBinaryDescriptor, name);
}
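// Both graph traversal entry points below simply iterate over every layer in the underlying
// Graph and hand it to the caller-supplied visitor/strategy object.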
void Network::Accept(ILayerVisitor& visitor) const
{
    for (auto layer : GetGraph())
    {
        layer->Accept(visitor);
    }
}

void Network::ExecuteStrategy(IStrategy& strategy) const
{
    for (auto layer : GetGraph())
    {
        layer->ExecuteStrategy(strategy);
    }
}
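// Each OptimizedNetwork takes ownership of its (already optimized) Graph and is assigned a
// unique GUID from the ProfilingService; the second constructor additionally records the
// ModelOptions used during optimization.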
OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
    : m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid())
{
}

OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions)
    : m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid()), m_ModelOptions(modelOptions)
{
}

OptimizedNetwork::~OptimizedNetwork()
{
}

} // namespace armnn