//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Network.hpp"
#include "Graph.hpp"
#include "Layer.hpp"
#include "DeviceSpec.hpp"
#include "Optimizer.hpp"
#include "SubgraphViewSelector.hpp"
#include "BackendSettings.hpp"
#include "optimizations/All.hpp"

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/TensorHandleFactoryRegistry.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <ProfilingService.hpp>

#include <fcntl.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <vector>

#include <boost/format.hpp>
#include <boost/numeric/conversion/converter_policies.hpp>
#include <boost/cast.hpp>

namespace armnn
{

armnn::INetwork* INetwork::CreateRaw(NetworkOptions networkOptions)
{
    return new Network(networkOptions);
}

armnn::INetworkPtr INetwork::Create(NetworkOptions networkOptions)
{
    return INetworkPtr(CreateRaw(networkOptions), &INetwork::Destroy);
}

void INetwork::Destroy(INetwork* network)
{
    delete PolymorphicDowncast<Network*>(network);
}

void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
{
    delete PolymorphicDowncast<OptimizedNetwork*>(network);
}

Status OptimizedNetwork::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
{
    return m_Graph->SerializeToDot(stream);
}

void ReportError(const std::string& errorMessage,
                 Optional<std::vector<std::string>&> errorMessages)
{
    std::stringstream fullErrorMessage;
    fullErrorMessage << "ERROR: " << errorMessage;
    ARMNN_LOG(warning) << fullErrorMessage.str();
    if (errorMessages)
    {
        errorMessages.value().push_back(fullErrorMessage.str());
    }
}

void ReportWarning(const std::string& warningMessage,
                   Optional<std::vector<std::string>&> warningMessages)
{
    std::stringstream fullWarningMessage;
    fullWarningMessage << "WARNING: " << warningMessage;
    ARMNN_LOG(warning) << fullWarningMessage.str();
    if (warningMessages)
    {
        warningMessages.value().push_back(fullWarningMessage.str());
    }
}

OptimizationResult ReturnWithError(OptimizationResult res,
                                   const Layer* layer,
                                   const BackendSettings& backendSettings,
                                   Optional<std::vector<std::string>&> errMessages)
{
    std::stringstream failureMsg;
    failureMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
               << " is not supported on any preferred backend " << backendSettings.m_PreferredBackends;
    ReportError(failureMsg.str(), errMessages);

    res.m_Error = true;
    return res;
}

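// Checks that every QAsymmU8 output of the given layer has a non-zero quantization scale,
// and normalises the quantization parameters of Softmax outputs to scale 1/256 and offset 0.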
bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
{
    bool noErrors = true;
    unsigned int numOutputs = layer->GetNumOutputSlots();
    for (unsigned int i = 0; i < numOutputs; i++) {
        OutputSlot& outputSlot = layer->GetOutputSlot(i);
        TensorInfo info = outputSlot.GetTensorInfo();
        if (DataType::QAsymmU8 == info.GetDataType()) {
            if (0.f == info.GetQuantizationScale()) {
                noErrors = false;
                std::stringstream ss;
                ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
                   << " (" << layer->GetNameStr() << ") is of type"
                   << " Quantized 8 bit but its scale parameter has not been set";
                ReportError(ss.str(), errMessages);
            }
            // Softmax under QuantisedAsymm8 must always have scale (1.0f/256.0f) and offset 0
            if ((info.GetQuantizationScale() != (1.0f / 256.0f) ||
                 info.GetQuantizationOffset() != 0) &&
                 layer->GetType() == armnn::LayerType::Softmax)
            {
                std::stringstream ss;
                ss << "Quantization parameters for Softmax layer (Scale: " <<
                   info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
                   ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
                ARMNN_LOG(warning) << ss.str();
                info.SetQuantizationScale((1.0f / 256.0f));
                info.SetQuantizationOffset(0);
                outputSlot.SetTensorInfo(info);
            }
        }
    }
    return noErrors;
}

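// Converts the constant BFloat16 weights of a Convolution2d or FullyConnected layer to Float32 in place.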
template <typename LayerT>
LayerT* ConvertBf16ToFp32Weight(Layer* l)
{
    LayerT* layer = PolymorphicDowncast<LayerT*>(l);
    if ((layer->GetType() == LayerType::Convolution2d || layer->GetType() == LayerType::FullyConnected)
         && layer->m_Weight)
    {
        const TensorInfo& info = layer->m_Weight->GetTensorInfo();

        if (info.GetDataType() == DataType::BFloat16)
        {
            std::vector<float> newValues(info.GetNumElements());

            armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(
                layer->m_Weight->template GetTensor<armnn::BFloat16>(), info.GetNumElements(), newValues.data());

            TensorInfo newInfo(info.GetShape(), DataType::Float32);
            ConstTensor newInput(newInfo, newValues);
            layer->m_Weight.reset(new ScopedCpuTensorHandle(newInput));
        }
    }
    return layer;
}

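// Attempts to assign the given backend to a single layer. If the backend cannot handle the layer's
// data types directly, FP16<->FP32 or BF16<->FP32 conversion layers are inserted around it and assigned
// to the first backend that supports them. Returns a warning-only result when the layer should simply
// fall back to the next preferred backend.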
OptimizationResult AttemptBackendAssignment(BackendSettings& backendSettings,
                                            Graph& graph,
                                            Layer* layer,
                                            BackendId backend,
                                            DataType dataTypeIn,
                                            DataType dataTypeOut,
                                            const std::vector<BackendId>& availablePreferredBackends,
                                            std::string& reasonIfUnsupported,
                                            Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose a meaningful error message before returning with error
    auto ReturnError = [&](const Layer* layer)
        {
            return ReturnWithError(result, layer, backendSettings, errMessages);
        };

    // The compute device needs to be set on the layer
    // before we can check whether it is supported
    layer->SetBackendId(backend);
    if (!IWorkloadFactory::IsLayerSupported(*layer, EmptyOptional(), reasonIfUnsupported))
    {
        if (dataTypeIn == DataType::Float16 || dataTypeOut == DataType::Float16)
        {
            if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                && layer->GetType() != LayerType::ConvertFp32ToFp16
                && layer->GetType() != LayerType::ConvertFp16ToFp32)
            {
                // Insert FP16 -> FP32 conversion layer before current layer
                std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers;
                if (dataTypeIn == DataType::Float16)
                {
                    convertFp16ToFp32Layers =
                        InsertConvertFp16ToFp32LayersBefore(graph, *layer);
                }

                // Insert FP32 -> FP16 conversion layer after current layer
                std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers;
                if (dataTypeOut == DataType::Float16)
                {
                    convertFp32ToFp16Layers =
                        InsertConvertFp32ToFp16LayersAfter(graph, *layer);
                }

                // Assign a supported backend to the newly introduced conversion layers
                auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                    {
                        bool supportedBackendFound = false;
                        std::string reasonIfUnsupported;

                        // Try preferred backend first
                        layer->SetBackendId(preferredBackend);
                        if (IWorkloadFactory::IsLayerSupported(*layer,
                                                               EmptyOptional(),
                                                               reasonIfUnsupported))
                        {
                            supportedBackendFound = true;
                        }
                        else
                        {
                            for (const auto& backend : availablePreferredBackends)
                            {
                                // Skip preferred backend (we already determined that it is not supported)
                                if (backend == preferredBackend)
                                {
                                    continue;
                                }

                                layer->SetBackendId(backend);
                                if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                       EmptyOptional(),
                                                                       reasonIfUnsupported))
                                {
                                    supportedBackendFound = true;
                                    break;
                                }
                            }
                        }

                        return supportedBackendFound;
                    };

                for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                return result;
            }
        }
        else if (dataTypeIn == DataType::BFloat16 || dataTypeOut == DataType::BFloat16)
        {
            if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                && layer->GetType() != LayerType::ConvertFp32ToBf16
                && layer->GetType() != LayerType::ConvertBf16ToFp32)
            {
                // Insert BF16 -> FP32 conversion layer before current layer
                std::vector<ConvertBf16ToFp32Layer*> convertBf16ToFp32Layers;
                if (dataTypeIn == DataType::BFloat16)
                {
                    convertBf16ToFp32Layers =
                        InsertConvertBf16ToFp32LayersBefore(graph, *layer);
                    if (layer->GetType() == LayerType::Convolution2d)
                    {
                        ConvertBf16ToFp32Weight<Convolution2dLayer>(layer);
                    }
                    else if (layer->GetType() == LayerType::FullyConnected)
                    {
                        ConvertBf16ToFp32Weight<FullyConnectedLayer>(layer);
                    }
                }

                // Insert FP32 -> BF16 conversion layer after current layer
                std::vector<ConvertFp32ToBf16Layer*> convertFp32ToBf16Layers;
                if (dataTypeOut == DataType::BFloat16)
                {
                    convertFp32ToBf16Layers =
                        InsertConvertFp32ToBf16LayersAfter(graph, *layer);
                }

                // Assign a supported backend to the newly introduced conversion layers
                auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                    {
                        bool supportedBackendFound = false;
                        std::string reasonIfUnsupported;

                        // Try preferred backend first
                        layer->SetBackendId(preferredBackend);
                        if (IWorkloadFactory::IsLayerSupported(*layer,
                                                               EmptyOptional(),
                                                               reasonIfUnsupported))
                        {
                            supportedBackendFound = true;
                        }
                        else
                        {
                            for (const auto& backend : availablePreferredBackends)
                            {
                                // Skip preferred backend (we already determined that it is not supported)
                                if (backend == preferredBackend)
                                {
                                    continue;
                                }

                                layer->SetBackendId(backend);
                                if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                       EmptyOptional(),
                                                                       reasonIfUnsupported))
                                {
                                    supportedBackendFound = true;
                                    break;
                                }
                            }
                        }

                        return supportedBackendFound;
                    };

                for (ConvertBf16ToFp32Layer* convertLayer : convertBf16ToFp32Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                for (ConvertFp32ToBf16Layer* convertLayer : convertFp32ToBf16Layers)
                {
                    if (!AssignFirstSupportedBackend(convertLayer, backend))
                    {
                        return ReturnError(convertLayer);
                    }
                }

                return result;
            }
        }

        std::stringstream warningMsg;
        warningMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                   << " is not supported on requested backend " << layer->GetBackendId().Get()
                   << " for input data type " << GetDataTypeName(dataTypeIn)
                   << " and output data type " << GetDataTypeName(dataTypeOut)
                   << " (reason: " << reasonIfUnsupported
                   << "), falling back to the next backend.";
        ReportWarning(warningMsg.str(), errMessages);

        return OptimizationResult(true, false);
    }
    else
    {
        return result;
    }
}

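// Walks the layers in [firstLayer, lastLayer) and assigns each one to the first backend that supports it,
// honouring any per-layer backend hint before falling back to the ordered list of preferred backends.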
OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  Graph::Iterator& firstLayer,
                                  Graph::Iterator& lastLayer,
                                  Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose a meaningful error message before returning with error
    auto ReturnError = [&](const Layer* layer)
        {
            return ReturnWithError(result, layer, backendSettings, errMessages);
        };

    auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
    if (availablePreferredBackends.empty())
    {
        std::stringstream failureMsg;
        failureMsg << "No preferred backends are available";
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    }

    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        auto layer = *it;

        DataType dataTypeIn = layer->GetNumInputSlots() == 0 ? DataType::Float32 :
            layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType();
        DataType dataTypeOut = layer->GetNumOutputSlots() == 0 ? DataType::Float32 :
            layer->GetOutputSlot(0).GetTensorInfo().GetDataType();

        std::string reasonIfUnsupported;
        bool found = false;
        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
        {
            // Don't bomb immediately, find all the quantized outputs
            // which haven't had a scale set and report them all back.
            result.m_Error = true;
        }

        // First try to assign the layer to its hinted backend
        if (layer->GetBackendHint().has_value() &&
            backendSettings.IsBackendSupported(layer->GetBackendHint().value()) &&
            AttemptBackendAssignment(backendSettings,
                                     optNetObjPtr->GetGraph(),
                                     layer,
                                     layer->GetBackendHint().value(),
                                     dataTypeIn,
                                     dataTypeOut,
                                     availablePreferredBackends,
                                     reasonIfUnsupported,
                                     errMessages).IsOk())
        {
            found = true;
            backendSettings.m_SelectedBackends.insert(layer->GetBackendHint().value());
        }
        else
        {
            // Try to assign the layer to the preferred list of backends
            for (const auto& backend : availablePreferredBackends)
            {
                if (layer->GetBackendHint().has_value() &&
                    layer->GetBackendHint().value() == backend)
                {
                    continue; // Don't re-test the backend hint
                }

                OptimizationResult res = AttemptBackendAssignment(backendSettings,
                                                                  optNetObjPtr->GetGraph(),
                                                                  layer,
                                                                  backend,
                                                                  dataTypeIn,
                                                                  dataTypeOut,
                                                                  availablePreferredBackends,
                                                                  reasonIfUnsupported,
                                                                  errMessages);

                if (res.IsOk())
                {
                    found = true;
                    backendSettings.m_SelectedBackends.insert(backend);
                    break;
                }
                else if (res.IsError())
                {
                    return res; // Cannot continue.
                                // Note: we don't need to log the error as it would already
                                // be logged in AttemptBackendAssignment().
                }
                else
                {
                    ARMNN_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
                }
            }
        }

        // If the layer is unsupported by any devices, log and return a null network.
        if (!found)
        {
            // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
            // fallback we should set the compute device on the layer to CpuRef (these are not
            // available as accelerated operations, or are only available under certain
            // conditions, currently they comprise MemCopy, Constant, Permute)
            armnn::LayerType layerType = layer->GetType();
            if (!backendSettings.IsCpuRefUsed() && (layerType == armnn::LayerType::MemCopy ||
                                                    layerType == armnn::LayerType::Constant ||
                                                    layerType == armnn::LayerType::Permute))
            {
                BackendId cpuBackendId(armnn::Compute::CpuRef);
                layer->SetBackendId(cpuBackendId);
                backendSettings.m_SelectedBackends.insert(cpuBackendId);
            }
            else
            {
                return ReturnError(layer);
            }
        }
    }

    return result;
}

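// Convenience overload: assigns backends to every layer contained in the given sub-graph view.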
OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  SubgraphView& subgraph,
                                  Optional<std::vector<std::string>&> errMessages)
{
    Graph::Iterator firstLayer = subgraph.begin();
    Graph::Iterator lastLayer = subgraph.end();
    return AssignBackends(optNetObjPtr,
                          backendSettings,
                          firstLayer,
                          lastLayer,
                          errMessages);
}

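// Instantiates a backend object for each supported backend and registers its tensor handle factories
// with the given registry, returning the created backends keyed by backend id.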
BackendsMap CreateSupportedBackends(TensorHandleFactoryRegistry& handleFactoryRegistry,
                                    BackendSettings& backendSettings)
{
    BackendsMap backends;
    auto const& backendRegistry = BackendRegistryInstance();
    for (auto&& selectedBackend : backendSettings.m_SupportedBackends)
    {
        auto backendFactory = backendRegistry.GetFactory(selectedBackend);
        auto backendObjPtr = backendFactory();
        ARMNN_ASSERT(backendObjPtr);

        backendObjPtr->RegisterTensorHandleFactories(handleFactoryRegistry);

        backends[backendObjPtr->GetId()] = std::move(backendObjPtr);
    }

    return backends;
}

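// Runs the backend-specific optimizations: for each selected backend, the matching sub-graphs are
// extracted, optimized via OptimizeSubgraphView() and substituted back into the main graph. Layers in
// sub-graphs that fail to optimize are re-assigned to the remaining backends.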
OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
                                             BackendSettings& backendSettings,
                                             BackendsMap& backends,
                                             Optional<std::vector<std::string>&> errMessages)
{
    ARMNN_ASSERT(optNetObjPtr);

    OptimizationResult result;

    // Get the optimized graph
    Graph& optGraph = optNetObjPtr->GetGraph();

    // Run backend specific optimizations
    for (auto&& selectedBackend : backendSettings.m_SelectedBackends)
    {
        auto backendObjPtr = backends.find(selectedBackend)->second.get();
        ARMNN_ASSERT(backendObjPtr);

        // Select sub-graphs based on backend
        SubgraphViewSelector::Subgraphs subgraphs =
            SubgraphViewSelector::SelectSubgraphs(optGraph,
                                                  // Select layers assigned to the requested backend
                                                  [&backendObjPtr](const Layer& layer)
                                                  {
                                                      return layer.GetType() != LayerType::Input &&
                                                             layer.GetType() != LayerType::Output &&
                                                             layer.GetBackendId() == backendObjPtr->GetId();
                                                  });
        if (subgraphs.empty())
        {
            // No sub-graphs found, try with next selected backend
            continue;
        }

        // Try to optimize each sub-graph
        for (auto& subgraph : subgraphs)
        {
            // Try to optimize the current sub-graph
            OptimizationViews optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraph);
            ARMNN_ASSERT(optimizationViews.Validate(*subgraph));

            // Optimization attempted, check the resulting optimized sub-graph
            for (auto& substitution : optimizationViews.GetSubstitutions())
            {
                // Sub-graph optimized, substitute the sub-graph with the new optimized one in the main optimized graph
                SubgraphView& replacementSubgraph   = substitution.m_ReplacementSubgraph;
                SubgraphView& substitutableSubgraph = substitution.m_SubstitutableSubgraph;
                optGraph.SubstituteSubgraph(substitutableSubgraph, replacementSubgraph);

                // Assign the current backend to the optimized sub-graph
                std::for_each(replacementSubgraph.begin(), replacementSubgraph.end(), [&selectedBackend](Layer* l)
                    {
                        ARMNN_ASSERT(l);
                        l->SetBackendId(selectedBackend);
                    });
            }

            if (!optimizationViews.GetFailedSubgraphs().empty())
            {
                std::stringstream warningMsg;
                warningMsg << "Some sub-graph(s) failed to optimize on " << backendObjPtr->GetId() << " backend.";
                ReportWarning(warningMsg.str(), errMessages);

                // Failed to optimize the given sub-graph, re-assign the sub-graph layers to other available backends
                BackendSettings settingsCopy(backendSettings);
                if (!backendObjPtr->GetId().IsCpuRef())
                {
                    // Add the current backend to the list of backends to ignore
                    settingsCopy.m_IgnoredBackends.insert(backendObjPtr->GetId());
                }

                int count = 0;
                for (auto& failedSubgraph : optimizationViews.GetFailedSubgraphs())
                {
                    // An error occurred: the optimization was attempted but not performed, try different backends
                    std::stringstream subgraphMsg;
                    subgraphMsg << "Re-assigning backends to " << failedSubgraph.GetLayers().size()
                                << " layers inside sub-graph " << count++;
                    ReportWarning(subgraphMsg.str(), errMessages);

                    OptimizationResult reassignmentResult = AssignBackends(optNetObjPtr,
                                                                           settingsCopy,
                                                                           *subgraph,
                                                                           errMessages);
                    if (reassignmentResult.m_Error)
                    {
                        // Failed to re-assign one of the remaining backends to each layer of the sub-graph
                        result.m_Error = true;
                        return result;
                    }
                }
            }
        }
    }

    return result;
}

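// Returns true if moving data between the two tensor handle factories requires a memory copy,
// i.e. when the source cannot export directly into a format the destination can import.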
bool RequiresCopy(ITensorHandleFactory::FactoryId src,
                  ITensorHandleFactory::FactoryId dst,
                  TensorHandleFactoryRegistry& registry)
{
    if (src != dst)
    {
        ITensorHandleFactory* srcFactory = registry.GetFactory(src);
        ITensorHandleFactory* dstFactory = registry.GetFactory(dst);

        if (srcFactory && dstFactory &&
            (srcFactory->GetExportFlags() & dstFactory->GetImportFlags()) != 0)
        {
            return false;
        }
        return true;
    }
    return false;
}

// Find the handle factory for the input layer which results in fewest required copies.
ITensorHandleFactory::FactoryId CalculateSlotOptionForInput(BackendsMap& backends,
                                                            OutputSlot& slot,
                                                            TensorHandleFactoryRegistry& registry)
{
    Layer& layer = slot.GetOwningLayer();
    ARMNN_ASSERT(layer.GetType() == LayerType::Input);

    // Explicitly select the tensorhandle factory for InputLayer because the rules for it are slightly different. It
    // doesn't matter which backend it is assigned to because they all use the same implementation, which
    // requires Map/Unmap support. This means that, so long as the handle type supports map/unmap semantics, we can
    // select a factory with maximum compatibility with the layers connected to the InputLayer.

    // First ensure the source backend supports the tensor handle API
    auto frmBackend = backends.find(layer.GetBackendId());
    if (frmBackend == backends.end() ||
        !frmBackend->second->SupportsTensorAllocatorAPI())
    {
        return ITensorHandleFactory::LegacyFactoryId;
    }

    // Go through all connections to the output slot and determine the TensorHandleFactory which results in the
    // fewest copies.
    std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
    int topScore = 0;
    ITensorHandleFactory::FactoryId topChoice = ITensorHandleFactory::LegacyFactoryId;

    for (auto&& connection : slot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();

        auto toBackend = backends.find(connectedLayer.GetBackendId());
        ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

        if (!toBackend->second.get()->SupportsTensorAllocatorAPI())
        {
            // The destination backend does not support the tensor allocator API, move to the next one
            continue;
        }

        auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
        for (auto&& dst : dstPrefs)
        {
            // Input layers use the mem copy workload or import, so the selected factory must
            // support either the map/unmap API or Import API
            ITensorHandleFactory* factory = registry.GetFactory(dst);
            if (!factory->SupportsMapUnmap() &&
                !CheckFlag(factory->GetImportFlags(), MemorySource::Malloc)) // Just support cpu mem imports for now
            {
                // The current tensor handle factory does not support the map/unmap or import
                // strategy, move to the next one
                continue;
            }

            auto it = factoryScores.find(dst);
            if (it == factoryScores.end())
            {
                // Add new score to the table
                factoryScores[dst] = 0;
                if (topChoice == ITensorHandleFactory::LegacyFactoryId)
                {
                    topChoice = dst;
                }
            }
            else
            {
                // Increase the score
                factoryScores[dst]++;

                // Track the best option
                if (factoryScores[dst] > topScore)
                {
                    topScore = factoryScores[dst];
                    topChoice = dst;
                }
            }
        }
    }

    return topChoice;
}

// Find the handle factory for the output layer which results in fewest required copies.
ITensorHandleFactory::FactoryId CalculateSlotOptionForOutput(BackendsMap& backends,
                                                             OutputSlot& slot,
                                                             TensorHandleFactoryRegistry& registry)
{
    IgnoreUnused(backends, slot, registry);
    return ITensorHandleFactory::DeferredFactoryId;
}

// For all handle factories supported on the source backend, we wish to find the one which requires the fewest copies
// when considering all connections.
ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
                                                    OutputSlot& outputSlot,
                                                    TensorHandleFactoryRegistry& registry)
{
    // First ensure the source backend supports the tensor handle API
    Layer& layer = outputSlot.GetOwningLayer();
    auto frmBackend = backends.find(layer.GetBackendId());
    if (frmBackend == backends.end() ||
        !frmBackend->second->SupportsTensorAllocatorAPI())
    {
        return ITensorHandleFactory::LegacyFactoryId;
    }

    // Connections to Output Layers require support for map/unmap on the TensorHandle.
    bool requiresMapUnmap = false;
    for (auto&& connection : outputSlot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();
        if (connectedLayer.GetType() == LayerType::Output)
        {
            requiresMapUnmap = true;
        }
    }

    IBackendInternal* srcBackend = frmBackend->second.get();
    auto srcPrefs = srcBackend->GetHandleFactoryPreferences();

    // Initialize the scores
    std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
    for (auto&& pref : srcPrefs)
    {
        if (requiresMapUnmap) // Only consider factories that support map/unmap if required
        {
            ITensorHandleFactory* factory = registry.GetFactory(pref);
            if (!factory->SupportsMapUnmap())
            {
                // The current tensor handle factory does not support the map/unmap strategy, move to the next one
                continue;
            }
        }

        auto it = factoryScores.find(pref);
        if (it == factoryScores.end())
        {
            // Add new score to the table
            factoryScores[pref] = 0;
        }
    }

    // Score each handle factory based on how many times it requires copies on the slot connections
    for (auto&& connection : outputSlot.GetConnections())
    {
        const Layer& connectedLayer = connection->GetOwningLayer();

        auto toBackend = backends.find(connectedLayer.GetBackendId());
        ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

        auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
        for (auto&& src : srcPrefs)
        {
            if (factoryScores.find(src) == factoryScores.end()) // Don't consider excluded factories
            {
                continue;
            }

            for (auto&& dst : dstPrefs)
            {
                if (RequiresCopy(src, dst, registry))
                {
                    // A copy would be required for this connection, increase the penalty score
                    factoryScores[src]++;
                    break;
                }
            }
        }
    }

    // Find the lowest score
    int minScore = std::numeric_limits<int>::max();
    for (auto it : factoryScores)
    {
        minScore = std::min(minScore, it.second);
    }

    // Collect factories matching the best (lowest) score
    std::vector<ITensorHandleFactory::FactoryId> optimalFactories;
    for (auto it : factoryScores)
    {
        if (it.second == minScore)
        {
            optimalFactories.push_back(it.first);
        }
    }

    // For all compatible factories matching the best score, find the preferred one for the current layer.
    for (auto&& srcPref : srcPrefs)
    {
        for (auto&& comp : optimalFactories)
        {
            if (comp == srcPref)
            {
                return comp;
            }
        }
    }

    return ITensorHandleFactory::LegacyFactoryId;
}

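// Decides how data should travel along a single connection: direct use of the same tensor handle,
// export/import between compatible factories, or an explicit copy to the target backend.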
EdgeStrategy CalculateEdgeStrategy(BackendsMap& backends,
                                   ITensorHandleFactory::FactoryId srcFactoryId,
                                   const Layer& layer,
                                   const Layer& connectedLayer,
                                   TensorHandleFactoryRegistry& registry,
                                   bool importEnabled)
{
    auto toBackend = backends.find(connectedLayer.GetBackendId());
    ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

    auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();

    // Legacy API check for backward compatibility
    if (srcFactoryId == ITensorHandleFactory::LegacyFactoryId || dstPrefs.empty())
    {
        if (layer.GetBackendId() != connectedLayer.GetBackendId())
        {
            return EdgeStrategy::CopyToTarget;
        }
        else
        {
            return EdgeStrategy::DirectCompatibility;
        }
    }

    // TensorHandleFactory API present, so perform more sophisticated strategies.
    // Dst Output layers don't require copy because they use import or map/unmap
    if (connectedLayer.GetType() == LayerType::Output)
    {
        return EdgeStrategy::DirectCompatibility;
    }

    // Search for direct match in prefs
    for (auto&& pref : dstPrefs)
    {
        if (pref == srcFactoryId)
        {
            return EdgeStrategy::DirectCompatibility;
        }
    }

    // Search for export/import options
    ITensorHandleFactory* srcFactory = registry.GetFactory(srcFactoryId);
    if (srcFactory->GetExportFlags() != 0 && importEnabled)
    {
        for (auto&& pref : dstPrefs)
        {
            ITensorHandleFactory* dstFactory = registry.GetFactory(pref);

            // Handles cases when a destPref is not listed in TensorHandleFactoryRegistry
            if (!dstFactory) {
                continue;
            }

            if ((dstFactory->GetImportFlags() & srcFactory->GetExportFlags()) != 0)
            {
                auto srcCapability = srcFactory->GetCapabilities(&layer, &layer, CapabilityClass::PaddingRequired);
                auto dstCapability = dstFactory->GetCapabilities(&connectedLayer,
                                                                 &connectedLayer,
                                                                 CapabilityClass::PaddingRequired);
                // Do not require memory copy if the source and destination do not require padding.
                if (srcCapability.empty() && dstCapability.empty())
                {
                    return EdgeStrategy::ExportToTarget;
                }
            }
        }
    }

    // Search for copy options via map/unmap
    if (srcFactory->SupportsMapUnmap())
    {
        for (auto&& pref : dstPrefs)
        {
            ITensorHandleFactory* dstFactory = registry.GetFactory(pref);
            if (dstFactory && dstFactory->SupportsMapUnmap())
            {
                return EdgeStrategy::CopyToTarget;
            }
        }
    }

    return EdgeStrategy::Undefined;
}

// Select the TensorHandleFactories and the corresponding memory strategy
OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
                                              BackendsMap& backends,
                                              TensorHandleFactoryRegistry& registry,
                                              bool importEnabled,
                                              Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled](Layer* layer)
    {
        ARMNN_ASSERT(layer);

        // Lets make sure the backend is in our list of supported backends. Something went wrong during backend
        // assignment if this check fails
        ARMNN_ASSERT(backends.find(layer->GetBackendId()) != backends.end());

        // Check each output separately
        for (unsigned int slotIdx = 0; slotIdx < layer->GetNumOutputSlots(); slotIdx++)
        {
            OutputSlot& outputSlot = layer->GetOutputSlot(slotIdx);

            ITensorHandleFactory::FactoryId slotOption = ITensorHandleFactory::LegacyFactoryId;

            // Calculate the factory to use which results in the fewest copies being made.
            switch(layer->GetType())
            {
                case LayerType::Input:
                    slotOption = CalculateSlotOptionForInput(backends, outputSlot, registry);
                    break;
                case LayerType::Output:
                    slotOption = CalculateSlotOptionForOutput(backends, outputSlot, registry);
                    break;
                default:
                    slotOption = CalculateSlotOption(backends, outputSlot, registry);
                    break;
            }
            outputSlot.SetTensorHandleFactory(slotOption);

            // Now determine the "best" edge strategy for each connection given the slotOption.
            unsigned int connectionIdx = 0;
            for (auto&& connection : outputSlot.GetConnections())
            {
                const Layer& connectedLayer = connection->GetOwningLayer();

                EdgeStrategy strategy = CalculateEdgeStrategy(backends, slotOption, *layer, connectedLayer,
                                                              registry, importEnabled);

                if (strategy == EdgeStrategy::Undefined)
                {
                    result.m_Error = true;
                    if (errMessages)
                    {
                        errMessages.value().emplace_back("Could not find valid strategy required for compatibility"
                                                         " between backends.");
                    }
                    return;
                }

                outputSlot.SetEdgeStrategy(connectionIdx, strategy);

                connectionIdx++;
            }
        }
    });

    return result;
}

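// Optimizes a built INetwork for the given backend preferences and returns an IOptimizedNetwork that can
// be loaded into a runtime. Illustrative usage only (a sketch; the runtime objects and variable names
// below are assumptions about the caller, not part of this file):
//
//     IRuntime::CreationOptions runtimeOptions;
//     IRuntimePtr runtime = IRuntime::Create(runtimeOptions);
//     std::vector<BackendId> preferences = { Compute::CpuAcc, Compute::CpuRef };
//     IOptimizedNetworkPtr optNet = Optimize(*network, preferences, runtime->GetDeviceSpec());
//     NetworkId networkId;
//     runtime->LoadNetwork(networkId, std::move(optNet));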
IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                              const std::vector<BackendId>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options,
                              Optional<std::vector<std::string>&> messages)
{
    if (backendPreferences.empty())
    {
        throw armnn::InvalidArgumentException("Invoked Optimize with no backends specified");
    }

    if (options.m_ReduceFp32ToFp16 && options.m_ReduceFp32ToBf16)
    {
        throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
    }

    const Network& network = *PolymorphicDowncast<const Network*>(&inNetwork);
    std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());

    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph), options.m_ModelOptions),
                                       &IOptimizedNetwork::Destroy);

    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());

    // Get the optimized graph
    Graph& optGraph = optNetObjPtr->GetGraph();

    // Infer the tensor infos for all output slots. Throws an exception on failure
    optGraph.InferTensorInfos();

    // Perform optimisation passes
    using namespace optimizations;
    Optimizer::Pass(optGraph, MakeOptimizations(SquashEqualPermuteSiblings(),
                                                SquashEqualTransposeSiblings(),
                                                SquashEqualReshapeSiblings(),
                                                OptimizeInversePermutes(),
                                                OptimizeInverseTransposes(),
                                                MovePermuteUp(),
                                                MoveTransposeUp(),
                                                PermuteAsReshape(),
                                                TransposeAsReshape(),
                                                OptimizeConsecutiveReshapes(),
                                                FoldPadIntoConvolution2d(),
                                                PermuteAndBatchToSpaceAsDepthToSpace(),
                                                TransposeAndBatchToSpaceAsDepthToSpace()));

    // If Fp32 to Fp16 optimization is set convert Fp32 network to Fp16
    if (options.m_ReduceFp32ToFp16)
    {
        Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToFp16Converter()));
        Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
    }

    // If Fp32 to Bf16 optimization is set convert Fp32 network to Bf16
    // Convert input of Convolution2d and FullyConnected from Fp32 to Bf16
    // Only Constant weight of Convolution2d and FullyConnected are converted from Fp32 to Bf16
    if (options.m_ReduceFp32ToBf16)
    {
        Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToBf16Converter()));
    }

    // Initialize backend settings
    BackendSettings backendSettings(backendPreferences, deviceSpec);
    if (backendSettings.GetAvailablePreferredBackends().empty())
    {
        std::stringstream failureMsg;
        failureMsg << "None of the preferred backends " << backendPreferences
                   << " are supported. Current platform provides " << backendSettings.m_SupportedBackends;
        ReportError(failureMsg.str(), messages);
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // Create a map to temporarily hold initialized backend objects
    TensorHandleFactoryRegistry tensorHandleFactoryRegistry;
    BackendsMap backends = CreateSupportedBackends(tensorHandleFactoryRegistry, backendSettings);

    // Assign an available backend to each layer
    Graph::Iterator firstLayer = optGraph.begin();
    Graph::Iterator lastLayer = optGraph.end();
    OptimizationResult assignBackendsResult = AssignBackends(optNetObjPtr,
                                                             backendSettings,
                                                             firstLayer,
                                                             lastLayer,
                                                             messages);
    if (assignBackendsResult.m_Error)
    {
        // Failed to assign a backend to each layer
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    Optimizer::Pass(optGraph, MakeOptimizations(OptimizeInverseConversionsFp16(),
                                                OptimizeInverseConversionsFp32()));

    // Apply the backend-specific optimizations
    OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(optNetObjPtr,
                                                                             backendSettings,
                                                                             backends,
                                                                             messages);
    if (backendOptimizationResult.m_Error)
    {
        // Failed to apply the backend-specific optimizations
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // If the debug flag is set, then insert a DebugLayer after each layer
    // Doing this after applying the backend optimizations as they might have changed some layers
    if (options.m_Debug)
    {
        Optimizer::Pass(optGraph, MakeOptimizations(InsertDebugLayer()));
    }

    // Calculate the compatibility strategies for tensor handles
    OptimizationResult strategyResult = SelectTensorHandleStrategy(optGraph,
                                                                   backends,
                                                                   tensorHandleFactoryRegistry,
                                                                   options.m_ImportEnabled,
                                                                   messages);
    if (strategyResult.m_Error)
    {
        // Failed to select the tensor handle strategies
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // Based on the tensor handle strategy determined above, insert copy layers where required.
    optGraph.AddCompatibilityLayers(backends, tensorHandleFactoryRegistry);

    // Convert constants
    Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
    Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsHalfToFloat()));

    // Run backend specific optimizations (deprecated)
    for (auto&& chosenBackend : backendSettings.m_SelectedBackends)
    {
        auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
        auto backendPtr = factoryFun();
        ARMNN_ASSERT(backendPtr.get() != nullptr);

        ARMNN_NO_DEPRECATE_WARN_BEGIN
        auto backendSpecificOptimizations = backendPtr->GetOptimizations();
        ARMNN_NO_DEPRECATE_WARN_END

        if (!backendSpecificOptimizations.empty())
        {
            Optimizer::Pass(optNetObjPtr->GetGraph(), backendSpecificOptimizations);
        }
    }

    return optNet;
}

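// Returns the boolean value of the "ShapeInferenceMethod" network option if one was supplied,
// and false otherwise.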
bool Network::GetShapeInferenceMethod()
{
    if (m_NetworkOptions.size() > 0 && m_NetworkOptions[0].GetBackendId().Get() == "ShapeInferenceMethod")
    {
        return m_NetworkOptions[0].GetOption(0).GetValue().AsBool();
    }

    return false;
}

Network::Network(NetworkOptions networkOptions)
: m_NetworkOptions(networkOptions),
  m_Graph(std::make_unique<Graph>(GetShapeInferenceMethod()))
{}

telsoa014fcda012018-03-09 14:13:49 +00001176
1177Network::~Network()
1178{
1179}
1180
Jan Eilers99d9d4a2019-11-06 10:02:16 +00001181Status Network::PrintGraph()
1182{
1183 m_Graph->Print();
1184 return Status::Success;
1185}
1186
telsoa014fcda012018-03-09 14:13:49 +00001187IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
1188{
1189 return m_Graph->AddLayer<InputLayer>(id, name);
1190}
1191
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00001192IConnectableLayer* Network::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
1193 const char* name)
1194{
1195 return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
1196}
1197
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001198IConnectableLayer* Network::AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
1199 const char* name)
1200{
1201 return m_Graph->AddLayer<ComparisonLayer>(comparisonDescriptor, name);
1202}
1203
josh minor4a3c6102020-01-06 16:40:46 -06001204IConnectableLayer* Network::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
1205 const char* name)
1206{
1207 return m_Graph->AddLayer<ElementwiseUnaryLayer>(elementwiseUnaryDescriptor, name);
1208}
1209
Ryan OSheaec6c6802020-06-05 17:17:06 +01001210IConnectableLayer* Network::AddFillLayer(const FillDescriptor& fillDescriptor,
1211 const char* name)
1212{
1213 return m_Graph->AddLayer<FillLayer>(fillDescriptor, name);
1214}
1215
telsoa014fcda012018-03-09 14:13:49 +00001216IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001217 const ConstTensor& weights,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001218 const Optional<ConstTensor>& biases,
telsoa01c577f2c2018-08-31 09:22:23 +01001219 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001220{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001221 if (fullyConnectedDescriptor.m_BiasEnabled && !biases.has_value())
telsoa014fcda012018-03-09 14:13:49 +00001222 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001223 throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be empty");
telsoa014fcda012018-03-09 14:13:49 +00001224 }
1225
1226 const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);
1227
1228 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
1229
1230 if (fullyConnectedDescriptor.m_BiasEnabled)
1231 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001232 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
telsoa014fcda012018-03-09 14:13:49 +00001233 }
1234
1235 return layer;
1236}
1237
1238IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001239 const ConstTensor& weights,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001240 const Optional<ConstTensor>& biases,
telsoa01c577f2c2018-08-31 09:22:23 +01001241 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001242{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001243 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
telsoa014fcda012018-03-09 14:13:49 +00001244}
1245
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001246IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
1247 const ConstTensor& weights,
1248 const char* name)
1249{
Matteo Martincighfc598e12019-05-14 10:36:13 +01001250 Optional<ConstTensor> biases;
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001251 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
1252}
1253
telsoa014fcda012018-03-09 14:13:49 +00001254IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001255 const ConstTensor& weights,
1256 const ConstTensor& biases,
1257 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001258{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001259 Optional<ConstTensor> optionalBiases(biases);
1260 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, optionalBiases, name);
telsoa014fcda012018-03-09 14:13:49 +00001261}
1262
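// A minimal usage sketch for the fully connected overloads above (illustrative only:
// "network", "weights" and "biases" are hypothetical caller-side objects built through
// the public INetwork interface, not part of this file):
//
//     armnn::FullyConnectedDescriptor fcDesc;
//     fcDesc.m_BiasEnabled = true;
//     armnn::IConnectableLayer* fc = network->AddFullyConnectedLayer(
//         fcDesc, weights, armnn::Optional<armnn::ConstTensor>(biases), "fc1");
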
Jim Flynne242f2d2019-05-22 14:24:13 +01001263IConnectableLayer* Network::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
Jim Flynn906f9462019-05-10 13:55:21 +01001264 const char* name)
1265{
Jim Flynne242f2d2019-05-22 14:24:13 +01001266 return m_Graph->AddLayer<ConcatLayer>(concatDescriptor, name);
Jim Flynn906f9462019-05-10 13:55:21 +01001267}
1268
telsoa014fcda012018-03-09 14:13:49 +00001269IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001270 const ConstTensor& weights,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001271 const Optional<ConstTensor>& biases,
telsoa01c577f2c2018-08-31 09:22:23 +01001272 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001273{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001274 if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
telsoa014fcda012018-03-09 14:13:49 +00001275 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001276 throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be empty");
telsoa014fcda012018-03-09 14:13:49 +00001277 }
1278
1279 const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);
1280
1281 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
1282
1283 if (convolution2dDescriptor.m_BiasEnabled)
1284 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001285 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
telsoa014fcda012018-03-09 14:13:49 +00001286 }
1287
1288 return layer;
1289}
1290
1291IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001292 const ConstTensor& weights,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001293 const Optional<ConstTensor>& biases,
telsoa01c577f2c2018-08-31 09:22:23 +01001294 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001295{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001296 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
telsoa014fcda012018-03-09 14:13:49 +00001297}
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001298
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001299IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
1300 const ConstTensor& weights,
1301 const char* name)
1302{
Matteo Martincighfc598e12019-05-14 10:36:13 +01001303 Optional<ConstTensor> biases;
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001304 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
1305}
1306
telsoa014fcda012018-03-09 14:13:49 +00001307IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
telsoa01c577f2c2018-08-31 09:22:23 +01001308 const ConstTensor& weights,
1309 const ConstTensor& biases,
1310 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001311{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001312 Optional<ConstTensor> optionalBiases(biases);
1313 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
telsoa014fcda012018-03-09 14:13:49 +00001314}
1315
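// A minimal usage sketch for AddConvolution2dLayer() above (illustrative only: "network",
// "weights" and "biases" are hypothetical caller-side objects, and the stride/layout values
// are example settings rather than defaults taken from this file):
//
//     armnn::Convolution2dDescriptor convDesc;
//     convDesc.m_StrideX     = 1;
//     convDesc.m_StrideY     = 1;
//     convDesc.m_BiasEnabled = true;
//     convDesc.m_DataLayout  = armnn::DataLayout::NHWC;
//     armnn::IConnectableLayer* conv = network->AddConvolution2dLayer(
//         convDesc, weights, armnn::Optional<armnn::ConstTensor>(biases), "conv1");
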
1316IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
1317 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
1318 const ConstTensor& weights,
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001319 const Optional<ConstTensor>& biases,
telsoa014fcda012018-03-09 14:13:49 +00001320 const char* name)
1321{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001322 if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
telsoa014fcda012018-03-09 14:13:49 +00001323 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001324 throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be empty");
telsoa014fcda012018-03-09 14:13:49 +00001325 }
1326
Matteo Martincigh3d6898c2019-01-15 16:11:44 +00001327 const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);
telsoa014fcda012018-03-09 14:13:49 +00001328
1329 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
1330
1331 if (convolution2dDescriptor.m_BiasEnabled)
1332 {
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001333 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
telsoa014fcda012018-03-09 14:13:49 +00001334 }
1335
1336 return layer;
1337}
1338
Aron Virginas-Tardd6247f2019-09-19 14:31:17 +01001339IConnectableLayer* Network::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
1340 const char* name)
1341{
1342 return m_Graph->AddLayer<DepthToSpaceLayer>(depthToSpaceDescriptor, name);
1343}
1344
telsoa014fcda012018-03-09 14:13:49 +00001345IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001346 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
1347 const ConstTensor& weights,
1348 const Optional<ConstTensor>& biases,
1349 const char* name)
1350{
1351 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
1352}
1353
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001354IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
telsoa014fcda012018-03-09 14:13:49 +00001355 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
1356 const ConstTensor& weights,
1357 const char* name)
1358{
Matteo Martincighfc598e12019-05-14 10:36:13 +01001359 Optional<ConstTensor> biases;
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001360 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
telsoa014fcda012018-03-09 14:13:49 +00001361}
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001362
telsoa014fcda012018-03-09 14:13:49 +00001363IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
1364 const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
1365 const ConstTensor& weights,
1366 const ConstTensor& biases,
1367 const char* name)
1368{
Aron Virginas-Tarad402702019-02-22 17:03:44 +00001369 Optional<ConstTensor> optionalBiases(biases);
1370 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
telsoa014fcda012018-03-09 14:13:49 +00001371}
1372
Narumol Prangnawarat94dd5d82019-01-23 18:06:26 +00001373IConnectableLayer* Network::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
Narumol Prangnawarat6d302bf2019-02-04 11:46:26 +00001374 const ConstTensor& anchors, const char* name)
Narumol Prangnawarat94dd5d82019-01-23 18:06:26 +00001375{
Narumol Prangnawarat6d302bf2019-02-04 11:46:26 +00001376 const auto layer = m_Graph->AddLayer<DetectionPostProcessLayer>(descriptor, name);
1377
1378 layer->m_Anchors = std::make_unique<ScopedCpuTensorHandle>(anchors);
1379
1380 return layer;
Narumol Prangnawarat94dd5d82019-01-23 18:06:26 +00001381}
1382
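// AddDetectionPostProcessLayer() above stores the anchors tensor in a layer-owned
// ScopedCpuTensorHandle, following the same ownership pattern as the weights and biases
// in the convolution and fully connected helpers.
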
telsoa014fcda012018-03-09 14:13:49 +00001383IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
1384 const char* name)
1385{
1386 return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
1387}
1388
1389IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
1390 const char* name)
1391{
1392 return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
1393}
1394
1395IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
1396 const char* name)
1397{
1398 return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
1399}
1400
Nikhil Rajee391d52019-09-05 17:50:44 +01001401IConnectableLayer* Network::AddArgMinMaxLayer(const ArgMinMaxDescriptor& argMinMaxDescriptor,
1402 const char* name)
1403{
1404 return m_Graph->AddLayer<ArgMinMaxLayer>(argMinMaxDescriptor, name);
1405}
1406
telsoa01c577f2c2018-08-31 09:22:23 +01001407IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor&
1408                                                      normalizationDescriptor,
telsoa014fcda012018-03-09 14:13:49 +00001409 const char* name)
1410{
1411 return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
1412}
1413
Aron Virginas-Tar636ab402019-09-16 14:27:45 +01001414IConnectableLayer* Network::AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name)
1415{
1416 return m_Graph->AddLayer<SliceLayer>(sliceDescriptor, name);
1417}
1418
telsoa014fcda012018-03-09 14:13:49 +00001419IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
1420 const char* name)
1421{
1422 return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
1423}
1424
1425IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
1426 const char* name)
1427{
1428 return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
1429}
1430
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00001431IConnectableLayer* Network::AddMaximumLayer(const char* name)
1432{
1433 return m_Graph->AddLayer<MaximumLayer>(name);
1434}
1435
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00001436IConnectableLayer* Network::AddMinimumLayer(const char* name)
1437{
1438 return m_Graph->AddLayer<MinimumLayer>(name);
1439}
1440
Jim Flynne242f2d2019-05-22 14:24:13 +01001441IConnectableLayer* Network::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
Jim Flynn906f9462019-05-10 13:55:21 +01001442 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001443{
Jim Flynne242f2d2019-05-22 14:24:13 +01001444 return AddConcatLayer(mergerDescriptor, name);
telsoa014fcda012018-03-09 14:13:49 +00001445}
1446
Kevin May868eb142019-09-04 17:29:31 +01001447IConnectableLayer* Network::AddAbsLayer(const char * name)
1448{
josh minor4a3c6102020-01-06 16:40:46 -06001449 return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
Kevin May868eb142019-09-04 17:29:31 +01001450}
1451
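// AddMergerLayer() and AddAbsLayer() above are thin forwarding wrappers: Merger maps onto
// the Concat layer and Abs onto an ElementwiseUnary layer with UnaryOperation::Abs, so
// older client code continues to work against the newer layer types.
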
telsoa014fcda012018-03-09 14:13:49 +00001452IConnectableLayer* Network::AddAdditionLayer(const char* name)
1453{
1454 return m_Graph->AddLayer<AdditionLayer>(name);
1455}
1456
1457IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
1458{
1459 return m_Graph->AddLayer<MultiplicationLayer>(name);
1460}
1461
1462IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
1463{
1464 return m_Graph->AddLayer<OutputLayer>(id, name);
1465}
1466
1467IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
1468 const ConstTensor& mean,
1469 const ConstTensor& variance,
1470 const ConstTensor& beta,
1471 const ConstTensor& gamma,
1472 const char* name)
1473{
1474 const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);
1475
1476 layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
1477 layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
1478 layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
1479 layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);
1480
1481 return layer;
1482}
1483
Finn Williams2605b232020-06-10 15:53:46 +01001484IConnectableLayer* Network::AddRankLayer(const char* name)
1485{
1486 return m_Graph->AddLayer<RankLayer>(name);
1487}
1488
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001489IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
1490 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001491{
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001492 ResizeDescriptor resizeDescriptor;
David Monahan4a0c9b92020-05-30 09:48:39 +01001493 resizeDescriptor.m_Method = ResizeMethod::Bilinear;
1494 resizeDescriptor.m_DataLayout = descriptor.m_DataLayout;
1495 resizeDescriptor.m_TargetWidth = descriptor.m_TargetWidth;
1496 resizeDescriptor.m_TargetHeight = descriptor.m_TargetHeight;
1497 resizeDescriptor.m_AlignCorners = descriptor.m_AlignCorners;
1498 resizeDescriptor.m_HalfPixelCenters = descriptor.m_HalfPixelCenters;
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001499
1500 return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
telsoa014fcda012018-03-09 14:13:49 +00001501}
1502
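// AddResizeBilinearLayer() above translates the legacy ResizeBilinearDescriptor into a
// ResizeDescriptor with ResizeMethod::Bilinear and forwards it to the generic ResizeLayer,
// so both entry points produce the same layer type in the graph.
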
Teresa Charlina9075df2019-06-27 15:41:57 +01001503IConnectableLayer* Network::AddResizeLayer(const ResizeDescriptor&
1504                                            resizeDescriptor, const char* name)
1505{
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001506 return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
Teresa Charlina9075df2019-06-27 15:41:57 +01001507}
1508
Kevin Mayce5045a2019-10-02 14:07:47 +01001509IConnectableLayer* Network::AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
1510 const char* name)
1511{
1512 return m_Graph->AddLayer<InstanceNormalizationLayer>(desc, name);
1513}
1514
Matteo Martincighbcd3c852018-09-28 14:14:12 +01001515IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
1516 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001517{
Matteo Martincighbcd3c852018-09-28 14:14:12 +01001518 return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
telsoa014fcda012018-03-09 14:13:49 +00001519}
1520
Aron Virginas-Tarf982dea2019-10-11 14:07:53 +01001521IConnectableLayer* Network::AddLogSoftmaxLayer(const LogSoftmaxDescriptor& desc,
1522 const char* name)
1523{
1524 return m_Graph->AddLayer<LogSoftmaxLayer>(desc, name);
1525}
1526
telsoa014fcda012018-03-09 14:13:49 +00001527IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
1528{
telsoa01c577f2c2018-08-31 09:22:23 +01001529 auto layer = m_Graph->AddLayer<ConstantLayer>(name);
1530
1531 layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);
1532
1533 return layer;
telsoa014fcda012018-03-09 14:13:49 +00001534}
1535
telsoa01c577f2c2018-08-31 09:22:23 +01001536IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
1537 const char* name)
telsoa014fcda012018-03-09 14:13:49 +00001538{
1539 return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
1540}
1541
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001542IConnectableLayer* Network::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
1543 const char* name)
1544{
1545 return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
1546}
1547
Aron Virginas-Tar972af152019-06-11 14:14:03 +01001548IConnectableLayer* Network::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
1549 const char* name)
1550{
1551 return m_Graph->AddLayer<SpaceToDepthLayer>(spaceToDepthDescriptor, name);
1552}
1553
telsoa014fcda012018-03-09 14:13:49 +00001554IConnectableLayer* Network::AddFloorLayer(const char* name)
1555{
1556 return m_Graph->AddLayer<FloorLayer>(name);
1557}
1558
telsoa01c577f2c2018-08-31 09:22:23 +01001559IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor& descriptor,
1560 const LstmInputParams& params,
1561 const char* name)
1562{
1563 const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);
1564
1565 //Lstm Basic Parameters
1566 layer->m_BasicParameters.m_InputToForgetWeights =
1567 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
1568 layer->m_BasicParameters.m_InputToCellWeights =
1569 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
1570 layer->m_BasicParameters.m_InputToOutputWeights =
1571 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
1572 layer->m_BasicParameters.m_RecurrentToForgetWeights =
1573 std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
1574 layer->m_BasicParameters.m_RecurrentToCellWeights =
1575 std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
1576 layer->m_BasicParameters.m_RecurrentToOutputWeights =
1577 std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
1578 layer->m_BasicParameters.m_ForgetGateBias =
1579 std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
1580 layer->m_BasicParameters.m_CellBias =
1581 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
1582 layer->m_BasicParameters.m_OutputGateBias =
1583 std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));
1584
1585 //Lstm Cifg parameters
1586 if(!descriptor.m_CifgEnabled)
1587 {
1588 if(params.m_InputToInputWeights == nullptr)
1589 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01001590 throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL "
1591 "when CIFG is disabled.");
telsoa01c577f2c2018-08-31 09:22:23 +01001592 }
1593 if(params.m_RecurrentToInputWeights == nullptr)
1594 {
1595 throw InvalidArgumentException(
Jan Eilerse2062cd2020-03-30 15:07:45 +01001596 "AddLstmLayer: Recurrent To Input Weights cannot be NULL "
1597 "when CIFG is disabled.");
telsoa01c577f2c2018-08-31 09:22:23 +01001598 }
1599 if(params.m_InputGateBias == nullptr)
1600 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01001601 throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL "
1602 "when CIFG is disabled.");
telsoa01c577f2c2018-08-31 09:22:23 +01001603 }
1604 layer->m_CifgParameters.m_InputToInputWeights =
1605 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
1606 layer->m_CifgParameters.m_RecurrentToInputWeights =
1607 std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
telsoa01c577f2c2018-08-31 09:22:23 +01001608 layer->m_CifgParameters.m_InputGateBias =
1609 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
1610 }
1611
1612 //Lstm projection parameters
1613 if(descriptor.m_ProjectionEnabled)
1614 {
1615 if(params.m_ProjectionWeights == nullptr)
1616 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01001617 throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL "
1618 "when projection is enabled.");
telsoa01c577f2c2018-08-31 09:22:23 +01001619 }
1620 layer->m_ProjectionParameters.m_ProjectionWeights =
1621 std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
1622 if(params.m_ProjectionBias != nullptr)
1623 {
1624 layer->m_ProjectionParameters.m_ProjectionBias =
1625 std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
1626 }
1627 }
1628
1629 //Lstm Peephole params
1630 if(descriptor.m_PeepholeEnabled)
1631 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01001632 if(!descriptor.m_CifgEnabled)
1633 {
1634 if(params.m_CellToInputWeights == nullptr)
1635 {
1636 throw InvalidArgumentException("AddLstmLayer: Cell To Input Weights cannot be NULL "
1637 "when Peephole is enabled and CIFG disabled.");
1638 }
1639
1640 layer->m_PeepholeParameters.m_CellToInputWeights =
1641 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
1642 }
1643
telsoa01c577f2c2018-08-31 09:22:23 +01001644 if(params.m_CellToForgetWeights == nullptr)
1645 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01001646 throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL "
1647 "when Peephole is enabled.");
telsoa01c577f2c2018-08-31 09:22:23 +01001648 }
1649 if(params.m_CellToOutputWeights == nullptr)
1650 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01001651 throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL "
1652 "when Peephole is enabled.");
telsoa01c577f2c2018-08-31 09:22:23 +01001653 }
Jan Eilerse2062cd2020-03-30 15:07:45 +01001654
telsoa01c577f2c2018-08-31 09:22:23 +01001655 layer->m_PeepholeParameters.m_CellToForgetWeights =
1656 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
1657 layer->m_PeepholeParameters.m_CellToOutputWeights =
1658 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
1659 }
Jan Eilersf8c62972019-07-17 11:07:49 +01001660
1661 //Lstm Layer Normalization params
1662 if(descriptor.m_LayerNormEnabled)
1663 {
1664 if(!descriptor.m_CifgEnabled)
1665 {
1666 if(params.m_InputLayerNormWeights == nullptr)
1667 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01001668 throw InvalidArgumentException("AddLstmLayer: Input layer normalization weights cannot be NULL "
1669 "when layer normalization is enabled and CIFG disabled.");
Jan Eilersf8c62972019-07-17 11:07:49 +01001670 }
1671 layer->m_LayerNormParameters.m_InputLayerNormWeights =
1672 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
1673 }
1674
1675 if(params.m_ForgetLayerNormWeights == nullptr)
1676 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01001677 throw InvalidArgumentException("AddLstmLayer: Forget layer normalization weights cannot be NULL "
1678 "when layer normalization is enabled.");
Jan Eilersf8c62972019-07-17 11:07:49 +01001679 }
1680 if(params.m_CellLayerNormWeights == nullptr)
1681 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01001682 throw InvalidArgumentException("AddLstmLayer: Cell layer normalization weights cannot be NULL "
1683 "when layer normalization is enabled.");
Jan Eilersf8c62972019-07-17 11:07:49 +01001684 }
1685 if(params.m_OutputLayerNormWeights == nullptr)
1686 {
Jan Eilerse2062cd2020-03-30 15:07:45 +01001687 throw InvalidArgumentException("AddLstmLayer: Output layer normalization weights cannot be NULL "
1688 "when layer normalization is enabled.");
Jan Eilersf8c62972019-07-17 11:07:49 +01001689 }
1690 layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
1691 std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
1692 layer->m_LayerNormParameters.m_CellLayerNormWeights =
1693 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
1694 layer->m_LayerNormParameters.m_OutputLayerNormWeights =
1695 std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
1696 }
telsoa01c577f2c2018-08-31 09:22:23 +01001697 return layer;
1698}
1699
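// Validation performed by AddLstmLayer() above:
//   - the nine basic parameters (input/recurrent weights and the three gate biases) are
//     dereferenced unconditionally, so they must always be provided;
//   - when CIFG is disabled, input-to-input weights, recurrent-to-input weights and the
//     input gate bias become mandatory;
//   - when projection is enabled, the projection weights are mandatory and the projection
//     bias stays optional;
//   - when peephole is enabled, cell-to-forget and cell-to-output weights are mandatory,
//     and cell-to-input weights are also required if CIFG is disabled;
//   - when layer normalization is enabled, the forget/cell/output norm weights are
//     mandatory, and the input norm weights are also required if CIFG is disabled.
// Every supplied tensor is wrapped in a ScopedCpuTensorHandle owned by the layer.
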
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001700IConnectableLayer* Network::AddDivisionLayer(const char* name)
1701{
1702 return m_Graph->AddLayer<DivisionLayer>(name);
1703}
1704
David Beck19526222018-09-12 16:00:08 +01001705IConnectableLayer* Network::AddSubtractionLayer(const char* name)
1706{
1707 return m_Graph->AddLayer<SubtractionLayer>(name);
1708}
1709
narpra0132b90462018-09-13 11:07:48 +01001710IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
1711{
1712 return m_Graph->AddLayer<MeanLayer>(meanDescriptor,name);
1713}
1714
Mohamed Nour Abouelseoud5662c202018-09-24 13:30:09 +01001715IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
1716{
1717 return m_Graph->AddLayer<PadLayer>(padDescriptor,name);
1718}
1719
Derek Lambertia9cca6a2019-03-25 15:41:58 +00001720IConnectableLayer *Network::AddQuantizeLayer(const char *name)
1721{
1722 return m_Graph->AddLayer<QuantizeLayer>(name);
1723}
1724
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00001725IConnectableLayer* Network::AddDequantizeLayer(const char* name)
1726{
1727 return m_Graph->AddLayer<DequantizeLayer>(name);
1728}
1729
Conor Kennedy430b5d82018-11-14 15:28:28 +00001730IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
1731 const char* name)
1732{
1733 return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
1734}
1735
Matteo Martincigh59a950c2018-12-13 12:48:25 +00001736IConnectableLayer* Network::AddGreaterLayer(const char* name)
1737{
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001738 return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
Matteo Martincigh59a950c2018-12-13 12:48:25 +00001739}
1740
FrancisMurtagh20995952018-12-17 12:11:36 +00001741IConnectableLayer* Network::AddEqualLayer(const char* name)
1742{
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001743 return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
FrancisMurtagh20995952018-12-17 12:11:36 +00001744}
1745
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001746IConnectableLayer* Network::AddRsqrtLayer(const char * name)
1747{
josh minor4a3c6102020-01-06 16:40:46 -06001748 return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001749}
1750
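// AddGreaterLayer(), AddEqualLayer() and AddRsqrtLayer() above follow the same forwarding
// pattern as AddMergerLayer()/AddAbsLayer(): Greater and Equal map onto Comparison layers
// with the matching ComparisonOperation, and Rsqrt maps onto an ElementwiseUnary layer
// with UnaryOperation::Rsqrt.
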
narpra01b89b05f2019-01-16 09:53:09 +00001751IConnectableLayer* Network::AddGatherLayer(const char* name)
1752{
Teresa Charlin52664732020-06-29 16:27:03 +01001753 GatherDescriptor gatherDescriptor{};
1754 return AddGatherLayer(gatherDescriptor, name);
1755}
1756
1757IConnectableLayer* Network::AddGatherLayer(const GatherDescriptor& gatherDescriptor,
1758 const char* name)
1759{
1760 return m_Graph->AddLayer<GatherLayer>(gatherDescriptor, name);
narpra01b89b05f2019-01-16 09:53:09 +00001761}
1762
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01001763IConnectableLayer* Network::AddMergeLayer(const char* name)
1764{
1765 return m_Graph->AddLayer<MergeLayer>(name);
1766}
1767
Sadik Armaganeff363d2019-04-05 15:25:46 +01001768IConnectableLayer* Network::AddSwitchLayer(const char* name)
1769{
1770 return m_Graph->AddLayer<SwitchLayer>(name);
1771}
1772
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01001773IConnectableLayer* Network::AddPreluLayer(const char* name)
1774{
1775 return m_Graph->AddLayer<PreluLayer>(name);
1776}
1777
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01001778IConnectableLayer* Network::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
1779 const ConstTensor& weights,
1780 const Optional<ConstTensor>& biases,
1781 const char* name)
1782{
1783 if (descriptor.m_BiasEnabled && !biases.has_value())
1784 {
1785 throw InvalidArgumentException("AddTransposeConvolution2dLayer: Biases cannot be empty");
1786 }
1787
1788 const auto layer = m_Graph->AddLayer<TransposeConvolution2dLayer>(descriptor, name);
1789
1790 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
1791
1792 if (descriptor.m_BiasEnabled)
1793 {
1794 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
1795 }
1796
1797 return layer;
1798}
1799
Mike Kellyc9ea45a2020-02-28 18:11:58 +00001800IConnectableLayer* Network::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
1801 const char* name)
1802{
1803 return m_Graph->AddLayer<TransposeLayer>(transposeDescriptor, name);
1804}
1805
Matthew Jackson2b8c1da2019-07-04 14:59:16 +01001806IConnectableLayer* Network::AddStackLayer(const StackDescriptor& stackDescriptor,
1807 const char* name)
1808{
1809 return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
1810}
1811
Derek Lamberti013c3902019-10-21 10:46:16 +01001812
1813IConnectableLayer* Network::AddStandInLayer(const StandInDescriptor& desc,
1814 const char* name)
1815{
1816 return m_Graph->AddLayer<StandInLayer>(desc, name);
1817}
1818
James Conroyee18dc82019-07-17 11:27:46 +01001819IConnectableLayer* Network::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
1820 const char* name)
1821{
1822 const auto layer = m_Graph->AddLayer<QuantizedLstmLayer>(name);
1823
1824 // InputToX weights
1825 layer->m_QuantizedLstmParameters.m_InputToInputWeights =
Francis Murtaghbb590b42019-08-14 09:51:36 +01001826 std::make_unique<ScopedCpuTensorHandle>(params.GetInputToInputWeights());
James Conroyee18dc82019-07-17 11:27:46 +01001827 layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
Francis Murtaghbb590b42019-08-14 09:51:36 +01001828 std::make_unique<ScopedCpuTensorHandle>(params.GetInputToForgetWeights());
James Conroyee18dc82019-07-17 11:27:46 +01001829 layer->m_QuantizedLstmParameters.m_InputToCellWeights =
Francis Murtaghbb590b42019-08-14 09:51:36 +01001830 std::make_unique<ScopedCpuTensorHandle>(params.GetInputToCellWeights());
James Conroyee18dc82019-07-17 11:27:46 +01001831 layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
Francis Murtaghbb590b42019-08-14 09:51:36 +01001832 std::make_unique<ScopedCpuTensorHandle>(params.GetInputToOutputWeights());
James Conroyee18dc82019-07-17 11:27:46 +01001833
1834 // RecurrentToX weights
1835 layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
Francis Murtaghbb590b42019-08-14 09:51:36 +01001836 std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToInputWeights());
James Conroyee18dc82019-07-17 11:27:46 +01001837 layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
Francis Murtaghbb590b42019-08-14 09:51:36 +01001838 std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToForgetWeights());
James Conroyee18dc82019-07-17 11:27:46 +01001839 layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
Francis Murtaghbb590b42019-08-14 09:51:36 +01001840 std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToCellWeights());
James Conroyee18dc82019-07-17 11:27:46 +01001841 layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
Francis Murtaghbb590b42019-08-14 09:51:36 +01001842 std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToOutputWeights());
James Conroyee18dc82019-07-17 11:27:46 +01001843
1844 // Bias
1845 layer->m_QuantizedLstmParameters.m_InputGateBias =
Francis Murtaghbb590b42019-08-14 09:51:36 +01001846 std::make_unique<ScopedCpuTensorHandle>(params.GetInputGateBias());
James Conroyee18dc82019-07-17 11:27:46 +01001847 layer->m_QuantizedLstmParameters.m_ForgetGateBias =
Francis Murtaghbb590b42019-08-14 09:51:36 +01001848 std::make_unique<ScopedCpuTensorHandle>(params.GetForgetGateBias());
James Conroyee18dc82019-07-17 11:27:46 +01001849 layer->m_QuantizedLstmParameters.m_CellBias =
Francis Murtaghbb590b42019-08-14 09:51:36 +01001850 std::make_unique<ScopedCpuTensorHandle>(params.GetCellBias());
James Conroyee18dc82019-07-17 11:27:46 +01001851 layer->m_QuantizedLstmParameters.m_OutputGateBias =
Francis Murtaghbb590b42019-08-14 09:51:36 +01001852 std::make_unique<ScopedCpuTensorHandle>(params.GetOutputGateBias());
James Conroyee18dc82019-07-17 11:27:46 +01001853
1854 return layer;
1855}
1856
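// AddQuantizedLstmLayer() above copies the full set of twelve tensors (four input weights,
// four recurrent weights and four gate biases) out of QuantizedLstmInputParams via its
// accessors; unlike AddLstmLayer(), there are no descriptor-driven optional parameter
// groups to validate here.
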
James Conroy586a9aa2020-03-20 08:49:33 +00001857IConnectableLayer* Network::AddQLstmLayer(const QLstmDescriptor& descriptor,
1858 const LstmInputParams& params,
1859 const char* name)
1860{
1861 const auto layer = m_Graph->AddLayer<QLstmLayer>(descriptor, name);
1862
1863 // QLstm Basic Parameters
1864 layer->m_BasicParameters.m_InputToForgetWeights =
1865 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
1866 layer->m_BasicParameters.m_InputToCellWeights =
1867 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
1868 layer->m_BasicParameters.m_InputToOutputWeights =
1869 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
1870 layer->m_BasicParameters.m_RecurrentToForgetWeights =
1871 std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
1872 layer->m_BasicParameters.m_RecurrentToCellWeights =
1873 std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
1874 layer->m_BasicParameters.m_RecurrentToOutputWeights =
1875 std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
1876 layer->m_BasicParameters.m_ForgetGateBias =
1877 std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
1878 layer->m_BasicParameters.m_CellBias =
1879 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
1880 layer->m_BasicParameters.m_OutputGateBias =
1881 std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));
1882
1883 // QLstm Cifg parameters
1884 if(!descriptor.m_CifgEnabled)
1885 {
1886 if(params.m_InputToInputWeights == nullptr)
1887 {
1888 throw InvalidArgumentException("AddQLstmLayer: Input To Input Weights cannot be NULL");
1889 }
1890
1891 if(params.m_RecurrentToInputWeights == nullptr)
1892 {
1893 throw InvalidArgumentException(
1894 "AddQLstmLayer: Recurrent To Input Weights cannot be NULL");
1895 }
1896
1897 if(params.m_InputGateBias == nullptr)
1898 {
1899 throw InvalidArgumentException("AddQLstmLayer: Input Gate Bias cannot be NULL");
1900 }
1901
1902 layer->m_CifgParameters.m_InputToInputWeights =
1903 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
1904 layer->m_CifgParameters.m_RecurrentToInputWeights =
1905 std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
1906 layer->m_CifgParameters.m_InputGateBias =
1907 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
1908 }
1909
1910 // QLstm Projection parameters
1911 if(descriptor.m_ProjectionEnabled)
1912 {
1913 if(params.m_ProjectionWeights == nullptr)
1914 {
1915 throw InvalidArgumentException("AddQLstmLayer: Projection Weights cannot be NULL");
1916 }
1917
James Conroy586a9aa2020-03-20 08:49:33 +00001918 layer->m_ProjectionParameters.m_ProjectionWeights =
1919 std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
James Conroyed324052020-05-18 15:16:42 +01001920
1921 // Projection bias is optional even if projection is enabled
1922        if(params.m_ProjectionBias != nullptr)
1923 {
1924 layer->m_ProjectionParameters.m_ProjectionBias =
1925 std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
1926 }
1927
James Conroy586a9aa2020-03-20 08:49:33 +00001928 }
1929
1930 // QLstm Peephole params
1931 if(descriptor.m_PeepholeEnabled)
1932 {
1933 if(params.m_CellToForgetWeights == nullptr)
1934 {
1935 throw InvalidArgumentException("AddQLstmLayer: Cell To Forget Weights cannot be NULL");
1936 }
1937
1938 if(params.m_CellToOutputWeights == nullptr)
1939 {
1940 throw InvalidArgumentException("AddQLstmLayer: Cell To Output Weights cannot be NULL");
1941 }
1942
1943 if(!descriptor.m_CifgEnabled)
1944 {
1945 if(params.m_CellToInputWeights == nullptr)
1946 {
1947 throw InvalidArgumentException("AddQLstmLayer: Cell To Input Weights cannot be NULL");
1948 }
1949
1950 layer->m_PeepholeParameters.m_CellToInputWeights =
1951 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
1952 }
1953
1954 layer->m_PeepholeParameters.m_CellToForgetWeights =
1955 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
1956 layer->m_PeepholeParameters.m_CellToOutputWeights =
1957 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
1958 }
1959
1960 // QLstm Layer Normalization params
1961 if(descriptor.m_LayerNormEnabled)
1962 {
1963 if(params.m_ForgetLayerNormWeights == nullptr)
1964 {
1965 throw InvalidArgumentException("AddQLstmLayer: Forget layer normalization weights cannot be NULL");
1966 }
1967
1968 if(params.m_CellLayerNormWeights == nullptr)
1969 {
1970 throw InvalidArgumentException("AddQLstmLayer: Cell layer normalization weights cannot be NULL");
1971 }
1972
1973 if(params.m_OutputLayerNormWeights == nullptr)
1974 {
1975 throw InvalidArgumentException("AddQLstmLayer: Output layer normalization weights cannot be NULL");
1976 }
1977
1978 if(!descriptor.m_CifgEnabled)
1979 {
1980 if(params.m_InputLayerNormWeights == nullptr)
1981 {
1982 throw InvalidArgumentException("AddQLstmLayer: Input layer normalization weights cannot be NULL");
1983 }
1984
1985 layer->m_LayerNormParameters.m_InputLayerNormWeights =
1986 std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
1987 }
1988
1989 layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
1990 std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
1991 layer->m_LayerNormParameters.m_CellLayerNormWeights =
1992 std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
1993 layer->m_LayerNormParameters.m_OutputLayerNormWeights =
1994 std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
1995 }
1996 return layer;
1997}
1998
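// AddQLstmLayer() above mirrors the structure of AddLstmLayer(): the mandatory basic
// parameters are copied first, then the CIFG, projection, peephole and layer-normalization
// groups are validated and copied only when the corresponding QLstmDescriptor flags are
// set. The projection bias remains optional even when projection is enabled.
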
Mike Kelly8c1701a2019-02-11 17:01:27 +00001999void Network::Accept(ILayerVisitor& visitor) const
2000{
2001 for (auto layer : GetGraph())
2002 {
2003 layer->Accept(visitor);
2004    }
2005}
2006
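// Network::Accept() above walks the graph in its stored iteration order and calls Accept()
// on every layer, so the supplied ILayerVisitor sees each layer exactly once.
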
telsoa014fcda012018-03-09 14:13:49 +00002007OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
Sadik Armagan3184c902020-03-18 10:57:30 +00002008 : m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid())
telsoa014fcda012018-03-09 14:13:49 +00002009{
2010}
2011
Sadik Armagan045f6be2020-09-10 13:37:32 +01002012OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions)
2013 : m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid()), m_ModelOptions(modelOptions)
2014{
2015}
2016
telsoa014fcda012018-03-09 14:13:49 +00002017OptimizedNetwork::~OptimizedNetwork()
2018{
2019}
2020
2021} // namespace armnn