Optimize the calling of IsLayerSupported().

  * Done as part of 22.11/23.02 innovation days.
  * IsLayerSupported() is called in model prepare (delegate, android-nn-driver and shim/support_library)
    and again in ArmNN once model optimization is performed.
  * From the first call to IsLayerSupported(), we already know whether each layer is supported
    and which backend supports it.
  * The solution is to set the BackendId of the IConnectableLayer when IsLayerSupported() is first
    called (a condensed call-site sketch follows this list).
  * In the Optimize() function we then check whether the backend is set. If so, we do not call
    IsLayerSupported() again.
  * If a layer that is supported later gets optimized, the BackendId of the newly optimized layer
    is reset to "Unknown" and IsLayerSupported() is called again on that layer.
  * Includes bug fix IVGCVSW-7213 for Android Mean FP16 CpuAcc tests. Also related to bug IVGCVSW-7211.
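
  A sketch of the resulting call-site pattern, condensed from the Pooling2d hunk in the diff
  below. Other operators follow the same shape; validate-only call sites that never add a
  layer pass a temporary armnn::BackendId() instead of a named variable:

      // The macro's new out-parameter records the first backend that
      // reports the layer as supported.
      bool isSupported = false;
      armnn::BackendId setBackend;
      FORWARD_LAYER_SUPPORT_FUNC("POOLING_2D",
                                 tfLiteContext,
                                 IsPooling2dSupported,
                                 delegateData.m_Backends,
                                 isSupported,
                                 setBackend,
                                 inputTensorInfo,
                                 outputTensorInfo,
                                 descriptor);

      // Pin the chosen backend on the layer so that Optimize() can skip
      // the second IsLayerSupported() pass for it.
      armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor);
      poolingLayer->SetBackendId(setBackend);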

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I7a7820d0cdb079ffb5a3a2e0c44e252f652df53b
diff --git a/InstallationViaAptRepository.md b/InstallationViaAptRepository.md
index 037e5cc..fac714f 100644
--- a/InstallationViaAptRepository.md
+++ b/InstallationViaAptRepository.md
@@ -117,7 +117,7 @@
  sudo apt-get install -y python3-pyarmnn armnn-latest-all
  # Verify installation via python:
  python3 -c "import pyarmnn as ann;print(ann.GetVersion())"
- # Returns '{ARMNN_MAJOR_VERSION}.0.0' e.g. 31.0.0
+ # Returns '{ARMNN_MAJOR_VERSION}.0.0' e.g. 32.0.0
 ```
 This will install PyArmNN and the three backends for Neon (CpuAcc), OpenCL (GpuAcc) and our Reference Backend.
 It will also install their dependencies including the arm-compute-library package along with the Tensorflow Lite Parser
diff --git a/delegate/src/Activation.hpp b/delegate/src/Activation.hpp
index 0071873..3560bfd 100644
--- a/delegate/src/Activation.hpp
+++ b/delegate/src/Activation.hpp
@@ -29,6 +29,7 @@
                                    IsActivationSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   armnn::BackendId(),
                                    inputInfo,
                                    outputInfo,
                                    activationDesc);
diff --git a/delegate/src/ArgMinMax.hpp b/delegate/src/ArgMinMax.hpp
index 057dc8b..dd28807 100644
--- a/delegate/src/ArgMinMax.hpp
+++ b/delegate/src/ArgMinMax.hpp
@@ -91,6 +91,7 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("ARGMINMAX",
@@ -98,6 +99,7 @@
                                    IsArgMinMaxSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outInfo,
                                    desc);
@@ -111,6 +113,7 @@
 
     // Add an ArgMinMax layer
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddArgMinMaxLayer(desc);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/BatchMatMul.hpp b/delegate/src/BatchMatMul.hpp
index 391301e..3b884a0 100644
--- a/delegate/src/BatchMatMul.hpp
+++ b/delegate/src/BatchMatMul.hpp
@@ -68,6 +68,7 @@
 
         // Check if supported
         bool isSupported = false;
+        armnn::BackendId setBackend;
         auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
         {
             FORWARD_LAYER_SUPPORT_FUNC("BATCH_MATMUL",
@@ -75,6 +76,7 @@
                                        IsBatchMatMulSupported,
                                        delegateData.m_Backends,
                                        isSupported,
+                                       setBackend,
                                        armnnLHSInputTensorInfo,
                                        armnnRHSInputTensorInfo,
                                        outputTensorInfo,
@@ -88,6 +90,7 @@
         }
 
         armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchMatMulLayer(descriptor);
+        layer->SetBackendId(setBackend);
         ARMNN_ASSERT(layer != nullptr);
 
         armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -96,4 +99,4 @@
 
         return kTfLiteOk;
     }
-} // namespace armnnDelegate
\ No newline at end of file
+} // namespace armnnDelegate
diff --git a/delegate/src/BatchSpace.hpp b/delegate/src/BatchSpace.hpp
index 847d6f1..903fe37 100644
--- a/delegate/src/BatchSpace.hpp
+++ b/delegate/src/BatchSpace.hpp
@@ -72,6 +72,7 @@
 
     // Check if supported
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("BATCH_TO_SPACE_ND",
@@ -79,6 +80,7 @@
                                    IsBatchToSpaceNdSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outputTensorInfo,
                                    descriptor);
@@ -95,6 +97,7 @@
 
     // Add a BatchToSpace layer
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchToSpaceNdLayer(descriptor);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -163,6 +166,7 @@
 
     // Check if supported
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("SPACE_TO_BATCH_ND",
@@ -170,6 +174,7 @@
                                    IsSpaceToBatchNdSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outputTensorInfo,
                                    descriptor);
@@ -186,6 +191,7 @@
 
     // Add a SpaceToBatch layer
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToBatchNdLayer(descriptor);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/Comparison.hpp b/delegate/src/Comparison.hpp
index 8bf53c7..ee121e3 100644
--- a/delegate/src/Comparison.hpp
+++ b/delegate/src/Comparison.hpp
@@ -88,7 +88,7 @@
 
     armnn::ComparisonDescriptor descriptor(comparisonOperation);
     bool isSupported = false;
-
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("COMPARISON",
@@ -96,6 +96,7 @@
                                    IsComparisonSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo0,
                                    inputTensorInfo1,
                                    outputTensorInfo,
@@ -109,6 +110,7 @@
     }
 
     armnn::IConnectableLayer* comparisonLayer = delegateData.m_Network->AddComparisonLayer(descriptor);
+    comparisonLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(comparisonLayer != nullptr);
 
     armnn::IOutputSlot& outputSlot = comparisonLayer->GetOutputSlot(0);
diff --git a/delegate/src/Control.hpp b/delegate/src/Control.hpp
index f04245b..02426a5 100644
--- a/delegate/src/Control.hpp
+++ b/delegate/src/Control.hpp
@@ -119,6 +119,7 @@
 
     // Check if supported
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("CONCATENATION",
@@ -126,6 +127,7 @@
                                    IsConcatSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputConstTensorInfos,
                                    outputTensorInfo,
                                    concatDescriptor);
@@ -139,6 +141,7 @@
 
     // Setup layer and connect.
     armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor);
+    concatenationLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(concatenationLayer != nullptr);
 
     // Connect the Constant Inputs
@@ -258,6 +261,7 @@
 
     // Check if supported
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("MEAN",
@@ -265,6 +269,7 @@
                                    IsMeanSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outputTensorInfo,
                                    desc);
@@ -278,6 +283,7 @@
 
     // Setup layer and connect.
     armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc);
+    meanLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(meanLayer != nullptr);
 
     armnn::IOutputSlot& outputSlot = meanLayer->GetOutputSlot(0);
diff --git a/delegate/src/Convolution.hpp b/delegate/src/Convolution.hpp
index 93da4c8..e307bb9 100644
--- a/delegate/src/Convolution.hpp
+++ b/delegate/src/Convolution.hpp
@@ -144,6 +144,7 @@
     CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                 descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
 
+    armnn::BackendId setBackend;
     if (!delegateData.m_Network)
     {
         bool isSupported = false;
@@ -152,6 +153,7 @@
                                    IsConvolution2dSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outputTensorInfo,
                                    descriptor,
@@ -162,6 +164,7 @@
 
     // Set up filter and biases
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
+    layer->SetBackendId(setBackend);
 
     if(tflite::IsConstantTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]]))
     {
@@ -300,6 +303,7 @@
     // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
     // support for the operator
     // If supported, VisitConvolutionOperator will be called again to add the layer to the network as seen below.
+    armnn::BackendId setBackend;
     if (!delegateData.m_Network)
     {
         bool isSupported = false;
@@ -308,6 +312,7 @@
                                    IsConvolution3dSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outputTensorInfo,
                                    descriptor,
@@ -317,6 +322,7 @@
     }
 
     armnn::IConnectableLayer* layer =  delegateData.m_Network->AddConvolution3dLayer(descriptor);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     // Add a constant layer for weights and biases if inputs are constant,
@@ -497,6 +503,7 @@
         biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
     }
 
+    armnn::BackendId setBackend;
     if (!delegateData.m_Network)
     {
         bool isSupported = false;
@@ -505,6 +512,7 @@
                                    IsDepthwiseConvolutionSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outputTensorInfo,
                                    descriptor,
@@ -514,6 +522,7 @@
     }
 
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
+    layer->SetBackendId(setBackend);
 
     if(tflite::IsConstantTensor(&tfLiteFilterTensor))
     {
@@ -699,6 +708,7 @@
     auto filterTensor = CreateConstTensor(&tfLiteFilterTensor,
                                           filterTensorInfo,
                                           armnn::Optional<armnn::PermutationVector&>());
+    armnn::BackendId setBackend;
     if (!delegateData.m_Network)
     {
         bool isSupported = false;
@@ -707,6 +717,7 @@
                                    IsTransposeConvolution2dSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outputTensorInfo,
                                    descriptor,
@@ -718,6 +729,7 @@
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
                                                                                              filterTensor,
                                                                                              armnn::EmptyOptional());
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 58d8048..850b279 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -25,7 +25,7 @@
 {
 
 // Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
-#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, ...) \
+#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
 try \
 { \
     for (auto&& backendId : backends) \
@@ -38,6 +38,7 @@
                 layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
             if (supported) \
             { \
+                setBackend = backendId; \
                 break; \
             } \
             else \
@@ -224,11 +225,13 @@
     armnn::ReshapeDescriptor reshapeDescriptor;
     reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
     bool isSupported = false;
+    armnn::BackendId setBackend;
     FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
                                tfLiteContext,
                                IsReshapeSupported,
                                delegateData.m_Backends,
                                isSupported,
+                               setBackend,
                                smallInfo,
                                reshapedInfo,
                                reshapeDescriptor);
@@ -240,6 +243,7 @@
     ARMNN_ASSERT(delegateData.m_Network != nullptr);
     // Add Reshape layer
     armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
+    reshapeLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(reshapeLayer != nullptr);
     reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
 
@@ -331,11 +335,13 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
                                tfLiteContext,
                                IsActivationSupported,
                                data.m_Backends,
                                isSupported,
+                               setBackend,
                                prevLayer->GetOutputSlot(0).GetTensorInfo(),
                                activationOutputInfo,
                                activationDesc);
@@ -344,6 +350,7 @@
         return kTfLiteError;
     }
     armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
+    activationLayer->SetBackendId(setBackend);
 
     ARMNN_ASSERT(activationLayer != nullptr);
     activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);
@@ -566,11 +573,13 @@
 {
     IgnoreUnused(layer);
     bool isSupported = false;
+    armnn::BackendId setBackend;
     FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
                                tfLiteContext,
                                IsConstantSupported,
                                data.m_Backends,
                                isSupported,
+                               setBackend,
                                constTensorInfo);
     if (!isSupported)
     {
@@ -581,6 +590,7 @@
                                            constTensorInfo,
                                            armnn::Optional<armnn::PermutationVector&>());
     armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
+    constantLayer->SetBackendId(setBackend);
     armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(constTensorInfo);
 
@@ -615,11 +625,13 @@
         {
             armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
             bool isSupported = false;
+            armnn::BackendId setBackend;
             FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
                                        tfLiteContext,
                                        IsConstantSupported,
                                        delegateData.m_Backends,
                                        isSupported,
+                                       setBackend,
                                        inputTensorInfo);
             if (!isSupported)
             {
@@ -629,6 +641,7 @@
                                                    inputTensorInfo,
                                                    armnn::Optional<armnn::PermutationVector&>());
             armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
+            constantLayer->SetBackendId(setBackend);
             armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
             outputSlot.SetTensorInfo(inputTensorInfo);
 
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
index 6e81db4..caf0262 100644
--- a/delegate/src/ElementwiseBinary.hpp
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -32,6 +32,7 @@
                                    IsAdditionSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   armnn::BackendId(),
                                    inputInfo1,
                                    inputInfo2,
                                    outputTensorInfo);
@@ -56,6 +57,7 @@
                                    IsDivisionSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   armnn::BackendId(),
                                    inputInfo1,
                                    inputInfo2,
                                    outputTensorInfo);
@@ -108,6 +110,7 @@
                                    IsMaximumSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   armnn::BackendId(),
                                    inputInfo1,
                                    inputInfo2,
                                    outputTensorInfo);
@@ -131,6 +134,7 @@
                                    IsMinimumSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   armnn::BackendId(),
                                    inputInfo1,
                                    inputInfo2,
                                    outputTensorInfo);
@@ -154,6 +158,7 @@
                                    IsMultiplicationSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   armnn::BackendId(),
                                    inputInfo1,
                                    inputInfo2,
                                    outputTensorInfo);
@@ -177,6 +182,7 @@
                                    IsSubtractionSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   armnn::BackendId(),
                                    inputInfo1,
                                    inputInfo2,
                                    outputTensorInfo);
diff --git a/delegate/src/ElementwiseUnary.hpp b/delegate/src/ElementwiseUnary.hpp
index 79d7f82..947e531 100644
--- a/delegate/src/ElementwiseUnary.hpp
+++ b/delegate/src/ElementwiseUnary.hpp
@@ -51,7 +51,7 @@
 
     armnn::ElementwiseUnaryDescriptor descriptor(unaryOperation);
     bool isSupported = false;
-
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("ELEMENTWISE_UNARY",
@@ -59,6 +59,7 @@
                                    IsElementwiseUnarySupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outputTensorInfo,
                                    descriptor);
@@ -71,6 +72,7 @@
     }
 
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddElementwiseUnaryLayer(descriptor);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/Fill.hpp b/delegate/src/Fill.hpp
index dc30e53..e79133e 100644
--- a/delegate/src/Fill.hpp
+++ b/delegate/src/Fill.hpp
@@ -72,6 +72,7 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("FILL",
@@ -79,6 +80,7 @@
                                    IsFillSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outInfo,
                                    descriptor);
@@ -91,6 +93,7 @@
     }
 
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddFillLayer(descriptor);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/FullyConnected.hpp b/delegate/src/FullyConnected.hpp
index 6677ab9..a2960e2 100644
--- a/delegate/src/FullyConnected.hpp
+++ b/delegate/src/FullyConnected.hpp
@@ -110,6 +110,7 @@
     descriptor.m_ConstantWeights       = isConstantWeights;
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("FULLY_CONNECTED",
@@ -117,6 +118,7 @@
                                    IsFullyConnectedSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    reshapedTensorInfo,
                                    outputTensorInfo,
                                    weightsTensorInfo,
@@ -131,6 +133,7 @@
     }
 
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddFullyConnectedLayer(descriptor);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     // Add a constant layer for weights and biases if inputs are constant.
diff --git a/delegate/src/Gather.hpp b/delegate/src/Gather.hpp
index 616de7e..9e98966 100644
--- a/delegate/src/Gather.hpp
+++ b/delegate/src/Gather.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -69,6 +69,7 @@
         return kTfLiteError;
     }
 
+    armnn::BackendId setBackend;
     if (!delegateData.m_Network)
     {
         // Check if supported
@@ -78,6 +79,7 @@
                                    IsGatherSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    indicesTensorInfo,
                                    outputTensorInfo,
@@ -86,6 +88,7 @@
     }
 
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherLayer(gatherDescriptor);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
diff --git a/delegate/src/GatherNd.hpp b/delegate/src/GatherNd.hpp
index 1e12c5c..f2192f7 100644
--- a/delegate/src/GatherNd.hpp
+++ b/delegate/src/GatherNd.hpp
@@ -46,6 +46,7 @@
     const armnn::TensorInfo& indicesTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteIndicesTensor);
     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
+    armnn::BackendId setBackend;
     if (!delegateData.m_Network)
     {
         // Check if supported
@@ -55,6 +56,7 @@
                                    IsGatherNdSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    indicesTensorInfo,
                                    outputTensorInfo);
@@ -62,6 +64,7 @@
     }
 
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherNdLayer();
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
diff --git a/delegate/src/LogicalBinary.hpp b/delegate/src/LogicalBinary.hpp
index 562b5d3..b6a8f5d 100644
--- a/delegate/src/LogicalBinary.hpp
+++ b/delegate/src/LogicalBinary.hpp
@@ -52,6 +52,7 @@
 
     // Check if supported
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("LOGICAL_BINARY",
@@ -59,6 +60,7 @@
                                    IsLogicalBinarySupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo0,
                                    inputTensorInfo1,
                                    outputTensorInfo,
@@ -72,6 +74,7 @@
     }
 
     armnn::IConnectableLayer* logicalBinaryLayer = delegateData.m_Network->AddLogicalBinaryLayer(desc);
+    logicalBinaryLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(logicalBinaryLayer != nullptr);
 
     armnn::IOutputSlot& outputSlot = logicalBinaryLayer->GetOutputSlot(0);
diff --git a/delegate/src/Lstm.hpp b/delegate/src/Lstm.hpp
index 253cd21..8c1f877 100644
--- a/delegate/src/Lstm.hpp
+++ b/delegate/src/Lstm.hpp
@@ -216,6 +216,7 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("LSTM",
@@ -223,6 +224,7 @@
                                    IsLstmSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outputStateInInfo,
                                    cellStateInInfo,
@@ -241,6 +243,7 @@
     }
 
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddLstmLayer(desc, params);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     layer->GetOutputSlot(0).SetTensorInfo(scratchBufferTensorInfo);
diff --git a/delegate/src/MultiLayerFacade.hpp b/delegate/src/MultiLayerFacade.hpp
index 2fdfc70..aa00be8 100644
--- a/delegate/src/MultiLayerFacade.hpp
+++ b/delegate/src/MultiLayerFacade.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021,2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -117,6 +117,8 @@
 
     virtual const armnn::BaseDescriptor& GetParameters() const override { return m_NullDescriptor; }
 
+    void SetBackendId(const armnn::BackendId& id) override {}
+
 protected:
     /// Retrieve the handles to the constant values stored by the layer.
     /// @return A vector of the constant tensors stored by this layer.
diff --git a/delegate/src/Normalization.hpp b/delegate/src/Normalization.hpp
index 0933552..d0db43e 100644
--- a/delegate/src/Normalization.hpp
+++ b/delegate/src/Normalization.hpp
@@ -42,6 +42,7 @@
     descriptor.m_DataLayout = armnn::DataLayout::NHWC;
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("L2_NORMALIZATION",
@@ -49,6 +50,7 @@
                                    IsL2NormalizationSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outInfo,
                                    descriptor);
@@ -62,6 +64,7 @@
 
     // Add a L2Normalization layer
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddL2NormalizationLayer(descriptor);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -112,6 +115,7 @@
     descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("NORMALIZATION",
@@ -119,6 +123,7 @@
                                    IsNormalizationSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outInfo,
                                    descriptor);
@@ -132,6 +137,7 @@
 
     // Add a Normalization layer
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddNormalizationLayer(descriptor);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/Pack.hpp b/delegate/src/Pack.hpp
index 458c174..57d3b46 100644
--- a/delegate/src/Pack.hpp
+++ b/delegate/src/Pack.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021,2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -74,6 +74,7 @@
 
     // Check if supported
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("STACK",
@@ -81,6 +82,7 @@
                                    IsStackSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputConstTensorInfos,
                                    outputTensorInfo,
                                    desc);
@@ -97,6 +99,7 @@
 
     // The TfLite Pack operator is equivalent to the ArmNN Stack operator
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddStackLayer(desc);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     // Connect the Constant Inputs
diff --git a/delegate/src/Pad.hpp b/delegate/src/Pad.hpp
index daedede..2ecf2a0 100644
--- a/delegate/src/Pad.hpp
+++ b/delegate/src/Pad.hpp
@@ -149,6 +149,7 @@
         }
     }
 
+    armnn::BackendId setBackend;
     if (!delegateData.m_Network)
     {
         bool isSupported = false;
@@ -157,6 +158,7 @@
                                    IsPadSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outputTensorInfo,
                                    descriptor);
@@ -165,6 +167,7 @@
     }
 
     armnn::IConnectableLayer* padLayer = delegateData.m_Network->AddPadLayer(descriptor);
+    padLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(padLayer != nullptr);
 
     armnn::IOutputSlot& outputSlot = padLayer->GetOutputSlot(0);
diff --git a/delegate/src/Pooling.hpp b/delegate/src/Pooling.hpp
index dfe90cb..8241567 100644
--- a/delegate/src/Pooling.hpp
+++ b/delegate/src/Pooling.hpp
@@ -84,6 +84,7 @@
                 descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("POOLING_2D",
@@ -91,6 +92,7 @@
                                    IsPooling2dSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outputTensorInfo,
                                    descriptor);
@@ -103,6 +105,7 @@
     }
 
     armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor);
+    poolingLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(poolingLayer != nullptr);
 
     armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
@@ -215,12 +218,14 @@
 
     // Validate the output info.
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported) {
         FORWARD_LAYER_SUPPORT_FUNC("POOLING_3D",
                                    tfLiteContext,
                                    IsPooling3dSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outputTensorInfo,
                                    descriptor);
@@ -234,6 +239,7 @@
 
     // Create the Layer
     armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor);
+    poolingLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(poolingLayer != nullptr);
 
     // Create and set output slots
diff --git a/delegate/src/Prelu.hpp b/delegate/src/Prelu.hpp
index 398abaf..06e74ed 100644
--- a/delegate/src/Prelu.hpp
+++ b/delegate/src/Prelu.hpp
@@ -29,6 +29,7 @@
                                    IsPreluSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   armnn::BackendId(),
                                    inputInfo,
                                    alphaInfo,
                                    outputInfo);
diff --git a/delegate/src/Quantization.hpp b/delegate/src/Quantization.hpp
index 7871375..64f57de 100644
--- a/delegate/src/Quantization.hpp
+++ b/delegate/src/Quantization.hpp
@@ -51,6 +51,7 @@
     UpdateConstantTensorOutputs(inputTensorInfo, outputTensorInfo);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("DEQUANTIZE",
@@ -58,6 +59,7 @@
                                    IsDequantizeSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outputTensorInfo);
     };
@@ -69,6 +71,7 @@
     }
 
     armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer();
+    dequantizeLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(dequantizeLayer != nullptr);
 
     armnn::IOutputSlot& outputSlot = dequantizeLayer->GetOutputSlot(0);
@@ -130,6 +133,7 @@
     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("QUANTIZE",
@@ -137,6 +141,7 @@
                                    IsQuantizeSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outputTensorInfo);
     };
@@ -148,6 +153,7 @@
     }
 
     armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer();
+    quantizeLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(quantizeLayer != nullptr);
 
     armnn::IOutputSlot& outputSlot = quantizeLayer->GetOutputSlot(0);
diff --git a/delegate/src/Redefine.hpp b/delegate/src/Redefine.hpp
index cdae719..8f9a4e4 100644
--- a/delegate/src/Redefine.hpp
+++ b/delegate/src/Redefine.hpp
@@ -44,6 +44,7 @@
     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("CAST",
@@ -51,6 +52,7 @@
                                    IsCastSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outInfo);
     };
@@ -66,6 +68,7 @@
 
     // Add a Cast layer
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddCastLayer();
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -210,6 +213,7 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
@@ -217,6 +221,7 @@
                                    IsReshapeSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo0,
                                    outInfo,
                                    reshapeDesc);
@@ -229,6 +234,7 @@
     }
 
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/Reduce.hpp b/delegate/src/Reduce.hpp
index 79f2f52..3f4c118 100644
--- a/delegate/src/Reduce.hpp
+++ b/delegate/src/Reduce.hpp
@@ -105,6 +105,7 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("REDUCE",
@@ -112,6 +113,7 @@
                                    IsReduceSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outInfo,
                                    desc);
@@ -125,6 +127,7 @@
 
     // Add an Reduce layer
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddReduceLayer(desc);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/Resize.hpp b/delegate/src/Resize.hpp
index b59006c..0cb15d3 100644
--- a/delegate/src/Resize.hpp
+++ b/delegate/src/Resize.hpp
@@ -33,6 +33,7 @@
                                IsResizeSupported,
                                delegateData.m_Backends,
                                isSupported,
+                               armnn::BackendId(),
                                inputInfo,
                                outputInfo,
                                descriptor);
diff --git a/delegate/src/Shape.hpp b/delegate/src/Shape.hpp
index 284dc9f..625e6a8 100644
--- a/delegate/src/Shape.hpp
+++ b/delegate/src/Shape.hpp
@@ -52,6 +52,7 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("SHAPE",
@@ -59,6 +60,7 @@
                                    IsShapeSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outInfo);
     };
@@ -74,6 +76,7 @@
 
     // Add a Shape layer
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddShapeLayer();
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/SharedFunctions.cpp b/delegate/src/SharedFunctions.cpp
index ad5d310..22f578a 100644
--- a/delegate/src/SharedFunctions.cpp
+++ b/delegate/src/SharedFunctions.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021,2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -29,6 +29,7 @@
                                    IsFloorSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   armnn::BackendId(),
                                    inputTensorInfo,
                                    outInfo);
     };
diff --git a/delegate/src/Slice.hpp b/delegate/src/Slice.hpp
index cbcb45e..d5712ae 100644
--- a/delegate/src/Slice.hpp
+++ b/delegate/src/Slice.hpp
@@ -99,6 +99,7 @@
     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("SLICE",
@@ -106,6 +107,7 @@
                                    IsSliceSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outInfo,
                                    descriptor);
@@ -117,8 +119,9 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    // Add a StridedSlice layer
+    // Add a Slice layer
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddSliceLayer(descriptor);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -129,3 +132,4 @@
 }
 
 } // namespace armnnDelegate
+
diff --git a/delegate/src/Softmax.hpp b/delegate/src/Softmax.hpp
index efc1cba..738f542 100644
--- a/delegate/src/Softmax.hpp
+++ b/delegate/src/Softmax.hpp
@@ -27,6 +27,7 @@
                                IsSoftmaxSupported,
                                delegateData.m_Backends,
                                isSupported,
+                               armnn::BackendId(),
                                inputInfo,
                                outputTensorInfo,
                                descriptor);
@@ -46,6 +47,7 @@
                                IsLogSoftmaxSupported,
                                delegateData.m_Backends,
                                isSupported,
+                               armnn::BackendId(),
                                inputInfo,
                                outputTensorInfo,
                                descriptor);
diff --git a/delegate/src/SpaceDepth.hpp b/delegate/src/SpaceDepth.hpp
index 593d0e7..2172d86 100644
--- a/delegate/src/SpaceDepth.hpp
+++ b/delegate/src/SpaceDepth.hpp
@@ -43,6 +43,7 @@
     descriptor.m_BlockSize = params->block_size;
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("SPACE_TO_DEPTH",
@@ -50,6 +51,7 @@
                                    IsSpaceToDepthSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outInfo,
                                    descriptor);
@@ -63,6 +65,7 @@
 
     // Add a SpaceToDepth layer
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToDepthLayer(descriptor);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -102,6 +105,7 @@
     descriptor.m_BlockSize = params->block_size;
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("DEPTH_TO_SPACE",
@@ -109,6 +113,7 @@
                                    IsDepthToSpaceSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outInfo,
                                    descriptor);
@@ -122,6 +127,7 @@
 
     // Add a DepthToSpace layer
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthToSpaceLayer(descriptor);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/Split.hpp b/delegate/src/Split.hpp
index a535585..5c094b4 100644
--- a/delegate/src/Split.hpp
+++ b/delegate/src/Split.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -113,6 +113,7 @@
         splitDescriptor.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
     }
 
+    armnn::BackendId setBackend;
     if (!delegateData.m_Network)
     {
         // Check if supported
@@ -122,6 +123,7 @@
                                    IsSplitterSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outputTensorInfos,
                                    splitDescriptor);
@@ -129,6 +131,7 @@
     }
 
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
@@ -305,6 +308,7 @@
         accumSplit += splitSize;
     }
 
+    armnn::BackendId setBackend;
     if (!delegateData.m_Network)
     {
         // Check if supported
@@ -314,6 +318,7 @@
                                    IsSplitterSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outputTensorInfos,
                                    splitDescriptor);
@@ -321,6 +326,7 @@
     }
 
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
diff --git a/delegate/src/StridedSlice.hpp b/delegate/src/StridedSlice.hpp
index 515c819..d2c4d5d 100644
--- a/delegate/src/StridedSlice.hpp
+++ b/delegate/src/StridedSlice.hpp
@@ -114,6 +114,7 @@
     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("STRIDED_SLICE",
@@ -121,6 +122,7 @@
                                    IsStridedSliceSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outInfo,
                                    descriptor);
@@ -134,6 +136,7 @@
 
     // Add a StridedSlice layer
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddStridedSliceLayer(descriptor);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -144,3 +147,4 @@
 }
 
 } // namespace armnnDelegate
+
diff --git a/delegate/src/Transpose.hpp b/delegate/src/Transpose.hpp
index 80bb122..15c5310 100644
--- a/delegate/src/Transpose.hpp
+++ b/delegate/src/Transpose.hpp
@@ -71,7 +71,7 @@
         static_cast<armnn::PermutationVector::SizeType>(numEl)));
 
     bool isSupported = false;
-
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("TRANSPOSE",
@@ -79,6 +79,7 @@
                                    IsTransposeSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo0,
                                    outputTensorInfo,
                                    descriptor);
@@ -91,6 +92,7 @@
     }
 
     armnn::IConnectableLayer* transposeLayer = delegateData.m_Network->AddTransposeLayer(descriptor);
+    transposeLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(transposeLayer != nullptr);
     ARMNN_ASSERT(transposeLayer->GetNumInputSlots() == 1);     // permutation vector given to descriptor object
 
diff --git a/delegate/src/UnidirectionalSequenceLstm.hpp b/delegate/src/UnidirectionalSequenceLstm.hpp
index 64ed778..9408397 100644
--- a/delegate/src/UnidirectionalSequenceLstm.hpp
+++ b/delegate/src/UnidirectionalSequenceLstm.hpp
@@ -253,6 +253,7 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC("UNIDIRECTIONAL_SEQUENCE_LSTM",
@@ -260,6 +261,7 @@
                                    IsUnidirectionalSequenceLstmSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputTensorInfo,
                                    outputStateInInfo,
                                    cellStateInInfo,
@@ -277,6 +279,7 @@
     }
 
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddUnidirectionalSequenceLstmLayer(desc, params);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     layer->GetOutputSlot(0).SetTensorInfo(outputStateOutTensorInfo);
diff --git a/delegate/src/Unpack.hpp b/delegate/src/Unpack.hpp
index aaea005..ad541ad 100644
--- a/delegate/src/Unpack.hpp
+++ b/delegate/src/Unpack.hpp
@@ -133,6 +133,7 @@
     std::vector<std::reference_wrapper<armnn::TensorInfo>> splitterOutputTensorInfos(splitterOutputs.begin(),
                                                                                      splitterOutputs.end());
 
+    armnn::BackendId setBackendSplit;
     if (!delegateData.m_Network)
     {
         // Check if splitter is supported
@@ -142,6 +143,7 @@
                                    IsSplitterSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackendSplit,
                                    inputTensorInfo,
                                    splitterOutputTensorInfos,
                                    splitDesc);
@@ -153,6 +155,7 @@
     armnn::ReshapeDescriptor reshapeDescriptor;
     reshapeDescriptor.m_TargetShape = outputTensorInfos[0].get().GetShape();
 
+    armnn::BackendId setBackendReshape;
     if (!delegateData.m_Network)
     {
         bool isSupported = false;
@@ -161,6 +164,7 @@
                                    IsReshapeSupported,
                                    delegateData.m_Backends,
                                    isSupported,
+                                   setBackendReshape,
                                    splitterOutputTensorInfos[0],
                                    outputTensorInfos[0],
                                    reshapeDescriptor);
@@ -171,6 +175,7 @@
 
     armnn::IConnectableLayer* splitterLayer = delegateData.m_Network->AddSplitterLayer(splitDesc,
                                                                                        splitterLayerName.c_str());
+    splitterLayer->SetBackendId(setBackendSplit);
     ARMNN_ASSERT(splitterLayer != nullptr);
 
     for (unsigned int k = 0; k < splitterLayer->GetNumOutputSlots(); ++k)
@@ -187,6 +192,7 @@
         std::string reshapeLayerName("Unpack Reshape");
         armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor,
                                                                                          reshapeLayerName.c_str());
+        reshapeLayer->SetBackendId(setBackendReshape);
         ARMNN_ASSERT(reshapeLayer != nullptr);
 
         splitterLayer->GetOutputSlot(outputIndex).SetTensorInfo(splitterOutputTensorInfos[outputIndex]);
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 2bb9ad9..c9c8a04 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -98,7 +98,11 @@
     /// Apply a visitor to this layer
     virtual void ExecuteStrategy(IStrategy& strategy) const = 0;
 
-    /// Provide a hint for the optimizer as to which backend to prefer for this layer
+    /// Provide a hint for the optimizer as to which backend to prefer for this layer.
+    /// Providing a BackendSelectionHint does not guarantee that the hinted backend supports the layer.
+    /// If IsLayerSupported() returns false for the hinted backend, the optimizer falls back to calling
+    /// IsLayerSupported() on the BackendPreferences vector. Use SetBackendId() instead when a backend
+    /// is known to support the layer (i.e. IsLayerSupported() has returned true for that backend).
     virtual void BackendSelectionHint(Optional<BackendId> backend) = 0;
 
     /// Returns the armnn::LayerType of this layer
@@ -111,6 +115,12 @@
     /// the BaseDescriptor IsNull function is invoked.
     virtual const BaseDescriptor& GetParameters() const = 0;
 
+    /// Set the backend of the IConnectableLayer.
+    /// Calling SetBackendId() guarantees that the given backend supports this layer
+    /// (i.e. IsLayerSupported() has returned true for that backend). If support
+    /// cannot be guaranteed, use BackendSelectionHint() instead.
+    virtual void SetBackendId(const BackendId& id) = 0;
+
     using ConstantTensors = std::vector<std::reference_wrapper<std::shared_ptr<ConstTensorHandle>>>;
 
     // Returns ConstantTensors of this Layer if it has any, otherwise returns empty vector.
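
The intended division of labour between the two calls can be illustrated with a short sketch (not part of this patch; the network, layer name and backend names are illustrative):

```cpp
#include <armnn/INetwork.hpp>
#include <armnn/Descriptors.hpp>

void BackendHintVersusGuarantee()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::ReLu;
    armnn::IConnectableLayer* layer = network->AddActivationLayer(desc, "relu");

    // Hint only: the optimizer still validates the layer and falls back to the
    // BackendPreferences vector if GpuAcc rejects it.
    layer->BackendSelectionHint(armnn::Optional<armnn::BackendId>(armnn::BackendId("GpuAcc")));

    // Guarantee: call only after IsLayerSupported() has returned true for CpuAcc;
    // Optimize() can then skip the repeated IsLayerSupported() call for this layer.
    layer->SetBackendId(armnn::BackendId("CpuAcc"));
}
```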
diff --git a/include/armnn/Version.hpp b/include/armnn/Version.hpp
index 7fdb20a..aedd4a0 100644
--- a/include/armnn/Version.hpp
+++ b/include/armnn/Version.hpp
@@ -10,7 +10,7 @@
 #define STRINGIFY_MACRO(s) #s
 
 // ArmNN version components
-#define ARMNN_MAJOR_VERSION 31
+#define ARMNN_MAJOR_VERSION 32
 #define ARMNN_MINOR_VERSION 0
 #define ARMNN_PATCH_VERSION 0
 
diff --git a/python/pyarmnn/README.md b/python/pyarmnn/README.md
index 0d2c511..3962e11 100644
--- a/python/pyarmnn/README.md
+++ b/python/pyarmnn/README.md
@@ -69,8 +69,8 @@
 
 Binary package is platform dependent, the name of the package will indicate the platform it was built for, e.g.:
 
-* Linux x86 64bit machine: pyarmnn-31.0.0-cp36-cp36m-*linux_x86_64*.whl
-* Linux Aarch 64 bit machine: pyarmnn-31.0.0-cp36-cp36m-*linux_aarch64*.whl
+* Linux x86 64bit machine: pyarmnn-32.0.0-cp36-cp36m-*linux_x86_64*.whl
+* Linux Aarch 64 bit machine: pyarmnn-32.0.0-cp36-cp36m-*linux_aarch64*.whl
 
 The source package is platform independent but installation involves compilation of Arm NN python extension. You will need to have g++ compatible with C++ 14 standard and a python development library installed on the build machine.
 
@@ -110,7 +110,7 @@
 You can also verify it by running the following and getting output similar to below:
 ```bash
 $ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'31.0.0'
+'32.0.0'
 ```
 
 # PyArmNN API overview
diff --git a/python/pyarmnn/examples/image_classification/README.md b/python/pyarmnn/examples/image_classification/README.md
index 04718e2..fa0f89e 100644
--- a/python/pyarmnn/examples/image_classification/README.md
+++ b/python/pyarmnn/examples/image_classification/README.md
@@ -20,7 +20,7 @@
 You can also verify it by running the following and getting output similar to below:
 ```bash
 $ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'31.0.0'
+'32.0.0'
 ```
 
 ##### Dependencies
diff --git a/python/pyarmnn/examples/keyword_spotting/README.md b/python/pyarmnn/examples/keyword_spotting/README.md
index 98158e6..905cae1 100644
--- a/python/pyarmnn/examples/keyword_spotting/README.md
+++ b/python/pyarmnn/examples/keyword_spotting/README.md
@@ -18,7 +18,7 @@
 
 ```bash
 $ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'31.0.0'
+'32.0.0'
 ```
 
 ### Dependencies
diff --git a/python/pyarmnn/examples/object_detection/README.md b/python/pyarmnn/examples/object_detection/README.md
index 73bafb6..3c4b100 100644
--- a/python/pyarmnn/examples/object_detection/README.md
+++ b/python/pyarmnn/examples/object_detection/README.md
@@ -54,7 +54,7 @@
 You can also verify it by running the following and getting output similar to below:
 ```bash
 $ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'31.0.0'
+'32.0.0'
 ```
 
 ##### Dependencies
diff --git a/python/pyarmnn/examples/speech_recognition/README.md b/python/pyarmnn/examples/speech_recognition/README.md
index e442aad..af0196f 100644
--- a/python/pyarmnn/examples/speech_recognition/README.md
+++ b/python/pyarmnn/examples/speech_recognition/README.md
@@ -18,7 +18,7 @@
 
 ```bash
 $ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'31.0.0'
+'32.0.0'
 ```
 
 ### Dependencies
diff --git a/python/pyarmnn/src/pyarmnn/_version.py b/python/pyarmnn/src/pyarmnn/_version.py
index d68a893..4501f88 100644
--- a/python/pyarmnn/src/pyarmnn/_version.py
+++ b/python/pyarmnn/src/pyarmnn/_version.py
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: MIT
 import os
 
-version_info = (31, 0, 0)
+version_info = (32, 0, 0)
 
 __dev_version_env = os.getenv("PYARMNN_DEV_VER", "")
 
@@ -24,7 +24,7 @@
     """Compares expected Arm NN version and Arm NN version used to build the package.
 
     Args:
-        installed_armnn_version (str): Arm NN version used to generate the package (e.g. 31.0.0)
+        installed_armnn_version (str): Arm NN version used to generate the package (e.g. 32.0.0)
         expected_armnn_version (str): Expected Arm NN version
 
     Returns:
diff --git a/python/pyarmnn/test/test_setup.py b/python/pyarmnn/test/test_setup.py
index ada96cc..8275a53 100644
--- a/python/pyarmnn/test/test_setup.py
+++ b/python/pyarmnn/test/test_setup.py
@@ -87,15 +87,15 @@
 
 
 def test_armnn_version():
-    check_armnn_version('31.0.0', '31.0.0')
+    check_armnn_version('32.0.0', '32.0.0')
 
 
 def test_incorrect_armnn_version():
     with pytest.raises(AssertionError) as err:
-        check_armnn_version('31.0.0', '31.1.0')
+        check_armnn_version('32.0.0', '32.1.0')
 
-    assert 'Expected ArmNN version is 31.1.0 but installed ArmNN version is 31.0.0' in str(err.value)
+    assert 'Expected ArmNN version is 32.1.0 but installed ArmNN version is 32.0.0' in str(err.value)
 
 
 def test_armnn_version_patch_does_not_matter():
-    check_armnn_version('31.0.0', '31.0.1')
+    check_armnn_version('32.0.0', '32.0.1')
diff --git a/python/pyarmnn/test/test_version.py b/python/pyarmnn/test/test_version.py
index f68adff..145fc3b 100644
--- a/python/pyarmnn/test/test_version.py
+++ b/python/pyarmnn/test/test_version.py
@@ -18,7 +18,7 @@
 
     importlib.reload(v)
 
-    assert "31.0.0.dev1" == v.__version__
+    assert "32.0.0.dev1" == v.__version__
 
     del os.environ["PYARMNN_DEV_VER"]
     del v
@@ -30,7 +30,7 @@
 
     importlib.reload(v)
 
-    assert "31.0.0" == v.__arm_ml_version__
+    assert "32.0.0" == v.__arm_ml_version__
 
     del os.environ["PYARMNN_DEV_VER"]
     del v
diff --git a/shim/sl/canonical/ConversionUtils.cpp b/shim/sl/canonical/ConversionUtils.cpp
index f48af32..b648548 100644
--- a/shim/sl/canonical/ConversionUtils.cpp
+++ b/shim/sl/canonical/ConversionUtils.cpp
@@ -256,6 +256,7 @@
                                            IsInputSupported,
                                            data.m_Backends,
                                            isInputSupported,
+                                           armnn::BackendId(),
                                            operandTensorInfo);
 
                 if (!isInputSupported)
@@ -292,10 +293,12 @@
                 if (tensorPin.IsValid())
                 {
                     bool isSupported = false;
+                    armnn::BackendId setBackend;
                     FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                                IsConstantSupported,
                                                data.m_Backends,
                                                isSupported,
+                                               setBackend,
                                                tensorPin.GetConstTensor().GetInfo());
                     if (!isSupported)
                     {
@@ -304,6 +307,7 @@
 
                     armnn::IConnectableLayer* constantLayer =
                         data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
+                    constantLayer->SetBackendId(setBackend);
                     armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                     armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
                     outputSlot.SetTensorInfo(constantTensorInfo);
@@ -455,13 +459,14 @@
     }
 
     bool isSupported = false;
-
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsPooling2dSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    desc);
@@ -483,6 +488,7 @@
     }
 
     armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
+    pooling2dLayer->SetBackendId(setBackend);
     if (!pooling2dLayer)
     {
         return Fail("%s: AddPooling2dLayer failed", __func__);
@@ -547,12 +553,14 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsReduceSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    descriptor);
@@ -573,6 +581,7 @@
     }
 
     armnn::IConnectableLayer* const layer = data.m_Network->AddReduceLayer(descriptor);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -601,13 +610,14 @@
     const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
 
     bool isSupported = false;
-
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsActivationSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input.GetTensorInfo(),
                                    outInfo,
                                    activationDesc);
@@ -628,6 +638,7 @@
     }
 
     armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -925,10 +936,12 @@
         }
 
         bool isSupported = false;
+        armnn::BackendId setBackend;
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsActivationSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    prevLayer->GetOutputSlot(0).GetTensorInfo(),
                                    tensorInfo,
                                    activationDesc);
@@ -938,6 +951,7 @@
         }
 
         activationLayer = data.m_Network->AddActivationLayer(activationDesc);
+        activationLayer->SetBackendId(setBackend);
 
         prevLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
         activationLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
diff --git a/shim/sl/canonical/ConversionUtils.hpp b/shim/sl/canonical/ConversionUtils.hpp
index beee00d..91a8e30 100644
--- a/shim/sl/canonical/ConversionUtils.hpp
+++ b/shim/sl/canonical/ConversionUtils.hpp
@@ -150,7 +150,7 @@
 
 // Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
-// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
-#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
+// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, supported, setBackend, a, b, c)
+#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, setBackend, ...) \
 try \
 { \
     for (auto&& backendId : backends) \
@@ -163,6 +163,7 @@
                 layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
             if (supported) \
             { \
+                setBackend = backendId; \
                 break; \
             } \
             else \
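
For readability, the first-match behaviour the macro implements can be sketched as a plain function (an illustrative simplification, not code from this patch; the Cast layer check stands in for any Is*Supported call):

```cpp
#include <armnn/BackendHelper.hpp>
#include <armnn/BackendId.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <string>
#include <vector>

bool IsSupportedOnAnyBackend(const std::vector<armnn::BackendId>& backends,
                             const armnn::TensorInfo& input,
                             const armnn::TensorInfo& output,
                             armnn::BackendId& setBackend)
{
    for (const auto& backendId : backends)
    {
        auto layerSupport = armnn::GetILayerSupportByBackendId(backendId);
        std::string reasonIfUnsupported;
        if (layerSupport.IsBackendRegistered() &&
            layerSupport.IsCastSupported(input, output,
                                         armnn::Optional<std::string&>(reasonIfUnsupported)))
        {
            // Record the first backend that accepts the layer; the converter then
            // stores it on the layer with SetBackendId() so Optimize() can skip
            // the second IsLayerSupported() pass.
            setBackend = backendId;
            return true;
        }
    }
    return false;
}
```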
@@ -322,10 +323,12 @@
     armnn::ReshapeDescriptor reshapeDescriptor;
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                IsReshapeSupported,
                                data.m_Backends,
                                isSupported,
+                               setBackend,
                                smallInfo,
                                reshapedInfo,
                                reshapeDescriptor);
@@ -336,6 +339,7 @@
 
     ARMNN_ASSERT(data.m_Network != nullptr);
     armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
+    reshapeLayer.SetBackendId(setBackend);
 
     if (input0IsSmaller)
     {
@@ -527,7 +531,8 @@
 inline void SwizzleInputs(armnn::INetwork& network,
                    std::vector<LayerInputHandle>& inputs,
                    std::vector<armnn::TensorShape>& inputShapes,
-                   const armnn::PermutationVector& mapping)
+                   const armnn::PermutationVector& mapping,
+                   std::vector<armnn::BackendId>& setBackends)
 {
     if (!mapping.IsEqual(IdentityPermutation4D))
     {
@@ -536,6 +541,7 @@
         {
             // add swizzle layer
             armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
+            swizzleLayer.SetBackendId(setBackends[i]);
             auto& outputSlot = swizzleLayer.GetOutputSlot(0);
             auto& outputInfo = outputSlot.GetTensorInfo();
             // replace inputs with the swizzled ones
@@ -553,6 +559,7 @@
     // If we have a IdentityPermutation4D or IdentityPermutation3D then we are not permuting
     if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
     {
+        std::vector<armnn::BackendId> setBackendsVec;
         armnn::TensorInfo outputTransposeInfo;
         size_t nInputs = inputs.size();
         for (size_t i=0; i<nInputs; ++i)
@@ -563,20 +570,23 @@
             outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);
 
             bool isSupported = false;
+            armnn::BackendId setBackend;
             FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                        IsTransposeSupported,
                                        data.m_Backends,
                                        isSupported,
+                                       setBackend,
                                        inputs[i].GetTensorInfo(),
                                        outputTransposeInfo,
                                        transposeDesc);
+            setBackendsVec.push_back(setBackend);
             if (!isSupported)
             {
                 return false;
             }
 
         }
-        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
+        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping, setBackendsVec);
     }
     return true;
 }
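
Since each input is validated separately, one BackendId is recorded per input and the whole vector is passed to SwizzleInputs(), which assigns setBackends[i] to the transpose layer it creates for inputs[i]. A minimal sketch of that pairing (an illustrative helper, not part of the patch):

```cpp
#include <armnn/BackendId.hpp>
#include <armnn/INetwork.hpp>
#include <cassert>
#include <vector>

void AssignRecordedBackends(std::vector<armnn::IConnectableLayer*>& swizzleLayers,
                            const std::vector<armnn::BackendId>& backends)
{
    // backends[i] is the backend that accepted the transpose of input i during
    // validation, so the matching swizzle layer is pinned to it up front and
    // needs no second IsLayerSupported() check in Optimize().
    assert(swizzleLayers.size() == backends.size());
    for (size_t i = 0; i < swizzleLayers.size(); ++i)
    {
        swizzleLayers[i]->SetBackendId(backends[i]);
    }
}
```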
diff --git a/shim/sl/canonical/Converter.cpp b/shim/sl/canonical/Converter.cpp
index 8885faf..be052a6 100644
--- a/shim/sl/canonical/Converter.cpp
+++ b/shim/sl/canonical/Converter.cpp
@@ -206,12 +206,14 @@
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsAdditionSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo0,
                                    inputInfo1,
                                    outputInfo);
@@ -232,6 +234,7 @@
     }
 
     armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
+    startLayer->SetBackendId(setBackend);
 
     bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
     if (!isReshapeSupported)
@@ -290,13 +293,14 @@
     descriptor.m_Axis     = axis;
 
     bool isSupported = false;
-
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsArgMinMaxSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo0,
                                    outputInfo,
                                    descriptor);
@@ -317,6 +321,7 @@
     }
 
     armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
 
     input0.Connect(layer->GetInputSlot(0));
@@ -391,12 +396,14 @@
     batchMatMulDesc.m_TransposeY = GetOptionalBool(operation, 3, model, data);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsBatchMatMulSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo0,
                                    inputInfo1,
                                    outputInfo,
@@ -419,6 +426,7 @@
     }
 
     armnn::IConnectableLayer* const layer = data.m_Network->AddBatchMatMulLayer(batchMatMulDesc);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input0.Connect(layer->GetInputSlot(0));
     input1.Connect(layer->GetInputSlot(1));
@@ -482,12 +490,14 @@
     batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsBatchToSpaceNdSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    batchToSpaceNdDesc);
@@ -509,6 +519,7 @@
     }
 
     armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -536,13 +547,14 @@
     const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
     bool isSupported = false;
-
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsCastSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo);
     };
@@ -562,6 +574,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddCastLayer();
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -597,12 +610,14 @@
     ComparisonDescriptor descriptor(comparisonOperation);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsComparisonSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo0,
                                    inputInfo1,
                                    outputInfo,
@@ -624,6 +639,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddComparisonLayer(descriptor);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
 
     bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
@@ -735,10 +751,12 @@
             reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
 
             bool isSupported = false;
+            armnn::BackendId setBackendReshape;
             FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                        IsReshapeSupported,
                                        data.m_Backends,
                                        isSupported,
+                                       setBackendReshape,
                                        operandInputHandle.GetTensorInfo(),
                                        reshapeInfo,
                                        reshapeDescriptor);
@@ -748,6 +766,7 @@
                 return false;
             }
             armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);
+            newReshape.SetBackendId(setBackendReshape);
 
             // Point to the reshape operation rather then the input operation
             operandShape       = reshapeInfo.GetShape();
@@ -850,9 +869,16 @@
                    [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
 
     bool isSupported  = false;
+    armnn::BackendId setBackendConcat;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported){
-        FORWARD_LAYER_SUPPORT_FUNC(__func__, IsConcatSupported, data.m_Backends, isSupported, inputTensorInfos,
-                                   outputInfo, concatDescriptor);
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsConcatSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   setBackendConcat,
+                                   inputTensorInfos,
+                                   outputInfo,
+                                   concatDescriptor);
     };
 
     if (!isDynamicTensor)
@@ -870,6 +896,7 @@
     }
 
     armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
+    layer->SetBackendId(setBackendConcat);
     assert(layer != nullptr);
     layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
     // Connect inputs to the layer
@@ -889,10 +916,12 @@
         armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
                                                                                  permutationPair.second);
         isSupported = false;
+        armnn::BackendId setBackendTranspose;
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsTransposeSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackendTranspose,
                                    inputTransposeInfo,
                                    outputTransposeInfo,
                                    transposeDesc);
@@ -903,6 +932,7 @@
         // Add permutation layer and connect the output to it, the permutation becomes the output layer
         armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
                                                                      permutationPair.second);
+        deswizzleLayer.SetBackendId(setBackendTranspose);
         layer = &deswizzleLayer;
 
         return true;
@@ -945,11 +975,13 @@
         armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();
 
         isSupported = false;
+        armnn::BackendId setBackendReshape2;
         auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported){
             FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                        IsReshapeSupported,
                                        data.m_Backends,
                                        isSupported,
+                                       setBackendReshape2,
                                        concatInfo,
                                        afterConcatInfo,
                                        reshapeDescriptor);
@@ -969,6 +1001,7 @@
             return false;
         }
         layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
+        layer->SetBackendId(setBackendReshape2);
         return SetupAndTrackLayerOutputSlot(operation,
                                             0,
                                             *layer,
@@ -1109,11 +1142,13 @@
         VLOG(DRIVER) << "Converter::ConvertConv2d(): Weights and Biases are as INPUTS.";
     }
 
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsConvolution2dSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    desc,
@@ -1141,6 +1176,7 @@
     }
 
     armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc);
+    startLayer->SetBackendId(setBackend);
 
     if (!startLayer)
     {
@@ -1194,12 +1230,14 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsDepthToSpaceSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    descriptor);
@@ -1220,6 +1258,7 @@
     }
 
     armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -1355,11 +1394,13 @@
         VLOG(DRIVER) << "Converter::ConvertDepthwiseConv2d(): Weights and Biases are as INPUTS.";
     }
 
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsDepthwiseConvolutionSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    desc,
@@ -1387,6 +1428,7 @@
     }
 
     armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc);
+    startLayer->SetBackendId(setBackend);
 
     if (!startLayer)
     {
@@ -1428,12 +1470,14 @@
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsDequantizeSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo);
     };
@@ -1453,6 +1497,7 @@
     }
 
     armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -1488,12 +1533,14 @@
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsDivisionSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input0.GetTensorInfo(),
                                    input1.GetTensorInfo(),
                                    outputInfo);
@@ -1514,6 +1561,7 @@
     }
 
     armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
+    startLayer->SetBackendId(setBackend);
 
     bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
     if (!isReshapeSupported)
@@ -1552,13 +1600,14 @@
     ElementwiseUnaryDescriptor descriptor(unaryOperation);
 
     bool isSupported = false;
-
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsElementwiseUnarySupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    descriptor);
@@ -1579,6 +1628,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddElementwiseUnaryLayer(descriptor);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -1672,12 +1722,14 @@
     reshapeDescriptor.m_TargetShape = targetShape;
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsReshapeSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input.GetTensorInfo(),
                                    outputInfo,
                                    reshapeDescriptor);
@@ -1702,6 +1754,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -1769,10 +1822,12 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                IsFillSupported,
                                data.m_Backends,
                                isSupported,
+                               setBackend,
                                inputInfo,
                                outputInfo,
                                descriptor);
@@ -1782,6 +1837,7 @@
     }
 
     IConnectableLayer* const layer = data.m_Network->AddFillLayer(descriptor);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -1806,12 +1862,14 @@
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsFloorSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input.GetTensorInfo(),
                                    outputInfo);
     };
@@ -1831,6 +1889,7 @@
     }
 
     armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -1912,6 +1971,7 @@
     desc.m_ConstantWeights       = IsOperandConstant(*weightsOperand);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
@@ -1928,6 +1988,7 @@
                                    IsFullyConnectedSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    reshapedInfo,
                                    outputInfo,
                                    weightsInfo,
@@ -1951,6 +2012,7 @@
 
     // Add FullyConnected layer. Weights and bias will be connected as constant layers or non const inputs.
     armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc);
+    startLayer->SetBackendId(setBackend);
 
     if (inputInfo.GetNumDimensions() > 2U)
     {
@@ -2022,12 +2084,14 @@
     desc.m_Axis = axis;
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsGatherSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input.GetTensorInfo(),
                                    indices.GetTensorInfo(),
                                    outputInfo,
@@ -2049,6 +2113,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddGatherLayer(desc);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
     indices.Connect(layer->GetInputSlot(1));
@@ -2209,10 +2274,12 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackendSplit;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                IsSplitterSupported,
                                data.m_Backends,
                                isSupported,
+                               setBackendSplit,
                                inputInfo,
                                splitterOutputInfos,
                                splitterDesc);
@@ -2222,6 +2289,7 @@
     }
 
     IConnectableLayer* splitterLayer = data.m_Network->AddSplitterLayer(splitterDesc);
+    splitterLayer->SetBackendId(setBackendSplit);
     if (!splitterLayer)
     {
         return Fail("%s: Failed to add SplitterLayer", __func__);
@@ -2305,12 +2373,14 @@
                                                               biasesDataOffset));
 
             isSupported = false;
+            armnn::BackendId setBackendConv;
             auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
             {
                 FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                            IsConvolution2dSupported,
                                            data.m_Backends,
                                            isSupported,
+                                           setBackendConv,
                                            groupInputInfo,
                                            outputInfo,
                                            desc,
@@ -2336,6 +2406,8 @@
             IConnectableLayer* biasLayer = data.m_Network->AddConstantLayer(groupBiases);
             IConnectableLayer* convLayer = data.m_Network->AddConvolution2dLayer(desc);
 
+            convLayer->SetBackendId(setBackendConv);
+
             if (!convLayer)
             {
                 return Fail("%s: AddConvolution2dLayer failed", __func__);
@@ -2384,10 +2456,12 @@
     }
 
     isSupported = false;
+    armnn::BackendId setBackendConcat;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                IsConcatSupported,
                                data.m_Backends,
                                isSupported,
+                               setBackendConcat,
                                std::vector<const TensorInfo*>(numGroups * channelMultiplier, &groupOutputInfo),
                                outputInfo,
                                concatDescriptor);
@@ -2398,6 +2472,7 @@
     }
 
     IConnectableLayer* concatLayer = data.m_Network->AddConcatLayer(concatDescriptor);
+    concatLayer->SetBackendId(setBackendConcat);
     if (!concatLayer)
     {
         return Fail("%s: AddConcatLayer failed", __func__);
@@ -2488,12 +2563,14 @@
     desc.m_DataLayout = OptionalDataLayout(operation, 4, model, data);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsInstanceNormalizationSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input.GetTensorInfo(),
                                    outputInfo,
                                    desc);
@@ -2514,6 +2591,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddInstanceNormalizationLayer(desc);
+    layer->SetBackendId(setBackend);
     input.Connect(layer->GetInputSlot(0));
 
     return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -2552,12 +2630,14 @@
     desc.m_DataLayout = armnn::DataLayout::NHWC;
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsL2NormalizationSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    desc);
@@ -2578,6 +2658,7 @@
     }
 
     armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -2640,12 +2721,14 @@
     descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsNormalizationSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    descriptor);
@@ -2667,6 +2750,7 @@
 
 
     armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -2703,13 +2787,14 @@
     LogicalBinaryDescriptor descriptor(logicalOperation);
 
     bool isSupported = false;
-
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsLogicalBinarySupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo0,
                                    inputInfo1,
                                    outputInfo,
@@ -2731,6 +2816,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddLogicalBinaryLayer(descriptor);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
 
     bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
@@ -2808,12 +2894,14 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsLogSoftmaxSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input.GetTensorInfo(),
                                    outputInfo,
                                    descriptor);
@@ -2834,6 +2922,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddLogSoftmaxLayer(descriptor);
+    layer->SetBackendId(setBackend);
     if (!layer)
     {
         return Fail("%s: AddLogSoftmaxLayer() returned nullptr", __func__);
@@ -3193,12 +3282,14 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsLstmSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputStateInInfo,
                                    cellStateInInfo,
@@ -3231,6 +3322,7 @@
 
     // Add the layer
     IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
+    layer->SetBackendId(setBackend);
 
     input.Connect(layer->GetInputSlot(0));
     outputStateIn.Connect(layer->GetInputSlot(1));
@@ -3283,12 +3375,14 @@
     const TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsMaximumSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input0.GetTensorInfo(),
                                    input1.GetTensorInfo(),
                                    outInfo);
@@ -3309,6 +3403,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
     if (!isReshapeSupported)
@@ -3370,12 +3465,14 @@
     descriptor.m_KeepDims = keepDims > 0;
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsMeanSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    descriptor);
@@ -3396,6 +3493,7 @@
     }
 
     armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -3423,12 +3521,14 @@
     const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsMinimumSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input0.GetTensorInfo(),
                                    input1.GetTensorInfo(),
                                    outputInfo);
@@ -3449,6 +3549,7 @@
     }
 
     IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
     if (!isReshapeSupported)
@@ -3489,12 +3590,14 @@
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsMultiplicationSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input0.GetTensorInfo(),
                                    input1.GetTensorInfo(),
                                    outputInfo);
@@ -3515,6 +3618,7 @@
     }
 
     armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
+    startLayer->SetBackendId(setBackend);
 
     bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
     if (!isReshapeSupported)
@@ -3564,12 +3668,14 @@
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsPadSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    descriptor);
@@ -3590,6 +3696,7 @@
     }
 
     armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -3666,12 +3773,14 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsPadSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    descriptor);
@@ -3692,6 +3801,7 @@
     }
 
     IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -3722,12 +3832,14 @@
     const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsPreluSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    alphaInfo,
                                    outputInfo);
@@ -3748,6 +3860,7 @@
     }
 
     IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
+    layer->SetBackendId(setBackend);
 
     if (!layer)
     {
@@ -3782,12 +3895,14 @@
     const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsQuantizeSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input.GetTensorInfo(),
                                    outputInfo);
     };
@@ -3807,6 +3922,7 @@
     }
 
     IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -4259,12 +4375,14 @@
 
     // Check if the layer is supported
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& cellStateOutInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsQLstmSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputStatePrevTimeStepInfo,
                                    cellStatePrevTimeStepInfo,
@@ -4295,6 +4413,7 @@
 
     // Add the layer
     IConnectableLayer* layer = data.m_Network->AddQLstmLayer(desc, params, "QLstm");
+    layer->SetBackendId(setBackend);
 
     input.Connect(layer->GetInputSlot(0));
     outputStatePrevTimeStep.Connect(layer->GetInputSlot(1));
@@ -4502,12 +4621,14 @@
     paramsInfo.m_OutputGateBias           = &(params.m_OutputGateBias->GetInfo());
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsQuantizedLstmSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    previousCellStateInInfo,
                                    previousOutputInInfo,
@@ -4534,6 +4655,7 @@
     }
 
     IConnectableLayer* const layer = data.m_Network->AddQuantizedLstmLayer(params, "QuantizedLstm");
+    layer->SetBackendId(setBackend);
     input.Connect(layer->GetInputSlot(0));
     previousCellStateIn.Connect(layer->GetInputSlot(1));
     previousOutputIn.Connect(layer->GetInputSlot(2));
@@ -4580,10 +4702,12 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                IsRankSupported,
                                data.m_Backends,
                                isSupported,
+                               setBackend,
                                input.GetTensorInfo(),
                                outInfo);
     if (!isSupported)
@@ -4592,6 +4716,7 @@
     }
 
     armnn::IConnectableLayer* layer = data.m_Network->AddRankLayer();
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -4620,13 +4745,14 @@
     const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
 
     bool isSupported = false;
-
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsActivationSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input.GetTensorInfo(),
                                    outInfo,
                                    desc);
@@ -4647,6 +4773,7 @@
     }
 
     armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(desc);
+    layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -4724,12 +4851,14 @@
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsReshapeSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input.GetTensorInfo(),
                                    outputInfo,
                                    reshapeDescriptor);
@@ -4750,6 +4879,7 @@
     }
 
     armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -4868,12 +4998,14 @@
     descriptor.m_HalfPixelCenters = GetOptionalBool(operation, 5, model, data);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsResizeSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    descriptor);
@@ -4894,6 +5026,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -4982,12 +5115,14 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo &outputInfo, bool &isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsSpaceToBatchNdSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    descriptor);
@@ -5007,6 +5142,7 @@
     }
 
     armnn::IConnectableLayer *const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -5050,12 +5186,14 @@
     desc.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsSpaceToDepthSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    desc);
@@ -5076,6 +5214,7 @@
     }
 
     IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -5134,12 +5273,14 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsSoftmaxSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input.GetTensorInfo(),
                                    outputInfo,
                                    desc);
@@ -5160,6 +5301,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -5195,12 +5337,14 @@
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsSubtractionSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input0.GetTensorInfo(),
                                    input1.GetTensorInfo(),
                                    outputInfo);
@@ -5221,6 +5365,7 @@
     }
 
     armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
+    startLayer->SetBackendId(setBackend);
 
     bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
     if (!isReshapeSupported)
@@ -5413,12 +5558,14 @@
     Optional<TensorInfo> biases(bias.GetInfo());
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsTransposeConvolution2dSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    desc,
@@ -5441,6 +5588,7 @@
 
     IConnectableLayer* startLayer =
         data.m_Network->AddTransposeConvolution2dLayer(desc, weights, Optional<ConstTensor>(bias));
+    startLayer->SetBackendId(setBackend);
     if (!startLayer)
     {
         return Fail("%s: AddTransposeConvolution2dLayer failed", __func__);
@@ -5526,10 +5674,12 @@
     reshapeDesc.m_TargetShape = outputInfo.GetShape();
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                IsReshapeSupported,
                                data.m_Backends,
                                isSupported,
+                               setBackend,
                                inputInfo,
                                outputInfo,
                                reshapeDesc);
@@ -5540,6 +5690,7 @@
     }
 
     armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -5623,12 +5774,14 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsStridedSliceSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    descriptor);
@@ -5672,6 +5825,7 @@
     }
 
     armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -5726,12 +5880,14 @@
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsTransposeSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    transposeDesc);
@@ -5752,6 +5908,7 @@
     }
 
     armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
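Editor's note: every converter hunk above follows the same pattern — declare a local `armnn::BackendId setBackend;`, thread it through `FORWARD_LAYER_SUPPORT_FUNC` as an out-parameter, then stamp it onto the freshly created layer with `SetBackendId()`. Below is a minimal sketch of what the extended macro effectively does; the helper name `CheckLayerSupport` and the callback signature are illustrative assumptions, not the actual macro definition.

```cpp
#include <armnn/BackendId.hpp>
#include <string>
#include <vector>

// Hypothetical helper approximating the extended FORWARD_LAYER_SUPPORT_FUNC:
// try each candidate backend in preference order and record the first one
// that reports the layer as supported.
template <typename IsSupportedFunc, typename... Args>
bool CheckLayerSupport(const std::vector<armnn::BackendId>& backends,
                       armnn::BackendId& setBackend, // out: backend that accepted the layer
                       IsSupportedFunc&& isSupported,
                       Args&&... args)
{
    for (const auto& backend : backends)
    {
        std::string reasonIfUnsupported;
        if (isSupported(backend, args..., reasonIfUnsupported))
        {
            // Remember the winner so the caller can tag the layer via
            // IConnectableLayer::SetBackendId(), letting Optimize() skip
            // a second IsLayerSupported() query for this layer.
            setBackend = backend;
            return true;
        }
    }
    return false; // setBackend keeps its default-constructed id
}
```

With this in place, any layer whose BackendId is already set can bypass the second support check during optimization.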
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index b144c78..aab5227 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -275,7 +275,7 @@
     DataType GetDataType() const;
 
     const BackendId& GetBackendId() const { return m_BackendId; }
-    void SetBackendId(const BackendId& id) { m_BackendId = id; }
+    void SetBackendId(const BackendId& id) override { m_BackendId = id; }
 
     // Virtuals
 
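Editor's note: the `override` added above only compiles if `SetBackendId()` is virtual on the base interface, so the patch implies a matching declaration on `IConnectableLayer` in a part of the diff not shown here. A simplified sketch of the assumed relationship:

```cpp
#include <armnn/BackendId.hpp>

// Reduced sketch; the real IConnectableLayer declares many more members.
class IConnectableLayer
{
public:
    // Assumed to be introduced (or made virtual) alongside this patch, so that
    // frontends holding only an IConnectableLayer* can tag the chosen backend.
    virtual void SetBackendId(const armnn::BackendId& id) = 0;
protected:
    virtual ~IConnectableLayer() = default;
};

class Layer : public IConnectableLayer
{
public:
    const armnn::BackendId& GetBackendId() const { return m_BackendId; }
    void SetBackendId(const armnn::BackendId& id) override { m_BackendId = id; }
private:
    armnn::BackendId m_BackendId; // stays at its default id until assigned
};
```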
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 6d3058c..a61624f 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -12,6 +12,7 @@
 #include "BackendSettings.hpp"
 #include "optimizations/All.hpp"
 #include "armnnUtils/Filesystem.hpp"
+#include "armnn/utility/Timer.hpp"
 
 #include <armnn/backends/TensorHandle.hpp>
 #include <armnn/backends/WorkloadFactory.hpp>
@@ -766,6 +767,15 @@
     }
 }
 
+inline std::vector<DataType> GetLayerInOutDatatype(const Layer* layer)
+{
+    DataType dataTypeIn  = layer->GetNumInputSlots() == 0 ? DataType::Float32 :
+                           layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType();
+    DataType dataTypeOut = layer->GetNumOutputSlots() == 0 ? DataType::Float32 :
+                           layer->GetOutputSlot(0).GetTensorInfo().GetDataType();
+    return {dataTypeIn, dataTypeOut};
+}
+
 // Refactor to allow passing the IConnectableLayer* rather than Layer Iterator
 // on Graph and SubgraphView which are different types.
 void AssignBackendsIConnectable(OptimizedNetworkImpl* optNetObjPtr,
@@ -787,10 +797,7 @@
         return;
     }
 
-    DataType dataTypeIn  = layer->GetNumInputSlots() == 0 ? DataType::Float32 :
-                           layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType();
-    DataType dataTypeOut = layer->GetNumOutputSlots() == 0 ? DataType::Float32 :
-                           layer->GetOutputSlot(0).GetTensorInfo().GetDataType();
+    std::vector<DataType> inOutDataType = GetLayerInOutDatatype(layer);
 
     std::string reasonIfUnsupported;
     bool found = false;
@@ -808,8 +815,8 @@
                                  optNetObjPtr->GetGraph(),
                                  layer,
                                  layer->GetBackendHint().value(),
-                                 dataTypeIn,
-                                 dataTypeOut,
+                                 inOutDataType[0],
+                                 inOutDataType[1],
                                  availablePreferredBackends,
                                  reasonIfUnsupported,
                                  errMessages).IsOk())
@@ -832,8 +839,8 @@
                                                               optNetObjPtr->GetGraph(),
                                                               layer,
                                                               backend,
-                                                              dataTypeIn,
-                                                              dataTypeOut,
+                                                              inOutDataType[0],
+                                                              inOutDataType[1],
                                                               availablePreferredBackends,
                                                               reasonIfUnsupported,
                                                               errMessages);
@@ -903,12 +910,33 @@
 
     for (auto it = firstLayer; it != lastLayer; ++it)
     {
-        AssignBackendsIConnectable(optNetObjPtr,
-                                   *it,
-                                   errMessages,
-                                   result,
-                                   backendSettings,
-                                   availablePreferredBackends);
+        auto layer = PolymorphicDowncast<Layer*>(*it);
+        std::vector<DataType> inOutDataType = GetLayerInOutDatatype(layer);
+
+        // In AttemptBackendAssignment() we check:
+        //     - if input/output datatypes of the layer are float16
+        //     - if the layer is supported with these datatypes
+        // If the layer is not supported (failing on ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED() in clframework),
+        // we attempt to insert conversion layers on either side of the new fp32 layer.
+        bool isFloat16 = false;
+        for (auto type : inOutDataType)
+        {
+            if (type == DataType::Float16)
+            {
+                isFloat16 = true;
+                break;
+            }
+        }
+
+        if (layer->GetBackendId() == "Unknown" || isFloat16)
+        {
+            AssignBackendsIConnectable(optNetObjPtr,
+                                       *it,
+                                       errMessages,
+                                       result,
+                                       backendSettings,
+                                       availablePreferredBackends);
+        }
     }
 
     for (auto it = firstLayer; it != lastLayer; ++it)
@@ -1540,6 +1568,8 @@
                               const OptimizerOptions& options,
                               Optional<std::vector<std::string>&> messages)
 {
+    const auto start_time = armnn::GetTimeNow();
+
     ARMNN_LOG(debug) << options.ToString();
 
     // Enable profiling
@@ -1723,6 +1753,9 @@
         optGraph.AddCompatibilityLayers(backends, tensorHandleFactoryRegistry);
     }
 
+    ARMNN_LOG(info) << "!! New time !! : " << std::setprecision(2)
+                    << std::fixed << armnn::GetTimeDuration(start_time).count() << " ms.";
+
     return optNet;
 }
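Editor's note: taken together, the Network.cpp changes mean `AssignBackendsIConnectable()` now runs only for layers that still need a decision. A minimal sketch of that predicate, assuming (as the comparison in the patch implies) that an unassigned layer reports the BackendId "Unknown":

```cpp
#include <algorithm>
#include <vector>

// Sketch only: Layer and GetLayerInOutDatatype() are the ones from this patch.
bool NeedsBackendAssignment(const armnn::Layer* layer)
{
    const std::vector<armnn::DataType> inOutDataType = GetLayerInOutDatatype(layer);

    // Float16 layers always go through AttemptBackendAssignment() again so
    // that FP32 conversion layers can be inserted if a backend rejects FP16.
    const bool isFloat16 =
        std::any_of(inOutDataType.begin(), inOutDataType.end(),
                    [](armnn::DataType type) { return type == armnn::DataType::Float16; });

    return layer->GetBackendId() == "Unknown" || isFloat16;
}
```

The `GetTimeNow()`/`GetTimeDuration()` pair added around `Optimize()` gives a quick before/after measurement of the time saved by skipping redundant support checks.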