Optimize the calling of IsLayerSupported().

!armnn:8742

  * Done as part of 22.11/23.02 innovation days.
  * IsLayerSupported() is called during model prepare (delegate, android-nn-driver and shim/support_library)
    and again in ArmNN when the model is optimized.
  * After the first call to IsLayerSupported() we already know whether each layer is supported
    and which backend it is supported on.
  * The solution is to set the BackendId of the IConnectableLayer when IsLayerSupported() is called the first time.
  * In the Optimize() function we then check whether the backend is already set. If so, IsLayerSupported()
    is not called again (a sketch of this check follows the list).
  * If a layer that is supported gets replaced during optimization, the BackendId of the newly optimized layer
    is set to "Unknown" and IsLayerSupported() is called again on that layer.
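
The following is a minimal, self-contained sketch of the Optimize()-side check described above; the
Layer struct and the AssignBackends()/IsLayerSupported() helpers are illustrative stand-ins, not the
real armnn::IConnectableLayer API or the actual backend-assignment code in ArmNN.

    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical, simplified layer: the real layers carry an armnn::BackendId set via SetBackendId().
    struct Layer
    {
        std::string name;
        std::string backendId = "Unknown"; // set by the driver/delegate on the first IsLayerSupported() call
    };

    // Stand-in for the per-backend support query that this change avoids repeating.
    bool IsLayerSupported(const Layer& layer, const std::string& backend)
    {
        std::cout << "IsLayerSupported() called for " << layer.name << " on " << backend << "\n";
        return true;
    }

    // Sketch of backend assignment during Optimize(): layers that already carry a backend from model
    // prepare are skipped; layers created or replaced by an optimization still read "Unknown" and are
    // re-checked against the preferred backends.
    void AssignBackends(std::vector<Layer>& layers, const std::vector<std::string>& preferredBackends)
    {
        for (auto& layer : layers)
        {
            if (layer.backendId != "Unknown")
            {
                continue; // backend already decided in model prepare, no repeat query
            }
            for (const auto& backend : preferredBackends)
            {
                if (IsLayerSupported(layer, backend))
                {
                    layer.backendId = backend;
                    break;
                }
            }
        }
    }

    int main()
    {
        std::vector<Layer> layers = { { "Cast", "GpuAcc" },           // backend set during model prepare
                                      { "FusedConv2d", "Unknown" } }; // produced by an optimization
        AssignBackends(layers, { "GpuAcc", "CpuAcc", "CpuRef" });
        for (const auto& l : layers)
        {
            std::cout << l.name << " -> " << l.backendId << "\n";
        }
        return 0;
    }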

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: Ie5c6c9cd10d81f90b1ee78dd6e3442f353b6c109
diff --git a/ConversionUtils_1_2.hpp b/ConversionUtils_1_2.hpp
index b1848e3..ce6be44 100644
--- a/ConversionUtils_1_2.hpp
+++ b/ConversionUtils_1_2.hpp
@@ -162,13 +162,14 @@
     const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
     bool isSupported = false;
-
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsCastSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo);
     };
@@ -188,6 +189,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddCastLayer();
+    layer->SetBackendId(setBackend);
     if (!layer)
     {
         return Fail("%s: Could not add the CastLayer", __func__);
@@ -248,13 +250,14 @@
     const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
     bool isSupported = false;
-
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsChannelShuffleSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    descriptor);
@@ -275,6 +278,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddChannelShuffleLayer(descriptor);
+    layer->SetBackendId(setBackend);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
@@ -315,12 +319,14 @@
     ComparisonDescriptor descriptor(comparisonOperation);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsComparisonSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo0,
                                    inputInfo1,
                                    outputInfo,
@@ -343,6 +349,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddComparisonLayer(descriptor);
+    layer->SetBackendId(setBackend);
     if (!layer)
     {
         return Fail("%s: Could not add the ComparisonLayer", __func__);
@@ -482,12 +489,14 @@
     Optional<TensorInfo> biases(biasInfo);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsConvolution2dSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    desc,
@@ -510,6 +519,7 @@
     }
 
     armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc);
+    startLayer->SetBackendId(setBackend);
 
     if (!startLayer)
     {
@@ -648,12 +658,14 @@
     Optional<TensorInfo> biases(biasInfo);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsDepthwiseConvolutionSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    desc,
@@ -676,6 +688,7 @@
     }
 
     armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc);
+    startLayer->SetBackendId(setBackend);
 
     if (!startLayer)
     {
@@ -741,13 +754,14 @@
     ElementwiseUnaryDescriptor descriptor(unaryOperation);
 
     bool isSupported = false;
-
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsElementwiseUnarySupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    descriptor);
@@ -768,6 +782,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddElementwiseUnaryLayer(descriptor);
+    layer->SetBackendId(setBackend);
     if (!layer)
     {
         return Fail("%s: Could not add the ElementwiseUnaryLayer", __func__);
@@ -823,12 +838,14 @@
     reshapeDescriptor.m_TargetShape = targetShape;
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsReshapeSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input.GetTensorInfo(),
                                    outputInfo,
                                    reshapeDescriptor);
@@ -853,6 +870,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
+    layer->SetBackendId(setBackend);
     if (!layer)
     {
         return Fail("%s: Could not add the ReshapeLayer", __func__);
@@ -915,12 +933,14 @@
     desc.m_Axis = axis;
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsGatherSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input.GetTensorInfo(),
                                    indices.GetTensorInfo(),
                                    outputInfo,
@@ -942,6 +962,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddGatherLayer(desc);
+    layer->SetBackendId(setBackend);
     if (!layer)
     {
         return Fail("%s: Could not add the GatherLayer", __func__);
@@ -1111,10 +1132,12 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackendSplit;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                IsSplitterSupported,
                                data.m_Backends,
                                isSupported,
+                               setBackendSplit,
                                inputInfo,
                                splitterOutputInfos,
                                splitterDesc);
@@ -1124,6 +1147,7 @@
     }
 
     IConnectableLayer* splitterLayer = data.m_Network->AddSplitterLayer(splitterDesc);
+    splitterLayer->SetBackendId(setBackendSplit);
     if (!splitterLayer)
     {
         return Fail("%s: Failed to add SplitterLayer", __func__);
@@ -1207,12 +1231,14 @@
                                                               biasesDataOffset));
 
             isSupported = false;
+            armnn::BackendId setBackendConv;
             auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
             {
                 FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                            IsConvolution2dSupported,
                                            data.m_Backends,
                                            isSupported,
+                                           setBackendConv,
                                            groupInputInfo,
                                            outputInfo,
                                            desc,
@@ -1237,6 +1263,7 @@
             IConnectableLayer* weightsLayer = data.m_Network->AddConstantLayer(groupWeights);
             IConnectableLayer* biasLayer = data.m_Network->AddConstantLayer(groupBiases);
             IConnectableLayer* convLayer = data.m_Network->AddConvolution2dLayer(desc);
+            convLayer->SetBackendId(setBackendConv);
 
             if (!convLayer)
             {
@@ -1286,10 +1313,12 @@
     }
 
     isSupported = false;
+    armnn::BackendId setBackendConcat;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                IsConcatSupported,
                                data.m_Backends,
                                isSupported,
+                               setBackendConcat,
                                std::vector<const TensorInfo*>(numGroups * channelMultiplier, &groupOutputInfo),
                                outputInfo,
                                concatDescriptor);
@@ -1300,6 +1329,7 @@
     }
 
     IConnectableLayer* concatLayer = data.m_Network->AddConcatLayer(concatDescriptor);
+    concatLayer->SetBackendId(setBackendConcat);
     if (!concatLayer)
     {
         return Fail("%s: AddConcatLayer failed", __func__);
@@ -1387,12 +1417,14 @@
     desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 4, model, data);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsInstanceNormalizationSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input.GetTensorInfo(),
                                    outputInfo,
                                    desc);
@@ -1413,6 +1445,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddInstanceNormalizationLayer(desc);
+    layer->SetBackendId(setBackend);
     input.Connect(layer->GetInputSlot(0));
 
     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -1481,12 +1514,14 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsLogSoftmaxSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input.GetTensorInfo(),
                                    outputInfo,
                                    descriptor);
@@ -1507,6 +1542,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddLogSoftmaxLayer(descriptor);
+    layer->SetBackendId(setBackend);
     if (!layer)
     {
         return Fail("%s: Could not add the LogSoftmaxLayer", __func__);
@@ -1542,12 +1578,14 @@
     const TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsMaximumSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input0.GetTensorInfo(),
                                    input1.GetTensorInfo(),
                                    outInfo);
@@ -1568,6 +1606,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
+    layer->SetBackendId(setBackend);
     if (!layer)
     {
         return Fail("%s: Could not add the MaximumLayer", __func__);
@@ -1607,12 +1646,14 @@
     const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsMinimumSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input0.GetTensorInfo(),
                                    input1.GetTensorInfo(),
                                    outputInfo);
@@ -1633,6 +1674,7 @@
     }
 
     IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
+    layer->SetBackendId(setBackend);
     if (!layer)
     {
         return Fail("%s: Could not add the MinimumLayer", __func__);
@@ -1722,12 +1764,14 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsPadSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    descriptor);
@@ -1748,6 +1792,7 @@
     }
 
     IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
+    layer->SetBackendId(setBackend);
     if (!layer)
     {
         return Fail("%s: Could not add the PadLayer", __func__);
@@ -1786,12 +1831,14 @@
     const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsPreluSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    alphaInfo,
                                    outputInfo);
@@ -1812,6 +1859,7 @@
     }
 
     IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
+    layer->SetBackendId(setBackend);
     if (!layer)
     {
         return Fail("%s: Could not add the PreluLayer", __func__);
@@ -1850,12 +1898,14 @@
     const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsQuantizeSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input.GetTensorInfo(),
                                    outputInfo);
     };
@@ -1875,6 +1925,7 @@
     }
 
     IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
+    layer->SetBackendId(setBackend);
     if (!layer)
     {
         return Fail("%s: Could not add the QuantizeLayer", __func__);
@@ -2072,12 +2123,14 @@
     paramsInfo.m_OutputGateBias           = &(params.m_OutputGateBias->GetInfo());
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsQuantizedLstmSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    previousCellStateInInfo,
                                    previousOutputInInfo,
@@ -2104,6 +2157,7 @@
     }
 
     IConnectableLayer* const layer = data.m_Network->AddQuantizedLstmLayer(params, "QuantizedLstm");
+    layer->SetBackendId(setBackend);
     input.Connect(layer->GetInputSlot(0));
     previousCellStateIn.Connect(layer->GetInputSlot(1));
     previousOutputIn.Connect(layer->GetInputSlot(2));
@@ -2176,12 +2230,14 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsReduceSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    descriptor);
@@ -2202,6 +2258,7 @@
     }
 
     armnn::IConnectableLayer* const layer = data.m_Network->AddReduceLayer(descriptor);
+    layer->SetBackendId(setBackend);
     if (!layer)
     {
         return Fail("%s: Could not add the ReduceLayer", __func__);
@@ -2328,12 +2385,14 @@
     descriptor.m_HalfPixelCenters = GetOptionalBool<HalPolicy>(operation, 5, model, data);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsResizeSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    descriptor);
@@ -2354,6 +2413,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
+    layer->SetBackendId(setBackend);
     if (!layer)
     {
         return Fail("%s: Could not add the ResizeLayer", __func__);
@@ -2406,12 +2466,14 @@
     desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsSpaceToDepthSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    desc);
@@ -2432,6 +2494,7 @@
     }
 
     IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
+    layer->SetBackendId(setBackend);
     if (!layer)
     {
         return Fail("%s: Could not add the SpaceToDepthLayer", __func__);
@@ -2499,12 +2562,14 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsSoftmaxSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    input.GetTensorInfo(),
                                    outputInfo,
                                    desc);
@@ -2525,6 +2590,7 @@
     }
 
     IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
+    layer->SetBackendId(setBackend);
     if (!layer)
     {
         return Fail("%s: Could not add the SoftmaxLayer", __func__);
@@ -2889,12 +2955,14 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsLstmSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputStateInInfo,
                                    cellStateInInfo,
@@ -2927,6 +2995,7 @@
 
     // Add the layer
     IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
+    layer->SetBackendId(setBackend);
 
     input.Connect(layer->GetInputSlot(0));
     outputStateIn.Connect(layer->GetInputSlot(1));
@@ -3126,12 +3195,14 @@
     Optional<TensorInfo> biases(bias.GetInfo());
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsTransposeConvolution2dSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputInfo,
                                    desc,
@@ -3154,6 +3225,7 @@
 
     IConnectableLayer* startLayer =
         data.m_Network->AddTransposeConvolution2dLayer(desc, weights, Optional<ConstTensor>(bias));
+    startLayer->SetBackendId(setBackend);
     if (!startLayer)
     {
         return Fail("%s: AddTransposeConvolution2dLayer failed", __func__);
@@ -3556,12 +3628,14 @@
     }
 
     bool isSupported = false;
+    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    IsUnidirectionalSequenceLstmSupported,
                                    data.m_Backends,
                                    isSupported,
+                                   setBackend,
                                    inputInfo,
                                    outputStateInInfo,
                                    cellStateInInfo,
@@ -3592,6 +3666,7 @@
     IConnectableLayer* layer = data.m_Network->AddUnidirectionalSequenceLstmLayer(desc,
                                                                                   params,
                                                                                   "UnidirectionalSequenceLstm");
+    layer->SetBackendId(setBackend);
 
     input.Connect(layer->GetInputSlot(0));
     outputStateIn.Connect(layer->GetInputSlot(1));