IVGCVSW-7450 Fix delegate fallback when fused activation is unsupported

 In layers that support fused activations, we check for activation
 layer support after we already create the base layer. This breaks
 the fallback as we already added the base layer to the graph.

 * Creates ValidateFusedActivation shared function
 * Moves activation validation earlier in each Visit function

Signed-off-by: Ryan OShea <ryan.oshea3@arm.com>
Change-Id: I239af360923f695fc374ddeaeefa24c062eaf9e8
diff --git a/delegate/src/Control.hpp b/delegate/src/Control.hpp
index 02426a5..fd1fdee 100644
--- a/delegate/src/Control.hpp
+++ b/delegate/src/Control.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -99,6 +99,12 @@
     uint32_t inputRank = tfLiteTensors[tfLiteNode->inputs->data[0]].dims->size;
 
     auto* concatenationParameters = reinterpret_cast<TfLiteConcatenationParams*>(tfLiteNode->builtin_data);
+
+    if(!concatenationParameters)
+    {
+        throw armnn::Exception(&"TfLiteArmnnDelegate: Concat parameters are null in: " [ nodeIndex]);
+    }
+
     const unsigned int concatDimInput = static_cast<unsigned int>(
             (static_cast<int>(inputRank) + concatenationParameters->axis) % static_cast<int>(inputRank));
 
@@ -117,6 +123,17 @@
 
     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
+    // Verify we support the fused activation before attempting to create a layer
+    TfLiteFusedActivation activationType = concatenationParameters->activation;
+
+    const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+    TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+                                                                    activationOutputInfo, activationType);
+    if(activationStatus != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
     // Check if supported
     bool isSupported = false;
     armnn::BackendId setBackend;
@@ -158,14 +175,13 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
     Connect(concatenationLayer, tfLiteNode, delegateData);
 
-    if (!concatenationParameters)
+    if (activationType == kTfLiteActNone)
     {
         // No Activation
         return kTfLiteOk;
     }
 
-    // Check activation
-    TfLiteFusedActivation activationType = concatenationParameters->activation;
+    // Check and Create activation
     return FusedActivation(tfLiteContext, tfLiteNode, activationType, concatenationLayer, 0, delegateData);
 }
 
diff --git a/delegate/src/Convolution.hpp b/delegate/src/Convolution.hpp
index e307bb9..7ea3a3a 100644
--- a/delegate/src/Convolution.hpp
+++ b/delegate/src/Convolution.hpp
@@ -1,11 +1,12 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #pragma once
 
 #include "DelegateUtils.hpp"
+#include "SharedFunctions.hpp"
 
 #include <tensorflow/lite/builtin_ops.h>
 #include <tensorflow/lite/c/builtin_op_data.h>
@@ -100,6 +101,22 @@
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
+    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
+    TfLiteFusedActivation activationType = kTfLiteActNone;
+    if (tfLiteNodeParameters)
+    {
+        activationType = tfLiteNodeParameters->activation;
+
+        const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+                                                                        activationOutputInfo, activationType);
+        if(activationStatus != kTfLiteOk)
+        {
+            return kTfLiteError;
+        }
+
+    }
+
     armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
 
     armnn::TensorInfo biasTensorInfo;
@@ -198,14 +215,12 @@
 
     Connect(layer, tfLiteNode, delegateData);
 
-    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
     if (!tfLiteNodeParameters)
     {
         // No Activation
         return kTfLiteOk;
     }
-    // Check activation
-    TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
+    // Check and Create activation
     return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
 
 }
@@ -263,6 +278,22 @@
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
+    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);
+    TfLiteFusedActivation activationType = kTfLiteActNone;
+    if (tfLiteNodeParameters)
+    {
+        activationType = tfLiteNodeParameters->activation;
+
+        const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+                                                                        activationOutputInfo, activationType);
+        if(activationStatus != kTfLiteOk)
+        {
+            return kTfLiteError;
+        }
+
+    }
+
     armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
 
     armnn::TensorInfo biasTensorInfo;
@@ -362,15 +393,13 @@
 
     Connect(layer, tfLiteNode, delegateData);
 
-    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);
     if (!tfLiteNodeParameters)
     {
         // No Activation
         return kTfLiteOk;
     }
 
-    // Check activation
-    TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
+    // Check and create activation
     return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
 }
 #endif
@@ -460,6 +489,22 @@
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
+    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteDepthwiseConvParams *>(tfLiteNode->builtin_data);
+    TfLiteFusedActivation activationType = kTfLiteActNone;
+    if (tfLiteNodeParameters)
+    {
+        activationType = tfLiteNodeParameters->activation;
+
+        const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+                                                                        activationOutputInfo, activationType);
+        if(activationStatus != kTfLiteOk)
+        {
+            return kTfLiteError;
+        }
+
+    }
+
     armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
 
     // Assuming input is NHWC
@@ -553,14 +598,12 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     Connect(layer, tfLiteNode, delegateData);
-    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);
     if (!tfLiteNodeParameters)
     {
         // No Activation
         return kTfLiteOk;
     }
-    // Check activation
-    TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
+    // Check and create activation
     return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
 }
 
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
index caf0262..f21d6af 100644
--- a/delegate/src/ElementwiseBinary.hpp
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -254,6 +254,21 @@
 
     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
+    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteAddParams*>(tfLiteNode->builtin_data);
+    TfLiteFusedActivation activationType = kTfLiteActNone;
+    if (tfLiteNodeParameters)
+    {
+        activationType = tfLiteNodeParameters->activation;
+
+        const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+                                                                        activationOutputInfo, activationType);
+        if(activationStatus != kTfLiteOk)
+        {
+            return kTfLiteError;
+        }
+    }
+
     if (!delegateData.m_Network)
     {
         switch(elementwiseBinaryOperatorCode)
@@ -361,14 +376,12 @@
         return kTfLiteError;
     }
 
-    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteAddParams*>(tfLiteNode->builtin_data);
     if (!tfLiteNodeParameters)
     {
         // No Activation
         return kTfLiteOk;
     }
-    // Check activation
-    TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
+    // Check and Create Activation
     return FusedActivation(tfLiteContext, tfLiteNode, activationType, elementwiseBinaryLayer, 0, delegateData);
 }
 
diff --git a/delegate/src/FullyConnected.hpp b/delegate/src/FullyConnected.hpp
index 2243ad0..ee553ce 100644
--- a/delegate/src/FullyConnected.hpp
+++ b/delegate/src/FullyConnected.hpp
@@ -57,6 +57,22 @@
     armnn::TensorInfo weightsTensorInfo       = GetTensorInfoForTfLiteTensor(tfLiteWeightsTensor);
     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
+    // Check that we support fused activation before we attempt to create a layer
+    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteFullyConnectedParams *>(tfLiteNode->builtin_data);
+    TfLiteFusedActivation activationType = kTfLiteActNone;
+    if (tfLiteNodeParameters)
+    {
+        activationType = tfLiteNodeParameters->activation;
+
+        const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+                                                                        activationOutputInfo, activationType);
+        if(activationStatus != kTfLiteOk)
+        {
+            return kTfLiteError;
+        }
+    }
+
     // Fully Connected Layer accepts two dimensional weights input
     int32_t weightsDimension = static_cast<int32_t>(weightsTensorInfo.GetNumDimensions());
     if (weightsDimension != 2)
@@ -221,9 +237,7 @@
     {
         Connect(layer, tfLiteNode, delegateData);
     }
-
-    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteFullyConnectedParams*>(tfLiteNode->builtin_data);
-
+
     if (outputTensorInfo.GetNumDimensions() > 2)
     {
         layer = AddReshapeLayer(tfLiteContext, tfLiteNode, layer, reshapedOutputTensorInfo, outputTensorInfo,
@@ -244,8 +258,8 @@
         // No Activation
         return kTfLiteOk;
     }
-    // Check Activation
-    TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
+
+    // Check and Create Activation
     return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
 }
 
diff --git a/delegate/src/Pooling.hpp b/delegate/src/Pooling.hpp
index 8241567..d0a73b4 100644
--- a/delegate/src/Pooling.hpp
+++ b/delegate/src/Pooling.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -49,6 +49,22 @@
     const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
+    auto* tfLiteNodeParameters = reinterpret_cast<TfLitePoolParams*>(tfLiteNode->builtin_data);
+    TfLiteFusedActivation activationType = kTfLiteActNone;
+    if (tfLiteNodeParameters)
+    {
+        activationType = tfLiteNodeParameters->activation;
+
+        const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+                                                                        activationOutputInfo, activationType);
+        if(activationStatus != kTfLiteOk)
+        {
+            return kTfLiteError;
+        }
+
+    }
+
     armnn::PoolingAlgorithm poolingAlgorithm;
     switch(tfLitePoolingOperatorCode)
     {
@@ -68,20 +84,19 @@
     armnn::Pooling2dDescriptor descriptor;
     descriptor.m_PoolType = poolingAlgorithm;
 
-    auto* params = reinterpret_cast<TfLitePoolParams*>(tfLiteNode->builtin_data);
-    descriptor.m_PoolWidth = params->filter_width;
-    descriptor.m_PoolHeight = params->filter_height;
-    descriptor.m_StrideX = params->stride_width;
-    descriptor.m_StrideY = params->stride_height;
+    descriptor.m_PoolWidth = tfLiteNodeParameters->filter_width;
+    descriptor.m_PoolHeight = tfLiteNodeParameters->filter_height;
+    descriptor.m_StrideX = tfLiteNodeParameters->stride_width;
+    descriptor.m_StrideY = tfLiteNodeParameters->stride_height;
     descriptor.m_DataLayout = armnn::DataLayout::NHWC;
 
     unsigned int inputHeight = inputTensorInfo.GetShape()[1];
     unsigned int inputWidth  = inputTensorInfo.GetShape()[2];
 
     CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u,
-                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
+                descriptor.m_PadTop, descriptor.m_PadBottom, tfLiteNodeParameters->padding);
     CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u,
-                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
+                descriptor.m_PadLeft, descriptor.m_PadRight, tfLiteNodeParameters->padding);
 
     bool isSupported = false;
     armnn::BackendId setBackend;
@@ -112,8 +127,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
     Connect(poolingLayer, tfLiteNode, delegateData);
 
-    // Check activation
-    TfLiteFusedActivation activationType = params->activation;
+    // Check and create activation
     return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
 }
 
@@ -216,36 +230,6 @@
     CalcPadding(inputDepth, descriptor.m_PoolDepth, descriptor.m_StrideZ, 1u,
                 descriptor.m_PadFront, descriptor.m_PadBack, padding);
 
-    // Validate the output info.
-    bool isSupported = false;
-    armnn::BackendId setBackend;
-    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported) {
-        FORWARD_LAYER_SUPPORT_FUNC("POOLING_3D",
-                                   tfLiteContext,
-                                   IsPooling3dSupported,
-                                   delegateData.m_Backends,
-                                   isSupported,
-                                   setBackend,
-                                   inputTensorInfo,
-                                   outputTensorInfo,
-                                   descriptor);
-    };
-
-    if (!delegateData.m_Network)
-    {
-        validateFunc(outputTensorInfo, isSupported);
-        return isSupported ? kTfLiteOk : kTfLiteError;
-    }
-
-    // Create the Layer
-    armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor);
-    poolingLayer->SetBackendId(setBackend);
-    ARMNN_ASSERT(poolingLayer != nullptr);
-
-    // Create and set output slots
-    armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
-    outputSlot.SetTensorInfo(outputTensorInfo);
-    Connect(poolingLayer, tfLiteNode, delegateData);
 
     // Check activation by parsing the string from the flexbuffer map
     std::string activationTypeStr = m["activation"].AsString().str();
@@ -280,6 +264,46 @@
         activationType = kTfLiteActNone;
     }
 
+    const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+    TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+                                                                    activationOutputInfo, activationType);
+    if(activationStatus != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+
+    // Validate the output info.
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported) {
+        FORWARD_LAYER_SUPPORT_FUNC("POOLING_3D",
+                                   tfLiteContext,
+                                   IsPooling3dSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   setBackend,
+                                   inputTensorInfo,
+                                   outputTensorInfo,
+                                   descriptor);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    // Create the Layer
+    armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor);
+    poolingLayer->SetBackendId(setBackend);
+    ARMNN_ASSERT(poolingLayer != nullptr);
+
+    // Create and set output slots
+    armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+    Connect(poolingLayer, tfLiteNode, delegateData);
+
     return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
 }
 
diff --git a/delegate/src/SharedFunctions.cpp b/delegate/src/SharedFunctions.cpp
index 22f578a..fef9701 100644
--- a/delegate/src/SharedFunctions.cpp
+++ b/delegate/src/SharedFunctions.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -37,5 +37,80 @@
     return isSupported ? kTfLiteOk : kTfLiteError;
 }
 
+TfLiteStatus ValidateFusedActivationOperator(DelegateData& delegateData,
+                                             TfLiteContext* tfLiteContext,
+                                             const armnn::TensorInfo& inputInfo,
+                                             const armnn::TensorInfo& outputInfo,
+                                             TfLiteFusedActivation activationType)
+{
+    armnn::ActivationDescriptor activationDesc;
+
+    switch (activationType)
+    {
+        case kTfLiteActNone:
+        {
+            // No Activation
+            return kTfLiteOk;
+        }
+        case kTfLiteActRelu:
+        {
+            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
+            break;
+        }
+// The name of kTfLiteActRelu1 changed after TF Lite v2.3
+#if defined(ARMNN_POST_TFLITE_2_3)
+        case kTfLiteActReluN1To1:
+#else
+        case kTfLiteActRelu1:
+#endif
+        {
+            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+            activationDesc.m_A = 1.0f;
+            activationDesc.m_B = -1.0f;
+            break;
+        }
+        case kTfLiteActRelu6:
+        {
+            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+            activationDesc.m_A = 6.0f;
+            activationDesc.m_B = 0.0f;
+            break;
+        }
+        case kTfLiteActSigmoid:
+        {
+            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
+            break;
+        }
+        case kTfLiteActTanh:
+        {
+            activationDesc.m_Function = armnn::ActivationFunction::TanH;
+            activationDesc.m_A = 1.0f;
+            activationDesc.m_B = 1.0f;
+            break;
+        }
+        default:
+            return kTfLiteError;
+    }
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
+                                   tfLiteContext,
+                                   IsActivationSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   setBackend,
+                                   inputInfo,
+                                   outputInfo,
+                                   activationDesc);
+    };
+    validateFunc(outputInfo, isSupported);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+
 } // namespace armnnDelegate
 
diff --git a/delegate/src/SharedFunctions.hpp b/delegate/src/SharedFunctions.hpp
index bf6b603..b03a63d 100644
--- a/delegate/src/SharedFunctions.hpp
+++ b/delegate/src/SharedFunctions.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -15,5 +15,11 @@
                                    const armnn::TensorInfo& inputTensorInfo,
                                    const armnn::TensorInfo& outputTensorInfo);
 
+TfLiteStatus ValidateFusedActivationOperator(DelegateData& delegateData,
+                                             TfLiteContext* tfLiteContext,
+                                             const armnn::TensorInfo& inputInfo,
+                                             const armnn::TensorInfo& outputInfo,
+                                             TfLiteFusedActivation activationType);
+
 } // namespace armnnDelegate