IVGCVSW-7589 IVGCVSW-7595 IVGCVSW-7593 ElementwiseUnary, Normalization and LogicalBinary operators for opaque delegate

* Report the operator as part of the layer name for:
   - LogicalBinary
   - ElementwiseUnary
   - Comparison
   - Activation
* Fixing indentation in Gather.hpp
* Removing not needed includes in Gather, GatherNd and Comparison
* Correct end of namespace comment in Comparison
* Correct log from TfLiteArmnnDelegate to TfLiteArmnnOpaqueDelegate

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ia0d497709309e912d31eb4b6db0fef9e79b7a3af
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 3914a24..f46119c 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -274,12 +274,18 @@
              test/Convolution2dTest.cpp
              test/ConvolutionTestHelper.hpp
              test/DepthwiseConvolution2dTest.cpp
+             test/ElementwiseUnaryTestHelper.hpp
+             test/ElementwiseUnaryTest.cpp
              test/FullyConnectedTest.cpp
              test/FullyConnectedTestHelper.hpp
              test/GatherTest.cpp
              test/GatherTestHelper.hpp
              test/GatherNdTest.cpp
              test/GatherNdTestHelper.hpp
+             test/LogicalTest.cpp
+             test/LogicalTestHelper.hpp
+             test/NormalizationTest.cpp
+             test/NormalizationTestHelper.hpp
              test/PreluTest.cpp
              test/PreluTestHelper.hpp
              test/TestUtils.hpp
diff --git a/delegate/opaque/CMakeLists.txt b/delegate/opaque/CMakeLists.txt
index 5a248f9..c019649 100644
--- a/delegate/opaque/CMakeLists.txt
+++ b/delegate/opaque/CMakeLists.txt
@@ -14,9 +14,12 @@
         src/Comparison.hpp
         src/Control.hpp
         src/Convolution.hpp
+        src/ElementwiseUnary.hpp
         src/FullyConnected.hpp
         src/Gather.hpp
         src/GatherNd.hpp
+        src/LogicalBinary.hpp
+        src/Normalization.hpp
         src/Prelu.hpp
         src/Redefine.hpp
         src/SharedFunctions.cpp
diff --git a/delegate/opaque/src/Activation.hpp b/delegate/opaque/src/Activation.hpp
index a45bba9..9fce7a1 100644
--- a/delegate/opaque/src/Activation.hpp
+++ b/delegate/opaque/src/Activation.hpp
@@ -10,6 +10,53 @@
 namespace armnnOpaqueDelegate
 {
 
+std::string GetLayerName(armnn::ActivationFunction activationFunction)
+{
+    std::string layerName = "ACTIVATION";
+    switch (activationFunction)
+    {
+        case armnn::ActivationFunction::Abs:
+            layerName += " ABS";
+            break;
+        case armnn::ActivationFunction::BoundedReLu:
+            layerName += " BOUNDED_RELU";
+            break;
+        case armnn::ActivationFunction::Elu:
+            layerName += " ELU";
+            break;
+        case armnn::ActivationFunction::HardSwish:
+            layerName += " HARD_SWISH";
+            break;
+        case armnn::ActivationFunction::LeakyReLu:
+            layerName += " LEAKY_RELU";
+            break;
+        case armnn::ActivationFunction::Linear:
+            layerName += " LINEAR";
+            break;
+        case armnn::ActivationFunction::ReLu:
+            layerName += " RELU";
+            break;
+        case armnn::ActivationFunction::Sigmoid:
+            layerName += " SIGMOID";
+            break;
+        case armnn::ActivationFunction::SoftReLu:
+            layerName += " SOFT_RELU";
+            break;
+        case armnn::ActivationFunction::Square:
+            layerName += " SQUARE";
+            break;
+        case armnn::ActivationFunction::Sqrt:
+            layerName += " SQRT";
+            break;
+        case armnn::ActivationFunction::TanH:
+            layerName += " TANH";
+            break;
+        default:
+            layerName += " UNKNOWN";
+    }
+    return layerName;
+}
+
 TfLiteStatus ValidateActivationOperator(DelegateData& delegateData,
                                         TfLiteOpaqueContext* tfLiteContext,
                                         const armnn::TensorInfo& inputInfo,
@@ -17,9 +64,9 @@
                                         armnn::ActivationDescriptor& activationDesc)
 {
     bool isSupported = false;
-    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported, std::string layerName)
     {
-        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("ACTIVATION",
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC(layerName.c_str(),
                                           tfLiteContext,
                                           IsActivationSupported,
                                           delegateData.m_Backends,
@@ -30,7 +77,7 @@
                                           activationDesc);
     };
 
-    validateFunc(outputInfo, isSupported);
+    validateFunc(outputInfo, isSupported, GetLayerName(activationDesc.m_Function));
     return isSupported ? kTfLiteOk : kTfLiteError;
 }
 
diff --git a/delegate/opaque/src/Comparison.hpp b/delegate/opaque/src/Comparison.hpp
index 046be83..8740cfb 100644
--- a/delegate/opaque/src/Comparison.hpp
+++ b/delegate/opaque/src/Comparison.hpp
@@ -7,19 +7,44 @@
 
 #include <OpaqueDelegateUtils.hpp>
 
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
 namespace armnnOpaqueDelegate
 {
 
+std::string GetLayerName(armnn::ComparisonOperation comparisonOperation)
+{
+    std::string layerName = "COMPARISON";
+    switch (comparisonOperation)
+    {
+        case armnn::ComparisonOperation::NotEqual:
+            layerName += " NOT_EQUAL";
+            break;
+        case armnn::ComparisonOperation::Equal:
+            layerName += " EQUAL";
+            break;
+        case armnn::ComparisonOperation::Greater:
+            layerName += " GREATER";
+            break;
+        case armnn::ComparisonOperation::GreaterOrEqual:
+            layerName += " GREATER_OR_EQUAL";
+            break;
+        case armnn::ComparisonOperation::Less:
+            layerName += " LESS";
+            break;
+        case armnn::ComparisonOperation::LessOrEqual:
+            layerName += " LESS_OR_EQUAL";
+            break;
+        default:
+            layerName += " UNKNOWN";
+    }
+    return layerName;
+}
+
 TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
                                      TfLiteOpaqueContext* tfLiteContext,
                                      TfLiteOpaqueNode* tfLiteNode,
                                      int nodeIndex,
-                                     int32_t tfLiteComparisonOperatorCode)
+                                     int32_t tfLiteComparisonOperatorCode,
+                                     armnn::ComparisonOperation comparisonOperation)
 {
     TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
     TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
@@ -61,6 +86,7 @@
         return kTfLiteError;
     }
 
+    // Use output indices to get output tensor.
     const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
     if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteComparisonOperatorCode, nodeIndex))
     {
@@ -78,37 +104,12 @@
         ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
     }
 
-    armnn::ComparisonOperation comparisonOperation = armnn::ComparisonOperation::Equal;
-    switch(tfLiteComparisonOperatorCode)
-    {
-        case kTfLiteBuiltinEqual:
-            comparisonOperation = armnn::ComparisonOperation::Equal;
-            break;
-        case kTfLiteBuiltinGreater:
-            comparisonOperation = armnn::ComparisonOperation::Greater;
-            break;
-        case kTfLiteBuiltinGreaterEqual:
-            comparisonOperation = armnn::ComparisonOperation::GreaterOrEqual;
-            break;
-        case kTfLiteBuiltinLess:
-            comparisonOperation = armnn::ComparisonOperation::Less;
-            break;
-        case kTfLiteBuiltinLessEqual:
-            comparisonOperation = armnn::ComparisonOperation::LessOrEqual;
-            break;
-        case kTfLiteBuiltinNotEqual:
-            comparisonOperation = armnn::ComparisonOperation::NotEqual;
-            break;
-        default:
-            return kTfLiteError;
-    }
-
     armnn::ComparisonDescriptor descriptor(comparisonOperation);
     bool isSupported = false;
     armnn::BackendId setBackend;
-    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported, std::string layerName)
     {
-        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("COMPARISON",
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC(layerName.c_str(),
                                           tfLiteContext,
                                           IsComparisonSupported,
                                           delegateData.m_Backends,
@@ -122,7 +123,7 @@
 
     if (!delegateData.m_Network)
     {
-        validateFunc(outputTensorInfo, isSupported);
+        validateFunc(outputTensorInfo, isSupported, GetLayerName(comparisonOperation));
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
@@ -142,4 +143,4 @@
     return Connect(comparisonLayer, tfLiteContext, tfLiteNode, delegateData);
 }
 
-} // namespace armnnDelegate
+} // namespace armnnOpaqueDelegate
diff --git a/delegate/opaque/src/Control.hpp b/delegate/opaque/src/Control.hpp
index abc6b6f..bcf3c33 100644
--- a/delegate/opaque/src/Control.hpp
+++ b/delegate/opaque/src/Control.hpp
@@ -86,7 +86,7 @@
 
     if(!concatenationParameters)
     {
-        throw armnn::Exception(&"TfLiteArmnnDelegate: Concat parameters are null in: " [ nodeIndex ]);
+        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Concat parameters are null in node: "
+                               + std::to_string(nodeIndex));
     }
 
     const auto concatDimInput = static_cast<unsigned int>(
diff --git a/delegate/opaque/src/ElementwiseUnary.hpp b/delegate/opaque/src/ElementwiseUnary.hpp
index e169697..df84846 100644
--- a/delegate/opaque/src/ElementwiseUnary.hpp
+++ b/delegate/opaque/src/ElementwiseUnary.hpp
@@ -2,3 +2,138 @@
 // Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
+#pragma once
+
+#include "OpaqueDelegateUtils.hpp"
+
+namespace armnnOpaqueDelegate
+{
+
+std::string GetLayerName(armnn::UnaryOperation unaryOperation)
+{
+    std::string layerName = "ELEMENTWISE_UNARY";
+    switch (unaryOperation)
+    {
+        case armnn::UnaryOperation::Abs:
+            layerName += " ABS";
+            break;
+        case armnn::UnaryOperation::Ceil:
+            layerName += " CEIL";
+            break;
+        case armnn::UnaryOperation::Exp:
+            layerName += " EXP";
+            break;
+        case armnn::UnaryOperation::Log:
+            layerName += " LOG";
+            break;
+        case armnn::UnaryOperation::LogicalNot:
+            layerName += " LOGICAL_NOT";
+            break;
+        case armnn::UnaryOperation::Neg:
+            layerName += " NEG";
+            break;
+        case armnn::UnaryOperation::Rsqrt:
+            layerName += " RSQRT";
+            break;
+        case armnn::UnaryOperation::Sin:
+            layerName += " SIN";
+            break;
+        case armnn::UnaryOperation::Sqrt:
+            layerName += " SQRT";
+            break;
+        default:
+            layerName += " UNKNOWN";
+    }
+    return layerName;
+}
+
+TfLiteStatus VisitElementwiseUnaryOperator(DelegateData& delegateData,
+                                           TfLiteOpaqueContext* tfLiteContext,
+                                           TfLiteOpaqueNode* tfLiteNode,
+                                           int nodeIndex,
+                                           int32_t tfLiteElementWiseUnaryOperatorCode,
+                                           armnn::UnaryOperation unaryOperation)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input indices and use to get input tensor.
+    int numInputs = 0;
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+    // Use input indices to get input tensor.
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteElementWiseUnaryOperatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    // Gather output indices and use to get output tensor.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+    // Use output indices to get output tensor.
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteElementWiseUnaryOperatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    armnn::ElementwiseUnaryDescriptor descriptor(unaryOperation);
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported, std::string layerName)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC(layerName.c_str(),
+                                          tfLiteContext,
+                                          IsElementwiseUnarySupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo,
+                                          outputTensorInfo,
+                                          descriptor);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported, GetLayerName(unaryOperation));
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddElementwiseUnaryLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
+    // Connect
+    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+} // namespace armnnOpaqueDelegate
\ No newline at end of file
diff --git a/delegate/opaque/src/Gather.hpp b/delegate/opaque/src/Gather.hpp
index b9eef2b..b27016e 100644
--- a/delegate/opaque/src/Gather.hpp
+++ b/delegate/opaque/src/Gather.hpp
@@ -7,126 +7,122 @@
 
 #include <OpaqueDelegateUtils.hpp>
 
-#include <algorithm>
-#include <iterator>
-#include <string>
-#include <vector>
-
 namespace armnnOpaqueDelegate
 {
 
-    TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
-                                     TfLiteOpaqueContext* tfLiteContext,
-                                     TfLiteOpaqueNode* tfLiteNode,
-                                     int nodeIndex,
-                                     int32_t operatorCode)
+TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
+                                 TfLiteOpaqueContext* tfLiteContext,
+                                 TfLiteOpaqueNode* tfLiteNode,
+                                 int nodeIndex,
+                                 int32_t operatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    int numInputs = 0;
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
     {
-        TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
-        TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
-        int numInputs = 0;
-        const int* inputTensors;
-        if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
-        {
-            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
-                    tfLiteContext,
-                    "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
-                    nodeIndex);
-            return kTfLiteError;
-        }
-
-        int numOutputs = 0;
-        const int* outputTensors;
-        if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
-        {
-            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
-                    tfLiteContext,
-                    "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
-                    nodeIndex);
-            return kTfLiteError;
-        }
-
-        const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
-                                                                                         inputTensors[0]);
-        if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
-        {
-            return kTfLiteError;
-        }
-
-        const TfLiteOpaqueTensor* tfLiteIndicesTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
-                                                                                           inputTensors[1]);
-        if (!IsValid(tfLiteContext, tfLiteIndicesTensor, operatorCode, nodeIndex))
-        {
-            return kTfLiteError;
-        }
-
-        const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
-                                                                                          outputTensors[0]);
-        if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
-        {
-            return kTfLiteError;
-        }
-        auto* tfLiteNodeParameters = reinterpret_cast<TfLiteGatherParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
-        auto axis = tfLiteNodeParameters->axis;
-
-        const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
-        const armnn::TensorInfo& indicesTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteIndicesTensor);
-        const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
-        armnn::GatherDescriptor gatherDescriptor;
-        gatherDescriptor.m_Axis = axis;
-
-        auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
-        auto indicesDimensions = indicesTensorInfo.GetNumDimensions();
-        auto outputDimensions = outputTensorInfo.GetNumDimensions();
-        if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
-        {
-            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
-                    tfLiteContext,
-                    "TfLiteArmnnDelegate: Operation has invalid axis: %d. It is out of bounds [-%d, %d))",
-                    axis, inputDimensions, inputDimensions);
-            return kTfLiteError;
-        }
-        if (outputDimensions != static_cast<unsigned int>(inputDimensions) + indicesDimensions - 1)
-        {
-            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
-                    tfLiteContext,
-                    "Operation has invalid output dimensions: %d. Output must be an (%d + %d - 1)-D tensor",
-                    outputDimensions, inputDimensions, indicesDimensions);
-            return kTfLiteError;
-        }
-
-        armnn::BackendId setBackend;
-        if (!delegateData.m_Network)
-        {
-            // Check if supported
-            bool isSupported = false;
-            FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("GATHER",
-                                              tfLiteContext,
-                                              IsGatherSupported,
-                                              delegateData.m_Backends,
-                                              isSupported,
-                                              setBackend,
-                                              inputTensorInfo,
-                                              indicesTensorInfo,
-                                              outputTensorInfo,
-                                              gatherDescriptor);
-            return isSupported ? kTfLiteOk : kTfLiteError;
-        }
-
-        armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherLayer(gatherDescriptor);
-        layer->SetBackendId(setBackend);
-        ARMNN_ASSERT(layer != nullptr);
-        layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
-
-        auto inputsTensorsProcess = ProcessInputs(layer,
-                                                  delegateData,
-                                                  tfLiteContext,
-                                                  tfLiteNode);
-        if (inputsTensorsProcess == kTfLiteError)
-        {
-            return inputsTensorsProcess;
-        }
-
-        return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
     }
+
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
+                                                                                     inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteIndicesTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
+                                                                                       inputTensors[1]);
+    if (!IsValid(tfLiteContext, tfLiteIndicesTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
+                                                                                      outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteGatherParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+    auto axis = tfLiteNodeParameters->axis;
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& indicesTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteIndicesTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+    armnn::GatherDescriptor gatherDescriptor;
+    gatherDescriptor.m_Axis = axis;
+
+    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
+    auto indicesDimensions = indicesTensorInfo.GetNumDimensions();
+    auto outputDimensions = outputTensorInfo.GetNumDimensions();
+    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Operation has invalid axis: %d. It is out of bounds [-%d, %d))",
+                axis, inputDimensions, inputDimensions);
+        return kTfLiteError;
+    }
+    if (outputDimensions != static_cast<unsigned int>(inputDimensions) + indicesDimensions - 1)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Operation has invalid output dimensions: %d. "
+                "Output must be an (%d + %d - 1)-D tensor",
+                outputDimensions, inputDimensions, indicesDimensions);
+        return kTfLiteError;
+    }
+
+    armnn::BackendId setBackend;
+    if (!delegateData.m_Network)
+    {
+        // Check if supported
+        bool isSupported = false;
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("GATHER",
+                                          tfLiteContext,
+                                          IsGatherSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo,
+                                          indicesTensorInfo,
+                                          outputTensorInfo,
+                                          gatherDescriptor);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherLayer(gatherDescriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    auto inputsTensorsProcess = ProcessInputs(layer,
+                                              delegateData,
+                                              tfLiteContext,
+                                              tfLiteNode);
+    if (inputsTensorsProcess == kTfLiteError)
+    {
+        return inputsTensorsProcess;
+    }
+
+    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
 } // namespace armnnOpaqueDelegate
\ No newline at end of file
diff --git a/delegate/opaque/src/GatherNd.hpp b/delegate/opaque/src/GatherNd.hpp
index a23fa88..a767d01 100644
--- a/delegate/opaque/src/GatherNd.hpp
+++ b/delegate/opaque/src/GatherNd.hpp
@@ -7,11 +7,6 @@
 
 #include <OpaqueDelegateUtils.hpp>
 
-#include <algorithm>
-#include <iterator>
-#include <string>
-#include <vector>
-
 namespace armnnOpaqueDelegate
 {
 TfLiteStatus VisitGatherNdOperator(DelegateData& delegateData,
diff --git a/delegate/opaque/src/LogicalBinary.hpp b/delegate/opaque/src/LogicalBinary.hpp
index e169697..44a443b 100644
--- a/delegate/opaque/src/LogicalBinary.hpp
+++ b/delegate/opaque/src/LogicalBinary.hpp
@@ -2,3 +2,140 @@
 // Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
+
+std::string GetLayerName(armnn::LogicalBinaryOperation logicalBinaryOperation)
+{
+    std::string layerName = "LOGICAL_BINARY";
+    switch (logicalBinaryOperation)
+    {
+        case armnn::LogicalBinaryOperation::LogicalAnd:
+            layerName += " LOGICAL_AND";
+            break;
+        case armnn::LogicalBinaryOperation::LogicalOr:
+            layerName += " LOGICAL_OR";
+            break;
+        default:
+            layerName += " UNKNOWN";
+    }
+    return layerName;
+}
+
+TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
+                                        TfLiteOpaqueContext* tfLiteContext,
+                                        TfLiteOpaqueNode* tfLiteNode,
+                                        int nodeIndex,
+                                        int32_t logicalOperatorCode,
+                                        armnn::LogicalBinaryOperation binaryOperation)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input indices and use to get input tensor.
+    int numInputs = 0;
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Use input indices to get input tensors.
+    const TfLiteOpaqueTensor* tfLiteInputTensor0 = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor0, logicalOperatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteInputTensor1 = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor1, logicalOperatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    // Gather output indices and use to get output tensors.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Use output indices to get output tensor.
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, logicalOperatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor0);
+    armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor1);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    // Check if we need to expand the dims of any input tensor infos.
+    // This is required for a few of the backends.
+    if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
+    {
+        ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
+    }
+
+    // Setup descriptor and assign operation
+    armnn::LogicalBinaryDescriptor desc;
+    desc.m_Operation = binaryOperation;
+
+    // Check if supported
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported, std::string layerName)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC(layerName.c_str(),
+                                          tfLiteContext,
+                                          IsLogicalBinarySupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo0,
+                                          inputTensorInfo1,
+                                          outputTensorInfo,
+                                          desc);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported, GetLayerName(binaryOperation));
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* logicalBinaryLayer = delegateData.m_Network->AddLogicalBinaryLayer(desc);
+    ARMNN_ASSERT(logicalBinaryLayer != nullptr);
+    logicalBinaryLayer->SetBackendId(setBackend);
+
+    armnn::IOutputSlot& outputSlot = logicalBinaryLayer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    auto inputsTensorsProcess = ProcessInputs(logicalBinaryLayer,
+                                              delegateData,
+                                              tfLiteContext,
+                                              tfLiteNode);
+    if (inputsTensorsProcess == kTfLiteError)
+    {
+        return inputsTensorsProcess;
+    }
+
+    return Connect(logicalBinaryLayer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+} // namespace armnnOpaqueDelegate
diff --git a/delegate/opaque/src/Normalization.hpp b/delegate/opaque/src/Normalization.hpp
index e169697..c6ac676 100644
--- a/delegate/opaque/src/Normalization.hpp
+++ b/delegate/opaque/src/Normalization.hpp
@@ -2,3 +2,202 @@
 // Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus VisitL2NormalizationOperator(DelegateData& delegateData,
+                                          TfLiteOpaqueContext* tfLiteContext,
+                                          TfLiteOpaqueNode* tfLiteNode,
+                                          int nodeIndex,
+                                          int32_t tfLiteL2NormalizationOperatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input indices and use to get input tensor.
+    int numInputs = 0;
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+    // Use input indices to get input tensor.
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteL2NormalizationOperatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+    // Gather output indices and use to get output tensor.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+    // Use output indices to get output tensor.
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteL2NormalizationOperatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    armnn::L2NormalizationDescriptor descriptor;
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("L2_NORMALIZATION",
+                                          tfLiteContext,
+                                          IsL2NormalizationSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo,
+                                          outInfo,
+                                          descriptor);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    // Add a L2Normalization layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddL2NormalizationLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
+    // Connect
+    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+
+TfLiteStatus VisitLocalResponseNormalizationOperator(DelegateData& delegateData,
+                                                     TfLiteOpaqueContext* tfLiteContext,
+                                                     TfLiteOpaqueNode* tfLiteNode,
+                                                     int nodeIndex,
+                                                     int32_t tfLiteNormalizationOperatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input indices and use to get input tensor.
+    int numInputs = 0;
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+    // Use input indices to get input tensor.
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteNormalizationOperatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+    // Gather output indices and use to get output tensor.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+    // Use output indices to get output tensor.
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteNormalizationOperatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    armnn::NormalizationDescriptor descriptor;
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
+    descriptor.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;
+
+    auto* nodeParams = reinterpret_cast<TfLiteLocalResponseNormParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+    descriptor.m_NormSize = nodeParams->radius;
+    descriptor.m_K        = nodeParams->bias;
+    descriptor.m_Alpha    = nodeParams->alpha;
+    descriptor.m_Beta     = nodeParams->beta;
+
+    // ArmNN expects normSize to be the full size of the normalization window
+    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("NORMALIZATION",
+                                          tfLiteContext,
+                                          IsNormalizationSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo,
+                                          outInfo,
+                                          descriptor);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    // Add a Normalization layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddNormalizationLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
+    // Connect
+    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+} // namespace armnnOpaqueDelegate
diff --git a/delegate/opaque/src/OpaqueDelegateUtils.hpp b/delegate/opaque/src/OpaqueDelegateUtils.hpp
index 1fbfade..fd943c8 100644
--- a/delegate/opaque/src/OpaqueDelegateUtils.hpp
+++ b/delegate/opaque/src/OpaqueDelegateUtils.hpp
@@ -424,7 +424,7 @@
             return armnn::DataType::Signed64;
         default:
             throw armnn::Exception(
-                    &"TfLiteArmnnDelegate: Unsupported data type: " [ TfLiteOpaqueTensorType(tfLiteTensor) ]);
+                    &"TfLiteArmnnOpaqueDelegate: Unsupported data type: " [ TfLiteOpaqueTensorType(tfLiteTensor) ]);
     }
 }
 
@@ -528,7 +528,7 @@
     auto allocType = TfLiteOpaqueTensorGetAllocationType(tfLiteTensor);
     if (allocType != kTfLiteMmapRo)
     {
-        throw armnn::Exception("TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(allocType));
+        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Not constant allocation type: " + std::to_string(allocType));
     }
 
     return armnn::ConstTensor(tensorInfo, TfLiteOpaqueTensorData(tfLiteTensor));
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index de88c3e..d631d91 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -87,7 +87,7 @@
             if (std::find(supportedDevices.cbegin(), supportedDevices.cend(), backend) == supportedDevices.cend())
             {
                 TFLITE_LOG_PROD(tflite::TFLITE_LOG_INFO,
-                                "TfLiteArmnnDelegate: Requested unknown backend %s", backend.Get().c_str());
+                                "TfLiteArmnnOpaqueDelegate: Requested unknown backend %s", backend.Get().c_str());
             }
             else
             {
@@ -622,6 +622,13 @@
 {
     switch (TfLiteRegistrationExternalGetBuiltInCode(tfLiteRegistration))
     {
+        case kTfLiteBuiltinAbs:
+            return VisitElementwiseUnaryOperator(delegateData,
+                                                 tfLiteContext,
+                                                 tfLiteNode,
+                                                 nodeIndex,
+                                                 kTfLiteBuiltinAbs,
+                                                 armnn::UnaryOperation::Abs);
         case kTfLiteBuiltinArgMax:
             return VisitArgMinMaxOperator(delegateData,
                                           tfLiteContext,
@@ -646,6 +653,13 @@
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinCast);
+        case kTfLiteBuiltinCeil:
+            return VisitElementwiseUnaryOperator(delegateData,
+                                                 tfLiteContext,
+                                                 tfLiteNode,
+                                                 nodeIndex,
+                                                 kTfLiteBuiltinCeil,
+                                                 armnn::UnaryOperation::Ceil);
         case kTfLiteBuiltinConcatenation:
             return VisitControlOperator(delegateData,
                                         tfLiteContext,
@@ -675,7 +689,15 @@
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
-                                           kTfLiteBuiltinEqual);
+                                           kTfLiteBuiltinEqual,
+                                           armnn::ComparisonOperation::Equal);
+        case kTfLiteBuiltinExp:
+            return VisitElementwiseUnaryOperator(delegateData,
+                                                 tfLiteContext,
+                                                 tfLiteNode,
+                                                 nodeIndex,
+                                                 kTfLiteBuiltinExp,
+                                                 armnn::UnaryOperation::Exp);
         case kTfLiteBuiltinFullyConnected:
             return VisitFullyConnectedOperator(delegateData,
                                                tfLiteContext,
@@ -699,49 +721,101 @@
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
-                                           kTfLiteBuiltinGreater);
+                                           kTfLiteBuiltinGreater,
+                                           armnn::ComparisonOperation::Greater);
         case kTfLiteBuiltinGreaterEqual:
             return VisitComparisonOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
-                                           kTfLiteBuiltinGreaterEqual);
+                                           kTfLiteBuiltinGreaterEqual,
+                                           armnn::ComparisonOperation::GreaterOrEqual);
         case kTfLiteBuiltinHardSwish:
             return VisitActivationOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinHardSwish);
+        case kTfLiteBuiltinL2Normalization:
+            return VisitL2NormalizationOperator(delegateData,
+                                                tfLiteContext,
+                                                tfLiteNode,
+                                                nodeIndex,
+                                                kTfLiteBuiltinL2Normalization);
         case kTfLiteBuiltinLess:
             return VisitComparisonOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
-                                           kTfLiteBuiltinLess);
+                                           kTfLiteBuiltinLess,
+                                           armnn::ComparisonOperation::Less);
         case kTfLiteBuiltinLessEqual:
             return VisitComparisonOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
-                                           kTfLiteBuiltinLessEqual);
+                                           kTfLiteBuiltinLessEqual,
+                                           armnn::ComparisonOperation::LessOrEqual);
         case kTfLiteBuiltinLogistic:
             return VisitActivationOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinLogistic);
+        case kTfLiteBuiltinLocalResponseNormalization:
+            return VisitLocalResponseNormalizationOperator(delegateData,
+                                                           tfLiteContext,
+                                                           tfLiteNode,
+                                                           nodeIndex,
+                                                           kTfLiteBuiltinLocalResponseNormalization);
+        case kTfLiteBuiltinLog:
+            return VisitElementwiseUnaryOperator(delegateData,
+                                                 tfLiteContext,
+                                                 tfLiteNode,
+                                                 nodeIndex,
+                                                 kTfLiteBuiltinLog,
+                                                 armnn::UnaryOperation::Log);
+        case kTfLiteBuiltinLogicalAnd:
+            return VisitLogicalBinaryOperator(delegateData,
+                                              tfLiteContext,
+                                              tfLiteNode,
+                                              nodeIndex,
+                                              kTfLiteBuiltinLogicalAnd,
+                                              armnn::LogicalBinaryOperation::LogicalAnd);
+        case kTfLiteBuiltinLogicalNot:
+            return VisitElementwiseUnaryOperator(delegateData,
+                                                 tfLiteContext,
+                                                 tfLiteNode,
+                                                 nodeIndex,
+                                                 kTfLiteBuiltinLogicalNot,
+                                                 armnn::UnaryOperation::LogicalNot);
+        case kTfLiteBuiltinLogicalOr:
+            return VisitLogicalBinaryOperator(delegateData,
+                                              tfLiteContext,
+                                              tfLiteNode,
+                                              nodeIndex,
+                                              kTfLiteBuiltinLogicalOr,
+                                              armnn::LogicalBinaryOperation::LogicalOr);
         case kTfLiteBuiltinMean:
             return VisitControlOperator(delegateData,
                                         tfLiteContext,
                                         tfLiteNode,
                                         nodeIndex,
                                         kTfLiteBuiltinMean);
+        case kTfLiteBuiltinNeg:
+            return VisitElementwiseUnaryOperator(delegateData,
+                                                 tfLiteContext,
+                                                 tfLiteNode,
+                                                 nodeIndex,
+                                                 kTfLiteBuiltinNeg,
+                                                 armnn::UnaryOperation::Neg);
         case kTfLiteBuiltinNotEqual:
             return VisitComparisonOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
-                                           kTfLiteBuiltinNotEqual);
+                                           kTfLiteBuiltinNotEqual,
+                                           armnn::ComparisonOperation::NotEqual);
         case kTfLiteBuiltinPrelu:
             return VisitPreluOperator(delegateData,
                                       tfLiteContext,
@@ -766,12 +840,33 @@
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinRelu6);
+        case kTfLiteBuiltinRsqrt:
+            return VisitElementwiseUnaryOperator(delegateData,
+                                                 tfLiteContext,
+                                                 tfLiteNode,
+                                                 nodeIndex,
+                                                 kTfLiteBuiltinRsqrt,
+                                                 armnn::UnaryOperation::Rsqrt);
+        case kTfLiteBuiltinSin:
+            return VisitElementwiseUnaryOperator(delegateData,
+                                                 tfLiteContext,
+                                                 tfLiteNode,
+                                                 nodeIndex,
+                                                 kTfLiteBuiltinSin,
+                                                 armnn::UnaryOperation::Sin);
         case kTfLiteBuiltinSpaceToBatchNd:
             return VisitSpaceToBatchNdOperator(delegateData,
                                                tfLiteContext,
                                                tfLiteNode,
                                                nodeIndex,
                                                kTfLiteBuiltinSpaceToBatchNd);
+        case kTfLiteBuiltinSqrt:
+            return VisitElementwiseUnaryOperator(delegateData,
+                                                 tfLiteContext,
+                                                 tfLiteNode,
+                                                 nodeIndex,
+                                                 kTfLiteBuiltinSqrt,
+                                                 armnn::UnaryOperation::Sqrt);
         case kTfLiteBuiltinTanh:
             return VisitActivationOperator(delegateData,
                                            tfLiteContext,