IVGCVSW-5379 'TfLiteDelegate: Implement the ElementWiseBinary operators'

* Enabled the elementwise binary operators DIV, MAXIMUM, MINIMUM, MUL and SUB
* Implemented unit tests for elementwise binary operators
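
A minimal sketch of how the new templated test helper is driven
(illustrative only: ElementwiseBinaryTest and its signature are taken
from ElementwiseBinaryTestHelper.hpp in this patch, while the test
name and the #include path are assumptions):

    #include "ElementwiseBinaryTestHelper.hpp"

    // Runs MUL on CpuRef through both the TfLite interpreter and the
    // ArmNN delegate interpreter; the helper compares the two outputs
    // element by element.
    void MulSmokeTest()
    {
        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
        std::vector<int32_t> shape { 1, 1, 2, 2 }; // inputs and output share a shape
        std::vector<float> input0 { 1.f, 2.f, 3.f, 4.f };
        std::vector<float> input1 { 2.f, 2.f, 2.f, 2.f };
        std::vector<float> expected { 2.f, 4.f, 6.f, 8.f };

        // quantScale and quantOffset default to 1.0f and 0; they only
        // matter for quantized tensor types such as TensorType_UINT8.
        ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MUL,
                                     tflite::ActivationFunctionType_NONE,
                                     ::tflite::TensorType_FLOAT32,
                                     backends,
                                     shape, shape, shape,
                                     input0, input1, expected);
    }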

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I196998d53201a6e8888bb203eb640530b8feeac9
diff --git a/delegate/cmake/Modules/FindTfLite.cmake b/delegate/cmake/Modules/FindTfLite.cmake
index d6c274c..96e15db 100644
--- a/delegate/cmake/Modules/FindTfLite.cmake
+++ b/delegate/cmake/Modules/FindTfLite.cmake
@@ -11,7 +11,7 @@
             tensorflow/lite
             third_party
         HINTS
-            ${TFLITE_LIB_ROOT})
+            ${TENSORFLOW_ROOT}/..)
 
 find_library(TfLite_LIB
         NAMES
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
index a22d9f5..3d3f1a0 100644
--- a/delegate/src/ElementwiseBinary.hpp
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -38,15 +38,119 @@
     return isSupported ? kTfLiteOk : kTfLiteError;
 }
 
-armnn::IConnectableLayer* AddAdditionLayer(DelegateData& delegateData)
+TfLiteStatus ValidateDivOperator(DelegateData& delegateData,
+                                 TfLiteContext* tfLiteContext,
+                                 const armnn::TensorInfo& inputInfo1,
+                                 const armnn::TensorInfo& inputInfo2,
+                                 const armnn::TensorInfo& outputInfo)
 {
-
-    if (!delegateData.m_Network)
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
     {
-        return nullptr;
-    }
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsDivisionSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputInfo1,
+                                   inputInfo2,
+                                   outputTensorInfo);
+    };
 
-    return delegateData.m_Network->AddAdditionLayer();
+    validateFunc(outputInfo, isSupported);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateMaximumOperator(DelegateData& delegateData,
+                                     TfLiteContext* tfLiteContext,
+                                     const armnn::TensorInfo& inputInfo1,
+                                     const armnn::TensorInfo& inputInfo2,
+                                     const armnn::TensorInfo& outputInfo)
+{
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsMaximumSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputInfo1,
+                                   inputInfo2,
+                                   outputTensorInfo);
+    };
+
+    validateFunc(outputInfo, isSupported);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateMinimumOperator(DelegateData& delegateData,
+                                     TfLiteContext* tfLiteContext,
+                                     const armnn::TensorInfo& inputInfo1,
+                                     const armnn::TensorInfo& inputInfo2,
+                                     const armnn::TensorInfo& outputInfo)
+{
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsMinimumSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputInfo1,
+                                   inputInfo2,
+                                   outputTensorInfo);
+    };
+
+    validateFunc(outputInfo, isSupported);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateMulOperator(DelegateData& delegateData,
+                                 TfLiteContext* tfLiteContext,
+                                 const armnn::TensorInfo& inputInfo1,
+                                 const armnn::TensorInfo& inputInfo2,
+                                 const armnn::TensorInfo& outputInfo)
+{
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsMultiplicationSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputInfo1,
+                                   inputInfo2,
+                                   outputTensorInfo);
+    };
+
+    validateFunc(outputInfo, isSupported);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateSubOperator(DelegateData& delegateData,
+                                 TfLiteContext* tfLiteContext,
+                                 const armnn::TensorInfo& inputInfo1,
+                                 const armnn::TensorInfo& inputInfo2,
+                                 const armnn::TensorInfo& outputInfo)
+{
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsSubtractionSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputInfo1,
+                                   inputInfo2,
+                                   outputTensorInfo);
+    };
+
+    validateFunc(outputInfo, isSupported);
+    return isSupported ? kTfLiteOk : kTfLiteError;
 }
 
 TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
@@ -103,6 +207,36 @@
                                            inputTensorInfo0,
                                            inputTensorInfo1,
                                            outputTensorInfo);
+            case kTfLiteBuiltinDiv:
+                return ValidateDivOperator(delegateData,
+                                           tfLiteContext,
+                                           inputTensorInfo0,
+                                           inputTensorInfo1,
+                                           outputTensorInfo);
+            case kTfLiteBuiltinMaximum:
+                return ValidateMaximumOperator(delegateData,
+                                               tfLiteContext,
+                                               inputTensorInfo0,
+                                               inputTensorInfo1,
+                                               outputTensorInfo);
+            case kTfLiteBuiltinMinimum:
+                return ValidateMinimumOperator(delegateData,
+                                               tfLiteContext,
+                                               inputTensorInfo0,
+                                               inputTensorInfo1,
+                                               outputTensorInfo);
+            case kTfLiteBuiltinMul:
+                return ValidateMulOperator(delegateData,
+                                           tfLiteContext,
+                                           inputTensorInfo0,
+                                           inputTensorInfo1,
+                                           outputTensorInfo);
+            case kTfLiteBuiltinSub:
+                return ValidateSubOperator(delegateData,
+                                           tfLiteContext,
+                                           inputTensorInfo0,
+                                           inputTensorInfo1,
+                                           outputTensorInfo);
             default:
                 return kTfLiteError;
         }
@@ -113,7 +247,22 @@
     switch(elementwiseBinaryOperatorCode)
     {
         case kTfLiteBuiltinAdd:
-            elementwiseBinaryLayer = AddAdditionLayer(delegateData);
+            elementwiseBinaryLayer = delegateData.m_Network->AddAdditionLayer();
+            break;
+        case kTfLiteBuiltinDiv:
+            elementwiseBinaryLayer = delegateData.m_Network->AddDivisionLayer();
+            break;
+        case kTfLiteBuiltinMaximum:
+            elementwiseBinaryLayer = delegateData.m_Network->AddMaximumLayer();
+            break;
+        case kTfLiteBuiltinMinimum:
+            elementwiseBinaryLayer = delegateData.m_Network->AddMinimumLayer();
+            break;
+        case kTfLiteBuiltinMul:
+            elementwiseBinaryLayer = delegateData.m_Network->AddMultiplicationLayer();
+            break;
+        case kTfLiteBuiltinSub:
+            elementwiseBinaryLayer = delegateData.m_Network->AddSubtractionLayer();
             break;
         default:
             return kTfLiteError;
diff --git a/delegate/src/test/ElementwiseBinaryTest.cpp b/delegate/src/test/ElementwiseBinaryTest.cpp
index bd4019a..2a8c91b 100644
--- a/delegate/src/test/ElementwiseBinaryTest.cpp
+++ b/delegate/src/test/ElementwiseBinaryTest.cpp
@@ -19,18 +19,11 @@
 namespace armnnDelegate
 {
 
-TEST_SUITE("ElementwiseBinaryTest")
+void AddFP32Test(std::vector<armnn::BackendId>& backends)
 {
-
-TEST_CASE ("Add_Float32_GpuAcc_Test")
-{
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
-    // Set input data
     std::vector<int32_t> input0Shape { 2, 2, 2, 3 };
     std::vector<int32_t> input1Shape { 2, 2, 2, 3 };
-    std::vector<int32_t> outputShape { 2, 2, 2, 3 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 3 };
 
     std::vector<float> input0Values =
     {
@@ -45,7 +38,6 @@
 
         0.0f, 0.0f, 1.0f,
         0.2f, 1.0f, 2.0f,
-
     };
 
     std::vector<float> input1Values =
@@ -78,27 +70,23 @@
         0.9f, 2.0f,  7.0f,
     };
 
-
-    ElementwiseBinaryFP32Test(tflite::BuiltinOperator_ADD,
-                              tflite::ActivationFunctionType_NONE,
-                              backends,
-                              input0Shape,
-                              input1Shape,
-                              outputShape,
-                              input0Values,
-                              input1Values,
-                              expectedOutputValues);
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
 }
 
-TEST_CASE ("Add_Broadcast_Float32_GpuAcc_Test")
+void AddBroadcastTest(std::vector<armnn::BackendId>& backends)
 {
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
-    // Set input data
     std::vector<int32_t> input0Shape { 1, 3, 2, 1 };
     std::vector<int32_t> input1Shape { 1, 1, 2, 3 };
-    std::vector<int32_t> outputShape { 1, 3, 2, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 3, 2, 3 };
 
     std::vector<float> input0Values
     {
@@ -128,42 +116,830 @@
         4.5f, 5.5f, 6.5f,
         8.5f, 9.5f, 10.5f,
     };
-    ElementwiseBinaryFP32Test(tflite::BuiltinOperator_ADD,
-                              tflite::ActivationFunctionType_NONE,
-                              backends,
-                              input0Shape,
-                              input1Shape,
-                              outputShape,
-                              input0Values,
-                              input1Values,
-                              expectedOutputValues);
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
 }
 
-TEST_CASE ("Add_ActivationRELU_Float32_GpuAcc_Test")
+void AddActivationTest(std::vector<armnn::BackendId>& backends)
 {
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
-    // Set input data
     std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
     std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
-    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
 
     std::vector<float> input0Values { 4.0f, 0.8f, 0.7f, -0.8f };
     std::vector<float> input1Values { 0.7f, -1.2f, 0.8f, 0.5f };
-    // Set output data
     std::vector<float> expectedOutputValues { 4.7f, 0.0f, 1.5f, 0.0f };
-    ElementwiseBinaryFP32Test(tflite::BuiltinOperator_ADD,
-                              tflite::ActivationFunctionType_RELU,
-                              backends,
-                              input0Shape,
-                              input1Shape,
-                              outputShape,
-                              input0Values,
-                              input1Values,
-                              expectedOutputValues);
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
+                                 tflite::ActivationFunctionType_RELU,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
 }
 
+void AddUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<uint8_t> input0Values =
+    {
+        63,  35,  77,  70,  56, 112,
+        203,  28, 252, 168, 245,  91
+    };
+
+    std::vector<uint8_t> input1Values =
+    {
+        21,   7, 175, 231, 175, 210,
+        126, 161,  63,  21, 105, 126
+    };
+
+    std::vector<uint8_t> expectedOutputValues =
+    {
+        81,  39, 249, 255, 228, 255,
+        255, 186, 255, 186, 255, 214,
+    };
+
+    ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_ADD,
+                                   tflite::ActivationFunctionType_NONE,
+                                   ::tflite::TensorType_UINT8,
+                                   backends,
+                                   input0Shape,
+                                   input1Shape,
+                                   expectedOutputShape,
+                                   input0Values,
+                                   input1Values,
+                                   expectedOutputValues, 7.0f, 3);
 }
 
+void DivFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<float> input0Values =
+    {
+        2.f, 2.f, 2.f, 2.f, 3.f, 3.f, 3.f, 3.f,
+        4.f, 4.f, 4.f, 4.f, 5.f, 5.f, 5.f, 5.f
+
+    };
+
+    std::vector<float> input1Values =
+    {
+        1.f, 1.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f,
+        4.f, 4.f, 4.f, 4.f, 4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<float> expectedOutputValues =
+    {
+        2.f, 2.f, 2.f, 2.f, 1.50f, 1.50f, 1.50f, 1.50f,
+        1.f, 1.f, 1.f, 1.f, 1.25f, 1.25f, 1.25f, 1.25f
+    };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_DIV,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void DivBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 2 };
+
+    std::vector<float> input0Values = { 2, 4, 6, 8, 10, 12, 14, 16 };
+    std::vector<float> input1Values = { 2 };
+    std::vector<float> expectedOutputValues = { 1, 2, 3, 4, 5, 6, 7, 8 };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_DIV,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void DivUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<uint8_t> input0Values =
+    {
+        2, 2, 2, 2,  3, 3, 3, 3,
+        4, 4, 4, 4,  5, 5, 5, 5
+
+    };
+
+    std::vector<uint8_t> input1Values =
+    {
+        1, 1, 1, 1,  2, 2, 2, 2,
+        4, 4, 4, 4,  4, 4, 4, 4
+    };
+
+    std::vector<uint8_t> expectedOutputValues =
+    {
+        8, 8, 8, 8,  6, 6, 6, 6,
+        4, 4, 4, 4,  5, 5, 5, 5
+    };
+
+    ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_DIV,
+                                   tflite::ActivationFunctionType_NONE,
+                                   ::tflite::TensorType_UINT8,
+                                   backends,
+                                   input0Shape,
+                                   input1Shape,
+                                   expectedOutputShape,
+                                   input0Values,
+                                   input1Values,
+                                   expectedOutputValues, 0.25f, 0);
+}
+
+void MaxFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<float> input0Values =
+    {
+        1.f, 1.f, 5.f, 1.f,  2.f, 2.f, 7.f, 2.f,
+        3.f, 3.f, 3.f, 3.f,  4.f, 4.f, 4.f, 4.f
+
+    };
+
+    std::vector<float> input1Values =
+    {
+        2.f, 2.f, 2.f, 2.f,  3.f, 3.f, 3.f, 3.f,
+        4.f, 4.f, 4.f, 4.f,  5.f, 5.f, 5.f, 5.f
+    };
+
+    std::vector<float> expectedOutputValues =
+    {
+        2.f, 2.f, 5.f, 2.f,  3.f, 3.f, 7.f, 3.f,
+        4.f, 4.f, 4.f, 4.f,  5.f, 5.f, 5.f, 5.f
+    };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MAXIMUM,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MaxBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 2 };
+
+    std::vector<float> input0Values = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
+    std::vector<float> input1Values = { 4.f };
+    std::vector<float> expectedOutputValues = { 4.f, 4.f, 4.f, 4.f, 5.f, 6.f, 7.f, 8.f };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MAXIMUM,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MaxUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<uint8_t> input0Values =
+    {
+        1, 1, 1, 1, 7, 8, 9, 9,
+        3, 3, 3, 3, 4, 4, 4, 4
+
+    };
+
+    std::vector<uint8_t> input1Values =
+    {
+        2, 2, 2, 2, 3, 3, 3, 3,
+        4, 4, 4, 4, 5, 5, 5, 5
+    };
+
+    std::vector<uint8_t> expectedOutputValues =
+    {
+        2, 2, 2, 2, 7, 8, 9, 9,
+        4, 4, 4, 4, 5, 5, 5, 5
+    };
+
+    ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_MAXIMUM,
+                                   tflite::ActivationFunctionType_NONE,
+                                   ::tflite::TensorType_UINT8,
+                                   backends,
+                                   input0Shape,
+                                   input1Shape,
+                                   expectedOutputShape,
+                                   input0Values,
+                                   input1Values,
+                                   expectedOutputValues, 1.0f, 0);
+}
+
+void MinFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<float> input0Values =
+    {
+        1.f, 1.f, 5.f, 1.f,  2.f, 2.f, 7.f, 2.f,
+        3.f, 3.f, 3.f, 3.f,  4.f, 4.f, 4.f, 4.f
+
+    };
+
+    std::vector<float> input1Values =
+    {
+        2.f, 2.f, 2.f, 2.f,  3.f, 3.f, 3.f, 3.f,
+        1.f, 1.f, 1.f, 1.f,  5.f, 5.f, 5.f, 5.f
+    };
+
+    std::vector<float> expectedOutputValues =
+    {
+        1.f, 1.f, 2.f, 1.f,  2.f, 2.f, 3.f, 2.f,
+        1.f, 1.f, 1.f, 1.f,  4.f, 4.f, 4.f, 4.f
+    };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MINIMUM,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MinBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 2 };
+
+    std::vector<float> input0Values = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
+
+    std::vector<float> input1Values = { 4.f };
+
+    std::vector<float> expectedOutputValues = { 1.f, 2.f, 3.f, 4.f, 4.f, 4.f, 4.f, 4.f };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MINIMUM,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MinUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<uint8_t> input0Values =
+    {
+        1, 1, 1, 1, 7, 8, 9, 9,
+        3, 3, 3, 3, 4, 4, 4, 4
+
+    };
+
+    std::vector<uint8_t> input1Values =
+    {
+        2, 2, 2, 2, 3, 3, 3, 3,
+        4, 4, 4, 4, 5, 5, 5, 5
+    };
+
+    std::vector<uint8_t> expectedOutputValues =
+    {
+        1, 1, 1, 1, 3, 3, 3, 3,
+        3, 3, 3, 3, 4, 4, 4, 4
+    };
+
+    ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_MINIMUM,
+                                   tflite::ActivationFunctionType_NONE,
+                                   ::tflite::TensorType_UINT8,
+                                   backends,
+                                   input0Shape,
+                                   input1Shape,
+                                   expectedOutputShape,
+                                   input0Values,
+                                   input1Values,
+                                   expectedOutputValues, 1.0f, 0);
+}
+
+void MulFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<float> input0Values =
+    {
+        1.f, 1.f, 1.f, 1.f,  2.f, 2.f, 2.f, 2.f,
+        3.f, 3.f, 3.f, 3.f,  4.f, 4.f, 4.f, 4.f
+
+    };
+
+    std::vector<float> input1Values =
+    {
+        2.f, 2.f, 2.f, 2.f,  3.f, 3.f, 3.f, 3.f,
+        4.f, 4.f, 4.f, 4.f,  5.f, 5.f, 5.f, 5.f
+    };
+
+    std::vector<float> expectedOutputValues =
+    {
+        2.f,  2.f,  2.f,  2.f,   6.f,  6.f,  6.f,  6.f,
+        12.f, 12.f, 12.f, 12.f,  20.f, 20.f, 20.f, 20.f
+    };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MUL,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MulBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 2 };
+
+    std::vector<float> input0Values = { 2, 4, 6, 8, 10, 12, 14, 16 };
+    std::vector<float> input1Values = { 2 };
+    std::vector<float> expectedOutputValues = { 4, 8, 12, 16, 20, 24, 28, 32 };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MUL,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MulUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<uint8_t> input0Values =
+    {
+        1, 2, 3,    4,  5,  6,
+        7, 8, 9,   10, 11, 12
+
+    };
+
+    std::vector<uint8_t> input1Values = { 1, 2, 3 };
+
+    std::vector<uint8_t> expectedOutputValues =
+    {
+        1,  4,   9,     4, 10, 18,
+        7, 16,  27,    10, 22, 36
+    };
+
+    ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_MUL,
+                                   tflite::ActivationFunctionType_NONE,
+                                   ::tflite::TensorType_UINT8,
+                                   backends,
+                                   input0Shape,
+                                   input1Shape,
+                                   expectedOutputShape,
+                                   input0Values,
+                                   input1Values,
+                                   expectedOutputValues, 1.0f, 0);
+}
+
+void MulActivationTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<float> input0Values { 4.0f, 0.0f, 1.0f, 0.5f };
+    std::vector<float> input1Values { -2.0f, -1.2f, 2.5f, 2.0f };
+    std::vector<float> expectedOutputValues { 0.0f, 0.0f, 2.5f, 1.0f };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MUL,
+                                 tflite::ActivationFunctionType_RELU,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void SubFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 1, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 1, 1, 2, 2 };
+
+    std::vector<float> input0Values = { 1, 3, 3, -7 };
+    std::vector<float> input1Values = { 1, -1, 0, -2 };
+    std::vector<float> expectedOutputValues = { 0, 4, 3, -5 };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_SUB,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void SubBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 1, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 1, 2, 2 };
+
+    std::vector<float> input0Values = { 2, 3, 4, 5};
+    std::vector<float> input1Values = { 10 };
+    std::vector<float> expectedOutputValues = { -8, -7, -6, -5 };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_SUB,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void SubUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 1, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 1, 2, 2 };
+
+    std::vector<uint8_t> input0Values = { 10, 12, 14, 16 };
+    std::vector<uint8_t> input1Values = { 2 };
+    std::vector<uint8_t> expectedOutputValues = { 8, 10, 12, 14 };
+
+    ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_SUB,
+                                   tflite::ActivationFunctionType_NONE,
+                                   ::tflite::TensorType_UINT8,
+                                   backends,
+                                   input0Shape,
+                                   input1Shape,
+                                   expectedOutputShape,
+                                   input0Values,
+                                   input1Values,
+                                   expectedOutputValues, 1.0f, 0);
+}
+
+TEST_SUITE("ElementwiseBinaryTest")
+{
+
+TEST_CASE ("ADD_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    AddFP32Test(backends);
+}
+
+TEST_CASE ("ADD_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    AddFP32Test(backends);
+}
+
+TEST_CASE ("ADD_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    AddBroadcastTest(backends);
+}
+
+TEST_CASE ("ADD_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    AddBroadcastTest(backends);
+}
+
+TEST_CASE ("ADD_Activation_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    AddActivationTest(backends);
+}
+
+TEST_CASE ("ADD_Actiation_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    AddActivationTest(backends);
+}
+
+TEST_CASE ("ADD_UINT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    AddUint8Test(backends);
+}
+
+TEST_CASE ("ADD_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    AddUint8Test(backends);
+}
+
+TEST_CASE ("DIV_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    DivFP32Test(backends);
+}
+
+TEST_CASE ("DIV_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    DivFP32Test(backends);
+}
+
+TEST_CASE ("DIV_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    DivBroadcastTest(backends);
+}
+
+TEST_CASE ("DIV_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    DivBroadcastTest(backends);
+}
+
+TEST_CASE ("DIV_UINT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    DivUint8Test(backends);
+}
+
+TEST_CASE ("DIV_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    DivUint8Test(backends);
+}
+
+TEST_CASE ("MAX_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxFP32Test(backends);
+}
+
+TEST_CASE ("MAX_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxFP32Test(backends);
+}
+
+TEST_CASE ("MAX_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxBroadcastTest(backends);
+}
+
+TEST_CASE ("MAX_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxBroadcastTest(backends);
+}
+
+TEST_CASE ("MAX_UINT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxUint8Test(backends);
+}
+
+TEST_CASE ("MAX_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxUint8Test(backends);
+}
+
+TEST_CASE ("MIN_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MinFP32Test(backends);
+}
+
+TEST_CASE ("MIN_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MinFP32Test(backends);
+}
+
+TEST_CASE ("MIN_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MinBroadcastTest(backends);
+}
+
+TEST_CASE ("MIN_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MinBroadcastTest(backends);
+}
+
+TEST_CASE ("MIN_UINT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MinUint8Test(backends);
+}
+
+TEST_CASE ("MIN_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MinUint8Test(backends);
+}
+
+TEST_CASE ("MUL_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MulFP32Test(backends);
+}
+
+TEST_CASE ("MUL_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MulFP32Test(backends);
+}
+
+TEST_CASE ("MUL_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MulBroadcastTest(backends);
+}
+
+TEST_CASE ("MUL_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MulBroadcastTest(backends);
+}
+
+TEST_CASE ("MUL_Activation_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MulActivationTest(backends);
+}
+
+TEST_CASE ("MUL_Actiation_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MulActivationTest(backends);
+}
+
+TEST_CASE ("MUL_UINT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MulUint8Test(backends);
+}
+
+TEST_CASE ("MUL_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MulUint8Test(backends);
+}
+
+TEST_CASE ("SUB_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    SubFP32Test(backends);
+}
+
+TEST_CASE ("SUB_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    SubFP32Test(backends);
+}
+
+TEST_CASE ("SUB_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    SubBroadcastTest(backends);
+}
+
+TEST_CASE ("SUB_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    SubBroadcastTest(backends);
+}
+
+TEST_CASE ("SUB_UINT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    SubUint8Test(backends);
+}
+
+TEST_CASE ("SUB_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    SubUint8Test(backends);
+}
+
+} // End of TEST_SUITE("ElementwiseBinaryTest")
+
 } // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/ElementwiseBinaryTestHelper.hpp b/delegate/src/test/ElementwiseBinaryTestHelper.hpp
index 72f9f85..6009272 100644
--- a/delegate/src/test/ElementwiseBinaryTestHelper.hpp
+++ b/delegate/src/test/ElementwiseBinaryTestHelper.hpp
@@ -24,7 +24,9 @@
                                                      tflite::TensorType tensorType,
                                                      const std::vector <int32_t>& input0TensorShape,
                                                      const std::vector <int32_t>& input1TensorShape,
-                                                     const std::vector <int32_t>& outputTensorShape)
+                                                     const std::vector <int32_t>& outputTensorShape,
+                                                     float quantScale = 1.0f,
+                                                     int quantOffset  = 0)
 {
     using namespace tflite;
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
@@ -32,19 +34,36 @@
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
     buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
 
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+
     std::array<flatbuffers::Offset<Tensor>, 3> tensors;
     tensors[0] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
                                                                       input0TensorShape.size()),
-                              tensorType, 0);
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input_0"),
+                              quantizationParameters);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
                                                                       input1TensorShape.size()),
-                              tensorType, 0);
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input_1"),
+                              quantizationParameters);
     tensors[2] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                       outputTensorShape.size()),
-                              tensorType);
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
 
     // create operator
     tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
@@ -63,6 +82,18 @@
             operatorBuiltinOptions = CreateDivOptions(flatBufferBuilder, activationType).Union();
             break;
         }
+        case BuiltinOperator_MAXIMUM:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_MaximumMinimumOptions;
+            operatorBuiltinOptions = CreateMaximumMinimumOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_MINIMUM:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_MaximumMinimumOptions;
+            operatorBuiltinOptions = CreateMaximumMinimumOptions(flatBufferBuilder).Union();
+            break;
+        }
         case BuiltinOperator_MUL:
         {
             operatorBuiltinOptionsType = BuiltinOptions_MulOptions;
@@ -115,23 +146,29 @@
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
 }
 
-void ElementwiseBinaryFP32Test(tflite::BuiltinOperator binaryOperatorCode,
-                               tflite::ActivationFunctionType activationType,
-                               std::vector<armnn::BackendId>& backends,
-                               std::vector<int32_t>& input0Shape,
-                               std::vector<int32_t>& input1Shape,
-                               std::vector<int32_t>& outputShape,
-                               std::vector<float>& input0Values,
-                               std::vector<float>& input1Values,
-                               std::vector<float>& expectedOutputValues)
+template <typename T>
+void ElementwiseBinaryTest(tflite::BuiltinOperator binaryOperatorCode,
+                           tflite::ActivationFunctionType activationType,
+                           tflite::TensorType tensorType,
+                           std::vector<armnn::BackendId>& backends,
+                           std::vector<int32_t>& input0Shape,
+                           std::vector<int32_t>& input1Shape,
+                           std::vector<int32_t>& outputShape,
+                           std::vector<T>& input0Values,
+                           std::vector<T>& input1Values,
+                           std::vector<T>& expectedOutputValues,
+                           float quantScale = 1.0f,
+                           int quantOffset  = 0)
 {
     using namespace tflite;
     std::vector<char> modelBuffer = CreateElementwiseBinaryTfLiteModel(binaryOperatorCode,
                                                                        activationType,
-                                                                      ::tflite::TensorType_FLOAT32,
-                                                                      input0Shape,
-                                                                      input1Shape,
-                                                                      outputShape);
+                                                                       tensorType,
+                                                                       input0Shape,
+                                                                       input1Shape,
+                                                                       outputShape,
+                                                                       quantScale,
+                                                                       quantOffset);
 
     const Model* tfLiteModel = GetModel(modelBuffer.data());
     // Create TfLite Interpreters
@@ -158,28 +195,28 @@
 
     // Set input data
     auto tfLiteDelegateInput0Id = tfLiteInterpreter->inputs()[0];
-    auto tfLiteDelageInput0Data = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInput0Id);
+    auto tfLiteDelageInput0Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput0Id);
     for (unsigned int i = 0; i < input0Values.size(); ++i)
     {
         tfLiteDelageInput0Data[i] = input0Values[i];
     }
 
     auto tfLiteDelegateInput1Id = tfLiteInterpreter->inputs()[1];
-    auto tfLiteDelageInput1Data = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInput1Id);
+    auto tfLiteDelageInput1Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput1Id);
     for (unsigned int i = 0; i < input1Values.size(); ++i)
     {
         tfLiteDelageInput1Data[i] = input1Values[i];
     }
 
     auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0];
-    auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInput0Id);
+    auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput0Id);
     for (unsigned int i = 0; i < input0Values.size(); ++i)
     {
         armnnDelegateInput0Data[i] = input0Values[i];
     }
 
     auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1];
-    auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInput1Id);
+    auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput1Id);
     for (unsigned int i = 0; i < input1Values.size(); ++i)
     {
         armnnDelegateInput1Data[i] = input1Values[i];
@@ -191,9 +228,9 @@
 
     // Compare output data
     auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+    auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
     auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
     for (size_t i = 0; i < expectedOutputValues.size(); i++)
     {
         CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
@@ -204,8 +241,4 @@
     armnnDelegateInterpreter.reset(nullptr);
 }
 
-} // anonymous namespace
-
-
-
-
+} // anonymous namespace
\ No newline at end of file