IVGCVSW-7675 Rework DelegateUnitTests so backends are subcases.

The intent of this change is to remove the per-backend test cases in
the delegate unit tests. They are replaced by DocTest SUBCASEs that
are parameterized by the available backends. The list of available
backends is determined by the compilation flags.
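For illustration, a minimal sketch of how a backend-resolving helper
such as CaptureAvailableBackends could do this (the compile-time flag
names are placeholders, not the actual implementation):

    #include <doctest/doctest.h>
    #include <armnn/BackendId.hpp>
    #include <vector>

    // Hypothetical sketch: run one DocTest SUBCASE per backend enabled
    // at compile time, unless the caller asked for specific backends.
    inline std::vector<armnn::BackendId> CaptureAvailableBackends(
        const std::vector<armnn::BackendId>& backends)
    {
        if (!backends.empty())
        {
            return backends;    // Honour an explicit backend request.
        }
        std::vector<armnn::BackendId> captured;
    #if defined(ARMNNREF_ENABLED)          // placeholder flag name
        SUBCASE("CpuRef") { captured = { armnn::Compute::CpuRef }; }
    #endif
    #if defined(ARMCOMPUTENEON_ENABLED)    // placeholder flag name
        SUBCASE("CpuAcc") { captured = { armnn::Compute::CpuAcc }; }
    #endif
    #if defined(ARMCOMPUTECL_ENABLED)      // placeholder flag name
        SUBCASE("GpuAcc") { captured = { armnn::Compute::GpuAcc }; }
    #endif
        return captured;
    }

A test case that reaches this helper is then re-entered by DocTest once
per enabled backend, replacing the old per-backend TEST_CASE copies.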

Signed-off-by: Colm Donelan <colm.donelan@arm.com>
Change-Id: Ia377c7a7399d0e30dc287d7217b3e3b52e1ea074
diff --git a/delegate/test/ActivationTest.cpp b/delegate/test/ActivationTest.cpp
index 5eafa49..113e645 100644
--- a/delegate/test/ActivationTest.cpp
+++ b/delegate/test/ActivationTest.cpp
@@ -15,370 +15,164 @@
 namespace armnnDelegate
 {
 
-
-void ActivationReLuTest(std::vector<armnn::BackendId>& backends)
+TEST_SUITE("Activation_Tests")
 {
-    std::vector<float> inputData = {
-            -0.1f, -0.2f, -0.3f, -0.4f,
-            0.1f,  0.2f,  0.3f,  0.4f,
-            -1.0f, -2.0f, -3.0f, -4.0f,
-            1.0f,  2.0f,  3.0f,  4.0f
-    };
 
-    // Calculate output values for input.
-    auto f = [](float value)
+    TEST_CASE("Activation_ReLu_Test")
     {
-        return std::fmax(0.0f, value);
-    };
-    std::vector<float> outputExpectedData(inputData.size());
-    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
-
-    ActivationTest(tflite::BuiltinOperator_RELU,
-                   backends,
-                   inputData,
-                   outputExpectedData);
-}
-
-void ActivationBoundedReluTest(std::vector<armnn::BackendId>& backends)
-{
-    std::vector<float> inputData = {
+        std::vector<float> inputData = {
             -0.1f, -0.2f, -0.3f, -0.4f,
-            0.1f,  0.2f,  0.3f,  0.4f,
+            0.1f, 0.2f, 0.3f, 0.4f,
             -1.0f, -2.0f, -3.0f, -4.0f,
-            1.0f,  2.0f,  3.0f,  4.0f
-    };
+            1.0f, 2.0f, 3.0f, 4.0f
+        };
 
-    const float a = 6.0f;
-    const float b = 0.0f;
-    // Calculate output values for input.
-    auto f = [a, b](float value)
+        // Calculate output values for input.
+        auto f = [](float value) { return std::fmax(0.0f, value); };
+        std::vector<float> outputExpectedData(inputData.size());
+        std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+        ActivationTest(tflite::BuiltinOperator_RELU, inputData, outputExpectedData);
+    }
+
+    TEST_CASE("Activation_Bounded_Relu6_Test")
     {
-        return std::min(a, std::max(b, value));
-    };
-    std::vector<float> outputExpectedData(inputData.size());
-    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+        std::vector<float> inputData = { -0.1f, -0.2f, -0.3f, -0.4f,
+                                         0.1f, 0.2f, 0.3f, 0.4f,
+                                         -1.0f, -2.0f, -3.0f, -4.0f,
+                                         1.0f, 2.0f, 3.0f, 4.0f
+                                       };
 
-    ActivationTest(tflite::BuiltinOperator_RELU6,
-                   backends,
-                   inputData,
-                   outputExpectedData);
-}
-
-void ActivationSigmoidTest(std::vector<armnn::BackendId>& backends)
-{
-    std::vector<float> inputData = {
-            -0.1f, -0.2f, -0.3f, -0.4f,
-            0.1f,  0.2f,  0.3f,  0.4f,
-            -1.0f, -2.0f, -3.0f, -4.0f,
-            1.0f,  2.0f,  3.0f,  4.0f
-    };
-
-    // Calculate output values for input.
-    auto f = [](float value)
-    {
-        return 1.0f / (1.0f + std::exp(-value));
-    };
-    std::vector<float> outputExpectedData(inputData.size());
-    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
-
-    ActivationTest(tflite::BuiltinOperator_LOGISTIC,
-                   backends,
-                   inputData,
-                   outputExpectedData);
-}
-
-
-void ActivationTanHTest(std::vector<armnn::BackendId>& backends)
-{
-    std::vector<float> inputData = {
-            -0.1f, -0.2f, -0.3f, -0.4f,
-            0.1f,  0.2f,  0.3f,  0.4f,
-            -1.0f, -2.0f, -3.0f, -4.0f,
-            1.0f,  2.0f,  3.0f,  4.0f
-    };
-
-    // Calculate output values for input.
-    auto f = [](float value)
-    {
-        return tanhf(value);
-    };
-    std::vector<float> outputExpectedData(inputData.size());
-    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
-
-    ActivationTest(tflite::BuiltinOperator_TANH,
-                   backends,
-                   inputData,
-                   outputExpectedData);
-}
-
-void ActivationEluTest(std::vector<armnn::BackendId>& backends)
-{
-    std::vector<float> inputData = {
-            -0.1f, -0.2f, -0.3f, -0.4f,
-            0.1f,  0.2f,  0.3f,  0.4f,
-            -1.0f, -2.0f, -3.0f, -4.0f,
-            1.0f,  2.0f,  3.0f,  4.0f
-    };
-
-    // Calculate output values for input.
-    auto f = [](float value)
-    {
-        if (value < 0)
+        const float a = 6.0f;
+        const float b = 0.0f;
+        // Calculate output values for input.
+        auto f = [a, b](float value)
         {
-            // alpha * (exp(x) - 1)
-            return 1 * (std::exp(value) - 1);
-        }
-        return value;
-    };
-    std::vector<float> outputExpectedData(inputData.size());
-    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+            return std::min(a, std::max(b, value));
+        };
+        std::vector<float> outputExpectedData(inputData.size());
+        std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+        ActivationTest(tflite::BuiltinOperator_RELU6,
+                       inputData, outputExpectedData);
+    }
 
-    ActivationTest(tflite::BuiltinOperator_ELU,
-                   backends,
-                   inputData,
-                   outputExpectedData);
-}
-
-void ActivationHardSwishTest(std::vector<armnn::BackendId>& backends)
-{
-    std::vector<float> inputData = {
-            -0.1f, -0.2f, -0.3f, -0.4f,
-            0.1f,  0.2f,  0.3f,  0.4f,
-            -1.0f, -2.0f, -3.0f, -4.0f,
-            1.0f,  2.0f,  3.0f,  4.0f
-    };
-
-    // Calculate output values for input.
-    auto f = [](float x)
+    TEST_CASE("Activation_Sigmoid_Test")
     {
-        // Break down the calculation to help with verification.
-        // hard_swish(x) = x * relu6(x+3) / 6
-        // relu6(x) = min(max(x,0),6)
-        float reLu6_step1 = std::max((x + 3),0.0f);
-        float reLu6Complete = std::min(reLu6_step1, 6.0f);
-        float hardSwish_step1 = x * reLu6Complete;
-        float result = hardSwish_step1 / 6;
-        return result;
-    };
-    std::vector<float> outputExpectedData(inputData.size());
-    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+        std::vector<float> inputData = { -0.1f, -0.2f, -0.3f, -0.4f,
+                                         0.1f, 0.2f, 0.3f, 0.4f,
+                                         -1.0f, -2.0f, -3.0f, -4.0f,
+                                         1.0f, 2.0f, 3.0f, 4.0f
+                                       };
 
-    ActivationTest(tflite::BuiltinOperator_HARD_SWISH,
-                   backends,
-                   inputData,
-                   outputExpectedData);
-}
+        // Calculate output values for input.
+        auto f = [](float value) { return 1.0f / (1.0f + std::exp(-value)); };
+        std::vector<float> outputExpectedData(inputData.size());
+        std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
 
-void ActivationLeakyReLuTest(std::vector<armnn::BackendId>& backends)
-{
-    std::vector<float> inputData = {
-            -0.1f, -0.2f, -0.3f, -0.4f,
-            0.1f,  0.2f,  0.3f,  0.4f,
-            -1.0f, -2.0f, -3.0f, -4.0f,
-            1.0f,  2.0f,  3.0f,  4.0f
-    };
+        ActivationTest(tflite::BuiltinOperator_LOGISTIC, inputData, outputExpectedData);
+    }
 
-    float alpha = 0.3f;
-
-    // Calculate output values for input.
-    auto f = [alpha](float value)
+    TEST_CASE("Activation_TanH_Test")
     {
-        return value > 0 ? value : value * alpha;
-    };
-    std::vector<float> outputExpectedData(inputData.size());
-    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+        std::vector<float> inputData = { -0.1f, -0.2f, -0.3f, -0.4f,
+                                         0.1f, 0.2f, 0.3f, 0.4f,
+                                         -1.0f, -2.0f, -3.0f, -4.0f,
+                                         1.0f, 2.0f, 3.0f, 4.0f
+                                       };
 
-    ActivationTest(tflite::BuiltinOperator_LEAKY_RELU,
-                   backends,
-                   inputData,
-                   outputExpectedData,
-                   alpha);
-}
+        // Calculate output values for input.
+        auto f = [](float value) { return tanhf(value); };
+        std::vector<float> outputExpectedData(inputData.size());
+        std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
 
-void ActivationGeluTest(std::vector<armnn::BackendId>& backends)
-{
-    std::vector<float> inputData =
+        ActivationTest(tflite::BuiltinOperator_TANH, inputData, outputExpectedData);
+    }
+
+    TEST_CASE("Activation_Elu_Test")
     {
-        -0.1f, -0.2f, -0.3f, -0.4f,
-         0.1f,  0.2f,  0.3f,  0.4f,
-        -1.0f, -2.0f, -3.0f, -4.0f,
-         1.0f,  2.0f,  3.0f,  4.0f
-    };
+        std::vector<float> inputData = { -0.1f, -0.2f, -0.3f, -0.4f,
+                                         0.1f, 0.2f, 0.3f, 0.4f,
+                                         -1.0f, -2.0f, -3.0f, -4.0f,
+                                         1.0f, 2.0f, 3.0f, 4.0f
+                                       };
 
-    // Calculate output values for input.
-    auto f = [](float x)
+        // Calculate output values for input.
+        auto f = [](float value) {
+            if (value < 0)
+            {
+                // alpha * (exp(x) - 1)
+                return 1 * (std::exp(value) - 1);
+            }
+            return value;
+        };
+        std::vector<float> outputExpectedData(inputData.size());
+        std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+        ActivationTest(tflite::BuiltinOperator_ELU, inputData, outputExpectedData);
+    }
+
+    TEST_CASE("Activation_HardSwish_Test")
     {
-        // gelu(x) = x * 1/2 * (1 + erf(x / sqrt(2))),
-        // where erf is Gaussian error function
-        auto result = x * (0.5f * (1.0f + erff(static_cast<float>(x / std::sqrt(2)))));
-        return result;
-    };
-    std::vector<float> outputExpectedData(inputData.size());
-    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+        std::vector<float> inputData = { -0.1f, -0.2f, -0.3f, -0.4f,
+                                         0.1f, 0.2f, 0.3f, 0.4f,
+                                         -1.0f, -2.0f, -3.0f, -4.0f,
+                                         1.0f, 2.0f, 3.0f, 4.0f
+                                       };
 
-    ActivationTest(tflite::BuiltinOperator_GELU,
-                   backends,
-                   inputData,
-                   outputExpectedData);
+        // Calculate output values for input.
+        auto f = [](float x) {
+            // Break down the calculation to help with verification.
+            // hard_swish(x) = x * relu6(x+3) / 6
+            // relu6(x) = min(max(x,0),6)
+            float reLu6_step1     = std::max((x + 3), 0.0f);
+            float reLu6Complete   = std::min(reLu6_step1, 6.0f);
+            float hardSwish_step1 = x * reLu6Complete;
+            float result          = hardSwish_step1 / 6;
+            return result;
+        };
+        std::vector<float> outputExpectedData(inputData.size());
+        std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+        ActivationTest(tflite::BuiltinOperator_HARD_SWISH, inputData, outputExpectedData);
+    }
+
+    TEST_CASE("Activation_LeakyRelu_Test")
+    {
+        std::vector<float> inputData = { -0.1f, -0.2f, -0.3f, -0.4f,
+                                         0.1f, 0.2f, 0.3f, 0.4f,
+                                         -1.0f, -2.0f, -3.0f, -4.0f,
+                                         1.0f, 2.0f, 3.0f, 4.0f
+                                       };
+
+        float alpha = 0.3f;
+
+        // Calculate output values for input.
+        auto f = [alpha](float value) { return value > 0 ? value : value * alpha; };
+        std::vector<float> outputExpectedData(inputData.size());
+        std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+        ActivationTest(tflite::BuiltinOperator_LEAKY_RELU, inputData, outputExpectedData, alpha);
+    }
+
+    TEST_CASE("Activation_Gelu_Test")
+    {
+        std::vector<float> inputData = { -0.1f, -0.2f, -0.3f, -0.4f,
+                                         0.1f, 0.2f, 0.3f, 0.4f,
+                                         -1.0f, -2.0f, -3.0f, -4.0f,
+                                         1.0f, 2.0f, 3.0f, 4.0f
+                                       };
+
+        // Calculate output values for input.
+        auto f = [](float x) {
+            // gelu(x) = x * 1/2 * (1 + erf(x / sqrt(2))),
+            // where erf is Gaussian error function
+            auto result = x * (0.5f * (1.0f + erff(static_cast<float>(x / std::sqrt(2)))));
+            return result;
+        };
+        std::vector<float> outputExpectedData(inputData.size());
+        std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+        ActivationTest(tflite::BuiltinOperator_GELU, inputData, outputExpectedData);
+    }
 }
 
-TEST_SUITE("Activation_CpuRefTests")
-{
-
-TEST_CASE ("Activation_ReLu_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    ActivationReLuTest(backends);
-}
-
-TEST_CASE ("Activation_Bounded_Relu6_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    ActivationBoundedReluTest(backends);
-}
-
-TEST_CASE ("Activation_Sigmoid_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    ActivationSigmoidTest(backends);
-}
-
-TEST_CASE ("Activation_TanH_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    ActivationTanHTest(backends);
-}
-
-TEST_CASE ("Activation_Elu_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    ActivationEluTest(backends);
-}
-
-TEST_CASE ("Activation_HardSwish_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    ActivationHardSwishTest(backends);
-}
-
-TEST_CASE ("Activation_LeakyRelu_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    ActivationLeakyReLuTest(backends);
-}
-
-TEST_CASE ("Activation_Gelu_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    ActivationGeluTest(backends);
-}
-
-}
-
-TEST_SUITE("Activation_CpuAccTests")
-{
-
-TEST_CASE ("Activation_ReLu_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    ActivationReLuTest(backends);
-}
-
-TEST_CASE ("Activation_Bounded_Relu6_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    ActivationBoundedReluTest(backends);
-}
-
-TEST_CASE ("Activation_Sigmoid_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    ActivationSigmoidTest(backends);
-}
-
-TEST_CASE ("Activation_TanH_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    ActivationTanHTest(backends);
-}
-
-TEST_CASE ("Activation_Elu_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    ActivationEluTest(backends);
-}
-
-TEST_CASE ("Activation_HardSwish_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    ActivationHardSwishTest(backends);
-}
-
-TEST_CASE ("Activation_LeakyRelu_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    ActivationLeakyReLuTest(backends);
-}
-
-TEST_CASE ("Activation_Gelu_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    ActivationGeluTest(backends);
-}
-
-}
-
-TEST_SUITE("Activation_GpuAccTests")
-{
-
-TEST_CASE ("Activation_ReLu_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    ActivationReLuTest(backends);
-}
-
-TEST_CASE ("Activation_Bounded_Relu6_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    ActivationBoundedReluTest(backends);
-}
-
-TEST_CASE ("Activation_Sigmoid_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    ActivationSigmoidTest(backends);
-}
-
-TEST_CASE ("Activation_TanH_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    ActivationTanHTest(backends);
-}
-
-TEST_CASE ("Activation_Elu_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    ActivationEluTest(backends);
-}
-
-TEST_CASE ("Activation_HardSwish_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    ActivationHardSwishTest(backends);
-}
-
-TEST_CASE ("Activation_LeakyRelu_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    ActivationLeakyReLuTest(backends);
-}
-
-TEST_CASE ("Activation_Gelu_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    ActivationGeluTest(backends);
-}
-
-}
-
-} // namespace armnnDelegate
\ No newline at end of file
+}    // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/ActivationTestHelper.hpp b/delegate/test/ActivationTestHelper.hpp
index 2bd118f..c023696 100644
--- a/delegate/test/ActivationTestHelper.hpp
+++ b/delegate/test/ActivationTestHelper.hpp
@@ -92,10 +92,10 @@
 }
 
 void ActivationTest(tflite::BuiltinOperator activationOperatorCode,
-                    std::vector<armnn::BackendId>& backends,
                     std::vector<float>& inputValues,
                     std::vector<float>& expectedOutputValues,
-                    float alpha = 0)
+                    float alpha = 0,
+                    const std::vector<armnn::BackendId>& backends = {})
 {
     using namespace delegateTestInterpreter;
     std::vector<int32_t> inputShape  { { 4, 1, 4} };
@@ -113,7 +113,7 @@
     std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
     // Setup interpreter with Arm NN Delegate applied.
-    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
     CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
     CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
diff --git a/delegate/test/ArgMinMaxTest.cpp b/delegate/test/ArgMinMaxTest.cpp
index 2f08c94..7388986 100644
--- a/delegate/test/ArgMinMaxTest.cpp
+++ b/delegate/test/ArgMinMaxTest.cpp
@@ -14,7 +14,7 @@
 namespace armnnDelegate
 {
 
-void ArgMaxFP32Test(std::vector<armnn::BackendId>& backends, int axisValue)
+void ArgMaxFP32Test(int axisValue)
 {
     // Set input data
     std::vector<int32_t> inputShape { 1, 3, 2, 4 };
@@ -36,7 +36,6 @@
 
     ArgMinMaxTest<float, int32_t>(tflite::BuiltinOperator_ARG_MAX,
                                   ::tflite::TensorType_FLOAT32,
-                                  backends,
                                   inputShape,
                                   axisShape,
                                   outputShape,
@@ -46,7 +45,7 @@
                                   ::tflite::TensorType_INT32);
 }
 
-void ArgMinFP32Test(std::vector<armnn::BackendId>& backends, int axisValue)
+void ArgMinFP32Test(int axisValue)
 {
     // Set input data
     std::vector<int32_t> inputShape { 1, 3, 2, 4 };
@@ -68,7 +67,6 @@
 
     ArgMinMaxTest<float, int32_t>(tflite::BuiltinOperator_ARG_MIN,
                                   ::tflite::TensorType_FLOAT32,
-                                  backends,
                                   inputShape,
                                   axisShape,
                                   outputShape,
@@ -78,7 +76,7 @@
                                   ::tflite::TensorType_INT32);
 }
 
-void ArgMaxUint8Test(std::vector<armnn::BackendId>& backends, int axisValue)
+void ArgMaxUint8Test(int axisValue)
 {
     // Set input data
     std::vector<int32_t> inputShape { 1, 1, 1, 5 };
@@ -91,7 +89,6 @@
 
     ArgMinMaxTest<uint8_t, int32_t>(tflite::BuiltinOperator_ARG_MAX,
                                     ::tflite::TensorType_UINT8,
-                                    backends,
                                     inputShape,
                                     axisShape,
                                     outputShape,
@@ -101,73 +98,23 @@
                                     ::tflite::TensorType_INT32);
 }
 
-TEST_SUITE("ArgMinMax_CpuRefTests")
+TEST_SUITE("ArgMinMax_Tests")
 {
 
-TEST_CASE ("ArgMaxFP32Test_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    ArgMaxFP32Test(backends, 2);
+    TEST_CASE("ArgMaxFP32Test_Test")
+    {
+        ArgMaxFP32Test(2);
+    }
+
+    TEST_CASE("ArgMinFP32Test_Test")
+    {
+        ArgMinFP32Test(3);
+    }
+
+    TEST_CASE("ArgMaxUint8Test_Test")
+    {
+        ArgMaxUint8Test(-1);
+    }
 }
 
-TEST_CASE ("ArgMinFP32Test_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    ArgMinFP32Test(backends, 3);
-}
-
-TEST_CASE ("ArgMaxUint8Test_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    ArgMaxUint8Test(backends, -1);
-}
-
-} // TEST_SUITE("ArgMinMax_CpuRefTests")
-
-TEST_SUITE("ArgMinMax_CpuAccTests")
-{
-
-TEST_CASE ("ArgMaxFP32Test_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    ArgMaxFP32Test(backends, 2);
-}
-
-TEST_CASE ("ArgMinFP32Test_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    ArgMinFP32Test(backends, 3);
-}
-
-TEST_CASE ("ArgMaxUint8Test_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    ArgMaxUint8Test(backends, -1);
-}
-
-} // TEST_SUITE("ArgMinMax_CpuAccTests")
-
-TEST_SUITE("ArgMinMax_GpuAccTests")
-{
-
-TEST_CASE ("ArgMaxFP32Test_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    ArgMaxFP32Test(backends, 2);
-}
-
-TEST_CASE ("ArgMinFP32Test_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    ArgMinFP32Test(backends, 3);
-}
-
-TEST_CASE ("ArgMaxUint8Test_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    ArgMaxUint8Test(backends, -1);
-}
-
-} // TEST_SUITE("ArgMinMax_GpuAccTests")
-
 } // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/ArgMinMaxTestHelper.hpp b/delegate/test/ArgMinMaxTestHelper.hpp
index 1086a5e..9c6ac8d 100644
--- a/delegate/test/ArgMinMaxTestHelper.hpp
+++ b/delegate/test/ArgMinMaxTestHelper.hpp
@@ -126,7 +126,6 @@
 template <typename InputT, typename OutputT>
 void ArgMinMaxTest(tflite::BuiltinOperator argMinMaxOperatorCode,
                    tflite::TensorType tensorType,
-                   const std::vector<armnn::BackendId>& backends,
                    const std::vector<int32_t>& inputShape,
                    const std::vector<int32_t>& axisShape,
                    std::vector<int32_t>& outputShape,
@@ -135,7 +134,8 @@
                    OutputT axisValue,
                    tflite::TensorType outputType,
                    float quantScale = 1.0f,
-                   int quantOffset  = 0)
+                   int quantOffset  = 0,
+                   const std::vector<armnn::BackendId>& backends = {})
 {
     using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateArgMinMaxTfLiteModel<InputT, OutputT>(argMinMaxOperatorCode,
@@ -157,7 +157,7 @@
     std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
     // Setup interpreter with Arm NN Delegate applied.
-    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
     CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor<InputT>(inputValues, 0) == kTfLiteOk);
     CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
diff --git a/delegate/test/BatchMatMulTest.cpp b/delegate/test/BatchMatMulTest.cpp
index d38e934..8c8633b 100644
--- a/delegate/test/BatchMatMulTest.cpp
+++ b/delegate/test/BatchMatMulTest.cpp
@@ -5,8 +5,6 @@
 
 #include "BatchMatMulTestHelper.hpp"
 
-#include <armnn_delegate.hpp>
-
 #include <flatbuffers/flatbuffers.h>
 
 #include <doctest/doctest.h>
@@ -14,401 +12,262 @@
 namespace armnnDelegate
 {
 
-    void BatchMatMul2DFp32SimpleTest(std::vector<armnn::BackendId>& backends)
+TEST_SUITE("BATCH_MATMUL_Tests")
+{
+    TEST_CASE("BatchMatMul2DFp32SimpleTest")
     {
         // Set input data
-        std::vector<int32_t> LHSInputShape { 2, 2 };
-        std::vector<int32_t> RHSInputShape { 2, 2 };
-        std::vector<int32_t> outputShape   { 2, 2 };
+        std::vector<int32_t> LHSInputShape{ 2, 2 };
+        std::vector<int32_t> RHSInputShape{ 2, 2 };
+        std::vector<int32_t> outputShape{ 2, 2 };
 
-        std::vector<float> LHSInputValues = { 1, 2,
-                                              3, 4 };
+        std::vector<float> LHSInputValues = { 1, 2, 3, 4 };
 
-        std::vector<float> RHSInputValues = { 5, 6,
-                                              7, 8  };
+        std::vector<float> RHSInputValues = { 5, 6, 7, 8 };
 
-        std::vector<float> expectedOutputValues = { 19, 22,
-                                                    43, 50 };
+        std::vector<float> expectedOutputValues = { 19, 22, 43, 50 };
 
-        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
-                               ::tflite::TensorType_FLOAT32,
-                               backends,
-                               LHSInputShape,
-                               RHSInputShape,
-                               outputShape,
-                               LHSInputValues,
-                               RHSInputValues,
-                               expectedOutputValues,
-                               false,
-                               false);
-    }
-    void BatchMatMul2DInt8SimpleTest(std::vector<armnn::BackendId>& backends)
-    {
-        // Set input data
-        std::vector<int32_t> LHSInputShape { 2, 2 };
-        std::vector<int32_t> RHSInputShape { 2, 2 };
-        std::vector<int32_t> outputShape   { 2, 2 };
-
-        std::vector<int8_t> LHSInputValues = { 1, 2,
-                                              3, 4 };
-
-        std::vector<int8_t> RHSInputValues = { 5, 6,
-                                              7, 8  };
-
-        std::vector<int8_t> expectedOutputValues = { 19, 22,
-                                                    43, 50 };
-
-        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
-                               ::tflite::TensorType_INT8,
-                               backends,
-                               LHSInputShape,
-                               RHSInputShape,
-                               outputShape,
-                               LHSInputValues,
-                               RHSInputValues,
-                               expectedOutputValues,
-                               false,
+        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL, ::tflite::TensorType_FLOAT32, LHSInputShape,
+                               RHSInputShape, outputShape, LHSInputValues, RHSInputValues, expectedOutputValues, false,
                                false);
     }
 
-    void BatchMatMul3DFp32SimpleTest(std::vector<armnn::BackendId>& backends)
+    TEST_CASE("BatchMatMul2DInt8SimpleTest")
     {
         // Set input data
-        std::vector<int32_t> LHSInputShape { 1,2,2 };
-        std::vector<int32_t> RHSInputShape { 1,2,2 };
-        std::vector<int32_t> outputShape   { 1,2,2 };
+        std::vector<int32_t> LHSInputShape{ 2, 2 };
+        std::vector<int32_t> RHSInputShape{ 2, 2 };
+        std::vector<int32_t> outputShape{ 2, 2 };
 
-        std::vector<float> LHSInputValues = { 1, 2,
-                                              3, 4 };
+        std::vector<int8_t> LHSInputValues = { 1, 2, 3, 4 };
 
-        std::vector<float> RHSInputValues = { 5, 6,
-                                              7, 8  };
+        std::vector<int8_t> RHSInputValues = { 5, 6, 7, 8 };
 
-        std::vector<float> expectedOutputValues = { 19, 22,
-                                                    43, 50 };
+        std::vector<int8_t> expectedOutputValues = { 19, 22, 43, 50 };
 
-        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
-                               ::tflite::TensorType_FLOAT32,
-                               backends,
-                               LHSInputShape,
-                               RHSInputShape,
-                               outputShape,
-                               LHSInputValues,
-                               RHSInputValues,
-                               expectedOutputValues,
-                               false,
+        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL, ::tflite::TensorType_INT8, LHSInputShape,
+                                RHSInputShape, outputShape, LHSInputValues, RHSInputValues, expectedOutputValues, false,
+                                false);
+    }
+
+    TEST_CASE("BatchMatMul3DFp32SimpleTest")
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape{ 1, 2, 2 };
+        std::vector<int32_t> RHSInputShape{ 1, 2, 2 };
+        std::vector<int32_t> outputShape{ 1, 2, 2 };
+
+        std::vector<float> LHSInputValues = { 1, 2, 3, 4 };
+
+        std::vector<float> RHSInputValues = { 5, 6, 7, 8 };
+
+        std::vector<float> expectedOutputValues = { 19, 22, 43, 50 };
+
+        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL, ::tflite::TensorType_FLOAT32, LHSInputShape,
+                               RHSInputShape, outputShape, LHSInputValues, RHSInputValues, expectedOutputValues, false,
                                false);
     }
 
-    void BatchMatMul3DInt8SimpleTest(std::vector<armnn::BackendId>& backends)
+    TEST_CASE("BatchMatMul3DInt8SimpleTest")
     {
         // Set input data
-        std::vector<int32_t> LHSInputShape { 1,2,2 };
-        std::vector<int32_t> RHSInputShape { 1,2,2 };
-        std::vector<int32_t> outputShape   { 1,2,2 };
+        std::vector<int32_t> LHSInputShape{ 1, 2, 2 };
+        std::vector<int32_t> RHSInputShape{ 1, 2, 2 };
+        std::vector<int32_t> outputShape{ 1, 2, 2 };
 
-        std::vector<int8_t> LHSInputValues = { 1, 2,
-                                              3, 4 };
+        std::vector<int8_t> LHSInputValues = { 1, 2, 3, 4 };
 
-        std::vector<int8_t> RHSInputValues = { 5, 6,
-                                              7, 8  };
+        std::vector<int8_t> RHSInputValues = { 5, 6, 7, 8 };
 
-        std::vector<int8_t> expectedOutputValues = { 19, 22,
-                                                    43, 50 };
+        std::vector<int8_t> expectedOutputValues = { 19, 22, 43, 50 };
 
-        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
-                               ::tflite::TensorType_INT8,
-                               backends,
-                               LHSInputShape,
-                               RHSInputShape,
-                               outputShape,
-                               LHSInputValues,
-                               RHSInputValues,
-                               expectedOutputValues,
-                               false,
+        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL, ::tflite::TensorType_INT8, LHSInputShape,
+                                RHSInputShape, outputShape, LHSInputValues, RHSInputValues, expectedOutputValues, false,
+                                false);
+    }
+
+    TEST_CASE("BatchMatMul4DFp32SimpleTest")
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape{ 1, 1, 2, 2 };
+        std::vector<int32_t> RHSInputShape{ 1, 1, 2, 2 };
+        std::vector<int32_t> outputShape{ 1, 1, 2, 2 };
+
+        std::vector<float> LHSInputValues = { 1, 2, 3, 4 };
+
+        std::vector<float> RHSInputValues = { 5, 6, 7, 8 };
+
+        std::vector<float> expectedOutputValues = { 19, 22, 43, 50 };
+
+        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL, ::tflite::TensorType_FLOAT32, LHSInputShape,
+                               RHSInputShape, outputShape, LHSInputValues, RHSInputValues, expectedOutputValues, false,
                                false);
     }
 
-    void BatchMatMul4DFp32SimpleTest(std::vector<armnn::BackendId>& backends)
+    TEST_CASE("BatchMatMul4DInt8SimpleTest")
     {
         // Set input data
-        std::vector<int32_t> LHSInputShape { 1,1,2,2 };
-        std::vector<int32_t> RHSInputShape { 1,1,2,2 };
-        std::vector<int32_t> outputShape   { 1,1,2,2 };
+        std::vector<int32_t> LHSInputShape{ 1, 1, 2, 2 };
+        std::vector<int32_t> RHSInputShape{ 1, 1, 2, 2 };
+        std::vector<int32_t> outputShape{ 1, 1, 2, 2 };
 
-        std::vector<float> LHSInputValues = { 1, 2,
-                                              3, 4 };
+        std::vector<int8_t> LHSInputValues = { 1, 2, 3, 4 };
 
-        std::vector<float> RHSInputValues = { 5, 6,
-                                              7, 8  };
+        std::vector<int8_t> RHSInputValues = { 5, 6, 7, 8 };
 
-        std::vector<float> expectedOutputValues = { 19, 22,
-                                                    43, 50 };
+        std::vector<int8_t> expectedOutputValues = { 19, 22, 43, 50 };
 
-        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
-                               ::tflite::TensorType_FLOAT32,
-                               backends,
-                               LHSInputShape,
-                               RHSInputShape,
-                               outputShape,
-                               LHSInputValues,
-                               RHSInputValues,
-                               expectedOutputValues,
-                               false,
+        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL, ::tflite::TensorType_INT8, LHSInputShape,
+                                RHSInputShape, outputShape, LHSInputValues, RHSInputValues, expectedOutputValues, false,
+                                false);
+    }
+
+    TEST_CASE("BatchMatMul3DFp32BatchTest")
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape{ 2, 2, 2 };
+        std::vector<int32_t> RHSInputShape{ 2, 2, 2 };
+        std::vector<int32_t> outputShape{ 2, 2, 2 };
+
+        std::vector<float> LHSInputValues = { 1, 2,  3,  4,
+
+                                              9, 10, 11, 12 };
+
+        std::vector<float> RHSInputValues = { 5,  6,  7,  8,
+
+                                              13, 14, 15, 16 };
+
+        std::vector<float> expectedOutputValues = { 19,  22,  43,  50,
+
+                                                    267, 286, 323, 346 };
+
+        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL, ::tflite::TensorType_FLOAT32, LHSInputShape,
+                               RHSInputShape, outputShape, LHSInputValues, RHSInputValues, expectedOutputValues, false,
                                false);
     }
 
-    void BatchMatMul4DInt8SimpleTest(std::vector<armnn::BackendId>& backends)
+    TEST_CASE("BatchMatMul3DInt8BatchTest")
     {
         // Set input data
-        std::vector<int32_t> LHSInputShape { 1,1,2,2};
-        std::vector<int32_t> RHSInputShape { 1,1,2,2 };
-        std::vector<int32_t> outputShape   { 1,1,2,2 };
+        std::vector<int32_t> LHSInputShape{ 2, 2, 2 };
+        std::vector<int32_t> RHSInputShape{ 2, 2, 2 };
+        std::vector<int32_t> outputShape{ 2, 2, 2 };
 
-        std::vector<int8_t> LHSInputValues = { 1, 2,
-                                              3, 4 };
+        std::vector<int8_t> LHSInputValues = { 1, 2,  3,  4,
 
-        std::vector<int8_t> RHSInputValues = { 5, 6,
-                                              7, 8 };
+                                               9, 10, 11, 12 };
 
-        std::vector<int8_t> expectedOutputValues = { 19, 22,
-                                                    43, 50 };
+        std::vector<int8_t> RHSInputValues = { 5, 6, 7, 8,
 
-        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
-                               ::tflite::TensorType_INT8,
-                               backends,
-                               LHSInputShape,
-                               RHSInputShape,
-                               outputShape,
-                               LHSInputValues,
-                               RHSInputValues,
-                               expectedOutputValues,
-                               false,
-                               false);
+                                               1, 2, 3, 4 };
+
+        std::vector<int8_t> expectedOutputValues = { 19, 22, 43, 50,
+
+                                                     39, 58, 47, 70 };
+
+        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL, ::tflite::TensorType_INT8, LHSInputShape,
+                                RHSInputShape, outputShape, LHSInputValues, RHSInputValues, expectedOutputValues, false,
+                                false);
     }
 
-    void BatchMatMul3DFp32BatchTest(std::vector<armnn::BackendId>& backends)
+    TEST_CASE("BatchMatMul3DFp32BroadcastTest")
     {
         // Set input data
-        std::vector<int32_t> LHSInputShape { 2,2,2 };
-        std::vector<int32_t> RHSInputShape { 2,2,2 };
-        std::vector<int32_t> outputShape   { 2,2,2 };
+        std::vector<int32_t> LHSInputShape{ 2, 2, 2 };
+        std::vector<int32_t> RHSInputShape{ 2, 2 };
+        std::vector<int32_t> outputShape{ 2, 2, 2 };
 
-        std::vector<float> LHSInputValues = { 1, 2,
-                                              3, 4,
+        std::vector<float> LHSInputValues = { 1, 2,  3,  4,
 
-                                              9, 10,
-                                              11, 12 };
+                                              9, 10, 11, 12 };
 
-        std::vector<float> RHSInputValues = { 5, 6,
-                                              7, 8,
+        std::vector<float> RHSInputValues = { 13, 14, 15, 16 };
 
-                                              13, 14,
-                                              15, 16 };
+        std::vector<float> expectedOutputValues = { 43,  46,  99,  106,
 
-        std::vector<float> expectedOutputValues = { 19, 22,
-                                                    43, 50,
+                                                    267, 286, 323, 346 };
 
-                                                    267, 286,
-                                                    323, 346 };
-
-        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
-                               ::tflite::TensorType_FLOAT32,
-                               backends,
-                               LHSInputShape,
-                               RHSInputShape,
-                               outputShape,
-                               LHSInputValues,
-                               RHSInputValues,
-                               expectedOutputValues,
-                               false,
-                               false);
+        // We know that this is only supported on CpuRef. To enable it on all backends, just remove the last parameter.
+        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL, ::tflite::TensorType_FLOAT32, LHSInputShape,
+                               RHSInputShape, outputShape, LHSInputValues, RHSInputValues, expectedOutputValues, false,
+                               false, 1.0f, 0, {armnn::Compute::CpuRef});
     }
 
-    void BatchMatMul3DInt8BatchTest(std::vector<armnn::BackendId>& backends)
+    TEST_CASE("BatchMatMul3DInt8BroadcastTest")
     {
         // Set input data
-        std::vector<int32_t> LHSInputShape { 2,2,2 };
-        std::vector<int32_t> RHSInputShape { 2,2,2 };
-        std::vector<int32_t> outputShape   { 2,2,2 };
+        std::vector<int32_t> LHSInputShape{ 2, 2, 2 };
+        std::vector<int32_t> RHSInputShape{ 2, 2 };
+        std::vector<int32_t> outputShape{ 2, 2, 2 };
 
-        std::vector<int8_t> LHSInputValues = { 1, 2,
-                                              3, 4,
+        std::vector<int8_t> LHSInputValues = { 1, 2,  3,  4,
 
-                                              9, 10,
-                                              11, 12 };
+                                               9, 10, 11, 12 };
 
-        std::vector<int8_t> RHSInputValues = { 5, 6,
-                                              7, 8,
+        std::vector<int8_t> RHSInputValues = { 1, 2, 3, 4 };
 
-                                              1, 2,
-                                              3, 4 };
+        std::vector<int8_t> expectedOutputValues = { 7,  10, 15, 22,
 
-        std::vector<int8_t> expectedOutputValues = { 19, 22,
-                                                    43, 50,
+                                                     39, 58, 47, 70 };
 
-                                                    39, 58,
-                                                    47, 70 };
-
-        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
-                               ::tflite::TensorType_INT8,
-                               backends,
-                               LHSInputShape,
-                               RHSInputShape,
-                               outputShape,
-                               LHSInputValues,
-                               RHSInputValues,
-                               expectedOutputValues,
-                               false,
-                               false);
+        // We know that this is only supported on CpuRef. To enable it on all backends, just remove the last parameter.
+        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL, ::tflite::TensorType_INT8, LHSInputShape,
+                                RHSInputShape, outputShape, LHSInputValues, RHSInputValues, expectedOutputValues, false,
+                                false, 1.0f, 0, {armnn::Compute::CpuRef});
     }
 
-    void BatchMatMul3DFp32BroadcastTest(std::vector<armnn::BackendId>& backends)
+    TEST_CASE("BatchMatMul3D2DFp32BroadcastTest")
     {
         // Set input data
-        std::vector<int32_t> LHSInputShape { 2,2,2 };
-        std::vector<int32_t> RHSInputShape { 2,2 };
-        std::vector<int32_t> outputShape   { 2,2,2 };
+        std::vector<int32_t> LHSInputShape{ 2, 2, 2 };
+        std::vector<int32_t> RHSInputShape{ 2, 2 };
+        std::vector<int32_t> outputShape{ 2, 2, 2 };
 
-        std::vector<float> LHSInputValues = { 1, 2,
-                                              3, 4,
+        std::vector<float> LHSInputValues = { 1, 2,  3,  4,
 
-                                              9, 10,
-                                              11, 12 };
+                                              9, 10, 11, 12 };
 
-        std::vector<float> RHSInputValues = { 13, 14,
-                                              15, 16 };
+        std::vector<float> RHSInputValues = { 13, 14, 15, 16 };
 
-        std::vector<float> expectedOutputValues = {  43, 46,
-                                                     99, 106,
+        std::vector<float> expectedOutputValues = { 43,  46,  99,  106,
 
-                                                     267, 286,
-                                                     323, 346 };
+                                                    267, 286, 323, 346 };
 
-        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
-                               ::tflite::TensorType_FLOAT32,
-                               backends,
-                               LHSInputShape,
-                               RHSInputShape,
-                               outputShape,
-                               LHSInputValues,
-                               RHSInputValues,
-                               expectedOutputValues,
-                               false,
-                               false);
+        // We know that this is only supported on CpuRef. To enable it on all backends, just remove the last parameter.
+        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL, ::tflite::TensorType_FLOAT32, LHSInputShape,
+                               RHSInputShape, outputShape, LHSInputValues, RHSInputValues, expectedOutputValues, false,
+                               false, 1.0f, 0, {armnn::Compute::CpuRef});
     }
 
-    void BatchMatMul3DInt8BroadcastTest(std::vector<armnn::BackendId>& backends)
+    TEST_CASE("BatchMatMul3D2DInt8BroadcastTest")
     {
         // Set input data
-        std::vector<int32_t> LHSInputShape { 2,2,2 };
-        std::vector<int32_t> RHSInputShape { 2,2 };
-        std::vector<int32_t> outputShape   { 2,2,2 };
+        std::vector<int32_t> LHSInputShape{ 2, 2, 2 };
+        std::vector<int32_t> RHSInputShape{ 2, 2 };
+        std::vector<int32_t> outputShape{ 2, 2, 2 };
 
-        std::vector<int8_t> LHSInputValues = { 1, 2,
-                                              3, 4,
+        std::vector<int8_t> LHSInputValues = { 1, 2,  3,  4,
 
-                                              9, 10,
-                                              11, 12 };
+                                               9, 10, 11, 12 };
 
-        std::vector<int8_t> RHSInputValues = { 1, 2,
-                                               3, 4 };
+        std::vector<int8_t> RHSInputValues = { 1, 2, 3, 4 };
 
-        std::vector<int8_t> expectedOutputValues = {  7,  10,
-                                                      15, 22,
+        std::vector<int8_t> expectedOutputValues = { 7,  10, 15, 22,
 
-                                                      39, 58,
-                                                      47, 70 };
+                                                     39, 58, 47, 70 };
 
-        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
-                               ::tflite::TensorType_INT8,
-                               backends,
-                               LHSInputShape,
-                               RHSInputShape,
-                               outputShape,
-                               LHSInputValues,
-                               RHSInputValues,
-                               expectedOutputValues,
-                               false,
-                               false);
+        // We know that this is only supported on CpuRef. To enable it on all backends, just remove the last parameter.
+        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL, ::tflite::TensorType_INT8, LHSInputShape,
+                                RHSInputShape, outputShape, LHSInputValues, RHSInputValues, expectedOutputValues, false,
+                                false, 1.0f, 0, {armnn::Compute::CpuRef});
     }
 
-    void BatchMatMul3D2DFp32BroadcastTest(std::vector<armnn::BackendId>& backends)
+    TEST_CASE("BatchMatMul2DFp32TinyTest")
     {
         // Set input data
-        std::vector<int32_t> LHSInputShape { 2,2,2 };
-        std::vector<int32_t> RHSInputShape { 2,2 };
-        std::vector<int32_t> outputShape   { 2,2,2 };
-
-        std::vector<float> LHSInputValues = { 1, 2,
-                                              3, 4,
-
-                                              9, 10,
-                                              11, 12 };
-
-        std::vector<float> RHSInputValues = { 13, 14,
-                                              15, 16 };
-
-        std::vector<float> expectedOutputValues = {  43, 46,
-                                                     99, 106,
-
-                                                     267, 286,
-                                                     323, 346 };
-
-        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
-                               ::tflite::TensorType_FLOAT32,
-                               backends,
-                               LHSInputShape,
-                               RHSInputShape,
-                               outputShape,
-                               LHSInputValues,
-                               RHSInputValues,
-                               expectedOutputValues,
-                               false,
-                               false);
-    }
-
-    void BatchMatMul3D2DInt8BroadcastTest(std::vector<armnn::BackendId>& backends)
-    {
-        // Set input data
-        std::vector<int32_t> LHSInputShape { 2,2,2 };
-        std::vector<int32_t> RHSInputShape { 2,2 };
-        std::vector<int32_t> outputShape   { 2,2,2 };
-
-        std::vector<int8_t> LHSInputValues = { 1, 2,
-                                              3, 4,
-
-                                              9, 10,
-                                              11, 12 };
-
-        std::vector<int8_t> RHSInputValues = { 1, 2,
-                                               3, 4 };
-
-        std::vector<int8_t> expectedOutputValues = {  7, 10,
-                                                      15, 22,
-
-                                                      39, 58,
-                                                      47, 70 };
-
-        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
-                               ::tflite::TensorType_INT8,
-                               backends,
-                               LHSInputShape,
-                               RHSInputShape,
-                               outputShape,
-                               LHSInputValues,
-                               RHSInputValues,
-                               expectedOutputValues,
-                               false,
-                               false);
-    }
-
-    void BatchMatMul2DFp32TinyTest(std::vector<armnn::BackendId>& backends)
-    {
-        // Set input data
-        std::vector<int32_t> LHSInputShape { 1,1 };
-        std::vector<int32_t> RHSInputShape { 1,1 };
-        std::vector<int32_t> outputShape   { 1,1 };
+        std::vector<int32_t> LHSInputShape{ 1, 1 };
+        std::vector<int32_t> RHSInputShape{ 1, 1 };
+        std::vector<int32_t> outputShape{ 1, 1 };
 
         std::vector<float> LHSInputValues = { 3 };
 
@@ -416,24 +275,17 @@
 
         std::vector<float> expectedOutputValues = { 15 };
 
-        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
-                               ::tflite::TensorType_FLOAT32,
-                               backends,
-                               LHSInputShape,
-                               RHSInputShape,
-                               outputShape,
-                               LHSInputValues,
-                               RHSInputValues,
-                               expectedOutputValues,
-                               false,
+        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL, ::tflite::TensorType_FLOAT32, LHSInputShape,
+                               RHSInputShape, outputShape, LHSInputValues, RHSInputValues, expectedOutputValues, false,
                                false);
     }
-    void BatchMatMul2DInt8TinyTest(std::vector<armnn::BackendId>& backends)
+
+    TEST_CASE("BatchMatMul2DInt8TinyTest")
     {
         // Set input data
-        std::vector<int32_t> LHSInputShape { 1,1 };
-        std::vector<int32_t> RHSInputShape { 1,1 };
-        std::vector<int32_t> outputShape   { 1,1 };
+        std::vector<int32_t> LHSInputShape{ 1, 1 };
+        std::vector<int32_t> RHSInputShape{ 1, 1 };
+        std::vector<int32_t> outputShape{ 1, 1 };
 
         std::vector<int8_t> LHSInputValues = { 3 };
 
@@ -441,268 +293,97 @@
 
         std::vector<int8_t> expectedOutputValues = { 15 };
 
-        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
-                                ::tflite::TensorType_INT8,
-                                backends,
-                                LHSInputShape,
-                                RHSInputShape,
-                                outputShape,
-                                LHSInputValues,
-                                RHSInputValues,
-                                expectedOutputValues,
-                                false,
+        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL, ::tflite::TensorType_INT8, LHSInputShape,
+                                RHSInputShape, outputShape, LHSInputValues, RHSInputValues, expectedOutputValues, false,
                                 false);
     }
 
-    void BatchMatMulNonSquareFp32Test(std::vector<armnn::BackendId>& backends)
+    TEST_CASE("BatchMatMulNonSquareFp32Test")
     {
         // Set input data
-        std::vector<int32_t> LHSInputShape { 2,5,3 };
-        std::vector<int32_t> RHSInputShape { 2,3,4 };
-        std::vector<int32_t> outputShape   { 2,5,4 };
+        std::vector<int32_t> LHSInputShape{ 2, 5, 3 };
+        std::vector<int32_t> RHSInputShape{ 2, 3, 4 };
+        std::vector<int32_t> outputShape{ 2, 5, 4 };
 
-        std::vector<float> LHSInputValues = { 8, 8, 4,
-                                              6, 1, 3,
-                                              8, 8, 3,
-                                              8, 9, 8,
-                                              5, 4, 4,
+        std::vector<float> LHSInputValues = { 8, 8, 4, 6, 1, 3, 8, 8, 3, 8, 9, 8, 5, 4, 4,
 
-                                              1, 8, 5,
-                                              7, 1, 1,
-                                              8, 7, 9,
-                                              3, 2, 7,
-                                              8, 5, 3 };
+                                              1, 8, 5, 7, 1, 1, 8, 7, 9, 3, 2, 7, 8, 5, 3 };
 
-        std::vector<float> RHSInputValues = { 6, 2, 3, 2,
-                                              6, 2, 2, 8,
-                                              3, 7, 8, 1,
+        std::vector<float> RHSInputValues = { 6, 2, 3, 2, 6, 2, 2, 8, 3, 7, 8, 1,
 
-                                              7, 2, 9, 5,
-                                              2, 3, 1, 3,
-                                              2, 7, 7, 5 };
+                                              7, 2, 9, 5, 2, 3, 1, 3, 2, 7, 7, 5 };
 
-        std::vector<float> expectedOutputValues = { 108, 60, 72, 84,
-                                                    51, 35, 44, 23,
-                                                    105, 53, 64, 83,
-                                                    126, 90, 106, 96,
-                                                    66, 46, 55, 46,
+        std::vector<float> expectedOutputValues = { 108, 60,  72,  84, 51,  35, 44, 23, 105, 53,
+                                                    64,  83,  126, 90, 106, 96, 66, 46, 55,  46,
 
-                                                    33, 61, 52, 54,
-                                                    53, 24, 71, 43,
-                                                    88, 100, 142, 106,
-                                                    39, 61, 78, 56,
-                                                    72, 52, 98, 70 };
+                                                    33,  61,  52,  54, 53,  24, 71, 43, 88,  100,
+                                                    142, 106, 39,  61, 78,  56, 72, 52, 98,  70 };
 
-        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
-                               ::tflite::TensorType_FLOAT32,
-                               backends,
-                               LHSInputShape,
-                               RHSInputShape,
-                               outputShape,
-                               LHSInputValues,
-                               RHSInputValues,
-                               expectedOutputValues,
-                               false,
+        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL, ::tflite::TensorType_FLOAT32, LHSInputShape,
+                               RHSInputShape, outputShape, LHSInputValues, RHSInputValues, expectedOutputValues, false,
                                false);
     }
 
-    void BatchMatMulNonSquareInt8Test(std::vector<armnn::BackendId>& backends)
+    TEST_CASE("BatchMatMulNonSquareInt8Test")
     {
         // Set input data
-        std::vector<int32_t> LHSInputShape { 2,5,3 };
-        std::vector<int32_t> RHSInputShape { 2,3,4 };
-        std::vector<int32_t> outputShape   { 2,5,4 };
+        std::vector<int32_t> LHSInputShape{ 2, 5, 3 };
+        std::vector<int32_t> RHSInputShape{ 2, 3, 4 };
+        std::vector<int32_t> outputShape{ 2, 5, 4 };
 
-        std::vector<int8_t> LHSInputValues = { 8, 8, 4,
-                                              6, 1, 3,
-                                              8, 8, 3,
-                                              8, 9, 8,
-                                              5, 4, 4,
+        std::vector<int8_t> LHSInputValues = { 8, 8, 4, 6, 1, 3, 8, 8, 3, 8, 9, 8, 5, 4, 4,
 
-                                              1, 8, 5,
-                                              7, 1, 1,
-                                              8, 7, 9,
-                                              3, 2, 7,
-                                              8, 5, 3 };
+                                               1, 8, 5, 7, 1, 1, 8, 7, 9, 3, 2, 7, 8, 5, 3 };
 
-        std::vector<int8_t> RHSInputValues = { 6, 2, 3, 2,
-                                              6, 2, 2, 8,
-                                              3, 7, 8, 1,
+        std::vector<int8_t> RHSInputValues = { 6, 2, 3, 2, 6, 2, 2, 8, 3, 7, 8, 1,
 
-                                              7, 2, 3, 5,
-                                              2, 3, 1, 3,
-                                              2, 7, 7, 5 };
+                                               7, 2, 3, 5, 2, 3, 1, 3, 2, 7, 7, 5 };
 
-        std::vector<int8_t> expectedOutputValues = { 108, 60, 72, 84,
-                                                    51, 35, 44, 23,
-                                                    105, 53, 64, 83,
-                                                    126, 90, 106, 96,
-                                                    66, 46, 55, 46,
+        std::vector<int8_t> expectedOutputValues = { 108, 60,  72,  84, 51,  35, 44, 23, 105, 53,
+                                                     64,  83,  126, 90, 106, 96, 66, 46, 55,  46,
 
-                                                    33, 61, 46, 54,
-                                                    53, 24, 29, 43,
-                                                    88, 100, 94, 106,
-                                                    39, 61, 60, 56,
-                                                    72, 52, 50, 70 };
+                                                     33,  61,  46,  54, 53,  24, 29, 43, 88,  100,
+                                                     94,  106, 39,  61, 60,  56, 72, 52, 50,  70 };
 
-        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
-                               ::tflite::TensorType_INT8,
-                               backends,
-                               LHSInputShape,
-                               RHSInputShape,
-                               outputShape,
-                               LHSInputValues,
-                               RHSInputValues,
-                               expectedOutputValues,
-                               false,
+        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL, ::tflite::TensorType_INT8, LHSInputShape,
+                                RHSInputShape, outputShape, LHSInputValues, RHSInputValues, expectedOutputValues, false,
+                                false);
+    }
+
+    TEST_CASE("BatchMatMul2DFp32SimpleAdjointTest")
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape{ 3, 3 };
+        std::vector<int32_t> RHSInputShape{ 3, 3 };
+        std::vector<int32_t> outputShape{ 3, 3 };
+
+        std::vector<float> LHSInputValues = { 3, 1, 1, 1, 3, -1, 2, 4, 1 };
+
+        std::vector<float> RHSInputValues = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
+
+        std::vector<float> expectedOutputValues = { 3, 1, 2, 1, 3, 4, 1, -1, 1 };
+
+        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL, ::tflite::TensorType_FLOAT32, LHSInputShape,
+                               RHSInputShape, outputShape, LHSInputValues, RHSInputValues, expectedOutputValues, true,
                                false);
     }
 
-    void BatchMatMul2DFp32SimpleAdjointTest(std::vector<armnn::BackendId>& backends)
+    TEST_CASE("BatchMatMul2DInt8SimpleAdjointTest")
     {
         // Set input data
-        std::vector<int32_t> LHSInputShape { 3,3 };
-        std::vector<int32_t> RHSInputShape { 3,3 };
-        std::vector<int32_t> outputShape   { 3,3 };
+        std::vector<int32_t> LHSInputShape{ 3, 3 };
+        std::vector<int32_t> RHSInputShape{ 3, 3 };
+        std::vector<int32_t> outputShape{ 3, 3 };
 
-        std::vector<float> LHSInputValues = { 3, 1, 1,
-                                              1, 3, -1,
-                                              2, 4, 1 };
+        std::vector<int8_t> LHSInputValues = { 3, 1, 1, 1, 3, -1, 2, 4, 1 };
 
-        std::vector<float> RHSInputValues = { 1, 0, 0,
-                                              0, 1, 0,
-                                              0, 0, 1 };
+        std::vector<int8_t> RHSInputValues = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
 
-        std::vector<float> expectedOutputValues = { 3, 1, 2,
-                                                    1, 3, 4,
-                                                    1, -1, 1 };
+        std::vector<int8_t> expectedOutputValues = { 3, 1, 2, 1, 3, 4, 1, -1, 1 };
 
-        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
-                               ::tflite::TensorType_FLOAT32,
-                               backends,
-                               LHSInputShape,
-                               RHSInputShape,
-                               outputShape,
-                               LHSInputValues,
-                               RHSInputValues,
-                               expectedOutputValues,
-                               true,
-                               false);
-    }
-
-    void BatchMatMul2DInt8SimpleAdjointTest(std::vector<armnn::BackendId>& backends)
-    {
-        // Set input data
-        std::vector<int32_t> LHSInputShape { 3,3 };
-        std::vector<int32_t> RHSInputShape { 3,3 };
-        std::vector<int32_t> outputShape   { 3,3 };
-
-        std::vector<int8_t> LHSInputValues = { 3, 1, 1,
-                                              1, 3, -1,
-                                              2, 4, 1 };
-
-        std::vector<int8_t> RHSInputValues = { 1, 0, 0,
-                                              0, 1, 0,
-                                              0, 0, 1 };
-
-        std::vector<int8_t> expectedOutputValues = { 3, 1, 2,
-                                                     1, 3, 4,
-                                                     1, -1, 1 };
-
-        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
-                               ::tflite::TensorType_INT8,
-                               backends,
-                               LHSInputShape,
-                               RHSInputShape,
-                               outputShape,
-                               LHSInputValues,
-                               RHSInputValues,
-                               expectedOutputValues,
-                               true,
-                               false);
-    }
-
-    TEST_SUITE("BATCH_MATMUL_CpuRefTests")
-    {
-        TEST_CASE("BATCH_MATMUL_Fp32_CpuRefTests")
-        {
-            std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-            BatchMatMul2DFp32SimpleTest       (backends);
-            BatchMatMul3DFp32SimpleTest       (backends);
-            BatchMatMul4DFp32SimpleTest       (backends);
-            BatchMatMul3DFp32BatchTest        (backends);
-            BatchMatMul3DFp32BroadcastTest    (backends);
-            BatchMatMul3D2DFp32BroadcastTest  (backends);
-            BatchMatMul2DFp32TinyTest         (backends);
-            BatchMatMulNonSquareFp32Test      (backends);
-            BatchMatMul2DFp32SimpleAdjointTest(backends);
-        }
-
-        TEST_CASE("BATCH_MATMUL_Int8_CpuRefTests")
-        {
-            std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-            BatchMatMul2DInt8SimpleTest       (backends);
-            BatchMatMul3DInt8SimpleTest       (backends);
-            BatchMatMul4DInt8SimpleTest       (backends);
-            BatchMatMul3DInt8BatchTest        (backends);
-            BatchMatMul3DInt8BroadcastTest    (backends);
-            BatchMatMul3D2DInt8BroadcastTest  (backends);
-            BatchMatMul2DInt8TinyTest         (backends);
-            BatchMatMulNonSquareInt8Test      (backends);
-            BatchMatMul2DInt8SimpleAdjointTest(backends);
-        }
-    }
-
-    TEST_SUITE("BATCH_MATMUL_CpuAccTests")
-    {
-        TEST_CASE("BATCH_MATMUL_Fp32_CpuAccTests")
-        {
-            std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-            BatchMatMul2DFp32SimpleTest       (backends);
-            BatchMatMul3DFp32SimpleTest       (backends);
-            BatchMatMul4DFp32SimpleTest       (backends);
-            BatchMatMul3DFp32BatchTest        (backends);
-            BatchMatMul2DFp32TinyTest         (backends);
-            BatchMatMulNonSquareFp32Test      (backends);
-            BatchMatMul2DFp32SimpleAdjointTest(backends);
-        }
-
-        TEST_CASE("BATCH_MATMUL_Int8_CpuAccTests")
-        {
-            std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-            BatchMatMul2DInt8SimpleTest       (backends);
-            BatchMatMul3DInt8SimpleTest       (backends);
-            BatchMatMul4DInt8SimpleTest       (backends);
-            BatchMatMul3DInt8BatchTest        (backends);
-            BatchMatMul2DInt8TinyTest         (backends);
-            BatchMatMulNonSquareInt8Test      (backends);
-            BatchMatMul2DInt8SimpleAdjointTest(backends);
-        }
-    }
-
-    TEST_SUITE("BATCH_MATMUL_GpuAccTests")
-    {
-        TEST_CASE("BATCH_MATMUL_Fp32_GpuAccTests")
-        {
-            std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-            BatchMatMul2DFp32SimpleTest       (backends);
-            BatchMatMul3DFp32SimpleTest       (backends);
-            BatchMatMul4DFp32SimpleTest       (backends);
-            BatchMatMul3DFp32BatchTest        (backends);
-            BatchMatMul2DFp32TinyTest         (backends);
-            BatchMatMulNonSquareFp32Test      (backends);
-            BatchMatMul2DFp32SimpleAdjointTest(backends);
-        }
-
-        TEST_CASE("BATCH_MATMUL_Int8_GpuAccTests")
-        {
-            std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-            BatchMatMul2DInt8SimpleTest       (backends);
-            BatchMatMul3DInt8SimpleTest       (backends);
-            BatchMatMul3DInt8BatchTest        (backends);
-            BatchMatMul2DInt8TinyTest         (backends);
-            BatchMatMulNonSquareInt8Test      (backends);
-            BatchMatMul2DInt8SimpleAdjointTest(backends);
-        }
+        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL, ::tflite::TensorType_INT8, LHSInputShape,
+                                RHSInputShape, outputShape, LHSInputValues, RHSInputValues, expectedOutputValues, true,
+                                false);
     }
 }
+}
\ No newline at end of file
diff --git a/delegate/test/BatchMatMulTestHelper.hpp b/delegate/test/BatchMatMulTestHelper.hpp
index 86f1c53..f2fb581 100644
--- a/delegate/test/BatchMatMulTestHelper.hpp
+++ b/delegate/test/BatchMatMulTestHelper.hpp
@@ -118,7 +118,6 @@
 template <typename T>
 void BatchMatMulTest(tflite::BuiltinOperator bmmOperatorCode,
                    tflite::TensorType tensorType,
-                   std::vector<armnn::BackendId>& backends,
                    std::vector<int32_t>& LHSInputShape,
                    std::vector<int32_t>& RHSInputShape,
                    std::vector<int32_t>& outputShape,
@@ -128,7 +127,8 @@
                    bool adjX = false,
                    bool adjY = false,
                    float quantScale = 1.0f,
-                   int quantOffset  = 0)
+                   int quantOffset  = 0,
+                   const std::vector<armnn::BackendId>& backends = {})
 {
     using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateBatchMatMulTfLiteModel(bmmOperatorCode,
@@ -151,7 +151,8 @@
     std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
     // Setup interpreter with Arm NN Delegate applied.
-    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
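+    // If no backends were supplied, CaptureAvailableBackends is expected to fill in
+    // every backend enabled by the compilation flags.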
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
     CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor<T>(LHSInputValues, 0) == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor<T>(RHSInputValues, 1) == kTfLiteOk);
diff --git a/delegate/test/BatchSpaceTest.cpp b/delegate/test/BatchSpaceTest.cpp
index dd6047a..78cde2e 100644
--- a/delegate/test/BatchSpaceTest.cpp
+++ b/delegate/test/BatchSpaceTest.cpp
@@ -15,7 +15,7 @@
 {
 
 // BatchToSpaceND Operator
-void BatchToSpaceNDFp32Test(std::vector<armnn::BackendId>& backends)
+void BatchToSpaceNDFp32Test()
 {
     std::vector<int32_t> inputShape { 4, 1, 1, 1 };
     std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
@@ -28,7 +28,6 @@
 
     BatchSpaceTest<float>(tflite::BuiltinOperator_BATCH_TO_SPACE_ND,
                           ::tflite::TensorType_FLOAT32,
-                          backends,
                           inputShape,
                           expectedOutputShape,
                           inputValues,
@@ -37,7 +36,7 @@
                           expectedOutputValues);
 }
 
-void BatchToSpaceNDFp32BatchOneTest(std::vector<armnn::BackendId>& backends)
+void BatchToSpaceNDFp32BatchOneTest()
 {
     std::vector<int32_t> inputShape { 1, 2, 2, 1 };
     std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
@@ -50,7 +49,6 @@
 
     BatchSpaceTest<float>(tflite::BuiltinOperator_BATCH_TO_SPACE_ND,
                           ::tflite::TensorType_FLOAT32,
-                          backends,
                           inputShape,
                           expectedOutputShape,
                           inputValues,
@@ -59,7 +57,7 @@
                           expectedOutputValues);
 }
 
-void BatchToSpaceNDUint8Test(std::vector<armnn::BackendId>& backends)
+void BatchToSpaceNDUint8Test()
 {
     std::vector<int32_t> inputShape { 4, 1, 1, 3 };
     std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
@@ -72,7 +70,6 @@
 
     BatchSpaceTest<uint8_t>(tflite::BuiltinOperator_BATCH_TO_SPACE_ND,
                           ::tflite::TensorType_UINT8,
-                          backends,
                           inputShape,
                           expectedOutputShape,
                           inputValues,
@@ -82,7 +79,7 @@
 }
 
 // SpaceToBatchND Operator
-void SpaceToBatchNDFp32Test(std::vector<armnn::BackendId>& backends)
+void SpaceToBatchNDFp32Test()
 {
     std::vector<int32_t> inputShape { 1, 2, 2, 1 };
     std::vector<int32_t> expectedOutputShape { 4, 1, 1, 1 };
@@ -95,7 +92,6 @@
 
     BatchSpaceTest<float>(tflite::BuiltinOperator_SPACE_TO_BATCH_ND,
                           ::tflite::TensorType_FLOAT32,
-                          backends,
                           inputShape,
                           expectedOutputShape,
                           inputValues,
@@ -104,7 +100,7 @@
                           expectedOutputValues);
 }
 
-void SpaceToBatchNDFp32PaddingTest(std::vector<armnn::BackendId>& backends)
+void SpaceToBatchNDFp32PaddingTest()
 {
     std::vector<int32_t> inputShape { 2, 2, 4, 1 };
     std::vector<int32_t> expectedOutputShape { 8, 1, 3, 1 };
@@ -124,7 +120,6 @@
 
     BatchSpaceTest<float>(tflite::BuiltinOperator_SPACE_TO_BATCH_ND,
                           ::tflite::TensorType_FLOAT32,
-                          backends,
                           inputShape,
                           expectedOutputShape,
                           inputValues,
@@ -133,7 +128,7 @@
                           expectedOutputValues);
 }
 
-void SpaceToBatchNDUint8Test(std::vector<armnn::BackendId>& backends)
+void SpaceToBatchNDUint8Test()
 {
     std::vector<int32_t> inputShape { 1, 2, 2, 3 };
     std::vector<int32_t> expectedOutputShape { 4, 1, 1, 3 };
@@ -146,7 +141,6 @@
 
     BatchSpaceTest<uint8_t>(tflite::BuiltinOperator_SPACE_TO_BATCH_ND,
                             ::tflite::TensorType_UINT8,
-                            backends,
                             inputShape,
                             expectedOutputShape,
                             inputValues,
@@ -156,141 +150,43 @@
 }
 
 // BatchToSpaceND Tests
-TEST_SUITE("BatchToSpaceND_CpuAccTests")
+TEST_SUITE("BatchToSpaceNDTests")
 {
 
-TEST_CASE ("BatchToSpaceND_Fp32_CpuAcc_Test")
+TEST_CASE ("BatchToSpaceND_Fp32_Test")
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    BatchToSpaceNDFp32Test(backends);
+    BatchToSpaceNDFp32Test();
 }
 
-TEST_CASE ("BatchToSpaceND_Fp32_BatchOne_CpuAcc_Test")
+TEST_CASE ("BatchToSpaceND_Fp32_BatchOne_Test")
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    BatchToSpaceNDFp32BatchOneTest(backends);
+    BatchToSpaceNDFp32BatchOneTest();
 }
 
-TEST_CASE ("BatchToSpaceND_Uint8_CpuAcc_Test")
+TEST_CASE ("BatchToSpaceND_Uint8_Test")
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    BatchToSpaceNDUint8Test(backends);
-}
-
-}
-
-TEST_SUITE("BatchToSpaceND_GpuAccTests")
-{
-
-TEST_CASE ("BatchToSpaceND_Fp32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    BatchToSpaceNDFp32Test(backends);
-}
-
-TEST_CASE ("BatchToSpaceND_Fp32_BatchOne_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    BatchToSpaceNDFp32BatchOneTest(backends);
-}
-
-TEST_CASE ("BatchToSpaceND_Uint8_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    BatchToSpaceNDUint8Test(backends);
-}
-
-}
-
-TEST_SUITE("BatchToSpaceND_CpuRefTests")
-{
-
-TEST_CASE ("BatchToSpaceND_Fp32_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    BatchToSpaceNDFp32Test(backends);
-}
-
-TEST_CASE ("BatchToSpaceND_Fp32_BatchOne_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    BatchToSpaceNDFp32BatchOneTest(backends);
-}
-
-TEST_CASE ("BatchToSpaceND_Uint8_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    BatchToSpaceNDUint8Test(backends);
+    BatchToSpaceNDUint8Test();
 }
 
 }
 
 // SpaceToBatchND Tests
-TEST_SUITE("SpaceToBatchND_CpuAccTests")
+TEST_SUITE("SpaceToBatchND_Tests")
 {
 
-TEST_CASE ("SpaceToBatchND_Fp32_CpuAcc_Test")
+TEST_CASE ("SpaceToBatchND_Fp32_Test")
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    SpaceToBatchNDFp32Test(backends);
+    SpaceToBatchNDFp32Test();
 }
 
-TEST_CASE ("SpaceToBatchND_Fp32_Padding_CpuAcc_Test")
+TEST_CASE ("SpaceToBatchND_Fp32_Padding_Test")
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    SpaceToBatchNDFp32PaddingTest(backends);
+    SpaceToBatchNDFp32PaddingTest();
 }
 
-TEST_CASE ("SpaceToBatchND_Uint8_CpuAcc_Test")
+TEST_CASE ("SpaceToBatchND_Uint8_Test")
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    SpaceToBatchNDUint8Test(backends);
-}
-
-}
-
-TEST_SUITE("SpaceToBatchND_GpuAccTests")
-{
-
-TEST_CASE ("SpaceToBatchND_Fp32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    SpaceToBatchNDFp32Test(backends);
-}
-
-TEST_CASE ("SpaceToBatchND_Fp32_Padding_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    SpaceToBatchNDFp32PaddingTest(backends);
-}
-
-TEST_CASE ("SpaceToBatchND_Uint8_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    SpaceToBatchNDUint8Test(backends);
-}
-
-}
-
-TEST_SUITE("SpaceToBatchND_CpuRefTests")
-{
-
-TEST_CASE ("SpaceToBatchND_Fp32_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    SpaceToBatchNDFp32Test(backends);
-}
-
-TEST_CASE ("SpaceToBatchND_Fp32_Padding_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    SpaceToBatchNDFp32PaddingTest(backends);
-}
-
-TEST_CASE ("SpaceToBatchND_Uint8_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    SpaceToBatchNDUint8Test(backends);
+    SpaceToBatchNDUint8Test();
 }
 
 }
diff --git a/delegate/test/BatchSpaceTestHelper.hpp b/delegate/test/BatchSpaceTestHelper.hpp
index 8f3d76a..9c39c30 100644
--- a/delegate/test/BatchSpaceTestHelper.hpp
+++ b/delegate/test/BatchSpaceTestHelper.hpp
@@ -150,7 +150,6 @@
 template <typename T>
 void BatchSpaceTest(tflite::BuiltinOperator controlOperatorCode,
                     tflite::TensorType tensorType,
-                    std::vector<armnn::BackendId>& backends,
                     std::vector<int32_t>& inputShape,
                     std::vector<int32_t>& expectedOutputShape,
                     std::vector<T>& inputValues,
@@ -158,7 +157,8 @@
                     std::vector<std::pair<unsigned int, unsigned int>>& cropsPaddingValues,
                     std::vector<T>& expectedOutputValues,
                     float quantScale = 1.0f,
-                    int quantOffset  = 0)
+                    int quantOffset  = 0,
+                    const std::vector<armnn::BackendId>& backends = {})
 {
     using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateBatchSpaceTfLiteModel(controlOperatorCode,
@@ -179,7 +179,7 @@
     std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
     // Setup interpreter with Arm NN Delegate applied.
-    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
     CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
     CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
diff --git a/delegate/test/BroadcastToTest.cpp b/delegate/test/BroadcastToTest.cpp
index 30909b8..99f1a29 100644
--- a/delegate/test/BroadcastToTest.cpp
+++ b/delegate/test/BroadcastToTest.cpp
@@ -39,37 +39,40 @@
 
     BroadcastToTestImpl<T>(inputTensorType,
                            tflite::BuiltinOperator_BROADCAST_TO,
-                           backends,
                            inputValues,
                            inputShape,
                            shapeShape,
                            shapeData,
                            expectedOutputValues,
-                           expectedOutputShape);
+                           expectedOutputShape,
+                           backends);
 }
 
-TEST_SUITE("BroadcastToTests_CpuRefTests")
+TEST_SUITE("BroadcastToTests_Tests")
 {
 
-    TEST_CASE ("BroadcastTo_int_CpuRef_Test")
+    /**
+     * BroadcastTo is only supported on CpuRef, so every test here pins that backend.
+     */
+    TEST_CASE ("BroadcastTo_int_Test")
     {
         std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
         BroadcastToTest<int32_t>(backends, ::tflite::TensorType::TensorType_INT32);
     }
 
-    TEST_CASE ("BroadcastTo_Float32_CpuRef_Test")
+    TEST_CASE ("BroadcastTo_Float32_Test")
     {
         std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
         BroadcastToTest<float>(backends, ::tflite::TensorType::TensorType_FLOAT32);
     }
 
-    TEST_CASE ("BroadcastTo_Uint8_t_CpuRef_Test")
+    TEST_CASE ("BroadcastTo_Uint8_t_Test")
     {
         std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
         BroadcastToTest<uint8_t>(backends, ::tflite::TensorType::TensorType_UINT8);
     }
 
-    TEST_CASE ("BroadcastTo_Int8_t_CpuRef_Test")
+    TEST_CASE ("BroadcastTo_Int8_t_Test")
     {
         std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
         BroadcastToTest<int8_t>(backends, ::tflite::TensorType::TensorType_INT8);
diff --git a/delegate/test/BroadcastToTestHelper.hpp b/delegate/test/BroadcastToTestHelper.hpp
index 6d05863..8fcb762 100644
--- a/delegate/test/BroadcastToTestHelper.hpp
+++ b/delegate/test/BroadcastToTestHelper.hpp
@@ -119,13 +119,13 @@
     template<typename T>
     void BroadcastToTestImpl(tflite::TensorType inputTensorType,
                              tflite::BuiltinOperator operatorCode,
-                             std::vector<armnn::BackendId>& backends,
                              std::vector<T>& inputValues,
                              std::vector<int32_t> inputShape,
                              std::vector<int32_t> shapeShapes,
                              std::vector<int32_t> shapeData,
                              std::vector<T>& expectedOutputValues,
-                             std::vector<int32_t> expectedOutputShape)
+                             std::vector<int32_t> expectedOutputShape,
+                             const std::vector<armnn::BackendId>& backends)
     {
         using namespace delegateTestInterpreter;
 
@@ -147,7 +147,7 @@
         std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
         // Setup interpreter with Arm NN Delegate applied.
-        auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+        auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
         CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
         CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
         CHECK(armnnInterpreter.FillInputTensor<int32_t>(shapeData, 1) == kTfLiteOk);
diff --git a/delegate/test/CastTest.cpp b/delegate/test/CastTest.cpp
index e02bf2a..a3e749b 100644
--- a/delegate/test/CastTest.cpp
+++ b/delegate/test/CastTest.cpp
@@ -26,10 +26,12 @@
 
     CastTest<uint8_t, float>(::tflite::TensorType_UINT8,
                              ::tflite::TensorType_FLOAT32,
-                             backends,
                              inputShape,
                              inputValues,
-                             expectedOutputValues);
+                             expectedOutputValues,
+                             1.0f,
+                             0,
+                             backends);
 }
 
 void CastInt32ToFp32Test(std::vector<armnn::BackendId>& backends)
@@ -44,47 +46,29 @@
 
     CastTest<int32_t, float>(::tflite::TensorType_INT32,
                              ::tflite::TensorType_FLOAT32,
-                             backends,
                              inputShape,
                              inputValues,
-                             expectedOutputValues);
+                             expectedOutputValues,
+                             1.0f,
+                             0,
+                             backends);
 }
 
 // CAST Test Suite
-TEST_SUITE("CAST_CpuRefTests")
+TEST_SUITE("CASTTests")
 {
 
 TEST_CASE ("CAST_UINT8_TO_FP32_CpuRef_Test")
 {
+    // The UINT8 to FP32 cast is only supported on CpuRef, so pin that backend explicitly.
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     CastUint8ToFp32Test(backends);
 }
 
-TEST_CASE ("CAST_INT32_TO_FP32_CpuRef_Test")
+TEST_CASE ("CAST_INT32_TO_FP32_Test")
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    CastInt32ToFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("CAST_CpuAccTests")
-{
-
-TEST_CASE ("CAST_INT32_TO_FP32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    CastInt32ToFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("CAST_GpuAccTests")
-{
-
-TEST_CASE ("CAST_INT32_TO_FP32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
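+    // Left empty so the test helper can choose from the backends available in this build.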
+    std::vector<armnn::BackendId> backends = {};
     CastInt32ToFp32Test(backends);
 }
 
diff --git a/delegate/test/CastTestHelper.hpp b/delegate/test/CastTestHelper.hpp
index c169be9..47c822c 100644
--- a/delegate/test/CastTestHelper.hpp
+++ b/delegate/test/CastTestHelper.hpp
@@ -96,12 +96,12 @@
 template<typename T, typename K>
 void CastTest(tflite::TensorType inputTensorType,
               tflite::TensorType outputTensorType,
-              std::vector<armnn::BackendId>& backends,
               std::vector<int32_t>& shape,
               std::vector<T>& inputValues,
               std::vector<K>& expectedOutputValues,
               float quantScale = 1.0f,
-              int quantOffset = 0)
+              int quantOffset = 0,
+              const std::vector<armnn::BackendId>& backends = {})
 {
     using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateCastTfLiteModel(inputTensorType,
@@ -119,7 +119,7 @@
     std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
     // Setup interpreter with Arm NN Delegate applied.
-    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
     CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
     CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
diff --git a/delegate/test/ComparisonTest.cpp b/delegate/test/ComparisonTest.cpp
index 8374025..a6eda93 100644
--- a/delegate/test/ComparisonTest.cpp
+++ b/delegate/test/ComparisonTest.cpp
@@ -45,13 +45,15 @@
 
     ComparisonTest<float>(tflite::BuiltinOperator_EQUAL,
                           ::tflite::TensorType_FLOAT32,
-                          backends,
                           input0Shape,
                           input1Shape,
                           expectedOutputShape,
                           input0Values,
                           input1Values,
-                          expectedOutputValues);
+                          expectedOutputValues,
+                          1.0f,
+                          0,
+                          backends);
 }
 
 void EqualBroadcastTest(std::vector<armnn::BackendId>& backends)
@@ -74,13 +77,15 @@
     };
     ComparisonTest<float>(tflite::BuiltinOperator_EQUAL,
                           ::tflite::TensorType_FLOAT32,
-                          backends,
                           input0Shape,
                           input1Shape,
                           expectedOutputShape,
                           input0Values,
                           input1Values,
-                          expectedOutputValues);
+                          expectedOutputValues,
+                          1.0f,
+                          0,
+                          backends);
 }
 
 void EqualInt32Test(std::vector<armnn::BackendId>& backends)
@@ -97,13 +102,15 @@
 
     ComparisonTest<int32_t>(tflite::BuiltinOperator_EQUAL,
                             ::tflite::TensorType_INT32,
-                            backends,
                             input0Shape,
                             input1Shape,
                             expectedOutputShape,
                             input0Values,
                             input1Values,
-                            expectedOutputValues);
+                            expectedOutputValues,
+                            1.0f,
+                            0,
+                            backends);
 }
 
 void NotEqualFP32Test(std::vector<armnn::BackendId>& backends)
@@ -132,13 +139,15 @@
 
     ComparisonTest<float>(tflite::BuiltinOperator_NOT_EQUAL,
                           ::tflite::TensorType_FLOAT32,
-                          backends,
                           input0Shape,
                           input1Shape,
                           expectedOutputShape,
                           input0Values,
                           input1Values,
-                          expectedOutputValues);
+                          expectedOutputValues,
+                          1.0f,
+                          0,
+                          backends);
 }
 
 void NotEqualBroadcastTest(std::vector<armnn::BackendId>& backends)
@@ -161,13 +170,15 @@
     };
     ComparisonTest<float>(tflite::BuiltinOperator_NOT_EQUAL,
                           ::tflite::TensorType_FLOAT32,
-                          backends,
                           input0Shape,
                           input1Shape,
                           expectedOutputShape,
                           input0Values,
                           input1Values,
-                          expectedOutputValues);
+                          expectedOutputValues,
+                          1.0f,
+                          0,
+                          backends);
 }
 
 void NotEqualInt32Test(std::vector<armnn::BackendId>& backends)
@@ -184,13 +195,15 @@
 
     ComparisonTest<int32_t>(tflite::BuiltinOperator_NOT_EQUAL,
                             ::tflite::TensorType_INT32,
-                            backends,
                             input0Shape,
                             input1Shape,
                             expectedOutputShape,
                             input0Values,
                             input1Values,
-                            expectedOutputValues);
+                            expectedOutputValues,
+                            1.0f,
+                            0,
+                            backends);
 }
 
 void GreaterFP32Test(std::vector<armnn::BackendId>& backends)
@@ -207,13 +220,15 @@
 
     ComparisonTest<float>(tflite::BuiltinOperator_GREATER,
                           ::tflite::TensorType_FLOAT32,
-                          backends,
                           input0Shape,
                           input1Shape,
                           expectedOutputShape,
                           input0Values,
                           input1Values,
-                          expectedOutputValues);
+                          expectedOutputValues,
+                          1.0f,
+                          0,
+                          backends);
 }
 
 void GreaterBroadcastTest(std::vector<armnn::BackendId>& backends)
@@ -236,13 +251,15 @@
     };
     ComparisonTest<float>(tflite::BuiltinOperator_GREATER,
                           ::tflite::TensorType_FLOAT32,
-                          backends,
                           input0Shape,
                           input1Shape,
                           expectedOutputShape,
                           input0Values,
                           input1Values,
-                          expectedOutputValues);
+                          expectedOutputValues,
+                          1.0f,
+                          0,
+                          backends);
 }
 
 void GreaterInt32Test(std::vector<armnn::BackendId>& backends)
@@ -259,13 +276,15 @@
 
     ComparisonTest<int32_t>(tflite::BuiltinOperator_GREATER,
                             ::tflite::TensorType_INT32,
-                            backends,
                             input0Shape,
                             input1Shape,
                             expectedOutputShape,
                             input0Values,
                             input1Values,
-                            expectedOutputValues);
+                            expectedOutputValues,
+                            1.0f,
+                            0,
+                            backends);
 }
 
 void GreaterEqualFP32Test(std::vector<armnn::BackendId>& backends)
@@ -282,13 +301,15 @@
 
     ComparisonTest<float>(tflite::BuiltinOperator_GREATER_EQUAL,
                           ::tflite::TensorType_FLOAT32,
-                          backends,
                           input0Shape,
                           input1Shape,
                           expectedOutputShape,
                           input0Values,
                           input1Values,
-                          expectedOutputValues);
+                          expectedOutputValues,
+                          1.0f,
+                          0,
+                          backends);
 }
 
 void GreaterEqualBroadcastTest(std::vector<armnn::BackendId>& backends)
@@ -312,13 +333,15 @@
 
     ComparisonTest<float>(tflite::BuiltinOperator_GREATER_EQUAL,
                           ::tflite::TensorType_FLOAT32,
-                          backends,
                           input0Shape,
                           input1Shape,
                           expectedOutputShape,
                           input0Values,
                           input1Values,
-                          expectedOutputValues);
+                          expectedOutputValues,
+                          1.0f,
+                          0,
+                          backends);
 }
 
 void GreaterEqualInt32Test(std::vector<armnn::BackendId>& backends)
@@ -335,13 +358,15 @@
 
     ComparisonTest<int32_t>(tflite::BuiltinOperator_GREATER_EQUAL,
                             ::tflite::TensorType_INT32,
-                            backends,
                             input0Shape,
                             input1Shape,
                             expectedOutputShape,
                             input0Values,
                             input1Values,
-                            expectedOutputValues);
+                            expectedOutputValues,
+                            1.0f,
+                            0,
+                            backends);
 }
 
 void LessFP32Test(std::vector<armnn::BackendId>& backends)
@@ -358,13 +383,15 @@
 
     ComparisonTest<float>(tflite::BuiltinOperator_LESS,
                           ::tflite::TensorType_FLOAT32,
-                          backends,
                           input0Shape,
                           input1Shape,
                           expectedOutputShape,
                           input0Values,
                           input1Values,
-                          expectedOutputValues);
+                          expectedOutputValues,
+                          1.0f,
+                          0,
+                          backends);
 }
 
 void LessBroadcastTest(std::vector<armnn::BackendId>& backends)
@@ -388,13 +415,15 @@
 
     ComparisonTest<float>(tflite::BuiltinOperator_LESS,
                           ::tflite::TensorType_FLOAT32,
-                          backends,
                           input0Shape,
                           input1Shape,
                           expectedOutputShape,
                           input0Values,
                           input1Values,
-                          expectedOutputValues);
+                          expectedOutputValues,
+                          1.0f,
+                          0,
+                          backends);
 }
 
 void LessInt32Test(std::vector<armnn::BackendId>& backends)
@@ -411,13 +440,15 @@
 
     ComparisonTest<int32_t>(tflite::BuiltinOperator_LESS,
                             ::tflite::TensorType_INT32,
-                            backends,
                             input0Shape,
                             input1Shape,
                             expectedOutputShape,
                             input0Values,
                             input1Values,
-                            expectedOutputValues);
+                            expectedOutputValues,
+                            1.0f,
+                            0,
+                            backends);
 }
 
 void LessEqualFP32Test(std::vector<armnn::BackendId>& backends)
@@ -434,13 +465,15 @@
 
     ComparisonTest<float>(tflite::BuiltinOperator_LESS_EQUAL,
                           ::tflite::TensorType_FLOAT32,
-                          backends,
                           input0Shape,
                           input1Shape,
                           expectedOutputShape,
                           input0Values,
                           input1Values,
-                          expectedOutputValues);
+                          expectedOutputValues,
+                          1.0f,
+                          0,
+                          backends);
 }
 
 void LessEqualBroadcastTest(std::vector<armnn::BackendId>& backends)
@@ -464,13 +497,15 @@
 
     ComparisonTest<float>(tflite::BuiltinOperator_LESS_EQUAL,
                           ::tflite::TensorType_FLOAT32,
-                          backends,
                           input0Shape,
                           input1Shape,
                           expectedOutputShape,
                           input0Values,
                           input1Values,
-                          expectedOutputValues);
+                          expectedOutputValues,
+                          1.0f,
+                          0,
+                          backends);
 }
 
 void LessEqualInt32Test(std::vector<armnn::BackendId>& backends)
@@ -487,357 +522,128 @@
 
     ComparisonTest<int32_t>(tflite::BuiltinOperator_LESS_EQUAL,
                             ::tflite::TensorType_INT32,
-                            backends,
                             input0Shape,
                             input1Shape,
                             expectedOutputShape,
                             input0Values,
                             input1Values,
-                            expectedOutputValues);
+                            expectedOutputValues,
+                            1.0f,
+                            0,
+                            backends);
 }
 
-TEST_SUITE("Comparison_CpuRefTests")
+TEST_SUITE("Comparison_Tests")
 {
 
-TEST_CASE ("EQUAL_FP32_CpuRef_Test")
+TEST_CASE ("EQUAL_FP32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
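+    // An empty list defers backend selection to the helper, which runs the case
+    // against whatever backends are available.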
+    std::vector<armnn::BackendId> backends = { };
     EqualFP32Test(backends);
 }
 
-TEST_CASE ("EQUAL_Broadcast_CpuRef_Test")
+TEST_CASE ("EQUAL_Broadcast_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { };
     EqualBroadcastTest(backends);
 }
 
-TEST_CASE ("EQUAL_INT32_CpuRef_Test")
+TEST_CASE ("EQUAL_INT32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { };
     EqualInt32Test(backends);
 }
 
-TEST_CASE ("NOT_EQUAL_FP32_CpuRef_Test")
+TEST_CASE ("NOT_EQUAL_FP32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { };
     NotEqualFP32Test(backends);
 }
 
-TEST_CASE ("NOT_EQUAL_Broadcast_CpuRef_Test")
+TEST_CASE ("NOT_EQUAL_Broadcast_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { };
     NotEqualBroadcastTest(backends);
 }
 
-TEST_CASE ("NOT_EQUAL_INT32_CpuRef_Test")
+TEST_CASE ("NOT_EQUAL_INT32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { };
     NotEqualInt32Test(backends);
 }
 
-TEST_CASE ("GREATER_FP32_CpuRef_Test")
+TEST_CASE ("GREATER_FP32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { };
     GreaterFP32Test(backends);
 }
 
-TEST_CASE ("GREATER_Broadcast_CpuRef_Test")
+TEST_CASE ("GREATER_Broadcast_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { };
     GreaterBroadcastTest(backends);
 }
 
-TEST_CASE ("GREATER_INT32_CpuRef_Test")
+TEST_CASE ("GREATER_INT32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { };
     GreaterInt32Test(backends);
 }
 
-TEST_CASE ("GREATER_EQUAL_FP32_CpuRef_Test")
+TEST_CASE ("GREATER_EQUAL_FP32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { };
     GreaterEqualFP32Test(backends);
 }
 
-TEST_CASE ("GREATER_EQUAL_Broadcast_CpuRef_Test")
+TEST_CASE ("GREATER_EQUAL_Broadcast_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { };
     GreaterEqualBroadcastTest(backends);
 }
 
-TEST_CASE ("GREATER_EQUAL_INT32_CpuRef_Test")
+TEST_CASE ("GREATER_EQUAL_INT32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { };
     GreaterEqualInt32Test(backends);
 }
 
-TEST_CASE ("LESS_FP32_CpuRef_Test")
+TEST_CASE ("LESS_FP32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { };
     LessFP32Test(backends);
 }
 
-TEST_CASE ("LESS_Broadcast_CpuRef_Test")
+TEST_CASE ("LESS_Broadcast_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { };
     LessBroadcastTest(backends);
 }
 
-TEST_CASE ("LESS_INT32_CpuRef_Test")
+TEST_CASE ("LESS_INT32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { };
     LessInt32Test(backends);
 }
 
-TEST_CASE ("LESS_EQUAL_FP32_CpuRef_Test")
+TEST_CASE ("LESS_EQUAL_FP32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { };
     LessEqualFP32Test(backends);
 }
 
-TEST_CASE ("LESS_EQUAL_Broadcast_CpuRef_Test")
+TEST_CASE ("LESS_EQUAL_Broadcast_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { };
     LessEqualBroadcastTest(backends);
 }
 
-TEST_CASE ("LESS_EQUAL_INT32_CpuRef_Test")
+TEST_CASE ("LESS_EQUAL_INT32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { };
     LessEqualInt32Test(backends);
 }
-} // End TEST_SUITE("Comparison_CpuRefTests")
-
-
-
-TEST_SUITE("Comparison_GpuAccTests")
-{
-
-TEST_CASE ("EQUAL_FP32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    EqualFP32Test(backends);
-}
-
-TEST_CASE ("EQUAL_Broadcast_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    EqualBroadcastTest(backends);
-}
-
-TEST_CASE ("EQUAL_INT32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    EqualInt32Test(backends);
-}
-
-TEST_CASE ("NOT_EQUAL_FP32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    NotEqualFP32Test(backends);
-}
-
-TEST_CASE ("NOT_EQUAL_Broadcast_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    NotEqualBroadcastTest(backends);
-}
-
-TEST_CASE ("NOT_EQUAL_INT32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    NotEqualInt32Test(backends);
-}
-
-TEST_CASE ("GREATER_FP32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
-    GreaterFP32Test(backends);
-}
-
-TEST_CASE ("GREATER_Broadcast_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
-    GreaterBroadcastTest(backends);
-}
-
-TEST_CASE ("GREATER_INT32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
-    GreaterInt32Test(backends);
-}
-
-TEST_CASE ("GREATER_EQUAL_FP32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    GreaterEqualFP32Test(backends);
-}
-
-TEST_CASE ("GREATER_EQUAL_Broadcast_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    GreaterEqualBroadcastTest(backends);
-}
-
-TEST_CASE ("GREATER_EQUAL_INT32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    GreaterEqualInt32Test(backends);
-}
-
-TEST_CASE ("LESS_FP32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    LessFP32Test(backends);
-}
-
-TEST_CASE ("LESS_Broadcast_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    LessBroadcastTest(backends);
-}
-
-TEST_CASE ("LESS_INT32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    LessInt32Test(backends);
-}
-
-TEST_CASE ("LESS_EQUAL_FP32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    LessEqualFP32Test(backends);
-}
-
-TEST_CASE ("LESS_EQUAL_Broadcast_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    LessEqualBroadcastTest(backends);
-}
-
-TEST_CASE ("LESS_EQUAL_INT32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    LessEqualInt32Test(backends);
-}
-
-} // End TEST_SUITE("Comparison_GpuAccTests")
-
-
-TEST_SUITE("Comparison_CpuAccTests")
-{
-
-TEST_CASE ("EQUAL_FP32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    EqualFP32Test(backends);
-}
-
-TEST_CASE ("EQUAL_Broadcast_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    EqualBroadcastTest(backends);
-}
-
-TEST_CASE ("EQUAL_INT32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    EqualInt32Test(backends);
-}
-
-TEST_CASE ("NOT_EQUAL_FP32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    NotEqualFP32Test(backends);
-}
-
-TEST_CASE ("NOT_EQUAL_Broadcast_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    NotEqualBroadcastTest(backends);
-}
-
-TEST_CASE ("NOT_EQUAL_INT32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    NotEqualInt32Test(backends);
-}
-
-TEST_CASE ("GREATER_FP32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    GreaterFP32Test(backends);
-}
-
-TEST_CASE ("GREATER_Broadcast_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    GreaterBroadcastTest(backends);
-}
-
-TEST_CASE ("GREATER_INT32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    GreaterInt32Test(backends);
-}
-
-TEST_CASE ("GREATER_EQUAL_FP32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    GreaterEqualFP32Test(backends);
-}
-
-TEST_CASE ("GREATER_EQUAL_Broadcast_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    GreaterEqualBroadcastTest(backends);
-}
-
-TEST_CASE ("GREATER_EQUAL_INT32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    GreaterEqualInt32Test(backends);
-}
-
-TEST_CASE ("LESS_FP32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    LessFP32Test(backends);
-}
-
-TEST_CASE ("LESS_Broadcast_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    LessBroadcastTest(backends);
-}
-
-TEST_CASE ("LESS_INT32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    LessInt32Test(backends);
-}
-
-TEST_CASE ("LESS_EQUAL_FP32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    LessEqualFP32Test(backends);
-}
-
-TEST_CASE ("LESS_EQUAL_Broadcast_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    LessEqualBroadcastTest(backends);
-}
-
-TEST_CASE ("LESS_EQUAL_INT32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    LessEqualInt32Test(backends);
-}
-
-} // End TEST_SUITE("Comparison_CpuAccTests")
+} // End TEST_SUITE("Comparison_Tests")
 
 } // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/ComparisonTestHelper.hpp b/delegate/test/ComparisonTestHelper.hpp
index dc471d8..436790d 100644
--- a/delegate/test/ComparisonTestHelper.hpp
+++ b/delegate/test/ComparisonTestHelper.hpp
@@ -148,7 +148,6 @@
 template <typename T>
 void ComparisonTest(tflite::BuiltinOperator comparisonOperatorCode,
                     tflite::TensorType tensorType,
-                    std::vector<armnn::BackendId>& backends,
                     std::vector<int32_t>& input0Shape,
                     std::vector<int32_t>& input1Shape,
                     std::vector<int32_t>& outputShape,
@@ -156,7 +155,8 @@
                     std::vector<T>& input1Values,
                     std::vector<bool>& expectedOutputValues,
                     float quantScale = 1.0f,
-                    int quantOffset  = 0)
+                    int quantOffset  = 0,
+                    const std::vector<armnn::BackendId>& backends = {})
 {
     using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateComparisonTfLiteModel(comparisonOperatorCode,
@@ -177,7 +177,7 @@
     std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
     // Setup interpreter with Arm NN Delegate applied.
-    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
     CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
diff --git a/delegate/test/ControlTest.cpp b/delegate/test/ControlTest.cpp
index e567d67..a52be63 100644
--- a/delegate/test/ControlTest.cpp
+++ b/delegate/test/ControlTest.cpp
@@ -15,7 +15,7 @@
 {
 
 // CONCATENATION Operator
-void ConcatUint8TwoInputsTest(std::vector<armnn::BackendId>& backends)
+void ConcatUint8TwoInputsTest()
 {
     std::vector<int32_t> inputShape { 2, 2 };
     std::vector<int32_t> expectedOutputShape { 4, 2 };
@@ -31,14 +31,13 @@
 
     ConcatenationTest<uint8_t>(tflite::BuiltinOperator_CONCATENATION,
                                ::tflite::TensorType_UINT8,
-                               backends,
                                inputShape,
                                expectedOutputShape,
                                inputValues,
                                expectedOutputValues);
 }
 
-void ConcatInt16TwoInputsTest(std::vector<armnn::BackendId>& backends)
+void ConcatInt16TwoInputsTest()
 {
     std::vector<int32_t> inputShape { 2, 2 };
     std::vector<int32_t> expectedOutputShape { 4, 2 };
@@ -53,14 +52,13 @@
 
     ConcatenationTest<int16_t>(tflite::BuiltinOperator_CONCATENATION,
                                ::tflite::TensorType_INT16,
-                               backends,
                                inputShape,
                                expectedOutputShape,
                                inputValues,
                                expectedOutputValues);
 }
 
-void ConcatFloat32TwoInputsTest(std::vector<armnn::BackendId>& backends)
+void ConcatFloat32TwoInputsTest()
 {
     std::vector<int32_t> inputShape { 2, 2 };
     std::vector<int32_t> expectedOutputShape { 4, 2 };
@@ -75,14 +73,13 @@
 
     ConcatenationTest<float>(tflite::BuiltinOperator_CONCATENATION,
                              ::tflite::TensorType_FLOAT32,
-                             backends,
                              inputShape,
                              expectedOutputShape,
                              inputValues,
                              expectedOutputValues);
 }
 
-void ConcatThreeInputsTest(std::vector<armnn::BackendId>& backends)
+void ConcatThreeInputsTest()
 {
     std::vector<int32_t> inputShape { 2, 2 };
     std::vector<int32_t> expectedOutputShape { 6, 2 };
@@ -99,14 +96,13 @@
 
     ConcatenationTest<uint8_t>(tflite::BuiltinOperator_CONCATENATION,
                                ::tflite::TensorType_UINT8,
-                               backends,
                                inputShape,
                                expectedOutputShape,
                                inputValues,
                                expectedOutputValues);
 }
 
-void ConcatAxisTest(std::vector<armnn::BackendId>& backends)
+void ConcatAxisTest()
 {
     std::vector<int32_t> inputShape { 1, 2, 2 };
     std::vector<int32_t> expectedOutputShape { 1, 2, 4 };
@@ -121,7 +117,6 @@
 
     ConcatenationTest<uint8_t>(tflite::BuiltinOperator_CONCATENATION,
                                ::tflite::TensorType_UINT8,
-                               backends,
                                inputShape,
                                expectedOutputShape,
                                inputValues,
@@ -130,7 +125,7 @@
 }
 
 // MEAN Operator
-void MeanUint8KeepDimsTest(std::vector<armnn::BackendId>& backends)
+void MeanUint8KeepDimsTest()
 {
     std::vector<int32_t> input0Shape { 1, 3 };
     std::vector<int32_t> input1Shape { 1 };
@@ -143,7 +138,6 @@
 
     MeanTest<uint8_t>(tflite::BuiltinOperator_MEAN,
                       ::tflite::TensorType_UINT8,
-                      backends,
                       input0Shape,
                       input1Shape,
                       expectedOutputShape,
@@ -153,7 +147,7 @@
                       true);
 }
 
-void MeanUint8Test(std::vector<armnn::BackendId>& backends)
+void MeanUint8Test()
 {
     std::vector<int32_t> input0Shape { 1, 2, 2 };
     std::vector<int32_t> input1Shape { 1 };
@@ -166,7 +160,6 @@
 
     MeanTest<uint8_t>(tflite::BuiltinOperator_MEAN,
                       ::tflite::TensorType_UINT8,
-                      backends,
                       input0Shape,
                       input1Shape,
                       expectedOutputShape,
@@ -176,7 +169,7 @@
                       false);
 }
 
-void MeanFp32KeepDimsTest(std::vector<armnn::BackendId>& backends)
+void MeanFp32KeepDimsTest()
 {
     std::vector<int32_t> input0Shape { 1, 2, 2 };
     std::vector<int32_t> input1Shape { 1 };
@@ -189,7 +182,6 @@
 
     MeanTest<float>(tflite::BuiltinOperator_MEAN,
                     ::tflite::TensorType_FLOAT32,
-                    backends,
                     input0Shape,
                     input1Shape,
                     expectedOutputShape,
@@ -199,7 +191,7 @@
                     true);
 }
 
-void MeanFp32Test(std::vector<armnn::BackendId>& backends)
+void MeanFp32Test()
 {
     std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
     std::vector<int32_t> input1Shape { 1 };
@@ -212,7 +204,6 @@
 
     MeanTest<float>(tflite::BuiltinOperator_MEAN,
                     ::tflite::TensorType_FLOAT32,
-                    backends,
                     input0Shape,
                     input1Shape,
                     expectedOutputShape,
@@ -223,195 +214,58 @@
 }
 
 // CONCATENATION Tests.
-TEST_SUITE("Concatenation_CpuAccTests")
+TEST_SUITE("Concatenation_Tests")
 {
 
-TEST_CASE ("Concatenation_Uint8_CpuAcc_Test")
+TEST_CASE ("Concatenation_Uint8_Test")
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    ConcatUint8TwoInputsTest(backends);
+    ConcatUint8TwoInputsTest();
 }
 
-TEST_CASE ("Concatenation_Int16_CpuAcc_Test")
+TEST_CASE ("Concatenation_Int16_Test")
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    ConcatInt16TwoInputsTest(backends);
+    ConcatInt16TwoInputsTest();
 }
 
-TEST_CASE ("Concatenation_Float32_CpuAcc_Test")
+TEST_CASE ("Concatenation_Float32_Test")
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    ConcatFloat32TwoInputsTest(backends);
+    ConcatFloat32TwoInputsTest();
 }
 
-TEST_CASE ("Concatenation_Three_Inputs_CpuAcc_Test")
+TEST_CASE ("Concatenation_Three_Inputs_Test")
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    ConcatThreeInputsTest(backends);
+    ConcatThreeInputsTest();
 }
 
-TEST_CASE ("Concatenation_Axis_CpuAcc_Test")
+TEST_CASE ("Concatenation_Axis_Test")
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    ConcatAxisTest(backends);
-}
-
-}
-
-TEST_SUITE("Concatenation_GpuAccTests")
-{
-
-TEST_CASE ("Concatenation_Uint8_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    ConcatUint8TwoInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Int16_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    ConcatInt16TwoInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Float32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    ConcatFloat32TwoInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Three_Inputs_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    ConcatThreeInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Axis_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    ConcatAxisTest(backends);
-}
-
-}
-
-TEST_SUITE("Concatenation_CpuRefTests")
-{
-
-TEST_CASE ("Concatenation_Uint8_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    ConcatUint8TwoInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Int16_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    ConcatInt16TwoInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Float32_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    ConcatFloat32TwoInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Three_Inputs_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    ConcatThreeInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Axis_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    ConcatAxisTest(backends);
+    ConcatAxisTest();
 }
 
 }
 
 // MEAN Tests
-TEST_SUITE("Mean_CpuAccTests")
+TEST_SUITE("Mean_Tests")
 {
 
-TEST_CASE ("Mean_Uint8_KeepDims_CpuAcc_Test")
+TEST_CASE ("Mean_Uint8_KeepDims_Test")
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    MeanUint8KeepDimsTest(backends);
+    MeanUint8KeepDimsTest();
 }
 
-TEST_CASE ("Mean_Uint8_CpuAcc_Test")
+TEST_CASE ("Mean_Uint8_Test")
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    MeanUint8Test(backends);
+    MeanUint8Test();
 }
 
-TEST_CASE ("Mean_Fp32_KeepDims_CpuAcc_Test")
+TEST_CASE ("Mean_Fp32_KeepDims_Test")
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    MeanFp32KeepDimsTest(backends);
+    MeanFp32KeepDimsTest();
 }
 
-TEST_CASE ("Mean_Fp32_CpuAcc_Test")
+TEST_CASE ("Mean_Fp32_Test")
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    MeanFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("Mean_GpuAccTests")
-{
-
-TEST_CASE ("Mean_Uint8_KeepDims_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    MeanUint8KeepDimsTest(backends);
-}
-
-TEST_CASE ("Mean_Uint8_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    MeanUint8Test(backends);
-}
-
-TEST_CASE ("Mean_Fp32_KeepDims_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    MeanFp32KeepDimsTest(backends);
-}
-
-TEST_CASE ("Mean_Fp32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    MeanFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("Mean_CpuRefTests")
-{
-
-TEST_CASE ("Mean_Uint8_KeepDims_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    MeanUint8KeepDimsTest(backends);
-}
-
-TEST_CASE ("Mean_Uint8_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    MeanUint8Test(backends);
-}
-
-TEST_CASE ("Mean_Fp32_KeepDims_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    MeanFp32KeepDimsTest(backends);
-}
-
-TEST_CASE ("Mean_Fp32_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    MeanFp32Test(backends);
+    MeanFp32Test();
 }
 
 }
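
With the per-backend Concatenation and Mean suites collapsed into one suite
each, choosing what to run at runtime shifts from picking a backend-specific
suite name to doctest's ordinary filters. Assuming the stock doctest runner,
an invocation such as ./DelegateUnitTests --test-suite="Concatenation_Tests"
or ./DelegateUnitTests --test-case="Mean_Fp32_*" selects a suite or a
wildcarded set of cases, and each case then fans out internally over whichever
backends the binary was compiled with.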
diff --git a/delegate/test/ControlTestHelper.hpp b/delegate/test/ControlTestHelper.hpp
index 0b76ebe..7c2efc8 100644
--- a/delegate/test/ControlTestHelper.hpp
+++ b/delegate/test/ControlTestHelper.hpp
@@ -206,14 +206,14 @@
 template <typename T>
 void ConcatenationTest(tflite::BuiltinOperator controlOperatorCode,
                        tflite::TensorType tensorType,
-                       std::vector<armnn::BackendId>& backends,
                        std::vector<int32_t>& inputShapes,
                        std::vector<int32_t>& expectedOutputShape,
                        std::vector<std::vector<T>>& inputValues,
                        std::vector<T>& expectedOutputValues,
                        int32_t axis = 0,
                        float quantScale = 1.0f,
-                       int quantOffset  = 0)
+                       int quantOffset  = 0,
+                       const std::vector<armnn::BackendId>& backends = {})
 {
     using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateConcatTfLiteModel(controlOperatorCode,
@@ -230,7 +230,7 @@
     CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
 
     // Setup interpreter with Arm NN Delegate applied.
-    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
     CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
 
     for (unsigned int i = 0; i < inputValues.size(); ++i)
@@ -257,7 +257,6 @@
 template <typename T>
 void MeanTest(tflite::BuiltinOperator controlOperatorCode,
               tflite::TensorType tensorType,
-              std::vector<armnn::BackendId>& backends,
               std::vector<int32_t>& input0Shape,
               std::vector<int32_t>& input1Shape,
               std::vector<int32_t>& expectedOutputShape,
@@ -266,7 +265,8 @@
               std::vector<T>& expectedOutputValues,
               const bool keepDims,
               float quantScale = 1.0f,
-              int quantOffset  = 0)
+              int quantOffset  = 0,
+              const std::vector<armnn::BackendId>& backends = {})
 {
     using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateMeanTfLiteModel(controlOperatorCode,
@@ -288,7 +288,7 @@
     std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
     // Setup interpreter with Arm NN Delegate applied.
-    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
     CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
     CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
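
These helpers are where the SUBCASE parameterization described in the commit
message takes effect. Doctest re-enters a TEST_CASE once for each SUBCASE it
discovers, so wrapping each available backend in a SUBCASE yields one named,
isolated run per backend without any per-backend suites. One plausible shape
for that loop, with RunTestOn standing in for a real helper body (both names
are illustrative):

    #include <doctest/doctest.h>
    #include <armnn/BackendId.hpp>
    #include <vector>

    void RunTestOn(const std::vector<armnn::BackendId>& backends); // stand-in

    TEST_CASE("Example_Subcase_Per_Backend")
    {
        // Hypothetical: in the real change this list comes from the
        // compilation flags rather than a hard-coded vector.
        const std::vector<armnn::BackendId> available = { armnn::Compute::CpuRef };

        for (const armnn::BackendId& backend : available)
        {
            // Doctest executes the enclosing TEST_CASE once per SUBCASE,
            // so each backend runs in isolation under its own name.
            SUBCASE(backend.Get().c_str())
            {
                RunTestOn({ backend });
            }
        }
    }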
diff --git a/delegate/test/Convolution2dTest.cpp b/delegate/test/Convolution2dTest.cpp
index 0e11445..e78f538 100644
--- a/delegate/test/Convolution2dTest.cpp
+++ b/delegate/test/Convolution2dTest.cpp
@@ -18,7 +18,7 @@
 namespace armnnDelegate
 {
 
-void Conv2DWithBiasesFp32Test(std::vector<armnn::BackendId>& backends)
+void Conv2DWithBiasesFp32Test()
 {
     // Set input data
     std::vector<int32_t> inputShape { 1, 5, 5, 1 };
@@ -61,7 +61,6 @@
                            1, // dilationY
                            padding,
                            tflite::ActivationFunctionType_NONE,
-                           backends,
                            inputShape,
                            filterShape,
                            outputShape,
@@ -72,7 +71,7 @@
                            biasValues);
 }
 
-void Conv2DWithBiasesInt8Test(std::vector<armnn::BackendId>& backends)
+void Conv2DWithBiasesInt8Test()
 {
     // Set input data
     std::vector<int32_t> inputShape { 1, 2, 2, 1 };
@@ -104,7 +103,6 @@
                                      1, // dilationY
                                      padding,
                                      tflite::ActivationFunctionType_NONE,
-                                     backends,
                                      inputShape,
                                      filterShape,
                                      outputShape,
@@ -115,7 +113,7 @@
                                      biasValues);
 }
 
-void Conv2DWithBiasesReluUint8Test(std::vector<armnn::BackendId>& backends)
+void Conv2DWithBiasesReluUint8Test()
 {
     // Set input data
     std::vector<int32_t> inputShape { 1, 2, 2, 1 };
@@ -156,7 +154,6 @@
                                      1, // dilationY
                                      padding,
                                      tflite::ActivationFunctionType_RELU,
-                                     backends,
                                      inputShape,
                                      filterShape,
                                      outputShape,
@@ -173,7 +170,7 @@
                                      20); // output offset
 }
 
-void Conv2DWithBiasesRelu6Uint8Test(std::vector<armnn::BackendId>& backends)
+void Conv2DWithBiasesRelu6Uint8Test()
 {
     // Set input data
     std::vector<int32_t> inputShape { 1, 2, 2, 1 };
@@ -210,7 +207,6 @@
                                      1, // dilationY
                                      padding,
                                      tflite::ActivationFunctionType_RELU6,
-                                     backends,
                                      inputShape,
                                      filterShape,
                                      outputShape,
@@ -222,7 +218,7 @@
 }
 
 
-void Conv2DPerChannelInt8Test(std::vector<armnn::BackendId>& backends)
+void Conv2DPerChannelInt8Test()
 {
     // Set input data
     std::vector<int32_t> inputShape  { 1,4,4,2 };
@@ -277,7 +273,6 @@
                                      1, // dilationY
                                      padding,
                                      tflite::ActivationFunctionType_NONE,
-                                     backends,
                                      inputShape,
                                      filterShape,
                                      outputShape,
@@ -298,73 +293,24 @@
                                      filterQuantizationDim);
 }
 
-TEST_SUITE("Convolution2dTest_CpuRefTests")
+TEST_SUITE("Convolution2dTest_Tests")
 {
 
-TEST_CASE ("Conv2DWithBiases_Fp32_CpuRef_Test")
+TEST_CASE ("Conv2DWithBiases_Fp32_Test")
 {
-    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    Conv2DWithBiasesFp32Test(backends);
+    Conv2DWithBiasesFp32Test();
 }
 
-TEST_CASE ("Conv2DWithBiases_Int8_CpuRef_Test")
+TEST_CASE ("Conv2DWithBiases_Int8_Test")
 {
-    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    Conv2DWithBiasesInt8Test(backends);
+    Conv2DWithBiasesInt8Test();
 }
 
-TEST_CASE ("Conv2DPerChannel_Int8_CpuRef_Test")
+TEST_CASE ("Conv2DPerChannel_Int8_Test")
 {
-    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    Conv2DPerChannelInt8Test(backends);
+    Conv2DPerChannelInt8Test();
 }
 
-} //End of TEST_SUITE("Convolution2dTest_CpuRef")
-
-TEST_SUITE("Convolution2dTest_CpuAccTests")
-{
-
-TEST_CASE ("Conv2DWithBiases_Fp32_CpuAcc_Test")
-{
-    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    Conv2DWithBiasesFp32Test(backends);
-}
-
-TEST_CASE ("Conv2DWithBiases_Int8_CpuAcc_Test")
-{
-    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    Conv2DWithBiasesInt8Test(backends);
-}
-
-TEST_CASE ("Conv2DPerChannel_Int8_CpuAcc_Test")
-{
-    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    Conv2DPerChannelInt8Test(backends);
-}
-
-} //End of TEST_SUITE("Convolution2dTest_CpuAcc")
-
-TEST_SUITE("Convolution2dTest_GpuAccTests")
-{
-
-TEST_CASE ("Conv2DWithBiases_Fp32_GpuAcc_Test")
-{
-    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    Conv2DWithBiasesFp32Test(backends);
-}
-
-TEST_CASE ("Conv2DWithBiases_Int8_GpuAcc_Test")
-{
-    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    Conv2DWithBiasesInt8Test(backends);
-}
-
-TEST_CASE ("Conv2DPerChannel_Int8_GpuAcc_Test")
-{
-    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    Conv2DPerChannelInt8Test(backends);
-}
-
-} //End of TEST_SUITE("Convolution2dTest_GpuAcc")
+} //End of TEST_SUITE("Convolution2dTest_Tests")
 
 } // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/Convolution3dTest.cpp b/delegate/test/Convolution3dTest.cpp
index e1a0a94..e118936 100644
--- a/delegate/test/Convolution3dTest.cpp
+++ b/delegate/test/Convolution3dTest.cpp
@@ -34,7 +34,7 @@
     return data;
 }
 
-void Conv3DWithBiasesSimpleWithPaddingFp32Test(std::vector<armnn::BackendId>& backends)
+void Conv3DWithBiasesSimpleWithPaddingFp32Test()
 {
     // Set input data
     std::vector<int32_t> inputShape { 1, 2, 2, 2, 1 };
@@ -65,7 +65,6 @@
                              { 1, 1, 1 }, // dilationX, dilationY, dilationZ
                              tflite::Padding_SAME,
                              tflite::ActivationFunctionType_NONE,
-                             backends,
                              inputShape,
                              filterShape,
                              outputShape,
@@ -76,7 +75,7 @@
                              biasValues);
 }
 
-void Conv3DWithBiasesStridesFp32Test(std::vector<armnn::BackendId>& backends)
+void Conv3DWithBiasesStridesFp32Test()
 {
     std::vector<int32_t> inputShape { 1, 3, 10, 10, 1 };
     std::vector<int32_t> filterShape { 3, 5, 5, 1, 1 };
@@ -123,7 +122,6 @@
                              { 1, 1, 1 }, // dilationX, dilationY, dilationZ
                              tflite::Padding_VALID,
                              tflite::ActivationFunctionType_NONE,
-                             backends,
                              inputShape,
                              filterShape,
                              outputShape,
@@ -175,7 +173,6 @@
                              { 3, 3, 3 }, // dilationX, dilationY, dilationZ
                              tflite::Padding_VALID,
                              tflite::ActivationFunctionType_NONE,
-                             backends,
                              inputShape,
                              filterShape,
                              outputShape,
@@ -183,10 +180,21 @@
                              filterValues,
                              expectedOutputValues,
                              biasShape,
-                             biasValues);
+                             biasValues,
+                             {1.0f},
+                             {0},
+                             {1.0f},
+                             {0},
+                             2.0f,
+                             0,
+                             1.0f,
+                             0,
+                             1,
+                             3,
+                             backends);
 }
 
-void Conv3DFp32SmallTest(std::vector<armnn::BackendId>& backends)
+void Conv3DFp32SmallTest()
 {
     std::vector<int32_t> inputShape { 1, 3, 10, 10, 1 };
     std::vector<int32_t> filterShape { 3, 3, 3, 1, 1 };
@@ -226,7 +234,6 @@
                              { 1, 1, 1 }, // dilationX, dilationY, dilationZ
                              tflite::Padding_VALID,
                              tflite::ActivationFunctionType_NONE,
-                             backends,
                              inputShape,
                              filterShape,
                              outputShape,
@@ -240,77 +247,29 @@
 TEST_SUITE("Convolution3dTest_CpuRefTests")
 {
 
-TEST_CASE ("Conv3DWithBiasesSimpleWithPadding_Fp32_CpuRef_Test")
+TEST_CASE ("Conv3DWithBiasesSimpleWithPadding_Fp32_Test")
 {
-    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    Conv3DWithBiasesSimpleWithPaddingFp32Test(backends);
+    Conv3DWithBiasesSimpleWithPaddingFp32Test();
 }
 
-TEST_CASE ("Conv3DWithBiasesStrides_Fp32_CpuRef_Test")
+TEST_CASE ("Conv3DWithBiasesStrides_Fp32_Test")
 {
-    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    Conv3DWithBiasesStridesFp32Test(backends);
+    Conv3DWithBiasesStridesFp32Test();
 }
 
 TEST_CASE ("Conv3DWithBiasesDilation_Fp32_CpuRef_Test")
 {
+    // Known to only work on CpuRef.
     std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
     Conv3DWithBiasesDilationFp32Test(backends);
 }
 
-TEST_CASE ("Conv3DFp32Small_Fp32_CpuRef_Test")
+TEST_CASE ("Conv3DFp32Small_Fp32_Test")
 {
-    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    Conv3DFp32SmallTest(backends);
+    Conv3DFp32SmallTest();
 }
 
-} //End of TEST_SUITE("Convolution3dTest_CpuRefTests")
-
-TEST_SUITE("Convolution3dTest_CpuAccTests")
-{
-
-TEST_CASE ("Conv3DWithBiasesSimpleWithPadding_Fp32_CpuAcc_Test")
-{
-    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    Conv3DWithBiasesSimpleWithPaddingFp32Test(backends);
-}
-
-TEST_CASE ("Conv3DWithBiasesStrides_Fp32_CpuAcc_Test")
-{
-    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    Conv3DWithBiasesStridesFp32Test(backends);
-}
-
-TEST_CASE ("Conv3DFp32Small_Fp32_CpuAcc_Test")
-{
-    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    Conv3DFp32SmallTest(backends);
-}
-
-} //End of TEST_SUITE("Convolution3dTest_CpuAccTests")
-
-TEST_SUITE("Convolution3dTest_GpuAccTests")
-{
-
-TEST_CASE ("Conv3DWithBiasesSimpleWithPadding_Fp32_GpuAcc_Test")
-{
-    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    Conv3DWithBiasesSimpleWithPaddingFp32Test(backends);
-}
-
-TEST_CASE ("Conv3DWithBiasesStrides_Fp32_GpuAcc_Test")
-{
-    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    Conv3DWithBiasesStridesFp32Test(backends);
-}
-
-TEST_CASE ("Conv3DFp32Small_Fp32_GpuAcc_Test")
-{
-    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    Conv3DFp32SmallTest(backends);
-}
-
-} //End of TEST_SUITE("Convolution3dTest_GpuAccTests")
+} //End of TEST_SUITE("Convolution3d_Tests")
 
 #endif
 
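One cost of the defaulted-tail approach is visible in
Conv3DWithBiasesDilationFp32Test above: to reach the new trailing backends
parameter, the call must restate ten intermediate defaults positionally,
which is easy to get wrong if the signature ever changes. A hypothetical
alternative, not part of this change, is to gather the rarely-overridden
arguments into an options struct so that pinned call sites name only the
field they set (field names below are illustrative):

    // Hypothetical refactor sketch - not part of this change.
    #include <armnn/BackendId.hpp>
    #include <vector>

    struct Conv3dTestOptions
    {
        float   quantScale            = 1.0f;
        int     quantOffset           = 0;
        int32_t depthMultiplier       = 1;
        int32_t filterQuantizationDim = 3;
        std::vector<armnn::BackendId> backends; // empty => all available
    };

    // A pinned call site would then touch a single field:
    Conv3dTestOptions options;
    options.backends = { armnn::Compute::CpuRef };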
diff --git a/delegate/test/ConvolutionTestHelper.hpp b/delegate/test/ConvolutionTestHelper.hpp
index bb8852e..f651ad5 100644
--- a/delegate/test/ConvolutionTestHelper.hpp
+++ b/delegate/test/ConvolutionTestHelper.hpp
@@ -201,7 +201,6 @@
                      uint32_t dilationY,
                      tflite::Padding padding,
                      tflite::ActivationFunctionType fused_activation_function,
-                     std::vector<armnn::BackendId>& backends,
                      std::vector<int32_t>& inputShape,
                      std::vector<int32_t>& filterShape,
                      std::vector<int32_t>& outputShape,
@@ -219,8 +218,8 @@
                      float quantScale = 1.0f,
                      int quantOffset = 0,
                      int32_t depth_multiplier = 1,
-                     int32_t filterQuantizationDim = 3)
-
+                     int32_t filterQuantizationDim = 3,
+                     const std::vector<armnn::BackendId>& backends = {})
 {
     using namespace delegateTestInterpreter;
 
@@ -259,7 +258,7 @@
     std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
     // Setup interpreter with Arm NN Delegate applied.
-    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
     CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
     CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
@@ -437,7 +436,6 @@
                        std::vector<uint32_t> dilation,
                        tflite::Padding padding,
                        tflite::ActivationFunctionType fused_activation_function,
-                       std::vector<armnn::BackendId>& backends,
                        std::vector<int32_t>& inputShape,
                        std::vector<int32_t>& filterShape,
                        std::vector<int32_t>& outputShape,
@@ -455,7 +453,8 @@
                        float quantScale = 1.0f,
                        int quantOffset = 0,
                        int32_t depth_multiplier = 1,
-                       int32_t filterQuantizationDim = 3)
+                       int32_t filterQuantizationDim = 3,
+                       const std::vector<armnn::BackendId>& backends = {})
 {
     using namespace delegateTestInterpreter;
 
@@ -492,7 +491,7 @@
     std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
     // Setup interpreter with Arm NN Delegate applied.
-    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
     CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
     CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
@@ -631,8 +630,7 @@
 }
 
 template <typename T>
-void TransposeConvTest(std::vector<armnn::BackendId>& backends,
-                       tflite::TensorType tensorType,
+void TransposeConvTest(tflite::TensorType tensorType,
                        uint32_t strideX,
                        uint32_t strideY,
                        tflite::Padding padding,
@@ -649,7 +647,8 @@
                        float outputQuantScale = 1.0f,
                        int outputQuantOffset = 0,
                        float quantScale = 1.0f,
-                       int quantOffset = 0)
+                       int quantOffset = 0,
+                       const std::vector<armnn::BackendId>& backends = {})
 {
     using namespace delegateTestInterpreter;
 
@@ -681,7 +680,7 @@
     std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
     // Setup interpreter with Arm NN Delegate applied.
-    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
     CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 2) == kTfLiteOk);
     CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
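
TransposeConvTest gets the same treatment but also swaps its first parameter:
backends moves from the front of the signature to the defaulted tail, and
tensorType takes its place. That ordering is a useful safety property of the
migration, since a stale call written against the old signature fails to
compile (a std::vector<armnn::BackendId> does not convert to
tflite::TensorType) instead of silently binding arguments to the wrong
parameters.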
diff --git a/delegate/test/DepthwiseConvolution2dTest.cpp b/delegate/test/DepthwiseConvolution2dTest.cpp
index 755c6ec..ebbe7fc 100644
--- a/delegate/test/DepthwiseConvolution2dTest.cpp
+++ b/delegate/test/DepthwiseConvolution2dTest.cpp
@@ -18,7 +18,7 @@
 namespace armnnDelegate
 {
 
-void DepthwiseConv2dValidReluFp32Test(std::vector<armnn::BackendId>& backends)
+void DepthwiseConv2dValidReluFp32Test()
 {
     // Set input data
     std::vector<int32_t> inputShape { 1, 3, 2, 2 };
@@ -60,7 +60,6 @@
                            1, // dilationY
                            padding,
                            tflite::ActivationFunctionType_RELU,
-                           backends,
                            inputShape,
                            filterShape,
                            outputShape,
@@ -80,7 +79,7 @@
                            depth_multiplier);
 }
 
-void DepthwiseConv2dSameUint8Test(std::vector<armnn::BackendId>& backends)
+void DepthwiseConv2dSameUint8Test()
 {
     // Set input data
     std::vector<int32_t> inputShape { 1, 3, 3, 1 };
@@ -116,7 +115,6 @@
                                       1, // dilationY
                                       padding,
                                       tflite::ActivationFunctionType_NONE,
-                                      backends,
                                       inputShape,
                                       filterShape,
                                       outputShape,
@@ -200,7 +198,6 @@
                                       1, // dilationY
                                       padding,
                                       tflite::ActivationFunctionType_NONE,
-                                      backends,
                                       inputShape,
                                       filterShape,
                                       outputShape,
@@ -218,64 +215,30 @@
                                       inputScale,
                                       0,
                                       depth_multiplier,
-                                      filterQuantizationDim);
+                                      filterQuantizationDim,
+                                      backends);
 }
 
-TEST_SUITE("DepthwiseConv2d_CpuRef_Tests")
+TEST_SUITE("DepthwiseConv2d_Tests")
 {
 
-TEST_CASE ("DepthwiseConv2d_Valid_Relu_Fp32_CpuRef_Test")
+TEST_CASE ("DepthwiseConv2d_Valid_Relu_Fp32_Test")
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    DepthwiseConv2dValidReluFp32Test(backends);
+    DepthwiseConv2dValidReluFp32Test();
 }
 
-TEST_CASE ("DepthwiseConv2d_Same_Uint8_CpuRef_Test")
+TEST_CASE ("DepthwiseConv2d_Same_Uint8_Test")
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    DepthwiseConv2dSameUint8Test(backends);
+    DepthwiseConv2dSameUint8Test();
 }
 
 TEST_CASE ("DepthwiseConv2d_Same_Int8_PerChannelQuantization_CpuRef_Test")
 {
+    // Only works on CpuRef.
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     DepthwiseConv2dSameInt8PerChannelTest(backends);
 }
 
-}//End of TEST_SUITE("DepthwiseConv2d_CpuRef_Tests")
-
-TEST_SUITE("DepthwiseConv2d_CpuAcc_Tests")
-{
-
-TEST_CASE ("DepthwiseConv2d_Valid_Relu_Fp32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    DepthwiseConv2dValidReluFp32Test(backends);
-}
-
-TEST_CASE ("DepthwiseConv2d_Same_Uint8_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    DepthwiseConv2dSameUint8Test(backends);
-}
-
-}//End of TEST_SUITE("DepthwiseConv2d_CpuAcc_Tests")
-
-TEST_SUITE("DepthwiseConv2d_GpuAcc_Tests")
-{
-
-TEST_CASE ("DepthwiseConv2d_Valid_Relu_Fp32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    DepthwiseConv2dValidReluFp32Test(backends);
-}
-
-TEST_CASE ("DepthwiseConv2d_Same_Uint8_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    DepthwiseConv2dSameUint8Test(backends);
-}
-
-}//End of TEST_SUITE("DepthwiseConv2d_GpuAcc_Tests")
+}//End of TEST_SUITE("DepthwiseConv2d_Tests")
 
 } // namespace armnnDelegate
\ No newline at end of file
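
The per-channel depthwise case above, like the Conv3D dilation and DIV_UINT8
cases, opts out of the backend fan-out by keeping an explicit
{armnn::Compute::CpuRef} list and passing it through the trailing parameter.
The empty-vector default plus explicit-override design keeps that pinning a
local, visible decision at the call site, with the "Only works on CpuRef"
comments recording why, rather than a property of which suite the case
happens to live in.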
diff --git a/delegate/test/ElementwiseBinaryTest.cpp b/delegate/test/ElementwiseBinaryTest.cpp
index a36708d..49d4ae4 100644
--- a/delegate/test/ElementwiseBinaryTest.cpp
+++ b/delegate/test/ElementwiseBinaryTest.cpp
@@ -19,7 +19,7 @@
 namespace armnnDelegate
 {
 
-void AddFP32Test(std::vector<armnn::BackendId>& backends)
+void AddFP32Test()
 {
     std::vector<int32_t> input0Shape { 2, 2, 2, 3 };
     std::vector<int32_t> input1Shape { 2, 2, 2, 3 };
@@ -73,7 +73,6 @@
     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
                                  tflite::ActivationFunctionType_NONE,
                                  ::tflite::TensorType_FLOAT32,
-                                 backends,
                                  input0Shape,
                                  input1Shape,
                                  expectedOutputShape,
@@ -82,7 +81,7 @@
                                  expectedOutputValues);
 }
 
-void AddBroadcastTest(std::vector<armnn::BackendId>& backends)
+void AddBroadcastTest()
 {
     std::vector<int32_t> input0Shape { 1, 3, 2, 1 };
     std::vector<int32_t> input1Shape { 1, 1, 2, 3 };
@@ -120,7 +119,6 @@
     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
                                  tflite::ActivationFunctionType_NONE,
                                  ::tflite::TensorType_FLOAT32,
-                                 backends,
                                  input0Shape,
                                  input1Shape,
                                  expectedOutputShape,
@@ -129,7 +127,7 @@
                                  expectedOutputValues);
 }
 
-void AddConstInputTest(std::vector<armnn::BackendId>& backends)
+void AddConstInputTest()
 {
     std::vector<int32_t> input0Shape { 1, 3, 2, 1 };
     std::vector<int32_t> input1Shape { 1 };
@@ -166,7 +164,6 @@
     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
                                  tflite::ActivationFunctionType_NONE,
                                  ::tflite::TensorType_FLOAT32,
-                                 backends,
                                  input0Shape,
                                  input1Shape,
                                  expectedOutputShape,
@@ -178,7 +175,7 @@
                                  true);
 }
 
-void AddActivationTest(std::vector<armnn::BackendId>& backends)
+void AddActivationTest()
 {
     std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
     std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
@@ -191,7 +188,6 @@
     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
                                  tflite::ActivationFunctionType_RELU,
                                  ::tflite::TensorType_FLOAT32,
-                                 backends,
                                  input0Shape,
                                  input1Shape,
                                  expectedOutputShape,
@@ -200,7 +196,7 @@
                                  expectedOutputValues);
 }
 
-void AddUint8Test(std::vector<armnn::BackendId>& backends)
+void AddUint8Test()
 {
     std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
     std::vector<int32_t> input1Shape { 1, 2, 2, 3 };
@@ -227,7 +223,6 @@
     ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_ADD,
                                    tflite::ActivationFunctionType_NONE,
                                    ::tflite::TensorType_UINT8,
-                                   backends,
                                    input0Shape,
                                    input1Shape,
                                    expectedOutputShape,
@@ -236,7 +231,7 @@
                                    expectedOutputValues, 7.0f, 3);
 }
 
-void DivFP32Test(std::vector<armnn::BackendId>& backends)
+void DivFP32Test()
 {
     std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
     std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
@@ -264,7 +259,6 @@
     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_DIV,
                                  tflite::ActivationFunctionType_NONE,
                                  ::tflite::TensorType_FLOAT32,
-                                 backends,
                                  input0Shape,
                                  input1Shape,
                                  expectedOutputShape,
@@ -273,7 +267,7 @@
                                  expectedOutputValues);
 }
 
-void DivBroadcastTest(std::vector<armnn::BackendId>& backends)
+void DivBroadcastTest()
 {
     std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
     std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
@@ -286,7 +280,6 @@
     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_DIV,
                                  tflite::ActivationFunctionType_NONE,
                                  ::tflite::TensorType_FLOAT32,
-                                 backends,
                                  input0Shape,
                                  input1Shape,
                                  expectedOutputShape,
@@ -323,16 +316,19 @@
     ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_DIV,
                                    tflite::ActivationFunctionType_NONE,
                                    ::tflite::TensorType_UINT8,
-                                   backends,
                                    input0Shape,
                                    input1Shape,
                                    expectedOutputShape,
                                    input0Values,
                                    input1Values,
-                                   expectedOutputValues, 0.25f, 0);
+                                   expectedOutputValues,
+                                   0.25f,
+                                   0,
+                                   false,
+                                   backends);
 }
 
-void FloorDivFP32Test(std::vector<armnn::BackendId>& backends)
+void FloorDivFP32Test()
 {
     std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
     std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
@@ -359,7 +355,6 @@
     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_FLOOR_DIV,
                                  tflite::ActivationFunctionType_NONE,
                                  ::tflite::TensorType_FLOAT32,
-                                 backends,
                                  input0Shape,
                                  input1Shape,
                                  expectedOutputShape,
@@ -369,7 +364,7 @@
 
 }
 
-void MaxFP32Test(std::vector<armnn::BackendId>& backends)
+void MaxFP32Test()
 {
     std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
     std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
@@ -397,7 +392,6 @@
     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MAXIMUM,
                                  tflite::ActivationFunctionType_NONE,
                                  ::tflite::TensorType_FLOAT32,
-                                 backends,
                                  input0Shape,
                                  input1Shape,
                                  expectedOutputShape,
@@ -406,7 +400,7 @@
                                  expectedOutputValues);
 }
 
-void MaxBroadcastTest(std::vector<armnn::BackendId>& backends)
+void MaxBroadcastTest()
 {
     std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
     std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
@@ -419,7 +413,6 @@
     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MAXIMUM,
                                  tflite::ActivationFunctionType_NONE,
                                  ::tflite::TensorType_FLOAT32,
-                                 backends,
                                  input0Shape,
                                  input1Shape,
                                  expectedOutputShape,
@@ -428,7 +421,7 @@
                                  expectedOutputValues);
 }
 
-void MaxUint8Test(std::vector<armnn::BackendId>& backends)
+void MaxUint8Test()
 {
     std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
     std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
@@ -456,7 +449,6 @@
     ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_MAXIMUM,
                                    tflite::ActivationFunctionType_NONE,
                                    ::tflite::TensorType_UINT8,
-                                   backends,
                                    input0Shape,
                                    input1Shape,
                                    expectedOutputShape,
@@ -465,7 +457,7 @@
                                    expectedOutputValues, 1.0f, 0);
 }
 
-void MinFP32Test(std::vector<armnn::BackendId>& backends)
+void MinFP32Test()
 {
     std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
     std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
@@ -493,7 +485,6 @@
     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MINIMUM,
                                  tflite::ActivationFunctionType_NONE,
                                  ::tflite::TensorType_FLOAT32,
-                                 backends,
                                  input0Shape,
                                  input1Shape,
                                  expectedOutputShape,
@@ -502,7 +493,7 @@
                                  expectedOutputValues);
 }
 
-void MinBroadcastTest(std::vector<armnn::BackendId>& backends)
+void MinBroadcastTest()
 {
     std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
     std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
@@ -517,7 +508,6 @@
     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MINIMUM,
                                  tflite::ActivationFunctionType_NONE,
                                  ::tflite::TensorType_FLOAT32,
-                                 backends,
                                  input0Shape,
                                  input1Shape,
                                  expectedOutputShape,
@@ -526,7 +516,7 @@
                                  expectedOutputValues);
 }
 
-void MinUint8Test(std::vector<armnn::BackendId>& backends)
+void MinUint8Test()
 {
     std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
     std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
@@ -554,7 +544,6 @@
     ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_MINIMUM,
                                    tflite::ActivationFunctionType_NONE,
                                    ::tflite::TensorType_UINT8,
-                                   backends,
                                    input0Shape,
                                    input1Shape,
                                    expectedOutputShape,
@@ -563,7 +552,7 @@
                                    expectedOutputValues, 1.0f, 0);
 }
 
-void MulFP32Test(std::vector<armnn::BackendId>& backends)
+void MulFP32Test()
 {
     std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
     std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
@@ -591,7 +580,6 @@
     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MUL,
                                  tflite::ActivationFunctionType_NONE,
                                  ::tflite::TensorType_FLOAT32,
-                                 backends,
                                  input0Shape,
                                  input1Shape,
                                  expectedOutputShape,
@@ -600,7 +588,7 @@
                                  expectedOutputValues);
 }
 
-void MulBroadcastTest(std::vector<armnn::BackendId>& backends)
+void MulBroadcastTest()
 {
     std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
     std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
@@ -613,7 +601,6 @@
     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MUL,
                                  tflite::ActivationFunctionType_NONE,
                                  ::tflite::TensorType_FLOAT32,
-                                 backends,
                                  input0Shape,
                                  input1Shape,
                                  expectedOutputShape,
@@ -622,7 +609,7 @@
                                  expectedOutputValues);
 }
 
-void MulUint8Test(std::vector<armnn::BackendId>& backends)
+void MulUint8Test()
 {
     std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
     std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
@@ -646,7 +633,6 @@
     ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_MUL,
                                    tflite::ActivationFunctionType_NONE,
                                    ::tflite::TensorType_UINT8,
-                                   backends,
                                    input0Shape,
                                    input1Shape,
                                    expectedOutputShape,
@@ -655,7 +641,7 @@
                                    expectedOutputValues, 1.0f, 0);
 }
 
-void MulActivationTest(std::vector<armnn::BackendId>& backends)
+void MulActivationTest()
 {
     std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
     std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
@@ -668,7 +654,6 @@
     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MUL,
                                  tflite::ActivationFunctionType_RELU,
                                  ::tflite::TensorType_FLOAT32,
-                                 backends,
                                  input0Shape,
                                  input1Shape,
                                  expectedOutputShape,
@@ -677,7 +662,7 @@
                                  expectedOutputValues);
 }
 
-void SubFP32Test(std::vector<armnn::BackendId>& backends)
+void SubFP32Test()
 {
     std::vector<int32_t> input0Shape { 1, 1, 2, 2 };
     std::vector<int32_t> input1Shape { 1, 1, 2, 2 };
@@ -690,7 +675,6 @@
     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_SUB,
                                  tflite::ActivationFunctionType_NONE,
                                  ::tflite::TensorType_FLOAT32,
-                                 backends,
                                  input0Shape,
                                  input1Shape,
                                  expectedOutputShape,
@@ -699,7 +683,7 @@
                                  expectedOutputValues);
 }
 
-void PowerFP32Test(std::vector<armnn::BackendId>& backends)
+void PowerFP32Test()
 {
     std::vector<int32_t> input0Shape { 1, 1, 2, 2 };
     std::vector<int32_t> input1Shape { 1, 1, 2, 2 };
@@ -712,7 +696,6 @@
     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_POW,
                                  tflite::ActivationFunctionType_NONE,
                                  ::tflite::TensorType_FLOAT32,
-                                 backends,
                                  input0Shape,
                                  input1Shape,
                                  expectedOutputShape,
@@ -721,7 +704,7 @@
                                  expectedOutputValues);
 }
 
-void SqDiffFP32Test(std::vector<armnn::BackendId>& backends)
+void SqDiffFP32Test()
 {
     std::vector<int32_t> input0Shape { 1, 1, 2, 2 };
     std::vector<int32_t> input1Shape { 1, 1, 2, 2 };
@@ -734,7 +717,6 @@
     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_SQUARED_DIFFERENCE,
                                  tflite::ActivationFunctionType_NONE,
                                  ::tflite::TensorType_FLOAT32,
-                                 backends,
                                  input0Shape,
                                  input1Shape,
                                  expectedOutputShape,
@@ -743,7 +725,7 @@
                                  expectedOutputValues);
 }
 
-void SubBroadcastTest(std::vector<armnn::BackendId>& backends)
+void SubBroadcastTest()
 {
     std::vector<int32_t> input0Shape { 1, 1, 2, 2 };
     std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
@@ -756,7 +738,6 @@
     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_SUB,
                                  tflite::ActivationFunctionType_NONE,
                                  ::tflite::TensorType_FLOAT32,
-                                 backends,
                                  input0Shape,
                                  input1Shape,
                                  expectedOutputShape,
@@ -765,7 +746,7 @@
                                  expectedOutputValues);
 }
 
-void SubUint8Test(std::vector<armnn::BackendId>& backends)
+void SubUint8Test()
 {
     std::vector<int32_t> input0Shape { 1, 1, 2, 2 };
     std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
@@ -778,7 +759,6 @@
     ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_SUB,
                                    tflite::ActivationFunctionType_NONE,
                                    ::tflite::TensorType_UINT8,
-                                   backends,
                                    input0Shape,
                                    input1Shape,
                                    expectedOutputShape,
@@ -787,404 +767,129 @@
                                    expectedOutputValues, 1.0f, 0);
 }
 
-TEST_SUITE("ElementwiseBinary_GpuAccTests")
+TEST_SUITE("ElementwiseBinary_Tests")
 {
 
-TEST_CASE ("ADD_FP32_GpuAcc_Test")
+TEST_CASE ("ADD_FP32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    AddFP32Test(backends);
+    AddFP32Test();
 }
 
-TEST_CASE ("ADD_Broadcast_GpuAcc_Test")
+TEST_CASE ("ADD_Broadcast_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    AddBroadcastTest(backends);
+    AddBroadcastTest();
 }
 
-TEST_CASE ("ADD_Activation_GpuAcc_Test")
+TEST_CASE ("ADD_Constant_Input_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    AddActivationTest(backends);
+    AddConstInputTest();
 }
 
-TEST_CASE ("ADD_UINT8_GpuAcc_Test")
+TEST_CASE ("ADD_Activation_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    AddUint8Test(backends);
+    AddActivationTest();
 }
 
-TEST_CASE ("DIV_FP32_GpuAcc_Test")
+TEST_CASE ("ADD_UINT8_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    DivFP32Test(backends);
+    AddUint8Test();
 }
 
-TEST_CASE ("DIV_Broadcast_GpuAcc_Test")
+TEST_CASE ("DIV_FP32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    DivBroadcastTest(backends);
+    DivFP32Test();
 }
 
-TEST_CASE ("FLOORDIV_FP32_GpuAcc_Test")
+TEST_CASE ("DIV_Broadcast_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    FloorDivFP32Test(backends);
+    DivBroadcastTest();
 }
 
-TEST_CASE ("MAX_FP32_GpuAcc_Test")
+TEST_CASE ("FLOORDIV_FP32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    MaxFP32Test(backends);
+    FloorDivFP32Test();
 }
 
-TEST_CASE ("MAX_Broadcast_GpuAcc_Test")
+TEST_CASE ("DIV_UINT8_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    MaxBroadcastTest(backends);
-}
-
-TEST_CASE ("MAX_UINT8_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    MaxUint8Test(backends);
-}
-
-TEST_CASE ("MIN_FP32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    MinFP32Test(backends);
-}
-
-TEST_CASE ("MIN_Broadcast_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    MinBroadcastTest(backends);
-}
-
-TEST_CASE ("MIN_UINT8_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    MinUint8Test(backends);
-}
-
-TEST_CASE ("MUL_FP32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    MulFP32Test(backends);
-}
-
-TEST_CASE ("MUL_Broadcast_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    MulBroadcastTest(backends);
-}
-
-TEST_CASE ("MUL_Activation_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    MulActivationTest(backends);
-}
-
-TEST_CASE ("MUL_UINT8_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    MulUint8Test(backends);
-}
-
-TEST_CASE ("SUB_FP32_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    SubFP32Test(backends);
-}
-
-TEST_CASE ("SUB_Broadcast_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    SubBroadcastTest(backends);
-}
-
-TEST_CASE ("SUB_UINT8_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    SubUint8Test(backends);
-}
-
-} //TEST_SUITE("ElementwiseBinary_GpuAccTests")
-
-
-
-TEST_SUITE("ElementwiseBinary_CpuAccTests")
-{
-
-TEST_CASE ("ADD_FP32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    AddFP32Test(backends);
-}
-
-TEST_CASE ("ADD_Broadcast_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    AddBroadcastTest(backends);
-}
-
-TEST_CASE ("ADD_Activation_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    AddActivationTest(backends);
-}
-
-TEST_CASE ("ADD_UINT8_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    AddUint8Test(backends);
-}
-
-TEST_CASE ("DIV_FP32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    DivFP32Test(backends);
-}
-
-TEST_CASE ("DIV_Broadcast_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    DivBroadcastTest(backends);
-}
-
-TEST_CASE ("FLOORDIV_FP32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    FloorDivFP32Test(backends);
-}
-
-TEST_CASE ("MAX_FP32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    MaxFP32Test(backends);
-}
-
-TEST_CASE ("MAX_Broadcast_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    MaxBroadcastTest(backends);
-}
-
-TEST_CASE ("MAX_UINT8_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    MaxUint8Test(backends);
-}
-
-TEST_CASE ("MIN_FP32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    MinFP32Test(backends);
-}
-
-TEST_CASE ("MIN_Broadcast_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    MinBroadcastTest(backends);
-}
-
-TEST_CASE ("MIN_UINT8_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    MinUint8Test(backends);
-}
-
-TEST_CASE ("MUL_FP32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    MulFP32Test(backends);
-}
-
-TEST_CASE ("MUL_Broadcast_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    MulBroadcastTest(backends);
-}
-
-TEST_CASE ("MUL_Actiation_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    MulActivationTest(backends);
-}
-
-TEST_CASE ("MUL_UINT8_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    MulUint8Test(backends);
-}
-
-TEST_CASE ("SUB_FP32_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    SubFP32Test(backends);
-}
-
-TEST_CASE ("SUB_Broadcast_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    SubBroadcastTest(backends);
-}
-
-TEST_CASE ("SUB_UINT8_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    SubUint8Test(backends);
-}
-
-} // TEST_SUITE("ElementwiseBinary_CpuAccTests")
-
-
-TEST_SUITE("ElementwiseBinary_CpuRefTests")
-{
-
-TEST_CASE ("ADD_FP32_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    AddFP32Test(backends);
-}
-
-TEST_CASE ("ADD_Broadcast_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    AddBroadcastTest(backends);
-}
-
-TEST_CASE ("ADD_Constant_Input_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    AddConstInputTest(backends);
-}
-
-TEST_CASE ("ADD_Activation_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    AddActivationTest(backends);
-}
-
-TEST_CASE ("ADD_UINT8_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    AddUint8Test(backends);
-}
-
-TEST_CASE ("DIV_FP32_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    DivFP32Test(backends);
-}
-
-TEST_CASE ("DIV_Broadcast_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    DivBroadcastTest(backends);
-}
-
-TEST_CASE ("FLOORDIV_FP32_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    FloorDivFP32Test(backends);
-}
-
-TEST_CASE ("DIV_UINT8_CpuRef_Test")
-{
+    // Only works on CpuRef.
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     DivUint8Test(backends);
 }
 
-TEST_CASE ("MAX_FP32_CpuRef_Test")
+TEST_CASE ("MAX_FP32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    MaxFP32Test(backends);
+    MaxFP32Test();
 }
 
-TEST_CASE ("MAX_Broadcast_CpuRef_Test")
+TEST_CASE ("MAX_Broadcast_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    MaxBroadcastTest(backends);
+    MaxBroadcastTest();
 }
 
-TEST_CASE ("MAX_UINT8_CpuRef_Test")
+TEST_CASE ("MAX_UINT8_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    MaxUint8Test(backends);
+    MaxUint8Test();
 }
 
-TEST_CASE ("MIN_FP32_CpuRef_Test")
+TEST_CASE ("MIN_FP32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    MinFP32Test(backends);
+    MinFP32Test();
 }
 
-TEST_CASE ("MIN_Broadcast_CpuRef_Test")
+TEST_CASE ("MIN_Broadcast_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    MinBroadcastTest(backends);
+    MinBroadcastTest();
 }
 
-TEST_CASE ("MIN_UINT8_CpuRef_Test")
+TEST_CASE ("MIN_UINT8_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    MinUint8Test(backends);
+    MinUint8Test();
 }
 
-TEST_CASE ("MUL_FP32_CpuRef_Test")
+TEST_CASE ("MUL_FP32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    MulFP32Test(backends);
+    MulFP32Test();
 }
 
-TEST_CASE ("MUL_Broadcast_CpuRef_Test")
+TEST_CASE ("MUL_Broadcast_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    MulBroadcastTest(backends);
+    MulBroadcastTest();
 }
 
-TEST_CASE ("MUL_Actiation_CpuRef_Test")
+TEST_CASE ("MUL_Actiation_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    MulActivationTest(backends);
+    MulActivationTest();
 }
 
-TEST_CASE ("MUL_UINT8_CpuRef_Test")
+TEST_CASE ("MUL_UINT8_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    MulUint8Test(backends);
+    MulUint8Test();
 }
 
-TEST_CASE ("SUB_FP32_CpuRef_Test")
+TEST_CASE ("SUB_FP32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    SubFP32Test(backends);
+    SubFP32Test();
 }
 
-TEST_CASE ("SUB_Broadcast_CpuRef_Test")
+TEST_CASE ("SUB_Broadcast_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    SubBroadcastTest(backends);
+    SubBroadcastTest();
 }
 
-TEST_CASE ("SUB_UINT8_CpuRef_Test")
+TEST_CASE ("SUB_UINT8_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    SubUint8Test(backends);
+    SubUint8Test();
 }
 
-TEST_CASE ("SqDiffFP32_CpuRef_Test")
+TEST_CASE ("SqDiffFP32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    SqDiffFP32Test(backends);
+    SqDiffFP32Test();
 }
 
-TEST_CASE ("PowerFP32_CpuRef_Test")
+TEST_CASE ("PowerFP32_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    PowerFP32Test(backends);
+    PowerFP32Test();
 }
 
 } // TEST_SUITE("ElementwiseBinary_CpuRefTests")
diff --git a/delegate/test/ElementwiseBinaryTestHelper.hpp b/delegate/test/ElementwiseBinaryTestHelper.hpp
index e2887a2..b376613 100644
--- a/delegate/test/ElementwiseBinaryTestHelper.hpp
+++ b/delegate/test/ElementwiseBinaryTestHelper.hpp
@@ -184,7 +184,6 @@
 void ElementwiseBinaryTest(tflite::BuiltinOperator binaryOperatorCode,
                            tflite::ActivationFunctionType activationType,
                            tflite::TensorType tensorType,
-                           std::vector<armnn::BackendId>& backends,
                            std::vector<int32_t>& input0Shape,
                            std::vector<int32_t>& input1Shape,
                            std::vector<int32_t>& outputShape,
@@ -193,7 +192,8 @@
                            std::vector<T>& expectedOutputValues,
                            float quantScale = 1.0f,
                            int quantOffset  = 0,
-                           bool constantInput = false)
+                           bool constantInput = false,
+                           const std::vector<armnn::BackendId>& backends = {})
 {
     using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateElementwiseBinaryTfLiteModel<T>(binaryOperatorCode,
@@ -217,7 +217,7 @@
     std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
     // Setup interpreter with Arm NN Delegate applied.
-    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
     CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
diff --git a/delegate/test/ElementwiseUnaryTest.cpp b/delegate/test/ElementwiseUnaryTest.cpp
index 14a6061..13d7039 100644
--- a/delegate/test/ElementwiseUnaryTest.cpp
+++ b/delegate/test/ElementwiseUnaryTest.cpp
@@ -18,140 +18,11 @@
 namespace armnnDelegate
 {
 
-TEST_SUITE("ElementwiseUnary_GpuAccTests")
+TEST_SUITE("ElementwiseUnary_Tests")
 {
 
-TEST_CASE ("Abs_Float32_GpuAcc_Test")
+TEST_CASE ("Abs_Float32_Test")
 {
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    // Set input data
-    std::vector<float> inputValues
-    {
-        -0.1f, -0.2f, -0.3f,
-        0.1f,  0.2f,  0.3f
-    };
-    // Calculate output data
-    std::vector<float> expectedOutputValues(inputValues.size());
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        expectedOutputValues[i] = std::abs(inputValues[i]);
-    }
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_ABS, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Exp_Float32_GpuAcc_Test")
-{
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    // Set input data
-    std::vector<float> inputValues
-    {
-        5.0f, 4.0f,
-        3.0f, 2.0f,
-        1.0f, 1.1f
-    };
-    // Set output data
-    std::vector<float> expectedOutputValues
-    {
-        148.413159102577f, 54.598150033144f,
-        20.085536923188f,  7.389056098931f,
-        2.718281828459f,  3.004166023946f
-    };
-
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_EXP, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Log_Float32_GpuAcc_Test")
-{
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    // Set input data
-    std::vector<float> inputValues
-    {
-        1.0f, 1.0f,  2.0f,
-        3.0f,  4.0f, 2.71828f
-    };
-    // Set output data
-    std::vector<float> expectedOutputValues
-    {
-        0.f,  0.f,  0.69314718056f,
-        1.09861228867f, 1.38629436112f, 0.99999932734f
-    };
-
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_LOG, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Neg_Float32_GpuAcc_Test")
-{
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    // Set input data
-    std::vector<float> inputValues
-    {
-        1.f, 0.f, 3.f,
-        25.f, 64.f, 100.f
-    };
-    // Set output data
-    std::vector<float> expectedOutputValues
-    {
-        -1.f, 0.f, -3.f,
-        -25.f, -64.f, -100.f
-    };
-
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_NEG, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Rsqrt_Float32_GpuAcc_Test")
-{
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    // Set input data
-    std::vector<float> inputValues
-    {
-        1.f, 4.f, 16.f,
-        25.f, 64.f, 100.f
-    };
-    // Set output data
-    std::vector<float> expectedOutputValues
-    {
-        1.f, 0.5f, 0.25f,
-        0.2f, 0.125f, 0.1f
-    };
-
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_RSQRT, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Sin_Float32_GpuAcc_Test")
-{
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    // Set input data
-    std::vector<float> inputValues
-    {
-            0.0f, 1.0f, 16.0f,
-            0.5f, 36.0f, -1.f
-    };
-    // Set output data
-    std::vector<float> expectedOutputValues
-    {
-            0.0f, 0.8414709848f, -0.28790331666f,
-            0.4794255386f, -0.99177885344f, -0.8414709848f
-    };
-
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_SIN, backends, inputValues, expectedOutputValues);
-}
-} // TEST_SUITE("ElementwiseUnary_GpuAccTests")
-
-
-
-TEST_SUITE("ElementwiseUnary_CpuAccTests")
-{
-
-TEST_CASE ("Abs_Float32_CpuAcc_Test")
-{
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
     // Set input data
     std::vector<float> inputValues
     {
@@ -165,138 +36,12 @@
         expectedOutputValues[i] = std::abs(inputValues[i]);
     }
 
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_ABS, backends, inputValues, expectedOutputValues);
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_ABS, inputValues, expectedOutputValues);
 }
 
-TEST_CASE ("Exp_Float32_CpuAcc_Test")
+TEST_CASE ("Ceil_Float32_Test")
 {
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    // Set input data
-    std::vector<float> inputValues
-    {
-        5.0f, 4.0f,
-        3.0f, 2.0f,
-        1.0f, 1.1f
-    };
-    // Set output data
-    std::vector<float> expectedOutputValues
-    {
-        148.413159102577f, 54.598150033144f,
-        20.085536923188f,  7.389056098931f,
-        2.718281828459f,  3.004166023946f
-    };
-
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_EXP, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Log_Float32_CpuAcc_Test")
-{
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    // Set input data
-    std::vector<float> inputValues
-    {
-        1.0f, 1.0f,  2.0f,
-        3.0f,  4.0f, 2.71828f
-    };
-    // Set output data
-    std::vector<float> expectedOutputValues
-    {
-        0.f,  0.f,  0.69314718056f,
-        1.09861228867f, 1.38629436112f, 0.99999932734f
-    };
-
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_LOG, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Neg_Float32_CpuAcc_Test")
-{
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    // Set input data
-    std::vector<float> inputValues
-    {
-        1.f, 0.f, 3.f,
-        25.f, 64.f, 100.f
-    };
-    // Set output data
-    std::vector<float> expectedOutputValues
-    {
-        -1.f, 0.f, -3.f,
-        -25.f, -64.f, -100.f
-    };
-
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_NEG, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Rsqrt_Float32_CpuAcc_Test")
-{
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    // Set input data
-    std::vector<float> inputValues
-    {
-        1.f, 4.f, 16.f,
-        25.f, 64.f, 100.f
-    };
-    // Set output data
-    std::vector<float> expectedOutputValues
-    {
-        1.f, 0.5f, 0.25f,
-        0.2f, 0.125f, 0.1f
-    };
-
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_RSQRT, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Sin_Float32_CpuAcc_Test")
-{
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    // Set input data
-    std::vector<float> inputValues
-    {
-        0.0f, 1.0f, 16.0f,
-        0.5f, 36.0f, -1.f
-    };
-    // Set output data
-    std::vector<float> expectedOutputValues
-    {
-        0.0f, 0.8414709848f, -0.28790331666f,
-        0.4794255386f, -0.99177885344f, -0.8414709848f
-    };
-
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_SIN, backends, inputValues, expectedOutputValues);
-}
-} // TEST_SUITE("ElementwiseUnary_CpuAccTests")
-
-TEST_SUITE("ElementwiseUnary_CpuRefTests")
-{
-
-TEST_CASE ("Abs_Float32_CpuRef_Test")
-{
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    // Set input data
-    std::vector<float> inputValues
-    {
-        -0.1f, -0.2f, -0.3f,
-        0.1f,  0.2f,  0.3f
-    };
-    // Calculate output data
-    std::vector<float> expectedOutputValues(inputValues.size());
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        expectedOutputValues[i] = std::abs(inputValues[i]);
-    }
-
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_ABS, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Ceil_Float32_CpuRef_Test")
-{
-    // Create the ArmNN Delegate
+    // Only works on CpuRef.
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     // Set input data
     std::vector<float> inputValues
@@ -311,13 +56,11 @@
         1.0f, 0.0f, -1.0f
     };
 
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_CEIL, backends, inputValues, expectedOutputValues);
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_CEIL, inputValues, expectedOutputValues, backends);
 }
 
-TEST_CASE ("Exp_Float32_CpuRef_Test")
+TEST_CASE ("Exp_Float32_Test")
 {
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     // Set input data
     std::vector<float> inputValues
     {
@@ -333,13 +76,11 @@
         2.718281828459f,  3.004166023946f
     };
 
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_EXP, backends, inputValues, expectedOutputValues);
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_EXP, inputValues, expectedOutputValues);
 }
 
-TEST_CASE ("Log_Float32_CpuRef_Test")
+TEST_CASE ("Log_Float32_Test")
 {
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     // Set input data
     std::vector<float> inputValues
     {
@@ -353,13 +94,11 @@
         1.09861228867f, 1.38629436112f, 0.99999932734f
     };
 
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_LOG, backends, inputValues, expectedOutputValues);
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_LOG, inputValues, expectedOutputValues);
 }
 
-TEST_CASE ("Neg_Float32_CpuRef_Test")
+TEST_CASE ("Neg_Float32_Test")
 {
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     // Set input data
     std::vector<float> inputValues
     {
@@ -373,13 +112,11 @@
         -25.f, -64.f, -100.f
     };
 
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_NEG, backends, inputValues, expectedOutputValues);
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_NEG, inputValues, expectedOutputValues);
 }
 
-TEST_CASE ("Rsqrt_Float32_CpuRef_Test")
+TEST_CASE ("Rsqrt_Float32_Test")
 {
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     // Set input data
     std::vector<float> inputValues
     {
@@ -393,11 +130,12 @@
         0.2f, 0.125f, 0.1f
     };
 
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_RSQRT, backends, inputValues, expectedOutputValues);
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_RSQRT, inputValues, expectedOutputValues);
 }
 
-TEST_CASE ("Sqrt_Float32_CpuRef_Test")
+TEST_CASE ("Sqrt_Float32_Test")
 {
+    // Only works on CpuRef.
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     // Set input data
     std::vector<float> inputValues
@@ -412,13 +150,11 @@
         expectedOutputValues[i] = std::sqrt(inputValues[i]);
     }
 
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_SQRT, backends, inputValues, expectedOutputValues);
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_SQRT, inputValues, expectedOutputValues, backends);
 }
 
-TEST_CASE ("Sin_Float32_CpuRef_Test")
+TEST_CASE ("Sin_Float32_Test")
 {
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     // Set input data
     std::vector<float> inputValues
     {
@@ -432,8 +168,8 @@
             0.4794255386f, -0.99177885344f, -0.8414709848f
     };
 
-    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_SIN, backends, inputValues, expectedOutputValues);
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_SIN, inputValues, expectedOutputValues);
 }
-} // TEST_SUITE("ElementwiseUnary_CpuRefTests")
+} // TEST_SUITE("ElementwiseUnary_Tests")
 
 } // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/ElementwiseUnaryTestHelper.hpp b/delegate/test/ElementwiseUnaryTestHelper.hpp
index 7ef11f4..c62b9cc 100644
--- a/delegate/test/ElementwiseUnaryTestHelper.hpp
+++ b/delegate/test/ElementwiseUnaryTestHelper.hpp
@@ -74,9 +74,9 @@
 }
 
 void ElementwiseUnaryFP32Test(tflite::BuiltinOperator unaryOperatorCode,
-                              std::vector<armnn::BackendId>& backends,
                               std::vector<float>& inputValues,
-                              std::vector<float>& expectedOutputValues)
+                              std::vector<float>& expectedOutputValues,
+                              const std::vector<armnn::BackendId>& backends = {})
 {
     using namespace delegateTestInterpreter;
     std::vector<int32_t> inputShape  { { 3, 1, 2} };
@@ -93,7 +93,7 @@
     std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
     // Setup interpreter with Arm NN Delegate applied.
-    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
     CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
     CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
@@ -108,10 +108,10 @@
 }
 
 void ElementwiseUnaryBoolTest(tflite::BuiltinOperator unaryOperatorCode,
-                              std::vector<armnn::BackendId>& backends,
                               std::vector<int32_t>& inputShape,
                               std::vector<bool>& inputValues,
-                              std::vector<bool>& expectedOutputValues)
+                              std::vector<bool>& expectedOutputValues,
+                              const std::vector<armnn::BackendId>& backends = {})
 {
     using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateElementwiseUnaryTfLiteModel(unaryOperatorCode,
@@ -127,7 +127,7 @@
     std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
     // Setup interpreter with Arm NN Delegate applied.
-    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
     CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
     CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
diff --git a/delegate/test/LogicalTest.cpp b/delegate/test/LogicalTest.cpp
index 8292727..a9133cd 100644
--- a/delegate/test/LogicalTest.cpp
+++ b/delegate/test/LogicalTest.cpp
@@ -15,7 +15,7 @@
 namespace armnnDelegate
 {
 
-void LogicalBinaryAndBoolTest(std::vector<armnn::BackendId>& backends)
+void LogicalBinaryAndBoolTest()
 {
     std::vector<int32_t> input0Shape { 1, 2, 2 };
     std::vector<int32_t> input1Shape { 1, 2, 2 };
@@ -28,7 +28,6 @@
 
     LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_AND,
                       ::tflite::TensorType_BOOL,
-                      backends,
                       input0Shape,
                       input1Shape,
                       expectedOutputShape,
@@ -37,7 +36,7 @@
                       expectedOutputValues);
 }
 
-void LogicalBinaryAndBroadcastTest(std::vector<armnn::BackendId>& backends)
+void LogicalBinaryAndBroadcastTest()
 {
     std::vector<int32_t> input0Shape { 1, 2, 2 };
     std::vector<int32_t> input1Shape { 1, 1, 1 };
@@ -49,7 +48,6 @@
 
     LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_AND,
                       ::tflite::TensorType_BOOL,
-                      backends,
                       input0Shape,
                       input1Shape,
                       expectedOutputShape,
@@ -58,7 +56,7 @@
                       expectedOutputValues);
 }
 
-void LogicalBinaryOrBoolTest(std::vector<armnn::BackendId>& backends)
+void LogicalBinaryOrBoolTest()
 {
     std::vector<int32_t> input0Shape { 1, 2, 2 };
     std::vector<int32_t> input1Shape { 1, 2, 2 };
@@ -70,7 +68,6 @@
 
     LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_OR,
                       ::tflite::TensorType_BOOL,
-                      backends,
                       input0Shape,
                       input1Shape,
                       expectedOutputShape,
@@ -79,7 +76,7 @@
                       expectedOutputValues);
 }
 
-void LogicalBinaryOrBroadcastTest(std::vector<armnn::BackendId>& backends)
+void LogicalBinaryOrBroadcastTest()
 {
     std::vector<int32_t> input0Shape { 1, 2, 2 };
     std::vector<int32_t> input1Shape { 1, 1, 1 };
@@ -91,7 +88,6 @@
 
     LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_OR,
                       ::tflite::TensorType_BOOL,
-                      backends,
                       input0Shape,
                       input1Shape,
                       expectedOutputShape,
@@ -101,7 +97,7 @@
 }
 
 // LogicalNot operator uses ElementwiseUnary unary layer and descriptor but is still classed as logical operator.
-void LogicalNotBoolTest(std::vector<armnn::BackendId>& backends)
+void LogicalNotBoolTest()
 {
     std::vector<int32_t> inputShape { 1, 2, 2 };
 
@@ -109,115 +105,37 @@
     std::vector<bool> expectedOutputValues { 1, 0, 1, 0 };
 
     ElementwiseUnaryBoolTest(tflite::BuiltinOperator_LOGICAL_NOT,
-                             backends,
                              inputShape,
                              inputValues,
                              expectedOutputValues);
 }
 
-TEST_SUITE("LogicalBinaryTests_GpuAccTests")
+TEST_SUITE("LogicalBinaryTests_Tests")
 {
 
-TEST_CASE ("LogicalBinary_AND_Bool_GpuAcc_Test")
+TEST_CASE ("LogicalBinary_AND_Bool_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    LogicalBinaryAndBoolTest(backends);
+    LogicalBinaryAndBoolTest();
 }
 
-TEST_CASE ("LogicalBinary_AND_Broadcast_GpuAcc_Test")
+TEST_CASE ("LogicalBinary_AND_Broadcast_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    LogicalBinaryAndBroadcastTest(backends);
+    LogicalBinaryAndBroadcastTest();
 }
 
-TEST_CASE ("Logical_NOT_Bool_GpuAcc_Test")
+TEST_CASE ("Logical_NOT_Bool_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    LogicalNotBoolTest(backends);
+    LogicalNotBoolTest();
 }
 
-TEST_CASE ("LogicalBinary_OR_Bool_GpuAcc_Test")
+TEST_CASE ("LogicalBinary_OR_Bool_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    LogicalBinaryOrBoolTest(backends);
+    LogicalBinaryOrBoolTest();
 }
 
-TEST_CASE ("LogicalBinary_OR_Broadcast_GpuAcc_Test")
+TEST_CASE ("LogicalBinary_OR_Broadcast_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
-    LogicalBinaryOrBroadcastTest(backends);
-}
-
-}
-
-
-TEST_SUITE("LogicalBinaryTests_CpuAccTests")
-{
-
-TEST_CASE ("LogicalBinary_AND_Bool_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    LogicalBinaryAndBoolTest(backends);
-}
-
-TEST_CASE ("LogicalBinary_AND_Broadcast_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    LogicalBinaryAndBroadcastTest(backends);
-}
-
-TEST_CASE ("Logical_NOT_Bool_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    LogicalNotBoolTest(backends);
-}
-
-TEST_CASE ("LogicalBinary_OR_Bool_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    LogicalBinaryOrBoolTest(backends);
-}
-
-TEST_CASE ("LogicalBinary_OR_Broadcast_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    LogicalBinaryOrBroadcastTest(backends);
-}
-
-}
-
-
-TEST_SUITE("LogicalBinaryTests_CpuRefTests")
-{
-
-TEST_CASE ("LogicalBinary_AND_Bool_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    LogicalBinaryAndBoolTest(backends);
-}
-
-TEST_CASE ("LogicalBinary_AND_Broadcast_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    LogicalBinaryAndBroadcastTest(backends);
-}
-
-TEST_CASE ("Logical_NOT_Bool_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    LogicalNotBoolTest(backends);
-}
-
-TEST_CASE ("LogicalBinary_OR_Bool_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    LogicalBinaryOrBoolTest(backends);
-}
-
-TEST_CASE ("LogicalBinary_OR_Broadcast_CpuRef_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    LogicalBinaryOrBroadcastTest(backends);
+    LogicalBinaryOrBroadcastTest();
 }
 
 }
diff --git a/delegate/test/LogicalTestHelper.hpp b/delegate/test/LogicalTestHelper.hpp
index 9732917..763bb49 100644
--- a/delegate/test/LogicalTestHelper.hpp
+++ b/delegate/test/LogicalTestHelper.hpp
@@ -126,7 +126,6 @@
 
 void LogicalBinaryTest(tflite::BuiltinOperator logicalOperatorCode,
                        tflite::TensorType tensorType,
-                       std::vector<armnn::BackendId>& backends,
                        std::vector<int32_t>& input0Shape,
                        std::vector<int32_t>& input1Shape,
                        std::vector<int32_t>& expectedOutputShape,
@@ -134,7 +133,8 @@
                        std::vector<bool>& input1Values,
                        std::vector<bool>& expectedOutputValues,
                        float quantScale = 1.0f,
-                       int quantOffset  = 0)
+                       int quantOffset  = 0,
+                       const std::vector<armnn::BackendId>& backends = {})
 {
     using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateLogicalBinaryTfLiteModel(logicalOperatorCode,
@@ -155,7 +155,7 @@
     std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
     // Setup interpreter with Arm NN Delegate applied.
-    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
     CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor(input0Values, 0) == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor(input1Values, 1) == kTfLiteOk);
diff --git a/delegate/test/TestUtils.hpp b/delegate/test/TestUtils.hpp
index ba81cd8..0932f22 100644
--- a/delegate/test/TestUtils.hpp
+++ b/delegate/test/TestUtils.hpp
@@ -10,10 +10,60 @@
 
 #include <doctest/doctest.h>
 
+#include <armnn/BackendId.hpp>
 #include <half/half.hpp>
 
 using Half = half_float::half;
 
+namespace
+{
+/**
+ * Based on the compilation options, capture subcases for the available backends. If "onlyTheseBackends" is NOT
+ * empty, any backend NOT listed in it is ignored.
+ *
+ * @param onlyTheseBackends limits the backends considered for subcasing. If empty, all are considered.
+ * @return vector of the backends that have been captured for subcasing.
+ */
+std::vector<armnn::BackendId> CaptureAvailableBackends(const std::vector<armnn::BackendId>& onlyTheseBackends)
+{
+    std::vector<armnn::BackendId> availableBackends;
+#if defined(ARMNNREF_ENABLED)
+    // Careful logic here. An empty onlyTheseBackends means this backend is always considered.
+    if (onlyTheseBackends.empty() || (std::find(onlyTheseBackends.begin(), onlyTheseBackends.end(),
+                                                armnn::Compute::CpuRef) != onlyTheseBackends.end()))
+    {
+        SUBCASE("CpuRef")
+        {
+            availableBackends.push_back({ armnn::Compute::CpuRef });
+        }
+    }
+#endif
+#if defined(ARMCOMPUTENEON_ENABLED)
+    // Careful logic here. An empty onlyTheseBackends means this backend is always considered.
+    if (onlyTheseBackends.empty() || (std::find(onlyTheseBackends.begin(), onlyTheseBackends.end(),
+                                                armnn::Compute::CpuAcc) != onlyTheseBackends.end()))
+    {
+        SUBCASE("CpuAcc")
+        {
+            availableBackends.push_back({ armnn::Compute::CpuAcc });
+        }
+    }
+#endif
+#if defined(ARMCOMPUTECL_ENABLED)
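+    // Careful logic here. An empty onlyTheseBackends means this backend is always considered.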
+    if (onlyTheseBackends.empty() || (std::find(onlyTheseBackends.begin(), onlyTheseBackends.end(),
+                                                armnn::Compute::GpuAcc) != onlyTheseBackends.end()))
+    {
+        SUBCASE("GpuAcc")
+        {
+            availableBackends.push_back({ armnn::Compute::GpuAcc });
+        }
+    }
+#endif
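+    // doctest's CAPTURE logs the selected backends if a later assertion fails.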
+    CAPTURE(availableBackends);
+    return availableBackends;
+}
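+
+// A minimal sketch of how the reworked tests use the helper above ("SomeOpTest"
+// is a hypothetical test function, shown for illustration only). Because the
+// SUBCASEs are opened inside CaptureAvailableBackends, doctest re-runs the
+// enclosing TEST_CASE once per compiled-in backend:
+//
+//     TEST_CASE("SomeOp_FP32_Test")
+//     {
+//         SomeOpTest();    // no list given: subcases cover every available backend
+//     }
+//
+//     TEST_CASE("SomeOp_CpuRef_Only_Test")
+//     {
+//         // Restrict subcasing to CpuRef, e.g. for ops only supported there.
+//         std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+//         SomeOpTest(backends);
+//     }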
+
+}    // namespace
 namespace armnnDelegate
 {
 
@@ -65,9 +115,9 @@
                        std::vector<T>& armnnDelegateOutputs,
                        std::vector<T>& expectedOutputValues)
 {
-    armnnDelegate::CompareData(expectedOutputValues.data(),  armnnDelegateOutputs.data(), expectedOutputValues.size());
+    armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputs.data(), expectedOutputValues.size());
     armnnDelegate::CompareData(tfLiteDelegateOutputs.data(), expectedOutputValues.data(), expectedOutputValues.size());
     armnnDelegate::CompareData(tfLiteDelegateOutputs.data(), armnnDelegateOutputs.data(), expectedOutputValues.size());
 }
 
-} // namespace armnnDelegate
+}    // namespace armnnDelegate
diff --git a/delegate/test/TransposeConvolution2dTest.cpp b/delegate/test/TransposeConvolution2dTest.cpp
index b526192..7c3728c 100644
--- a/delegate/test/TransposeConvolution2dTest.cpp
+++ b/delegate/test/TransposeConvolution2dTest.cpp
@@ -18,7 +18,7 @@
 namespace armnnDelegate
 {
 
-void TransposeConvInt8Test(std::vector<armnn::BackendId>& backends)
+void TransposeConvInt8Test()
 {
     // Set input data
     std::vector<int32_t> transposeTensorShape { 4 };
@@ -37,8 +37,7 @@
         };
 
     tflite::Padding padding = tflite::Padding_VALID;
-    TransposeConvTest<int8_t>(backends,
-                              ::tflite::TensorType_INT8,
+    TransposeConvTest<int8_t>(::tflite::TensorType_INT8,
                               1, // strideX
                               1, // strideY
                               padding,
@@ -52,7 +51,7 @@
                               expectedOutputValues);
 }
 
-void TransposeConvFp32Test(std::vector<armnn::BackendId>& backends)
+void TransposeConvFp32Test()
 {
     std::vector<int32_t> transposeTensorShape { 4 };
     std::vector<int32_t> filterShape { 1, 2, 2, 1 };
@@ -70,8 +69,7 @@
         };
 
     tflite::Padding padding = tflite::Padding_VALID;
-    TransposeConvTest<float>(backends,
-                             ::tflite::TensorType_FLOAT32,
+    TransposeConvTest<float>(::tflite::TensorType_FLOAT32,
                              1, // strideX
                              1, // strideY
                              padding,
@@ -85,55 +83,19 @@
                              expectedOutputValues);
 }
 
-TEST_SUITE("TransposeConv_CpuRef_Test")
+TEST_SUITE("TransposeConv_Test")
 {
 
-TEST_CASE ("TransposeConv_CpuRef_Fp32_Test")
+TEST_CASE ("TransposeConv_Fp32_Test")
 {
-    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    TransposeConvFp32Test(backends);
+    TransposeConvFp32Test();
 }
 
-TEST_CASE ("TransposeConv_CpuRef_Int8_Test")
+TEST_CASE ("TransposeConv_Int8_Test")
 {
-    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    TransposeConvInt8Test(backends);
+    TransposeConvInt8Test();
 }
 
-} // End of  TEST_SUITE(TransposeConv_CpuRef_Test)
-
-TEST_SUITE("TransposeConv_CpuAcc_Test")
-{
-
-TEST_CASE ("TransposeConv_CpuAcc_Fp32_Test")
-{
-    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    TransposeConvFp32Test(backends);
-}
-
-TEST_CASE ("TransposeConv_CpuAcc_Int8_Test")
-{
-    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    TransposeConvInt8Test(backends);
-}
-
-} // End of  TEST_SUITE(TransposeConv_CpuAcc_Test)
-
-TEST_SUITE("TransposeConv_GpuAcc_Test")
-{
-
-TEST_CASE ("TransposeConv_GpuAcc_Fp32_Test")
-{
-    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    TransposeConvFp32Test(backends);
-}
-
-TEST_CASE ("TransposeConv_GpuAcc_Int8_Test")
-{
-    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    TransposeConvInt8Test(backends);
-}
-
-} // End of  TEST_SUITE(TransposeConv_GpuAcc_Test)
+} // End of TEST_SUITE(TransposeConv_Tests)
 
 } // namespace armnnDelegate
\ No newline at end of file