IVGCVSW-7675 Rework more DelegateUnitTests so backends are subcases.

The intent of this change is to remove the per backend test cases in
the delegate unit tests. They will be replaced by using DocTest
SUBCASES. The subcases are parameterized by the available backends.
The list of available backends is determined by the compilation flags.

Signed-off-by: Colm Donelan <colm.donelan@arm.com>
Change-Id: I6dd0369491c4582b8e2467b911dfd085dddcf576
diff --git a/delegate/test/SoftmaxTestHelper.hpp b/delegate/test/SoftmaxTestHelper.hpp
index 609882d..f8525d1 100644
--- a/delegate/test/SoftmaxTestHelper.hpp
+++ b/delegate/test/SoftmaxTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -9,14 +9,9 @@
 
 #include <armnn_delegate.hpp>
 #include <DelegateTestInterpreter.hpp>
-#include <armnnUtils/FloatingPointComparison.hpp>
 
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
 #include <tensorflow/lite/version.h>
 
-#include <doctest/doctest.h>
-
 namespace
 {
 std::vector<char> CreateSoftmaxTfLiteModel(tflite::BuiltinOperator softmaxOperatorCode,
@@ -102,10 +97,10 @@
 
 void SoftmaxTest(tflite::BuiltinOperator softmaxOperatorCode,
                  tflite::TensorType tensorType,
-                 std::vector<armnn::BackendId>& backends,
                  std::vector<int32_t>& shape,
                  std::vector<float>& inputValues,
                  std::vector<float>& expectedOutputValues,
+                 const std::vector<armnn::BackendId>& backends = {},
                  float beta = 0)
 {
     using namespace delegateTestInterpreter;
@@ -123,7 +118,7 @@
     std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
     // Setup interpreter with Arm NN Delegate applied.
-    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
     CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
     CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
     CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
@@ -143,8 +138,9 @@
 /// \param backends armnn backends to target
 /// \param beta multiplicative parameter to the softmax function
 /// \param expectedOutput to be checked against transformed input
-void SoftmaxTestCase(tflite::BuiltinOperator operatorCode,
-                     std::vector<armnn::BackendId> backends, float beta, std::vector<float> expectedOutput) {
+void SoftmaxTestCase(tflite::BuiltinOperator operatorCode, float beta,
+                     std::vector<float> expectedOutput, const std::vector<armnn::BackendId> backends = {})
+{
     std::vector<float> input = {
         1.0, 2.5, 3.0, 4.5, 5.0,
         -1.0, -2.5, -3.0, -4.5, -5.0};
@@ -152,10 +148,10 @@
 
     SoftmaxTest(operatorCode,
                 tflite::TensorType_FLOAT32,
-                backends,
                 shape,
                 input,
                 expectedOutput,
+                backends,
                 beta);
 }