IVGCVSW-5803 Delegate Unit Tests Failure on Android: Normalization & Softmax

Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: I2873f8563cc11da550d460b04e5175372489a564
diff --git a/delegate/src/test/NeonDelegateTests_NDK_Issue.cpp b/delegate/src/test/NeonDelegateTests_NDK_Issue.cpp
new file mode 100644
index 0000000..a437a08
--- /dev/null
+++ b/delegate/src/test/NeonDelegateTests_NDK_Issue.cpp
@@ -0,0 +1,63 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NormalizationTestHelper.hpp"
+#include "SoftmaxTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+// There's a known Android NDK bug which causes this subset of Neon Tests to
+// fail. We'll exclude these tests if we're doing
+// a debug build and NDK is less than r21.
+// The exclusion takes place in test/CMakeLists.txt
+// https://github.com/android/ndk/issues/1135
+
+TEST_SUITE ("Softmax_CpuAccTests")
+{
+
+TEST_CASE ("Softmax_Standard_Beta_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    std::vector<float> expectedOutput = {0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
+                                         0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
+}
+
+TEST_CASE ("Softmax_Different_Beta_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    std::vector<float> expectedOutput = {
+        0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092,
+        0.352414012, 0.224709094, 0.193408906, 0.123322964, 0.106145054};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
+}
+
+TEST_CASE ("Log_Softmax_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    std::vector<float> expectedOutput =
+        {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
+         -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
+    SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
+}
+} // TEST_SUITE ("Softmax_CpuAccTests")
+
+TEST_SUITE("L2Normalization_CpuAccTests")
+{
+
+TEST_CASE ("L2NormalizationFp32Test_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    L2NormalizationTest(backends);
+}
+} // TEST_SUITE("L2Normalization_CpuAccTests")
+}
\ No newline at end of file
diff --git a/delegate/src/test/NormalizationTest.cpp b/delegate/src/test/NormalizationTest.cpp
index 058394e..e33dcf0 100644
--- a/delegate/src/test/NormalizationTest.cpp
+++ b/delegate/src/test/NormalizationTest.cpp
@@ -8,95 +8,12 @@
 #include <armnn_delegate.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
 
 #include <doctest/doctest.h>
 
 namespace armnnDelegate
 {
 
-void L2NormalizationTest(std::vector<armnn::BackendId>& backends)
-{
-    // Set input data
-    std::vector<int32_t> inputShape  { 1, 1, 1, 10 };
-    std::vector<int32_t> outputShape { 1, 1, 1, 10 };
-
-    std::vector<float> inputValues
-    {
-        1.0f,
-        2.0f,
-        3.0f,
-        4.0f,
-        5.0f,
-        6.0f,
-        7.0f,
-        8.0f,
-        9.0f,
-        10.0f
-    };
-
-    const float approxInvL2Norm = 0.050964719f;
-    std::vector<float> expectedOutputValues
-    {
-        1.0f  * approxInvL2Norm,
-        2.0f  * approxInvL2Norm,
-        3.0f  * approxInvL2Norm,
-        4.0f  * approxInvL2Norm,
-        5.0f  * approxInvL2Norm,
-        6.0f  * approxInvL2Norm,
-        7.0f  * approxInvL2Norm,
-        8.0f  * approxInvL2Norm,
-        9.0f  * approxInvL2Norm,
-        10.0f * approxInvL2Norm
-    };
-
-    NormalizationTest<float>(tflite::BuiltinOperator_L2_NORMALIZATION,
-                             ::tflite::TensorType_FLOAT32,
-                             backends,
-                             inputShape,
-                             outputShape,
-                             inputValues,
-                             expectedOutputValues);
-}
-
-void LocalResponseNormalizationTest(std::vector<armnn::BackendId>& backends,
-                                    int32_t radius,
-                                    float bias,
-                                    float alpha,
-                                    float beta)
-{
-    // Set input data
-    std::vector<int32_t> inputShape  { 2, 2, 2, 1 };
-    std::vector<int32_t> outputShape { 2, 2, 2, 1 };
-
-    std::vector<float> inputValues
-    {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f
-    };
-
-    std::vector<float> expectedOutputValues
-    {
-        0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
-        0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f
-    };
-
-    NormalizationTest<float>(tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
-                             ::tflite::TensorType_FLOAT32,
-                             backends,
-                             inputShape,
-                             outputShape,
-                             inputValues,
-                             expectedOutputValues,
-                             radius,
-                             bias,
-                             alpha,
-                             beta);
-}
-
-
 TEST_SUITE("L2Normalization_CpuRefTests")
 {
 
@@ -108,17 +25,6 @@
 
 } // TEST_SUITE("L2Normalization_CpuRefTests")
 
-TEST_SUITE("L2Normalization_CpuAccTests")
-{
-
-TEST_CASE ("L2NormalizationFp32Test_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    L2NormalizationTest(backends);
-}
-
-} // TEST_SUITE("L2NormalizationFp32Test_CpuAcc_Test")
-
 TEST_SUITE("L2Normalization_GpuAccTests")
 {
 
diff --git a/delegate/src/test/NormalizationTestHelper.hpp b/delegate/src/test/NormalizationTestHelper.hpp
index bc969c2..ebdfdc1 100644
--- a/delegate/src/test/NormalizationTestHelper.hpp
+++ b/delegate/src/test/NormalizationTestHelper.hpp
@@ -178,4 +178,85 @@
     armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
 }
 
+void L2NormalizationTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape  { 1, 1, 1, 10 };
+    std::vector<int32_t> outputShape { 1, 1, 1, 10 };
+
+    std::vector<float> inputValues
+    {
+        1.0f,
+        2.0f,
+        3.0f,
+        4.0f,
+        5.0f,
+        6.0f,
+        7.0f,
+        8.0f,
+        9.0f,
+        10.0f
+    };
+
+    const float approxInvL2Norm = 0.050964719f;
+    std::vector<float> expectedOutputValues
+    {
+        1.0f  * approxInvL2Norm,
+        2.0f  * approxInvL2Norm,
+        3.0f  * approxInvL2Norm,
+        4.0f  * approxInvL2Norm,
+        5.0f  * approxInvL2Norm,
+        6.0f  * approxInvL2Norm,
+        7.0f  * approxInvL2Norm,
+        8.0f  * approxInvL2Norm,
+        9.0f  * approxInvL2Norm,
+        10.0f * approxInvL2Norm
+    };
+
+    NormalizationTest<float>(tflite::BuiltinOperator_L2_NORMALIZATION,
+                             ::tflite::TensorType_FLOAT32,
+                             backends,
+                             inputShape,
+                             outputShape,
+                             inputValues,
+                             expectedOutputValues);
+}
+
+void LocalResponseNormalizationTest(std::vector<armnn::BackendId>& backends,
+                                    int32_t radius,
+                                    float bias,
+                                    float alpha,
+                                    float beta)
+{
+    // Set input data
+    std::vector<int32_t> inputShape  { 2, 2, 2, 1 };
+    std::vector<int32_t> outputShape { 2, 2, 2, 1 };
+
+    std::vector<float> inputValues
+    {
+        1.0f, 2.0f,
+        3.0f, 4.0f,
+        5.0f, 6.0f,
+        7.0f, 8.0f
+    };
+
+    std::vector<float> expectedOutputValues
+    {
+        0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
+        0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f
+    };
+
+    NormalizationTest<float>(tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
+                             ::tflite::TensorType_FLOAT32,
+                             backends,
+                             inputShape,
+                             outputShape,
+                             inputValues,
+                             expectedOutputValues,
+                             radius,
+                             bias,
+                             alpha,
+                             beta);
+}
+
 } // anonymous namespace
\ No newline at end of file
diff --git a/delegate/src/test/SoftmaxTest.cpp b/delegate/src/test/SoftmaxTest.cpp
index 3aacfe0..3339c09 100644
--- a/delegate/src/test/SoftmaxTest.cpp
+++ b/delegate/src/test/SoftmaxTest.cpp
@@ -14,28 +14,6 @@
 
 namespace armnnDelegate
 {
-
-/// Convenience function to run softmax and log-softmax test cases
-/// \param operatorCode tflite::BuiltinOperator_SOFTMAX or tflite::BuiltinOperator_LOG_SOFTMAX
-/// \param backends armnn backends to target
-/// \param beta multiplicative parameter to the softmax function
-/// \param expectedOutput to be checked against transformed input
-void SoftmaxTestCase(tflite::BuiltinOperator operatorCode,
-                     std::vector<armnn::BackendId> backends, float beta, std::vector<float> expectedOutput) {
-    std::vector<float> input = {
-        1.0, 2.5, 3.0, 4.5, 5.0,
-        -1.0, -2.5, -3.0, -4.5, -5.0};
-    std::vector<int32_t> shape = {2, 5};
-
-    SoftmaxTest(operatorCode,
-                tflite::TensorType_FLOAT32,
-                backends,
-                shape,
-                input,
-                expectedOutput,
-                beta);
-}
-
 TEST_SUITE ("Softmax_GpuAccTests")
 {
 
@@ -66,36 +44,6 @@
 }
 } // TEST_SUITE ("Softmax_GpuAccTests")
 
-TEST_SUITE ("Softmax_CpuAccTests")
-{
-
-TEST_CASE ("Softmax_Standard_Beta_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    std::vector<float> expectedOutput = {0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
-                                         0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
-    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
-}
-
-TEST_CASE ("Softmax_Different_Beta_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    std::vector<float> expectedOutput = {
-        0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092,
-        0.352414012, 0.224709094, 0.193408906, 0.123322964, 0.106145054};
-    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
-}
-
-TEST_CASE ("Log_Softmax_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    std::vector<float> expectedOutput =
-        {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
-         -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
-    SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
-}
-} // TEST_SUITE ("Softmax_CpuAccTests")
-
 TEST_SUITE ("Softmax_CpuRefTests")
 {
 
diff --git a/delegate/src/test/SoftmaxTestHelper.hpp b/delegate/src/test/SoftmaxTestHelper.hpp
index b3086bb..bd32c21 100644
--- a/delegate/src/test/SoftmaxTestHelper.hpp
+++ b/delegate/src/test/SoftmaxTestHelper.hpp
@@ -167,4 +167,26 @@
     }
 }
 
+
+/// Convenience function to run softmax and log-softmax test cases
+/// \param operatorCode tflite::BuiltinOperator_SOFTMAX or tflite::BuiltinOperator_LOG_SOFTMAX
+/// \param backends armnn backends to target
+/// \param beta multiplicative parameter to the softmax function
+/// \param expectedOutput to be checked against transformed input
+void SoftmaxTestCase(tflite::BuiltinOperator operatorCode,
+                     std::vector<armnn::BackendId> backends, float beta, std::vector<float> expectedOutput) {
+    std::vector<float> input = {
+        1.0, 2.5, 3.0, 4.5, 5.0,
+        -1.0, -2.5, -3.0, -4.5, -5.0};
+    std::vector<int32_t> shape = {2, 5};
+
+    SoftmaxTest(operatorCode,
+                tflite::TensorType_FLOAT32,
+                backends,
+                shape,
+                input,
+                expectedOutput,
+                beta);
+}
+
 } // anonymous namespace