IVGCVSW-7451 LEAKY_RELU not supported by delegate

  * Added LEAKY_RELU support to classic and opaque delegate
  * CMake files updated
  * Test added

Signed-off-by: Tianle Cheng <tianle.cheng@arm.com>
Change-Id: Ib9a2ce8f637b14afcd796bbae11fd3fa03653a2c
diff --git a/delegate/classic/src/Activation.hpp b/delegate/classic/src/Activation.hpp
index e813956..442ce4f 100644
--- a/delegate/classic/src/Activation.hpp
+++ b/delegate/classic/src/Activation.hpp
@@ -101,6 +101,14 @@
             activationDesc.m_Function = armnn::ActivationFunction::HardSwish;
             break;
         }
+        case kTfLiteBuiltinLeakyRelu:
+        {
+            // Get the alpha param from builtin data
+            auto* leakyReluParameters = reinterpret_cast<TfLiteLeakyReluParams*>(tfLiteNode->builtin_data);
+            activationDesc.m_Function = armnn::ActivationFunction::LeakyReLu;
+            activationDesc.m_A = leakyReluParameters->alpha;
+            break;
+        }
         default:
         {
             return kTfLiteError;
diff --git a/delegate/classic/src/armnn_delegate.cpp b/delegate/classic/src/armnn_delegate.cpp
index 45bea3d..2483835 100644
--- a/delegate/classic/src/armnn_delegate.cpp
+++ b/delegate/classic/src/armnn_delegate.cpp
@@ -741,10 +741,16 @@
                                                 kTfLiteBuiltinL2Normalization);
         case kTfLiteBuiltinL2Pool2d:
             return VisitPooling2dOperator(delegateData,
-                                        tfLiteContext,
-                                        tfLiteNode,
-                                        nodeIndex,
-                                        kTfLiteBuiltinL2Pool2d);
+                                          tfLiteContext,
+                                          tfLiteNode,
+                                          nodeIndex,
+                                          kTfLiteBuiltinL2Pool2d);
+        case kTfLiteBuiltinLeakyRelu:
+            return VisitActivationOperator(delegateData,
+                                           tfLiteContext,
+                                           tfLiteNode,
+                                           nodeIndex,
+                                           kTfLiteBuiltinLeakyRelu);
         case kTfLiteBuiltinLess:
             return VisitComparisonOperator(delegateData,
                                            tfLiteContext,
diff --git a/delegate/opaque/src/Activation.hpp b/delegate/opaque/src/Activation.hpp
index 9fce7a1..f566090 100644
--- a/delegate/opaque/src/Activation.hpp
+++ b/delegate/opaque/src/Activation.hpp
@@ -166,6 +166,15 @@
             activationDesc.m_Function = armnn::ActivationFunction::HardSwish;
             break;
         }
+        case kTfLiteBuiltinLeakyRelu:
+        {
+            // Get the alpha param from builtin data
+            auto* leakyReluParameters =
+                reinterpret_cast<TfLiteLeakyReluParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+            activationDesc.m_Function = armnn::ActivationFunction::LeakyReLu;
+            activationDesc.m_A = leakyReluParameters->alpha;
+            break;
+        }
         default:
         {
             return kTfLiteError;
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index 49fa30d..60da293 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -828,6 +828,12 @@
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinL2Pool2d);
+        case kTfLiteBuiltinLeakyRelu:
+            return VisitActivationOperator(delegateData,
+                                           tfLiteContext,
+                                           tfLiteNode,
+                                           nodeIndex,
+                                           kTfLiteBuiltinLeakyRelu);
         case kTfLiteBuiltinLess:
             return VisitComparisonOperator(delegateData,
                                            tfLiteContext,
diff --git a/delegate/test/ActivationTest.cpp b/delegate/test/ActivationTest.cpp
index 8f2f198..620c299 100644
--- a/delegate/test/ActivationTest.cpp
+++ b/delegate/test/ActivationTest.cpp
@@ -170,6 +170,32 @@
                    outputExpectedData);
 }
 
+void ActivationLeakyReLuTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<float> inputData = {
+            -0.1f, -0.2f, -0.3f, -0.4f,
+            0.1f,  0.2f,  0.3f,  0.4f,
+            -1.0f, -2.0f, -3.0f, -4.0f,
+            1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    float alpha = 0.3f;
+
+    // Calculate output values for input.
+    auto f = [alpha](float value)
+    {
+        return value > 0 ? value : value * alpha;
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    ActivationTest(tflite::BuiltinOperator_LEAKY_RELU,
+                   backends,
+                   inputData,
+                   outputExpectedData,
+                   alpha);
+}
+
 TEST_SUITE("Activation_CpuRefTests")
 {
 
@@ -209,6 +235,12 @@
     ActivationHardSwishTest(backends);
 }
 
+TEST_CASE ("Activation_LeakyRelu_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ActivationLeakyReLuTest(backends);
+}
+
 }
 
 TEST_SUITE("Activation_CpuAccTests")
@@ -250,6 +282,12 @@
     ActivationHardSwishTest(backends);
 }
 
+TEST_CASE ("Activation_LeakyRelu_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ActivationLeakyReLuTest(backends);
+}
+
 }
 
 TEST_SUITE("Activation_GpuAccTests")
@@ -291,6 +329,12 @@
     ActivationHardSwishTest(backends);
 }
 
+TEST_CASE ("Activation_LeakyRelu_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ActivationLeakyReLuTest(backends);
+}
+
 }
 
 } // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/ActivationTestHelper.hpp b/delegate/test/ActivationTestHelper.hpp
index e1901b7..b0a4d67 100644
--- a/delegate/test/ActivationTestHelper.hpp
+++ b/delegate/test/ActivationTestHelper.hpp
@@ -23,7 +23,8 @@
 
 std::vector<char> CreateActivationTfLiteModel(tflite::BuiltinOperator activationOperatorCode,
                                               tflite::TensorType tensorType,
-                                              const std::vector <int32_t>& tensorShape)
+                                              const std::vector <int32_t>& tensorShape,
+                                              float alpha = 0)
 {
     using namespace tflite;
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
@@ -42,11 +43,24 @@
     // create operator
     const std::vector<int> operatorInputs{0};
     const std::vector<int> operatorOutputs{1};
+
+    // builtin options
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
+    flatbuffers::Offset<void> operatorBuiltinOption = 0;
+
+    if (activationOperatorCode == tflite::BuiltinOperator_LEAKY_RELU)
+    {
+        operatorBuiltinOptionsType = tflite::BuiltinOptions_LeakyReluOptions;
+        operatorBuiltinOption = CreateLeakyReluOptions(flatBufferBuilder, alpha).Union();
+    }
+
     flatbuffers::Offset <Operator> unaryOperator =
         CreateOperator(flatBufferBuilder,
                        0,
                        flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
-                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOption);
 
     const std::vector<int> subgraphInputs{0};
     const std::vector<int> subgraphOutputs{1};
@@ -78,13 +92,15 @@
 void ActivationTest(tflite::BuiltinOperator activationOperatorCode,
                     std::vector<armnn::BackendId>& backends,
                     std::vector<float>& inputValues,
-                    std::vector<float>& expectedOutputValues)
+                    std::vector<float>& expectedOutputValues,
+                    float alpha = 0)
 {
     using namespace delegateTestInterpreter;
     std::vector<int32_t> inputShape  { { 4, 1, 4} };
     std::vector<char> modelBuffer = CreateActivationTfLiteModel(activationOperatorCode,
                                                                 ::tflite::TensorType_FLOAT32,
-                                                                inputShape);
+                                                                inputShape,
+                                                                alpha);
 
     // Setup interpreter with just TFLite Runtime.
     auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);