IVGCVSW-3649 Add TfLite parser support for Prelu layer

 * Register BuiltinOperator_PRELU and add TfLiteParserImpl::ParsePrelu
 * Handle both constant and non-constant alpha inputs
 * Add unit tests covering runtime alpha, constant alpha and dynamic output tensors

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I3dedcc86efe1a67c709d9da636953e2fc400107b
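
For reference, PReLU computes y = x for x >= 0 and y = alpha * x for x < 0, with the alpha
tensor broadcast against the input shape. A minimal illustrative sketch of the per-element
behaviour (the helper name below is hypothetical and not part of this patch):

    // y = x for non-negative x, otherwise y = alpha * x.
    inline float PreluRef(float x, float alpha)
    {
        return x >= 0.0f ? x : alpha * x;
    }

With alpha = 0.25f this gives PreluRef(-14.f, 0.25f) == -3.5f, matching the expected outputs
in the new unit tests.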
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 7c81a8f..d4a0a6e 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -638,6 +638,7 @@
     m_ParserFunctions[tflite::BuiltinOperator_NEG]                     = &TfLiteParserImpl::ParseNeg;
     m_ParserFunctions[tflite::BuiltinOperator_PACK]                    = &TfLiteParserImpl::ParsePack;
     m_ParserFunctions[tflite::BuiltinOperator_PAD]                     = &TfLiteParserImpl::ParsePad;
+    m_ParserFunctions[tflite::BuiltinOperator_PRELU]                   = &TfLiteParserImpl::ParsePrelu;
     m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE]                = &TfLiteParserImpl::ParseQuantize;
     m_ParserFunctions[tflite::BuiltinOperator_RELU]                    = &TfLiteParserImpl::ParseRelu;
     m_ParserFunctions[tflite::BuiltinOperator_RELU6]                   = &TfLiteParserImpl::ParseRelu6;
@@ -1939,6 +1940,61 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
+void TfLiteParserImpl::ParsePrelu(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 2);
+
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto layerName = fmt::format("Prelu:{}:{}", subgraphIndex, operatorIndex);
+
+    armnn::TensorInfo inputTensorInfo  = ToTensorInfo(inputs[0]);
+    armnn::TensorInfo alphaTensorInfo  = ToTensorInfo(inputs[1]);
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
+    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
+
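+    // Prelu takes no descriptor; the alpha values are supplied through the layer's second input slot.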
+    IConnectableLayer* layer = m_Network->AddPreluLayer(layerName.c_str());
+    ARMNN_ASSERT(layer != nullptr);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
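+    // When alpha is a constant tensor it is materialised as a ConstantLayer connected to input slot 1;
+    // otherwise both the data and alpha tensors are registered as regular operator inputs below.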
+    if (IsConstTensor(inputs[1]))
+    {
+        auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+
+        armnn::IInputSlot* slot = &(layer->GetInputSlot(0));
+        RegisterConsumerOfTensor(subgraphIndex, inputTensorIndexes[0], slot);
+
+        auto alphaTensorAndData = CreateConstTensorNonPermuted(inputs[1], alphaTensorInfo);
+        std::string constLayerName = fmt::format("Constant:{}", inputs[1]->name);
+        IConnectableLayer* constLayer =
+                    m_Network->AddConstantLayer(alphaTensorAndData, constLayerName.c_str());
+        ARMNN_ASSERT(constLayer != nullptr);
+
+        constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
+        constLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
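+        // The constant layer has no TfLite operator of its own, so register it under VIRTUAL_OPERATOR_ID.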
+        RegisterOutputSlots(subgraphIndex,
+                            VIRTUAL_OPERATOR_ID,
+                            constLayer,
+                            { inputTensorIndexes[1] });
+    }
+    else
+    {
+        auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+        RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIndexes);
+    }
+
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
+}
+
 void TfLiteParserImpl::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 0aee07d..da2ae12 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -132,6 +132,7 @@
     void ParsePack(size_t subgraphIndex, size_t operatorIndex);
     void ParsePad(size_t subgraphIndex, size_t operatorIndex);
     void ParsePool(size_t subgraphIndex, size_t operatorIndex, armnn::PoolingAlgorithm algorithm);
+    void ParsePrelu(size_t subgraphIndex, size_t operatorIndex);
     void ParseQuantize(size_t subgraphIndex, size_t operatorIndex);
     void ParseReduce(size_t subgraphIndex, size_t operatorIndex, armnn::ReduceOperation reduceOperation);
     void ParseReduceMax(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/Prelu.cpp b/src/armnnTfLiteParser/test/Prelu.cpp
new file mode 100644
index 0000000..b4aa8d7
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Prelu.cpp
@@ -0,0 +1,163 @@
+//
+// Copyright © 2021 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct PreluFixture : public ParserFlatbuffersFixture
+{
+    explicit PreluFixture(const std::string& inputShape,
+                          const std::string& alphaShape,
+                          const std::string& outputShape,
+                          const std::string& inputIndex,
+                          const std::string& alphaData)
+    {
+        m_JsonString = R"(
+            {
+              "version": 3,
+              "operator_codes": [
+                {
+                  "builtin_code": "PRELU",
+                  "version": 1
+                }
+              ],
+              "subgraphs": [
+                {
+                  "tensors": [
+                    {
+                      "shape": )" + inputShape + R"(,
+                      "type": "FLOAT32",
+                      "buffer": 1,
+                      "name": "input0",
+                      "quantization": {
+                        "details_type": "NONE",
+                        "quantized_dimension": 0
+                      },
+                      "is_variable": false
+                    },
+                    {
+                      "shape": )" + alphaShape + R"(,
+                      "type": "FLOAT32",
+                      "buffer": 2,
+                      "name": "input1",
+                      "quantization": {
+                        "details_type": "NONE",
+                        "quantized_dimension": 0
+                      },
+                      "is_variable": false
+                    },
+                    {
+                      "shape": )" + outputShape + R"(,
+                      "type": "FLOAT32",
+                      "buffer": 3,
+                      "name": "output",
+                      "quantization": {
+                        "details_type": "NONE",
+                        "quantized_dimension": 0
+                      },
+                      "is_variable": false
+                    }
+                  ],
+                  "inputs": )" + inputIndex + R"(,
+                  "outputs": [
+                    2
+                  ],
+                  "operators": [
+                    {
+                      "opcode_index": 0,
+                      "inputs": [
+                        0,
+                        1
+                      ],
+                      "outputs": [
+                        2
+                      ],
+                      "builtin_options_type": "NONE",
+                      "custom_options_format": "FLEXBUFFERS"
+                    }
+                  ],
+                  "name": "main"
+                }
+              ],
+              "description": "MLIR Converted.",
+              "buffers": [
+                {
+                },
+                {
+                },
+                { )" + alphaData + R"(
+                },
+                {
+                }
+              ]
+            }
+        )";
+        Setup();
+    }
+};
+
+struct SimplePreluFixture : PreluFixture
+{
+    SimplePreluFixture() : PreluFixture("[ 2, 3 ]",
+                                        "[ 1, 1 ]",
+                                        "[ 2, 3 ]",
+                                        "[ 0, 1 ]",
+                                        "") {}
+};
+
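+// The raw "data" bytes below are the little-endian IEEE-754 encoding of 0.25f
+// (0x3E800000 -> 0, 0, 128, 62), i.e. every alpha element is 0.25.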
+struct PreluConstAlphaFixture : PreluFixture
+{
+    PreluConstAlphaFixture() : PreluFixture(
+        "[ 2, 3 ]",
+        "[ 2, 3 ]",
+        "[ 2, 3 ]",
+        "[ 0 ]",
+        "\"data\": [ 0, 0, 128, 62, 0, 0, 128, 62, 0, 0, 128, 62, 0, 0, 128, 62, 0, 0, 128, 62, 0, 0, 128, 62 ]"){}
+};
+
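+// The empty output shape "[]" exercises the dynamic output tensor path, where the output shape is inferred.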
+struct PreluDynamicTensorFixture : PreluFixture
+{
+    PreluDynamicTensorFixture() : PreluFixture("[ 2, 3 ]",
+                                               "[ 1, 1 ]",
+                                               "[]",
+                                               "[ 0 ]",
+                                               "\"data\": [ 0, 0, 128, 62 ]") {}
+};
+
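+// All cases use an alpha of 0.25, so the expected outputs follow y = x >= 0 ? x : 0.25f * x.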
+BOOST_FIXTURE_TEST_CASE(SimplePrelu, SimplePreluFixture)
+{
+    RunTest<2, armnn::DataType::Float32>(
+        0,
+        {{"input0", { -14.f, 2.f, 0.f, 1.f, -5.f, 14.f }}, {"input1", { 0.25f }}},
+        {{"output", { -3.5f, 2.f, 0.f, 1.f, -1.25f, 14.f }}});
+}
+
+BOOST_FIXTURE_TEST_CASE(PreluConstAlpha, PreluConstAlphaFixture)
+{
+    RunTest<2, armnn::DataType::Float32>(
+        0,
+        {{"input0", { -14.f, 2.f, 0.f, 1.f, -5.f, 14.f }}},
+        {{"output", { -3.5f, 2.f, 0.f, 1.f, -1.25f, 14.f }}});
+}
+
+BOOST_FIXTURE_TEST_CASE(PreluDynamicTensor, PreluDynamicTensorFixture)
+{
+    RunTest<2, armnn::DataType::Float32, armnn::DataType::Float32>(
+        0,
+        {{"input0", { -14.f, 2.f, 0.f, 1.f, -5.f, 14.f }}},
+        {{"output", { -3.5f, 2.f, 0.f, 1.f, -1.25f, 14.f }}},
+        true);
+}
+
+BOOST_AUTO_TEST_SUITE_END()