IVGCVSW-1651 Add support for Relu to the TfLite parser
 * Added Relu and Relu6 support to the TfLite parser.
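
A note on the RELU6 mapping: ArmNN has no dedicated Relu6 activation
function, so the parser expresses RELU6 as BoundedReLu with m_A = 6 and
m_B = 0, i.e. min(6, max(0, x)). Below is a minimal sketch of that
mapping, assuming ArmNN's public INetwork API; the helper AddRelu6Layer
is illustrative only and not part of this patch:

    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>

    // BoundedReLu computes min(m_A, max(m_B, x)); A = 6, B = 0 gives RELU6
    armnn::IConnectableLayer* AddRelu6Layer(armnn::INetwork& network)
    {
        armnn::ActivationDescriptor desc;
        desc.m_Function = armnn::ActivationFunction::BoundedReLu;
        desc.m_A = 6.0f; // upper bound
        desc.m_B = 0.0f; // lower bound
        return network.AddActivationLayer(desc, "Relu6");
    }

    int main()
    {
        armnn::INetworkPtr network = armnn::INetwork::Create();
        AddRelu6Layer(*network);
    }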

Change-Id: I3cc5e4922910e556f25b633eae6d2d361cea61b5
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0fc3f1c..e4ed9b4 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -727,6 +727,7 @@
              src/armnnTfLiteParser/test/GetTensorIds.cpp
              src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp
              src/armnnTfLiteParser/test/GetInputsOutputs.cpp
+             src/armnnTfLiteParser/test/Activations.cpp
              )
     endif()
 
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 2c70b48..dd1f577 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -455,6 +455,8 @@
     m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] =  &TfLiteParser::ParseDepthwiseConv2D;
     m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]           =  &TfLiteParser::ParseSoftmax;
     m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]           =  &TfLiteParser::ParseSqueeze;
+    m_ParserFunctions[tflite::BuiltinOperator_RELU]              =  &TfLiteParser::ParseRelu;
+    m_ParserFunctions[tflite::BuiltinOperator_RELU6]             =  &TfLiteParser::ParseRelu6;
 }
 
 void TfLiteParser::ResetParser()
@@ -692,7 +694,7 @@
     // we need to add the activation layer and fortunately we don't need to care about the data layout
     // because the activation function is element-wise, so it is OK to have the activation after the trailing
     // swizzle layer
-    layer = AddActivationLayer(permuteLayers.second, 0, options->fused_activation_function);
+    layer = AddFusedActivationLayer(permuteLayers.second, 0, options->fused_activation_function);
     // register the output connection slots for the layer, connections are made after all layers have been created
     auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
@@ -770,7 +772,7 @@
     // we need to add the activation layer and fortunately we don't need to care about the data layout
     // because the activation function is element-wise, so it is OK to have the activation after the trailing
     // swizzle layer
-    layer = AddActivationLayer(permuteLayers.second, 0, options->fused_activation_function);
+    layer = AddFusedActivationLayer(permuteLayers.second, 0, options->fused_activation_function);
     // register the output connection slots for the layer, connections are made after all layers have been created
     auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
@@ -845,7 +847,7 @@
     // we need to add the activation layer and fortunately we don't need to care about the data layout
     // because the activation function is element-wise, so it is OK to have the activation after the trailing
     // swizzle layer
-    layer = AddActivationLayer(permuteLayers.second, 0, options->fused_activation_function);
+    layer = AddFusedActivationLayer(permuteLayers.second, 0, options->fused_activation_function);
     // register the output connection slots for the layer, connections are made after all layers have been created
     auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
@@ -965,9 +967,75 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-armnn::IConnectableLayer* TfLiteParser::AddActivationLayer(armnn::IConnectableLayer* prevLayer,
-                                                           unsigned int outputSlot,
-                                                           tflite::ActivationFunctionType activationType)
+void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+    boost::ignore_unused(operatorPtr);
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto layerName = str(boost::format("Activation:RELU:%1%:%2%") % subgraphIndex % operatorIndex);
+    ActivationDescriptor activationDesc;
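+    // ReLu has no extra parameters to set; it computes max(0, x) element-wise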
+    activationDesc.m_Function = ActivationFunction::ReLu;
+    IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
+
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    // register the input connection slots for the layer, connections are made after all layers have been created
+    // only the tensors for the inputs are relevant, exclude the const tensors
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+    // register the output connection slots for the layer, connections are made after all layers have been created
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
+void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+    boost::ignore_unused(operatorPtr);
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto layerName = str(boost::format("Activation:RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
+    ActivationDescriptor activationDesc;
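+    // BoundedReLu clamps its input to [m_B, m_A]; with A = 6 and B = 0 this
+    // matches TfLite's RELU6, i.e. min(6, max(0, x))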
+    activationDesc.m_Function = ActivationFunction::BoundedReLu;
+    activationDesc.m_A = 6.0f;
+    activationDesc.m_B = 0.0f;
+    IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
+
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    // register the input connection slots for the layer, connections are made after all layers have been created
+    // only the tensors for the inputs are relevant, exclude the const tensors
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+    // register the output connection slots for the layer, connections are made after all layers have been created
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
+armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
+                                                                unsigned int outputSlot,
+                                                                tflite::ActivationFunctionType activationType)
 {
     ActivationDescriptor activationDesc;
     std::string layerName = prevLayer->GetName();
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 964be85..b9f81e4 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -93,6 +93,8 @@
     void ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex);
     void ParseSoftmax(size_t subgraphIndex, size_t operatorIndex);
     void ParseSqueeze(size_t subgraphIndex, size_t operatorIndex);
+    void ParseRelu(size_t subgraphIndex, size_t operatorIndex);
+    void ParseRelu6(size_t subgraphIndex, size_t operatorIndex);
 
     void RegisterProducerOfTensor(size_t subgraphIndex, size_t tensorIndex, armnn::IOutputSlot* slot);
     void RegisterConsumerOfTensor(size_t subgraphIndex, size_t tensorIndex, armnn::IInputSlot* slot);
@@ -111,9 +113,9 @@
     void ResetParser();
 
     /// Attach a fused activation layer to the layer passed as a parameter
-    armnn::IConnectableLayer* AddActivationLayer(armnn::IConnectableLayer* layer,
-                                                 unsigned int outputSlot,
-                                                 tflite::ActivationFunctionType activationType);
+    armnn::IConnectableLayer* AddFusedActivationLayer(armnn::IConnectableLayer* layer,
+                                                      unsigned int outputSlot,
+                                                      tflite::ActivationFunctionType activationType);
 
     // SupportedDataStorage's purpose is to hold data till we pass over to the network.
     // We don't care about the content, and we want a single datatype to simplify the code.
diff --git a/src/armnnTfLiteParser/test/Activations.cpp b/src/armnnTfLiteParser/test/Activations.cpp
new file mode 100644
index 0000000..a30d464
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Activations.cpp
@@ -0,0 +1,87 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct ActivationFixture : ParserFlatbuffersFixture
+{
+    explicit ActivationFixture(const std::string& activationFunction, const std::string& dataType)
+    {
+        m_JsonString = R"(
+            {
+                "version": 3,
+                "operator_codes": [ { "builtin_code": )" + activationFunction + R"( } ],
+                "subgraphs": [ {
+                    "tensors": [
+                        {
+                            "shape": [ 1, 7 ],
+                            "type": )" + dataType + R"(,
+                            "buffer": 0,
+                            "name": "inputTensor",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ]
+                            }
+                        },
+                        {
+                            "shape": [ 1, 7 ],
+                            "type": )" + dataType + R"(,
+                            "buffer": 1,
+                            "name": "outputTensor",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ]
+                            }
+                        }
+                    ],
+                    "inputs": [ 0 ],
+                    "outputs": [ 1 ],
+                    "operators": [
+                        {
+                            "opcode_index": 0,
+                            "inputs": [ 0 ],
+                            "outputs": [ 1 ],
+                            "custom_options_format": "FLEXBUFFERS"
+                        }
+                    ]
+                } ],
+                "buffers": [ {}, {} ]
+            }
+        )";
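+        // parse the model above and record the single input/output tensor
+        // names that RunTest uses to feed data and check results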
+        SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+    }
+};
+
+struct ReLuFixture : ActivationFixture
+{
+    ReLuFixture() : ActivationFixture("RELU", "FLOAT32") {}
+};
+BOOST_FIXTURE_TEST_CASE(ParseReLu, ReLuFixture)
+{
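+    // negative values clamp to 0; non-negative values pass through unchanged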
+    RunTest<2, float>(0, { -1.0f, -0.5f, 1.25f, -3.0f, 0.0f, 0.5f, -0.75f },
+                      { 0.0f, 0.0f, 1.25f, 0.0f, 0.0f, 0.5f, 0.0f });
+}
+
+struct ReLu6Fixture : ActivationFixture
+{
+    ReLu6Fixture() : ActivationFixture("RELU6", "FLOAT32") {}
+};
+BOOST_FIXTURE_TEST_CASE(ParseReLu6, ReLu6Fixture)
+{
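+    // negatives clamp to 0 and values above the bound clamp to 6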
+    RunTest<2, float>(0, { -1.0f, -0.5f, 7.25f, -3.0f, 0.0f, 0.5f, -0.75f },
+                      { 0.0f, 0.0f, 6.0f, 0.0f, 0.0f, 0.5f, 0.0f });
+}
+
+BOOST_AUTO_TEST_SUITE_END()