IVGCVSW-3383 - Add TfLite Parser support for L2 Normalization layer

* Added ParseL2Normalization in TfLiteParser
* Added new unit tests in L2Normalization.cpp, which check outputs against the formula below
* Added documentation for L2 Normalization support to TensorFlowLiteSupport.md
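
The unit tests derive their expected outputs by normalizing along the
innermost (channel) dimension of the NHWC tensor, with a 1e-12 lower bound
on the sum of squares to guard against division by zero:

    output[i] = input[i] / sqrt(max(sum_k(input[k]^2), 1e-12))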

Signed-off-by: Matthew Jackson <matthew.jackson@arm.com>
Change-Id: I83ea75d1791ac8a00390aed3e5d0a7b337fcd46d
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 9bc4201..aa462fb 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -565,6 +565,7 @@
              src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
              src/armnnTfLiteParser/test/DetectionPostProcess.cpp
              src/armnnTfLiteParser/test/FullyConnected.cpp
+             src/armnnTfLiteParser/test/L2Normalization.cpp
              src/armnnTfLiteParser/test/Maximum.cpp
              src/armnnTfLiteParser/test/MaxPool2D.cpp
              src/armnnTfLiteParser/test/Mean.cpp
diff --git a/src/armnnTfLiteParser/TensorFlowLiteSupport.md b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
index 8a8b7be..7acbf28 100644
--- a/src/armnnTfLiteParser/TensorFlowLiteSupport.md
+++ b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
@@ -22,6 +22,8 @@
 
 * LOGISTIC
 
+* L2_NORMALIZATION
+
 * MAX_POOL_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE
 
 * MAXIMUM
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 21c1715..04fa6b1 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -440,6 +440,7 @@
     m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]            =  &TfLiteParser::ParseDetectionPostProcess;
     m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]   =  &TfLiteParser::ParseFullyConnected;
     m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]          =  &TfLiteParser::ParseLogistic;
+    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION]  =  &TfLiteParser::ParseL2Normalization;
     m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]       =  &TfLiteParser::ParseMaxPool2D;
     m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]           =  &TfLiteParser::ParseMaximum;
     m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]           =  &TfLiteParser::ParseMinimum;
@@ -917,6 +918,34 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
+void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
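+    // TfLite tensors are NHWC, so the L2 normalization runs along the channels (innermost) dimension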
+    L2NormalizationDescriptor desc;
+    desc.m_DataLayout = armnn::DataLayout::NHWC;
+    auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
+    IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
+
+    BOOST_ASSERT(layer != nullptr);
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
 void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
 {
     ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 437e459..90b800d 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -100,6 +100,7 @@
     void ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex);
     void ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex);
     void ParseLogistic(size_t subgraphIndex, size_t operatorIndex);
+    void ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex);
     void ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex);
     void ParseMaximum(size_t subgraphIndex, size_t operatorIndex);
     void ParseMean(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/L2Normalization.cpp b/src/armnnTfLiteParser/test/L2Normalization.cpp
new file mode 100644
index 0000000..0dd5eef
--- /dev/null
+++ b/src/armnnTfLiteParser/test/L2Normalization.cpp
@@ -0,0 +1,132 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+#include <numeric>
+#include <cmath>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct L2NormalizationFixture : public ParserFlatbuffersFixture
+{
+    explicit L2NormalizationFixture(const std::string & inputOutputShape)
+    {
+        m_JsonString = R"(
+            {
+                "version": 3,
+                "operator_codes": [ { "builtin_code": "L2_NORMALIZATION" } ],
+                "subgraphs": [ {
+                    "tensors": [
+                        {
+                            "shape": )" + inputOutputShape + R"(,
+                            "type": "FLOAT32",
+                            "buffer": 0,
+                            "name": "inputTensor",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        },
+                        {
+                            "shape": )" + inputOutputShape + R"(,
+                            "type": "FLOAT32",
+                            "buffer": 1,
+                            "name": "outputTensor",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        }
+                    ],
+                    "inputs": [ 0 ],
+                    "outputs": [ 1 ],
+                    "operators": [
+                        {
+                            "opcode_index": 0,
+                            "inputs": [ 0 ],
+                            "outputs": [ 1 ],
+                            "custom_options_format": "FLEXBUFFERS"
+                        }
+                    ],
+                } ],
+                "buffers" : [
+                    { }
+                ]
+            }
+        )";
+        Setup();
+    }
+};
+
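+// Computes the expected L2 norm denominator: the square root of the sum of squares, clamped below by 1e-12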
+float CalcL2Norm(std::initializer_list<float> elements)
+{
+    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
+        [](float acc, float element) { return acc + element * element; });
+    const float eps = 1e-12f;
+    const float max = reduction < eps ? eps : reduction;
+    return sqrtf(max);
+}
+
+struct L2NormalizationFixture4D : L2NormalizationFixture
+{
+    // TfLite uses NHWC shape
+    L2NormalizationFixture4D() : L2NormalizationFixture("[ 1, 1, 4, 3 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseL2Normalization4D, L2NormalizationFixture4D)
+{
+    RunTest<4, armnn::DataType::Float32>(
+        0,
+        {{"inputTensor", { 1.0f,  2.0f,  3.0f,
+                           4.0f,  5.0f,  6.0f,
+                           7.0f,  8.0f,  9.0f,
+                           10.0f, 11.0f, 12.0f }}},
+
+        {{"outputTensor", { 1.0f  / CalcL2Norm({ 1.0f,  2.0f,  3.0f }),
+                            2.0f  / CalcL2Norm({ 1.0f,  2.0f,  3.0f }),
+                            3.0f  / CalcL2Norm({ 1.0f,  2.0f,  3.0f }),
+
+                            4.0f  / CalcL2Norm({ 4.0f,  5.0f,  6.0f }),
+                            5.0f  / CalcL2Norm({ 4.0f,  5.0f,  6.0f }),
+                            6.0f  / CalcL2Norm({ 4.0f,  5.0f,  6.0f }),
+
+                            7.0f  / CalcL2Norm({ 7.0f,  8.0f,  9.0f }),
+                            8.0f  / CalcL2Norm({ 7.0f,  8.0f,  9.0f }),
+                            9.0f  / CalcL2Norm({ 7.0f,  8.0f,  9.0f }),
+
+                            10.0f / CalcL2Norm({ 10.0f, 11.0f, 12.0f }),
+                            11.0f / CalcL2Norm({ 10.0f, 11.0f, 12.0f }),
+                            12.0f / CalcL2Norm({ 10.0f, 11.0f, 12.0f }) }}});
+}
+
+struct L2NormalizationSimpleFixture4D : L2NormalizationFixture
+{
+    L2NormalizationSimpleFixture4D() : L2NormalizationFixture("[ 1, 1, 1, 4 ]") {}
+};
+
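+// Input values small enough that the sum of squares falls below 1e-12, exercising the epsilon clamp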
+BOOST_FIXTURE_TEST_CASE(ParseL2NormalizationEps4D, L2NormalizationSimpleFixture4D)
+{
+    RunTest<4, armnn::DataType::Float32>(
+        0,
+        {{"inputTensor", { 0.00000001f, 0.00000002f, 0.00000003f, 0.00000004f }}},
+
+        {{"outputTensor", { 0.00000001f / CalcL2Norm({ 0.00000001f, 0.00000002f, 0.00000003f, 0.00000004f }),
+                            0.00000002f / CalcL2Norm({ 0.00000001f, 0.00000002f, 0.00000003f, 0.00000004f }),
+                            0.00000003f / CalcL2Norm({ 0.00000001f, 0.00000002f, 0.00000003f, 0.00000004f }),
+                            0.00000004f / CalcL2Norm({ 0.00000001f, 0.00000002f, 0.00000003f, 0.00000004f }) }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()