IVGCVSW-2017: CL L2Normalization workload to use CLL2NormalizeLayer

* Changed ClL2NormalizationFloatWorkload from using CLNormalizationLayer
  to using CLL2NormalizeLayer, which normalises along the channel axis
  in either NCHW or NHWC format.

Change-Id: I399cbee408a277d1ef8c6c85ebcbd86d6c3e407b
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index f8dd18a..2781786 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -282,7 +282,7 @@
         : m_DataLayout(DataLayout::NCHW)
     {}
 
-    DataLayout m_DataLayout;
+    DataLayoutIndexed m_DataLayout;
 };
 
 struct BatchNormalizationDescriptor
diff --git a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
index f827137..74e40ec 100644
--- a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
@@ -18,13 +18,14 @@
                                                       const TensorInfo& output,
                                                       const L2NormalizationDescriptor& descriptor)
 {
-    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
-    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclInput  = BuildArmComputeTensorInfo(input,
+                                                                        descriptor.m_DataLayout.GetDataLayout());
+    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output,
+                                                                        descriptor.m_DataLayout.GetDataLayout());
 
-    arm_compute::NormalizationLayerInfo normalizationInfo =
-            CreateAclNormalizationLayerInfoForL2Normalization(input, descriptor.m_DataLayout);
+    unsigned int axis = (descriptor.m_DataLayout.GetDataLayout() == DataLayout::NCHW) ? 2 : 0;
 
-    return arm_compute::CLNormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
+    return arm_compute::CLL2NormalizeLayer::validate(&aclInput, &aclOutput, axis);
 }
 
 ClL2NormalizationFloatWorkload::ClL2NormalizationFloatWorkload(const L2NormalizationQueueDescriptor& descriptor,
@@ -36,13 +37,13 @@
     arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
-    m_Layer.configure(&input, &output,
-                      CreateAclNormalizationLayerInfoForL2Normalization(info.m_InputTensorInfos[0],
-                                                                        m_Data.m_Parameters.m_DataLayout));
+    unsigned int axis = (m_Data.m_Parameters.m_DataLayout.GetDataLayout() == DataLayout::NCHW) ? 2 : 0;
+
+    m_Layer.configure(&input, &output, axis);
 }
 
 void ClL2NormalizationFloatWorkload::Execute() const
diff --git a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.hpp b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.hpp
index a16a8f6..53bbfc8 100644
--- a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.hpp
+++ b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.hpp
@@ -25,7 +25,7 @@
 
 private:
     // Purposely not a CLL2Normalize function. See constructor.
-    mutable arm_compute::CLNormalizationLayer m_Layer;
+    mutable arm_compute::CLL2NormalizeLayer m_Layer;
 };
 
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
index 754155d..ca3b36e 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
@@ -14,11 +14,12 @@
                                                         const TensorInfo& output,
                                                         const L2NormalizationDescriptor& descriptor)
 {
-    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
-    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout.GetDataLayout());
+    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(
+                                                  output, descriptor.m_DataLayout.GetDataLayout());
 
     arm_compute::NormalizationLayerInfo normalizationInfo =
-            CreateAclNormalizationLayerInfoForL2Normalization(input, descriptor.m_DataLayout);
+            CreateAclNormalizationLayerInfoForL2Normalization(input, descriptor.m_DataLayout.GetDataLayout());
 
     return arm_compute::NENormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
 }
@@ -33,13 +34,14 @@
     arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
-    m_Layer.configure(&input, &output,
-                      CreateAclNormalizationLayerInfoForL2Normalization(info.m_InputTensorInfos[0],
-                                                                        m_Data.m_Parameters.m_DataLayout));
+    m_Layer.configure(&input,
+                      &output,
+                      CreateAclNormalizationLayerInfoForL2Normalization(
+                          info.m_InputTensorInfos[0], m_Data.m_Parameters.m_DataLayout.GetDataLayout()));
 }
 
 void NeonL2NormalizationFloatWorkload::Execute() const