IVGCVSW-1967 Fix L2Norm NHWC unit test

 * Apply the proper tensor shape in the tests depending on the
   data layout used
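
For reference only (not part of this change): the NHWC shapes used in
the hunks below are the NCHW shapes with the channel dimension moved
from position 1 to the last position. A minimal standalone C++ sketch
of that reordering, assuming a plain std::array stands in for
armnn::TensorShape:

    #include <array>
    #include <cstdio>

    // Reorder an NCHW shape { N, C, H, W } into the NHWC shape { N, H, W, C }.
    std::array<unsigned int, 4> NchwToNhwc(const std::array<unsigned int, 4>& nchw)
    {
        return { nchw[0], nchw[2], nchw[3], nchw[1] };
    }

    int main()
    {
        const std::array<unsigned int, 4> nchw = { 5, 20, 50, 67 }; // { N, C, H, W }
        const auto nhwc = NchwToNhwc(nchw);                         // { 5, 50, 67, 20 }
        std::printf("{ %u, %u, %u, %u }\n", nhwc[0], nhwc[1], nhwc[2], nhwc[3]);
    }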

Change-Id: I9c6c1e077236e84cecc1e10d7b2d0bd901df3ebd
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index fed9dd6..b9735f4 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -885,9 +885,14 @@
     Layer* const input = graph.AddLayer<InputLayer>(0, "input");
     Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
 
+    TensorShape inputShape = (dataLayout == DataLayout::NCHW) ?
+                TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
+    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
+                TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
+
     // Connects up.
-    armnn::TensorInfo inputTensorInfo({ 5, 20, 50, 67 }, DataType);
-    armnn::TensorInfo outputTensorInfo({ 5, 20, 50, 67 }, DataType);
+    armnn::TensorInfo inputTensorInfo(inputShape, DataType);
+    armnn::TensorInfo outputTensorInfo(outputShape, DataType);
     Connect(input, layer, inputTensorInfo);
     Connect(layer, output, outputTensorInfo);
     CreateTensorHandles(graph, factory);
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 659ba82..6ec89aa 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -578,17 +578,23 @@
 {
     Graph graph;
     ClWorkloadFactory factory;
-
-    auto workload = CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>
-        (factory, graph, dataLayout);
+    auto workload =
+            CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
 
     // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
     L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
-    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 5, 20, 50, 67 }));
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 5, 20, 50, 67 }));
+    std::initializer_list<unsigned int> inputShape  = (dataLayout == DataLayout::NCHW)
+            ? std::initializer_list<unsigned int>({ 5, 20, 50, 67 })
+            : std::initializer_list<unsigned int>({ 5, 50, 67, 20 });
+    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW)
+            ? std::initializer_list<unsigned int>({ 5, 20, 50, 67 })
+            : std::initializer_list<unsigned int>({ 5, 50, 67, 20 });
+
+    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
+    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
 }
 
 BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNchwWorkload)
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index d1a5b2a..05281cd 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -493,17 +493,23 @@
 template <typename L2NormalizationWorkloadType, typename armnn::DataType DataType>
 static void NeonCreateL2NormalizationWorkloadTest(DataLayout dataLayout)
 {
-    Graph                graph;
-    NeonWorkloadFactory  factory;
-    auto                 workload = CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType,
-                                    DataType>(factory, graph, dataLayout);
+    Graph graph;
+    NeonWorkloadFactory factory;
+    auto workload =
+            CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
 
     // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
     L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 5, 20, 50, 67 }, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 5, 20, 50, 67 }, DataType)));
+
+    TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ?
+                TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
+    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
+                TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
+
+    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC