IVGCVSW-1978: Support NHWC for ResizeBilinear CpuRef

* Adds implementation to plumb DataLayout parameter
  for ResizeBilinear on CpuRef.
* Adds unit tests to execute ResizeBilinear on CpuRef
  using the NHWC data layout.
* Adds DataLayoutIndexed API, allowing easy access to
  the Channels, Height and Width of a tensor based on
  its data layout. This reduces code duplication.
* Refactors original ResizeBilinear implementation and
  tests to use the DataLayoutIndexed API when required.

Change-Id: Ic2b8916cdd2e370d070175547079d774daf6d7bf
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index c2efa21..c051005 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -313,9 +313,9 @@
     , m_DataLayout(DataLayout::NCHW)
     {}
 
-    uint32_t   m_TargetWidth;
-    uint32_t   m_TargetHeight;
-    DataLayout m_DataLayout;
+    uint32_t          m_TargetWidth;
+    uint32_t          m_TargetHeight;
+    DataLayoutIndexed m_DataLayout;
 };
 
 struct ReshapeDescriptor
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index 4afc50b..bb0b1e6 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -31,12 +31,60 @@
     Signed32  = 3
 };
 
+// Begin: DataLayout
+
 enum class DataLayout
 {
     NCHW = 1,
     NHWC = 2
 };
 
+// Provides access to the Channels, Height and Width indexes of a tensor
+// shape for a given DataLayout, so callers do not have to hard-code
+// dimension positions.
+class DataLayoutIndexed
+{
+public:
+    // Non-explicit on purpose: a plain DataLayout converts transparently.
+    DataLayoutIndexed(DataLayout dataLayout) : m_DataLayout(dataLayout)
+    {
+        switch (dataLayout)
+        {
+            case DataLayout::NHWC:
+                m_ChannelsIndex = 3;
+                m_HeightIndex   = 1;
+                m_WidthIndex    = 2;
+                break;
+            case DataLayout::NCHW:
+            default:
+                // Fall back to NCHW instead of throwing: this header cannot
+                // include Exceptions.hpp (which itself includes Types.hpp)
+                // or <string> for std::to_string, and DataLayout has no
+                // other values.
+                m_ChannelsIndex = 1;
+                m_HeightIndex   = 2;
+                m_WidthIndex    = 3;
+                break;
+        }
+    }
+
+    DataLayout   GetDataLayout()    const { return m_DataLayout; }
+    unsigned int GetChannelsIndex() const { return m_ChannelsIndex; }
+    unsigned int GetHeightIndex()   const { return m_HeightIndex; }
+    unsigned int GetWidthIndex()    const { return m_WidthIndex; }
+
+private:
+    DataLayout   m_DataLayout;
+    unsigned int m_ChannelsIndex;
+    unsigned int m_HeightIndex;
+    unsigned int m_WidthIndex;
+};
+
+// Equality operators - implementations in src/armnn/InternalTypes.cpp
+bool operator==(const DataLayout& dataLayout, const DataLayoutIndexed& indexed);
+bool operator==(const DataLayoutIndexed& indexed, const DataLayout& dataLayout);
+
+// End: DataLayout
+
 enum class ActivationFunction
 {
     Sigmoid     = 0,
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index fce1e95..67c596e 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -48,4 +48,16 @@
     }
 }
 
+// Declared in include/armnn/Types.hpp
+bool operator==(const DataLayout& dataLayout, const DataLayoutIndexed& indexed)
+{
+    return dataLayout == indexed.GetDataLayout();
+}
+
+// Declared in include/armnn/Types.hpp
+bool operator==(const DataLayoutIndexed& indexed, const DataLayout& dataLayout)
+{
+    return indexed.GetDataLayout() == dataLayout;
+}
+
 }
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index b9735f4..a33189e 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -820,31 +820,26 @@
 template <typename ResizeBilinearWorkload, armnn::DataType DataType>
 std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                          armnn::Graph& graph,
-                                                                         DataLayout dataLayout = DataLayout::NCHW)
+                                                                         DataLayoutIndexed dataLayout =
+                                                                             DataLayout::NCHW)
 {
     TensorShape inputShape;
     TensorShape outputShape;
-    unsigned int heightIndex;
-    unsigned int widthIndex;
 
-    switch (dataLayout) {
+    switch (dataLayout.GetDataLayout()) {
         case DataLayout::NHWC:
-            inputShape = { 2, 4, 4, 3 };
+            inputShape =  { 2, 4, 4, 3 };
             outputShape = { 2, 2, 2, 3 };
-            heightIndex = 1;
-            widthIndex = 2;
             break;
         default: // NCHW
-            inputShape = { 2, 3, 4, 4 };
+            inputShape =  { 2, 3, 4, 4 };
             outputShape = { 2, 3, 2, 2 };
-            heightIndex = 2;
-            widthIndex = 3;
     }
 
     // Creates the layer we're testing.
     ResizeBilinearDescriptor resizeDesc;
-    resizeDesc.m_TargetWidth = outputShape[widthIndex];
-    resizeDesc.m_TargetHeight = outputShape[heightIndex];
+    resizeDesc.m_TargetWidth = outputShape[dataLayout.GetWidthIndex()];
+    resizeDesc.m_TargetHeight = outputShape[dataLayout.GetHeightIndex()];
     resizeDesc.m_DataLayout = dataLayout;
     Layer* const layer = graph.AddLayer<ResizeBilinearLayer>(resizeDesc, "layer");
 
@@ -865,7 +860,7 @@
     ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
     BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
     BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
-    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout.GetDataLayout() == dataLayout));
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
diff --git a/src/backends/WorkloadData.cpp b/src/backends/WorkloadData.cpp
index a6a87f3..d562b73 100644
--- a/src/backends/WorkloadData.cpp
+++ b/src/backends/WorkloadData.cpp
@@ -663,11 +663,11 @@
     }
 
     {
-        // DataLayout is NCHW by default (channelsIndex = 1)
-        const unsigned int channelsIndex = this->m_Parameters.m_DataLayout == armnn::DataLayout::NHWC ? 3 : 1;
-
-        const unsigned int inputChannelCount = workloadInfo.m_InputTensorInfos[0].GetShape()[channelsIndex];
-        const unsigned int outputChannelCount = workloadInfo.m_OutputTensorInfos[0].GetShape()[channelsIndex];
+        // Resize must preserve the channel count; look channels up at the
+        // index dictated by the descriptor's data layout.
+        const unsigned int channelsIndex      = this->m_Parameters.m_DataLayout.GetChannelsIndex();
+        const unsigned int inputChannelCount  = workloadInfo.m_InputTensorInfos[0].GetShape()[channelsIndex];
+        const unsigned int outputChannelCount = workloadInfo.m_OutputTensorInfos[0].GetShape()[channelsIndex];
         if (inputChannelCount != outputChannelCount)
         {
             throw InvalidArgumentException(
diff --git a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
index ced3a06..4ee6d5e 100644
--- a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
@@ -26,7 +26,7 @@
     arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index e88fbed..e8d536f 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -420,27 +420,53 @@
 }
 
 template <typename ResizeBilinearWorkloadType, armnn::DataType DataType>
-static void RefCreateResizeBilinearTest()
+static void RefCreateResizeBilinearTest(DataLayout dataLayout)
 {
     Graph graph;
     RefWorkloadFactory factory;
-    auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph);
+    auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);
+
+    // Shapes must match those used by CreateResizeBilinearWorkloadTest for the given layout.
+    TensorShape inputShape;
+    TensorShape outputShape;
+
+    switch (dataLayout)
+    {
+        case DataLayout::NHWC:
+            inputShape  = { 2, 4, 4, 3 };
+            outputShape = { 2, 2, 2, 3 };
+            break;
+        default: // NCHW
+            inputShape  = { 2, 3, 4, 4 };
+            outputShape = { 2, 3, 2, 2 };
+    }
 
     // Checks that outputs and inputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
     CheckInputOutput(
-        std::move(workload),
-        TensorInfo({ 2, 3, 4, 4 }, DataType),
-        TensorInfo({ 2, 3, 2, 2 }, DataType));
+            std::move(workload),
+            TensorInfo(inputShape, DataType),
+            TensorInfo(outputShape, DataType));
 }
 
 BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
 {
-    RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>();
+    RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
 {
-    RefCreateResizeBilinearTest<RefResizeBilinearUint8Workload, armnn::DataType::QuantisedAsymm8>();
+    RefCreateResizeBilinearTest<RefResizeBilinearUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
+{
+    RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
+}
+
+// Keep Uint8 coverage in step with Float32: NHWC must work for both data types.
+BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8Nhwc)
+{
+    RefCreateResizeBilinearTest<RefResizeBilinearUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
 }
 
 BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32)
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index de2c2fe..48bffa9 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -178,7 +178,7 @@
 ARMNN_AUTO_TEST_CASE(BatchNorm, BatchNormTest)
 ARMNN_AUTO_TEST_CASE(BatchNormUint8, BatchNormUint8Test)
 
-// Resize Bilinear
+// Resize Bilinear - NCHW
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest)
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8, SimpleResizeBilinearUint8Test)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest)
@@ -190,6 +190,13 @@
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8, ResizeBilinearMagUint8Test)
 
+// Resize Bilinear - NHWC
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopNhwcTest)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearNhwcTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinNhwcTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinNhwcTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagNhwcTest)
+
 // Fake Quantization
 ARMNN_AUTO_TEST_CASE(FakeQuantization, FakeQuantizationTest)
 
diff --git a/src/backends/reference/workloads/RefResizeBilinearFloat32Workload.cpp b/src/backends/reference/workloads/RefResizeBilinearFloat32Workload.cpp
index 50ee7a2..8d86bdc 100644
--- a/src/backends/reference/workloads/RefResizeBilinearFloat32Workload.cpp
+++ b/src/backends/reference/workloads/RefResizeBilinearFloat32Workload.cpp
@@ -23,7 +23,8 @@
     ResizeBilinear(GetInputTensorDataFloat(0, m_Data),
         inputInfo,
         GetOutputTensorDataFloat(0, m_Data),
-        outputInfo);
+        outputInfo,
+        m_Data.m_Parameters.m_DataLayout);
 }
 
 } //namespace armnn
diff --git a/src/backends/reference/workloads/ResizeBilinear.cpp b/src/backends/reference/workloads/ResizeBilinear.cpp
index 0bce3c7..e098c6c 100644
--- a/src/backends/reference/workloads/ResizeBilinear.cpp
+++ b/src/backends/reference/workloads/ResizeBilinear.cpp
@@ -25,27 +25,31 @@
 
 }
 
-void ResizeBilinear(const float* in, const TensorInfo& inputInfo, float* out, const TensorInfo& outputInfo)
+void ResizeBilinear(const float*      in,
+                    const TensorInfo& inputInfo,
+                    float*            out,
+                    const TensorInfo& outputInfo,
+                    DataLayoutIndexed dataLayout)
 {
     // We follow the definition of TensorFlow and AndroidNN: the top-left corner of a texel in the output
     // image is projected into the input image to figure out the interpolants and weights. Note that this
     // will yield different results than if projecting the centre of output texels.
 
     const unsigned int batchSize = inputInfo.GetShape()[0];
-    const unsigned int channelCount = inputInfo.GetShape()[1];
+    const unsigned int channelCount = inputInfo.GetShape()[dataLayout.GetChannelsIndex()];
 
-    const unsigned int inputHeight = inputInfo.GetShape()[2];
-    const unsigned int inputWidth = inputInfo.GetShape()[3];
-    const unsigned int outputHeight = outputInfo.GetShape()[2];
-    const unsigned int outputWidth = outputInfo.GetShape()[3];
+    const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];
+    const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
+    const unsigned int outputHeight = outputInfo.GetShape()[dataLayout.GetHeightIndex()];
+    const unsigned int outputWidth = outputInfo.GetShape()[dataLayout.GetWidthIndex()];
 
     // How much to scale pixel coordinates in the output image, to get the corresponding pixel coordinates
     // in the input image.
     const float scaleY = boost::numeric_cast<float>(inputHeight) / boost::numeric_cast<float>(outputHeight);
     const float scaleX = boost::numeric_cast<float>(inputWidth) / boost::numeric_cast<float>(outputWidth);
 
-    TensorBufferArrayView<const float> input(inputInfo.GetShape(), in);
-    TensorBufferArrayView<float> output(outputInfo.GetShape(), out);
+    TensorBufferArrayView<const float> input(inputInfo.GetShape(), in, dataLayout);
+    TensorBufferArrayView<float> output(outputInfo.GetShape(), out, dataLayout);
 
     for (unsigned int n = 0; n < batchSize; ++n)
     {
diff --git a/src/backends/reference/workloads/ResizeBilinear.hpp b/src/backends/reference/workloads/ResizeBilinear.hpp
index 847b8e8..92b229d 100644
--- a/src/backends/reference/workloads/ResizeBilinear.hpp
+++ b/src/backends/reference/workloads/ResizeBilinear.hpp
@@ -10,6 +10,10 @@
 namespace armnn
 {
 
-void ResizeBilinear(const float* in, const TensorInfo& inputInfo, float* out, const TensorInfo& outputInfo);
+void ResizeBilinear(const float*      in,
+                    const TensorInfo& inputInfo,
+                    float*            out,
+                    const TensorInfo& outputInfo,
+                    DataLayoutIndexed dataLayout = DataLayout::NCHW);
 
 } //namespace armnn
diff --git a/src/backends/reference/workloads/TensorBufferArrayView.hpp b/src/backends/reference/workloads/TensorBufferArrayView.hpp
index e19810c..aba44e4 100644
--- a/src/backends/reference/workloads/TensorBufferArrayView.hpp
+++ b/src/backends/reference/workloads/TensorBufferArrayView.hpp
@@ -15,28 +15,42 @@
 class TensorBufferArrayView
 {
 public:
-    TensorBufferArrayView(const TensorShape& shape, DataType* data)
+    // Views a flat buffer as a 4D tensor addressed by logical
+    // (batch, channel, height, width) coordinates; the supplied data
+    // layout decides where each coordinate lives in memory.
+    TensorBufferArrayView(const TensorShape& shape, DataType* data, DataLayoutIndexed dataLayout = DataLayout::NCHW)
         : m_Shape(shape)
         , m_Data(data)
+        , m_DataLayout(dataLayout)
     {
     }
 
     DataType& Get(unsigned int b, unsigned int c, unsigned int h, unsigned int w) const
    {
-        BOOST_ASSERT( b < m_Shape[0] || (m_Shape[0] == 0 && b == 0) );
-        BOOST_ASSERT( c < m_Shape[1] || (m_Shape[1] == 0 && c == 0) );
-        BOOST_ASSERT( h < m_Shape[2] || (m_Shape[2] == 0 && h == 0) );
-        BOOST_ASSERT( w < m_Shape[3] || (m_Shape[3] == 0 && w == 0) );
+        BOOST_ASSERT( b < m_Shape[0] || ( m_Shape[0] == 0 && b == 0 ) );
+        BOOST_ASSERT( c < m_Shape[m_DataLayout.GetChannelsIndex()] ||
+            ( m_Shape[m_DataLayout.GetChannelsIndex()] == 0 && c == 0) );
+        BOOST_ASSERT( h < m_Shape[m_DataLayout.GetHeightIndex()] ||
+            ( m_Shape[m_DataLayout.GetHeightIndex()] == 0 && h == 0) );
+        BOOST_ASSERT( w < m_Shape[m_DataLayout.GetWidthIndex()] ||
+            ( m_Shape[m_DataLayout.GetWidthIndex()] == 0 && w == 0) );
 
-        return m_Data[b * m_Shape[1] * m_Shape[2] * m_Shape[3]
-                    + c * m_Shape[2] * m_Shape[3]
-                    + h * m_Shape[3]
-                    + w];
+        // The flat offset must follow the memory order of the layout:
+        //   NCHW ([N,C,H,W]): ((b * C + c) * H + h) * W + w
+        //   NHWC ([N,H,W,C]): ((b * H + h) * W + w) * C + c
+        // i.e. ((b * d1 + i1) * d2 + i2) * d3 + i3 with (i1, i2, i3) being
+        // (c, h, w) permuted into the layout's dimension order.
+        if (m_DataLayout.GetDataLayout() == DataLayout::NHWC)
+        {
+            return m_Data[((b * m_Shape[1] + h) * m_Shape[2] + w) * m_Shape[3] + c];
+        }
+        return m_Data[((b * m_Shape[1] + c) * m_Shape[2] + h) * m_Shape[3] + w];
     }
 
 private:
     const TensorShape m_Shape;
-    DataType* m_Data;
+    DataType*         m_Data;
+    DataLayoutIndexed m_DataLayout;
 };
 
 } //namespace armnn