IVGCVSW-3892 Add EndToEnd Layer test for INSTANCE_NORMALIZATION

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ia646446d52a7b597c3021f1e235465a96ce2beed
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 754a3a0..39b9e35 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -31,6 +31,7 @@
 
 COMMON_TEST_SOURCES := \
     test/CommonTestUtils.cpp \
+    test/InstanceNormalizationEndToEndTestImpl.cpp \
     test/JsonPrinterTestImpl.cpp \
     test/QuantizedLstmEndToEndTestImpl.cpp \
     test/SpaceToDepthEndToEndTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index d353a77..c3ce02a 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -19,6 +19,8 @@
     DynamicBackendTests.hpp
     EndToEndTestImpl.hpp
     GatherEndToEndTestImpl.hpp
+    InstanceNormalizationEndToEndTestImpl.cpp
+    InstanceNormalizationEndToEndTestImpl.hpp
     IsLayerSupportedTestImpl.hpp
     JsonPrinterTestImpl.cpp
     JsonPrinterTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
new file mode 100644
index 0000000..0ba2a74
--- /dev/null
+++ b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
@@ -0,0 +1,380 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "InstanceNormalizationEndToEndTestImpl.hpp"
+
+#include "DataLayoutIndexed.hpp"
+#include "EndToEndTestImpl.hpp"
+#include "ResolveType.hpp"
+
+#include <Permute.hpp>
+
+#include <armnn/INetwork.hpp>
+
+#include <backendsCommon/test/DataLayoutUtils.hpp>
+
+#include <test/TestUtils.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+namespace
+{
+
+template<typename armnn::DataType DataType>
+armnn::INetworkPtr CreateInstanceNormalizationNetwork(const armnn::TensorShape& inputShape,
+                                                      const armnn::TensorShape& outputShape,
+                                                      const armnn::DataLayout dataLayout,
+                                                      const float gamma,
+                                                      const float beta,
+                                                      const float eps,
+                                                      const float qScale = 1.0f,
+                                                      const int32_t qOffset = 0)
+{
+    using namespace armnn;
+
+    // Builds up the structure of the network.
+    INetworkPtr net(INetwork::Create());
+
+    TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
+
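+    // Gamma scales and Beta offsets the normalized value, while Eps is added to
+    // the variance for numerical stability. Normalization is applied independently
+    // per batch and per channel over the spatial (H x W) dimensions.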
+    InstanceNormalizationDescriptor instanceNormalizationDesc;
+    instanceNormalizationDesc.m_Gamma = gamma;
+    instanceNormalizationDesc.m_Beta  = beta;
+    instanceNormalizationDesc.m_Eps   = eps;
+    instanceNormalizationDesc.m_DataLayout = dataLayout;
+
+    IConnectableLayer* instanceNormalization = net->AddInstanceNormalizationLayer(instanceNormalizationDesc,
+                                                                                  "InstanceNormalization");
+    IConnectableLayer* input = net->AddInputLayer(0, "input");
+    Connect(input, instanceNormalization, inputTensorInfo, 0, 0);
+
+    TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
+    IConnectableLayer* output = net->AddOutputLayer(0, "output");
+    Connect(instanceNormalization, output, outputTensorInfo, 0, 0);
+
+    return net;
+}
+
+void InstanceNormalizationEndToEnd(const std::vector<armnn::BackendId>& backends,
+                                   const armnn::DataLayout& dataLayout,
+                                   armnn::TensorInfo& inputTensorInfo,
+                                   armnn::TensorInfo& outputTensorInfo,
+                                   std::vector<float>& inputData,
+                                   std::vector<float>& expectedOutputData,
+                                   const float gamma,
+                                   const float beta,
+                                   const float eps)
+{
+    using namespace armnn;
+
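+    // The reference data below is supplied in NHWC; permute the input and the
+    // expected output when the test is run with an NCHW data layout.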
+    if (dataLayout == DataLayout::NCHW)
+    {
+        PermuteTensorNhwcToNchw<float>(inputTensorInfo, inputData);
+        PermuteTensorNhwcToNchw<float>(outputTensorInfo, expectedOutputData);
+    }
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateInstanceNormalizationNetwork<DataType::Float32>(inputTensorInfo.GetShape(),
+                                                                            outputTensorInfo.GetShape(),
+                                                                            dataLayout,
+                                                                            gamma,
+                                                                            beta,
+                                                                            eps);
+
+    BOOST_TEST_CHECKPOINT("Create a network");
+
+    std::map<int, std::vector<float>> inputTensorData = { { 0, inputData } };
+    std::map<int, std::vector<float>> expectedOutputTensorData = { { 0, expectedOutputData } };
+
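+    // Optimizes and runs the network on each of the given backends, then compares
+    // the inference results against the expected output.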
+    EndToEndLayerTestImpl<DataType::Float32, DataType::Float32>(std::move(net),
+                                                                inputTensorData,
+                                                                expectedOutputTensorData,
+                                                                backends);
+}
+
+} // anonymous namespace
+
+void InstanceNormalizationNhwcEndToEndTest1(const std::vector<armnn::BackendId>& defaultBackends)
+{
+    using namespace armnn;
+
+    const float eps       = 0.0001f;
+    const float beta      = 0.0f;
+    const float gamma     = 1.0f;
+
+    TensorShape inputShape{2, 2, 2, 2};
+    TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+
+    TensorShape outputShape{2, 2, 2, 2};
+    TensorInfo outputTensorInfo(outputShape, DataType::Float32);
+
+    std::vector<float> inputData = std::vector<float>(
+    {
+        // Batch 0, Height 0, Width 0 x Channel (2)
+        0.f,  1.f,
+        // Batch 0, Height 0, Width 1 x Channel (2)
+        0.f,  2.f,
+
+        // Batch 0, Height 1, Width 0 x Channel (2)
+        0.f,  2.f,
+        // Batch 0, Height 1, Width 1 x Channel (2)
+        0.f,  4.f,
+
+        // Batch 1, Height 0, Width 0 x Channel (2)
+        1.f, -1.f,
+        // Batch 1, Height 0, Width 1 x Channel (2)
+        -1.f,  2.f,
+
+        // Batch 1, Height 1, Width 0 x Channel (2)
+        -1.f, -2.f,
+        // Batch 1, Height 1, Width 1 x Channel (2)
+        1.f,  4.f
+    });
+
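+    // Expected values follow y = gamma * (x - mean) / sqrt(variance + eps) + beta,
+    // with mean and variance computed per batch and per channel over H x W.
+    // For example, Batch 0 / Channel 1 holds { 1, 2, 2, 4 }: mean = 2.25 and
+    // variance = 1.1875, so 1.f maps to (1 - 2.25) / sqrt(1.1876) = -1.1470304f.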
+    std::vector<float> expectedOutputData = std::vector<float>(
+    {
+        // Batch 0, Height 0, Width 0 x Channel (2)
+        0.f, -1.1470304f,
+        // Batch 0, Height 0, Width 1 x Channel (2)
+        0.f, -0.22940612f,
+        // Batch 0, Height 1, Width 0 x Channel (2)
+        0.f, -0.22940612f,
+        // Batch 0, Height 1, Width 1 x Channel (2)
+        0.f,  1.6058424f,
+
+        // Batch 1, Height 0, Width 0 x Channel (2)
+        0.99995005f, -0.7337929f,
+        // Batch 1, Height 0, Width 1 x Channel (2)
+        -0.99995005f,  0.52413774f,
+
+        // Batch 1, Height 1, Width 0 x Channel (2)
+        -0.99995005f, -1.1531031f,
+        // Batch 1, Height 1, Width 1 x Channel (2)
+        0.99995005f,  1.3627582f
+    });
+
+    InstanceNormalizationEndToEnd(defaultBackends,
+                                  DataLayout::NHWC,
+                                  inputTensorInfo,
+                                  outputTensorInfo,
+                                  inputData,
+                                  expectedOutputData,
+                                  gamma,
+                                  beta,
+                                  eps);
+}
+
+void InstanceNormalizationNchwEndToEndTest1(const std::vector<armnn::BackendId>& defaultBackends)
+{
+    using namespace armnn;
+
+    const float eps       = 0.0001f;
+    const float beta      = 0.0f;
+    const float gamma     = 1.0f;
+
+    TensorShape inputShape{2, 2, 2, 2};
+    TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+
+    TensorShape outputShape{2, 2, 2, 2};
+    TensorInfo outputTensorInfo(outputShape, DataType::Float32);
+
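+    // Data is listed in NHWC order; InstanceNormalizationEndToEnd permutes both
+    // the input and the expected output to NCHW before running the test.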
+    std::vector<float> inputData = std::vector<float>(
+        {
+            // Batch 0, Height 0, Width 0 x Channel (2)
+            0.f,  1.f,
+            // Batch 0, Height 0, Width 1 x Channel (2)
+            0.f,  2.f,
+
+            // Batch 0, Height 1, Width 0 x Channel (2)
+            0.f,  2.f,
+            // Batch 0, Height 1, Width 1 x Channel (2)
+            0.f,  4.f,
+
+            // Batch 1, Height 0, Width 0 x Channel (2)
+            1.f, -1.f,
+            // Batch 1, Height 0, Width 1 x Channel (2)
+            -1.f,  2.f,
+
+            // Batch 1, Height 1, Width 0 x Channel (2)
+            -1.f, -2.f,
+            // Batch 1, Height 1, Width 1 x Channel (2)
+            1.f,  4.f
+        });
+
+    std::vector<float> expectedOutputData = std::vector<float>(
+        {
+            // Batch 0, Height 0, Width 0 x Channel (2)
+            0.f, -1.1470304f,
+            // Batch 0, Height 0, Width 1 x Channel (2)
+            0.f, -0.22940612f,
+            // Batch 0, Height 1, Width 0 x Channel (2)
+            0.f, -0.22940612f,
+            // Batch 0, Height 1, Width 1 x Channel (2)
+            0.f,  1.6058424f,
+
+            // Batch 1, Height 0, Width 0 x Channel (2)
+            0.99995005f, -0.7337929f,
+            // Batch 1, Height 0, Width 1 x Channel (2)
+            -0.99995005f,  0.52413774f,
+
+            // Batch 1, Height 1, Width 0 x Channel (2)
+            -0.99995005f, -1.1531031f,
+            // Batch 1, Height 1, Width 1 x Channel (2)
+            0.99995005f,  1.3627582f
+        });
+
+    InstanceNormalizationEndToEnd(defaultBackends,
+                                  DataLayout::NCHW,
+                                  inputTensorInfo,
+                                  outputTensorInfo,
+                                  inputData,
+                                  expectedOutputData,
+                                  gamma,
+                                  beta,
+                                  eps);
+}
+
+void InstanceNormalizationNhwcEndToEndTest2(const std::vector<armnn::BackendId>& defaultBackends)
+{
+    using namespace armnn;
+
+    const float eps        = 0.0001f;
+    const float beta       = 10.0f;
+    const float gamma      = 2.0f;
+
+    TensorShape inputShape{2, 2, 2, 2};
+    TensorShape outputShape{2, 2, 2, 2};
+
+    TensorInfo outputTensorInfo(outputShape, DataType::Float32);
+    TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+
+    std::vector<float> inputData = std::vector<float>(
+    {
+        // Batch 0, Height 0, Width 0 x Channel (2)
+        0.f,  1.f,
+        // Batch 0, Height 0, Width 1 x Channel (2)
+        0.f,  2.f,
+
+        // Batch 0, Height 1, Width 0 x Channel (2)
+        0.f,  2.f,
+        // Batch 0, Height 1, Width 1 x Channel (2)
+        0.f,  4.f,
+
+        // Batch 1, Height 0, Width 0 x Channel (2)
+        1.f, -1.f,
+        // Batch 1, Height 0, Width 1 x Channel (2)
+        -1.f,  2.f,
+
+        // Batch 1, Height 1, Width 0 x Channel (2)
+        -1.f, -2.f,
+        // Batch 1, Height 1, Width 1 x Channel (2)
+        1.f,  4.f
+    });
+
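+    // Same input as Test1, but with gamma = 2 and beta = 10 each expected value
+    // becomes 2 * normalized + 10, e.g. 2 * -1.1470304 + 10 ~= 7.7059393f.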
+    std::vector<float> expectedOutputData = std::vector<float>(
+    {
+        // Batch 0, Height 0, Width 0 x Channel (2)
+        10.f,     7.7059393f,
+        // Batch 0, Height 0, Width 1 x Channel (2)
+        10.f,     9.541187f,
+
+        // Batch 0, Height 1, Width 0 x Channel (2)
+        10.f,     9.541187f,
+        // Batch 0, Height 1, Width 1 x Channel (2)
+        10.f,     13.211685f,
+
+        // Batch 1, Height 0, Width 0 x Channel (2)
+        11.9999f, 8.532414f,
+        // Batch 1, Height 0, Width 1 x Channel (2)
+        8.0001f,  11.048275f,
+
+        // Batch 1, Height 1, Width 0 x Channel (2)
+        8.0001f,  7.693794f,
+        // Batch 1, Height 1, Width 1 x Channel (2)
+        11.9999f, 12.725516f
+    });
+
+    InstanceNormalizationEndToEnd(defaultBackends,
+                                  DataLayout::NHWC,
+                                  inputTensorInfo,
+                                  outputTensorInfo,
+                                  inputData,
+                                  expectedOutputData,
+                                  gamma,
+                                  beta,
+                                  eps);
+}
+
+void InstanceNormalizationNchwEndToEndTest2(const std::vector<armnn::BackendId>& defaultBackends)
+{
+    using namespace armnn;
+
+    const float eps        = 0.0001f;
+    const float beta       = 10.0f;
+    const float gamma      = 2.0f;
+
+    TensorShape inputShape{2, 2, 2, 2};
+    TensorShape outputShape{2, 2, 2, 2};
+
+    TensorInfo outputTensorInfo(outputShape, DataType::Float32);
+    TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+
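+    // As in the NCHW variant of Test1, the data is listed in NHWC order and is
+    // permuted to NCHW by InstanceNormalizationEndToEnd.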
+    std::vector<float> inputData = std::vector<float>(
+        {
+            // Batch 0, Height 0, Width 0 x Channel (2)
+            0.f,  1.f,
+            // Batch 0, Height 0, Width 1 x Channel (2)
+            0.f,  2.f,
+
+            // Batch 0, Height 1, Width 0 x Channel (2)
+            0.f,  2.f,
+            // Batch 0, Height 1, Width 1 x Channel (2)
+            0.f,  4.f,
+
+            // Batch 1, Height 0, Width 0 x Channel (2)
+            1.f, -1.f,
+            // Batch 1, Height 0, Width 1 x Channel (2)
+            -1.f,  2.f,
+
+            // Batch 1, Height 1, Width 0 x Channel (2)
+            -1.f, -2.f,
+            // Batch 1, Height 1, Width 1 x Channel (2)
+            1.f,  4.f
+        });
+
+    std::vector<float> expectedOutputData = std::vector<float>(
+        {
+            // Batch 0, Height 0, Width 0 x Channel (2)
+            10.f,     7.7059393f,
+            // Batch 0, Height 0, Width 1 x Channel (2)
+            10.f,     9.541187f,
+
+            // Batch 0, Height 1, Width 0 x Channel (2)
+            10.f,     9.541187f,
+            // Batch 0, Height 1, Width 1 x Channel (2)
+            10.f,     13.211685f,
+
+            // Batch 1, Height 0, Width 0 x Channel (2)
+            11.9999f, 8.532414f,
+            // Batch 1, Height 0, Width 1 x Channel (2)
+            8.0001f,  11.048275f,
+
+            // Batch 1, Height 1, Width 0 x Channel (2)
+            8.0001f,  7.693794f,
+            // Batch 1, Height 1, Width 1 x Channel (2)
+            11.9999f, 12.725516f
+        });
+
+    InstanceNormalizationEndToEnd(defaultBackends,
+                                  DataLayout::NCHW,
+                                  inputTensorInfo,
+                                  outputTensorInfo,
+                                  inputData,
+                                  expectedOutputData,
+                                  gamma,
+                                  beta,
+                                  eps);
+}
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp
new file mode 100644
index 0000000..7d5b34b
--- /dev/null
+++ b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp
@@ -0,0 +1,18 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/BackendId.hpp>
+
+#include <vector>
+
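+// End-to-end tests for the InstanceNormalization layer. Test1 uses gamma = 1.0f
+// and beta = 0.0f; Test2 uses gamma = 2.0f and beta = 10.0f. Each test is
+// provided in both NHWC and NCHW variants.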
+void InstanceNormalizationNhwcEndToEndTest1(const std::vector<armnn::BackendId>& defaultBackends);
+
+void InstanceNormalizationNchwEndToEndTest1(const std::vector<armnn::BackendId>& defaultBackends);
+
+void InstanceNormalizationNhwcEndToEndTest2(const std::vector<armnn::BackendId>& defaultBackends);
+
+void InstanceNormalizationNchwEndToEndTest2(const std::vector<armnn::BackendId>& defaultBackends);
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index d5d1ad5..59d26ed 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -10,6 +10,7 @@
 #include <backendsCommon/test/ConcatEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
+#include <backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp>
 #include <backendsCommon/test/PreluEndToEndTestImpl.hpp>
 #include <backendsCommon/test/QuantizedLstmEndToEndTestImpl.hpp>
 #include <backendsCommon/test/SpaceToDepthEndToEndTestImpl.hpp>
@@ -156,6 +157,27 @@
                                                                                             expectedOutput);
 }
 
+// InstanceNormalization
+BOOST_AUTO_TEST_CASE(ClInstanceNormalizationNhwcEndToEndTest1)
+{
+    InstanceNormalizationNhwcEndToEndTest1(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClInstanceNormalizationNchwEndToEndTest1)
+{
+    InstanceNormalizationNchwEndToEndTest1(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClInstanceNormalizationNhwcEndToEndTest2)
+{
+    InstanceNormalizationNhwcEndToEndTest2(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClInstanceNormalizationNchwEndToEndTest2)
+{
+    InstanceNormalizationNchwEndToEndTest2(defaultBackends);
+}
+
 BOOST_AUTO_TEST_CASE(ClPreluEndToEndFloat32Test)
 {
     PreluEndToEndNegativeTest<armnn::DataType::Float32>(defaultBackends);
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index 9d7fc9d..88f7ae7 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -10,6 +10,7 @@
 #include <backendsCommon/test/ConcatEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
+#include <backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp>
 #include <backendsCommon/test/PreluEndToEndTestImpl.hpp>
 #include <backendsCommon/test/QuantizedLstmEndToEndTestImpl.hpp>
 #include <backendsCommon/test/SpaceToDepthEndToEndTestImpl.hpp>
@@ -390,4 +391,15 @@
     ExportOutputWithSeveralOutputSlotConnectionsTest(defaultBackends);
 }
 
+// InstanceNormalization
+BOOST_AUTO_TEST_CASE(NeonInstanceNormalizationNchwEndToEndTest1)
+{
+    InstanceNormalizationNchwEndToEndTest1(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonInstanceNormalizationNchwEndToEndTest2)
+{
+    InstanceNormalizationNchwEndToEndTest2(defaultBackends);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index c60dbbd..8b768ab 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -13,6 +13,7 @@
 #include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp>
 #include <backendsCommon/test/GatherEndToEndTestImpl.hpp>
+#include <backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp>
 #include <backendsCommon/test/PreluEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ResizeEndToEndTestImpl.hpp>
 #include <backendsCommon/test/SpaceToDepthEndToEndTestImpl.hpp>
@@ -1012,6 +1013,27 @@
     ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
+// InstanceNormalization
+BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNhwcEndToEndTest1)
+{
+    InstanceNormalizationNhwcEndToEndTest1(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNchwEndToEndTest1)
+{
+    InstanceNormalizationNchwEndToEndTest1(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNhwcEndToEndTest2)
+{
+    InstanceNormalizationNhwcEndToEndTest2(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNchwEndToEndTest2)
+{
+    InstanceNormalizationNchwEndToEndTest2(defaultBackends);
+}
+
 #if !defined(__ANDROID__)
 // Only run these tests on non Android platforms
 BOOST_AUTO_TEST_CASE(RefImportNonAlignedPointerTest)