IVGCVSW-2632 Fix RefMerger from QAsymm8 types with different quantization parameters

Change-Id: Ie67ce4966c5e5fef618876b027292da429de1485
Signed-off-by: Ferran Balaguer <ferran.balaguer@arm.com>
diff --git a/src/armnn/NetworkQuantizerUtils.cpp b/src/armnn/NetworkQuantizerUtils.cpp
index 1bec63b..551760f 100644
--- a/src/armnn/NetworkQuantizerUtils.cpp
+++ b/src/armnn/NetworkQuantizerUtils.cpp
@@ -12,7 +12,7 @@
 namespace armnn
 {
 
-std::pair<int, float> ComputeQAsymmParams(int numBits, double min, double max)
+std::pair<float, int> ComputeQAsymmParams(int numBits, double min, double max)
 {
     BOOST_ASSERT_MSG(min < max, "min >= max will result in invalid quantization.");
     double highest = (1 << numBits) - 1;
@@ -27,7 +27,7 @@
     // Clamp offset [0-highest]
     offset = std::max(0.0, std::min(highest, offset));
 
-    return std::make_pair(static_cast<int>(std::round(offset)), static_cast<float>(scale));
+    return std::make_pair(static_cast<float>(scale), static_cast<int>(std::round(offset)));
 }
 
 ConstTensor CreateQuantizedConst(const ConstTensor& tensor, std::vector<uint8_t>& backing)
diff --git a/src/armnn/NetworkQuantizerUtils.hpp b/src/armnn/NetworkQuantizerUtils.hpp
index 458d21a..c23517e 100644
--- a/src/armnn/NetworkQuantizerUtils.hpp
+++ b/src/armnn/NetworkQuantizerUtils.hpp
@@ -17,7 +17,7 @@
 namespace armnn
 {
 
-std::pair<int, float> ComputeQAsymmParams(int numBits, double min, double max);
+std::pair<float, int> ComputeQAsymmParams(int numBits, double min, double max);
 
 template<typename srcType>
 void Quantize(const srcType* src, uint8_t* dst, size_t numElements, float& scale, int& offset)
@@ -34,8 +34,8 @@
     }
 
     auto qParams = ComputeQAsymmParams(8, min, max);
-    offset = qParams.first;
-    scale = qParams.second;
+    scale = qParams.first;
+    offset = qParams.second;
     for (size_t i = 0; i < numElements; ++i)
     {
         dst[i] = armnn::Quantize<uint8_t>(src[i], scale, offset);
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index 437d7b9..110594c 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -50,8 +50,8 @@
         // Set the quantization params
         TensorInfo info(newOutputSlot.GetTensorInfo());
         info.SetDataType(DataType::QuantisedAsymm8);
-        info.SetQuantizationOffset(qParams.first);
-        info.SetQuantizationScale(qParams.second);
+        info.SetQuantizationOffset(qParams.second);
+        info.SetQuantizationScale(qParams.first);
         newOutputSlot.SetTensorInfo(info);
     }
 }
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index fcce208..f7723bd 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -997,15 +997,15 @@
                                       const OriginsDescriptor& mergerDescriptor,
                                       const char* name = nullptr)
         {
-            std::pair<int, float> expectedValues = ComputeQAsymmParams(8, m_Min, m_Max);
+            std::pair<float, int> expectedValues = ComputeQAsymmParams(8, m_Min, m_Max);
 
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
 
-            BOOST_TEST((info.GetQuantizationOffset() == expectedValues.first));
+            BOOST_TEST((info.GetQuantizationOffset() == expectedValues.second));
 
-            BOOST_CHECK_CLOSE(info.GetQuantizationScale(), expectedValues.second, 0.000001f);
+            BOOST_CHECK_CLOSE(info.GetQuantizationScale(), expectedValues.first, 0.000001f);
         }
 
     private:
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index 6060b30..a6b3b3d 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -5993,6 +5993,148 @@
     return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
 }
 
+LayerTestResult<uint8_t, 3> MergerUint8DifferentQParamsTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int outputWidth = 3;
+    unsigned int outputHeight = 6;
+    unsigned int outputChannels = 3;
+
+    unsigned int inputWidth1 = 3;
+    unsigned int inputHeight1 = 6;
+    unsigned int inputChannels1 = 2;
+
+    unsigned int inputWidth2 = 3;
+    unsigned int inputHeight2 = 6;
+    unsigned int inputChannels2 = 1;
+
+    // Defines the tensor descriptors.
+    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
+    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
+    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
+
+    // Quantized input1 tensor. Range [-3, 1]
+    const float inputScale1 = 0.015686f;
+    const int32_t inputOffset1 = 192;
+
+    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
+    {
+        1, 2, 3,
+        4, 5, 6,
+        7, 8, 9,
+        10, 11, 12,
+        13, 14, 15,
+        16, 17, 18,
+
+        19, 20, 21,
+        22, 23, 24,
+        25, 26, 27,
+        28, 29, 30,
+        31, 32, 33,
+        34, 35, 36,
+    })
+    );
+
+    // Quantized input2 tensor. Range [-1, 4]
+    const float inputScale2 = 0.019608f;
+    const int32_t inputOffset2 = 50;
+
+    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
+    {
+        37, 38, 39,
+        40, 41, 42,
+        43, 44, 45,
+        46, 47, 48,
+        49, 50, 51,
+        52, 53, 54,
+    })
+    );
+
+    // Output has the same quantization parameters as input1,
+    // so that only the requantization of input2 is required
+    const float outputScale = 0.015686f;
+    const int32_t outputOffset = 192;
+
+    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
+
+    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
+    {
+        1, 2, 3,
+        4, 5, 6,
+        7, 8, 9,
+        10, 11, 12,
+        13, 14, 15,
+        16, 17, 18,
+
+        19, 20, 21,
+        22, 23, 24,
+        25, 26, 27,
+        28, 29, 30,
+        31, 32, 33,
+        34, 35, 36,
+
+        176, 177, 178,
+        179, 181, 182,
+        183, 184, 186,
+        187, 188, 189,
+        191, 192, 193,
+        195, 196, 197,
+    })
+    );
+
+    outputTensorInfo.SetQuantizationScale(outputScale);
+    outputTensorInfo.SetQuantizationOffset(outputOffset);
+    inputTensorInfo1.SetQuantizationScale(inputScale1);
+    inputTensorInfo1.SetQuantizationOffset(inputOffset1);
+    inputTensorInfo2.SetQuantizationScale(inputScale2);
+    inputTensorInfo2.SetQuantizationOffset(inputOffset2);
+
+    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
+    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
+
+    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
+    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
+
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    bool subTensorsSupported = workloadFactory.SupportsSubTensors();
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
+            subTensorsSupported ?
+            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
+            workloadFactory.CreateTensorHandle(inputTensorInfo1);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
+            subTensorsSupported ?
+            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
+            workloadFactory.CreateTensorHandle(inputTensorInfo2);
+
+    armnn::MergerQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    data.m_ViewOrigins.push_back(window1);
+    data.m_ViewOrigins.push_back(window2);
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);
+
+    inputHandle1->Allocate();
+    inputHandle2->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
+    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
+
+    return ret;
+}
+
 LayerTestResult<uint8_t, 3> MergerUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 05d510e..93385f0 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -745,6 +745,10 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
+LayerTestResult<uint8_t, 3> MergerUint8DifferentQParamsTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
 LayerTestResult<uint8_t, 4> AdditionUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index acaedc9..a75146b 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -21,6 +21,7 @@
         workloads/FullyConnected.cpp \
         workloads/Gather.cpp \
         workloads/Mean.cpp \
+        workloads/Merger.cpp \
         workloads/Pad.cpp \
         workloads/Pooling2d.cpp \
         workloads/RefActivationFloat32Workload.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index b4ef85a..cf8e6a2 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -209,6 +209,7 @@
 // Merger
 ARMNN_AUTO_TEST_CASE(SimpleMerger, MergerTest)
 ARMNN_AUTO_TEST_CASE(MergerUint8, MergerUint8Test)
+ARMNN_AUTO_TEST_CASE(MergerUint8DifferentQParams, MergerUint8DifferentQParamsTest)
 
 // Add
 ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 47e42f7..89aed91 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -25,6 +25,7 @@
     Gather.hpp
     Maximum.hpp
     Merger.hpp
+    Merger.cpp
     Minimum.hpp
     Pad.cpp
     Pad.hpp
diff --git a/src/backends/reference/workloads/Merger.cpp b/src/backends/reference/workloads/Merger.cpp
new file mode 100644
index 0000000..10cc249
--- /dev/null
+++ b/src/backends/reference/workloads/Merger.cpp
@@ -0,0 +1,110 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Merger.hpp"
+#include "RefWorkloadUtils.hpp"
+
+namespace armnn
+{
+
+template <>
+void CopyValue<float>(const float& source, const TensorInfo& sourceInfo, float& dest, const TensorInfo& destInfo)
+{
+    dest = source;
+}
+
+template <>
+void CopyValue<uint8_t>(const uint8_t& source, const TensorInfo& sourceInfo, uint8_t& dest, const TensorInfo& destInfo)
+{
+    if (sourceInfo.GetQuantizationScale() != destInfo.GetQuantizationScale() ||
+        sourceInfo.GetQuantizationOffset() != destInfo.GetQuantizationOffset())
+    {
+        // Dequantize value according to sourceInfo params
+        float dequantizedValue = armnn::Dequantize<uint8_t>(source,
+                                                            sourceInfo.GetQuantizationScale(),
+                                                            sourceInfo.GetQuantizationOffset());
+
+        // Quantize again according to destInfo params
+        dest = armnn::Quantize<uint8_t>(dequantizedValue,
+                destInfo.GetQuantizationScale(),
+                destInfo.GetQuantizationOffset());
+    }
+    else
+    {
+        dest = source;
+    }
+}
+
+template <typename DataType>
+void Merger(const MergerQueueDescriptor& data)
+{
+    const TensorInfo& outputInfo0 = GetTensorInfo(data.m_Outputs[0]);
+
+    for (unsigned int index = 0 ; index < outputInfo0.GetNumElements(); ++index)
+    {
+        unsigned int indices[MaxNumOfTensorDimensions] = { 0 };
+
+        unsigned int indexRemainder = index;
+        unsigned int dimensionStride = outputInfo0.GetNumElements();
+
+        for (unsigned int i = 0; i < outputInfo0.GetNumDimensions(); i++)
+        {
+            dimensionStride /= outputInfo0.GetShape()[i];
+            indices[i] = indexRemainder / dimensionStride; // Use integer division to round down.
+            indexRemainder -= indices[i] * dimensionStride;
+        }
+
+        for (unsigned int viewIdx = 0; viewIdx < data.m_ViewOrigins.size(); ++viewIdx)
+        {
+            MergerQueueDescriptor::ViewOrigin const& view = data.m_ViewOrigins[viewIdx];
+
+            //Split view extents are defined by the size of (the corresponding) input tensor.
+            const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[viewIdx]);
+            BOOST_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
+
+            // Check all dimensions to see if this element is inside the given input view.
+            bool insideView = true;
+            for (unsigned int i = 0; i < inputInfo.GetNumDimensions(); i++)
+            {
+                if (indices[i] < view.m_Origin[i])
+                {
+                    insideView = false;
+                }
+                if (indices[i] >= view.m_Origin[i] + inputInfo.GetShape()[i])
+                {
+                    insideView = false;
+                }
+            }
+
+            if (insideView)
+            {
+                unsigned int inIndex = 0;
+                unsigned int dimensionStride = 1;
+
+                for (unsigned int i = inputInfo.GetNumDimensions(); i-- > 0;)
+                {
+                    inIndex += dimensionStride * (indices[i] - view.m_Origin[i]);
+                    dimensionStride *= inputInfo.GetShape()[i];
+                }
+
+                CopyValue<DataType>((GetInputTensorData<DataType>(viewIdx, data))[inIndex],
+                                    GetTensorInfo(data.m_Inputs[viewIdx]),
+                                    (GetOutputTensorData<DataType>(0, data))[index],
+                                    outputInfo0);
+
+                //What should we do if input views overlap on the output tensor?
+                //We could error, take the average, or something else...
+                //For now just stop after finding first view (input) that matches.
+                break;
+            }
+        }
+    }
+}
+
+template void Merger<float>(const MergerQueueDescriptor& data);
+
+template void Merger<uint8_t>(const MergerQueueDescriptor& data);
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/Merger.hpp b/src/backends/reference/workloads/Merger.hpp
index 76d807c..ba3b99b 100644
--- a/src/backends/reference/workloads/Merger.hpp
+++ b/src/backends/reference/workloads/Merger.hpp
@@ -5,8 +5,6 @@
 
 #pragma once
 
-#include "RefWorkloadUtils.hpp"
-
 #include <backendsCommon/WorkloadData.hpp>
 #include <armnn/Tensor.hpp>
 
@@ -14,68 +12,9 @@
 {
 
 template <typename DataType>
-void Merger(const MergerQueueDescriptor& data)
-{
-    const TensorInfo& outputInfo0 = GetTensorInfo(data.m_Outputs[0]);
+void CopyValue(const DataType& source, const TensorInfo& sourceInfo, DataType& dest, const TensorInfo& destInfo);
 
-    for (unsigned int index = 0 ; index < outputInfo0.GetNumElements(); ++index)
-    {
-        unsigned int indices[MaxNumOfTensorDimensions] = { 0 };
-
-        unsigned int indexRemainder = index;
-        unsigned int dimensionStride = outputInfo0.GetNumElements();
-
-        for (unsigned int i=0; i<outputInfo0.GetNumDimensions(); i++)
-        {
-            dimensionStride /= outputInfo0.GetShape()[i];
-            indices[i] = indexRemainder / dimensionStride; // Use integer division to round down.
-            indexRemainder -= indices[i] * dimensionStride;
-        }
-
-        for (unsigned int viewIdx = 0; viewIdx < data.m_ViewOrigins.size(); ++viewIdx)
-        {
-            MergerQueueDescriptor::ViewOrigin const& view = data.m_ViewOrigins[viewIdx];
-
-            //Split view extents are defined by the size of (the corresponding) input tensor.
-            const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[viewIdx]);
-            BOOST_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
-
-            // Check all dimensions to see if this element is inside the given input view.
-            bool insideView = true;
-            for (unsigned int i=0; i<inputInfo.GetNumDimensions(); i++)
-            {
-                if (indices[i] < view.m_Origin[i])
-                {
-                    insideView = false;
-                }
-                if (indices[i] >= view.m_Origin[i] + inputInfo.GetShape()[i])
-                {
-                    insideView = false;
-                }
-            }
-
-            if (insideView)
-            {
-                unsigned int inIndex = 0;
-                unsigned int dimensionStride = 1;
-
-                for (unsigned int i = inputInfo.GetNumDimensions(); i-- > 0;)
-                {
-                    inIndex += dimensionStride * (indices[i] - view.m_Origin[i]);
-                    dimensionStride *= inputInfo.GetShape()[i];
-                }
-
-                //We are within the view, copy input data to the output corresponding to this view.
-                (GetOutputTensorData<DataType>(0, data))[index] =
-                    (GetInputTensorData<DataType>(viewIdx, data))[inIndex];
-
-                //What should we do if input views overlap on the output tensor?
-                //We could error, take the average, or shm else...
-                //For now just stop after finding first view (input) that matches.
-                break;
-            }
-        }
-    }
-}
+template <typename DataType>
+void Merger(const MergerQueueDescriptor& data);
 
 } //namespace armnn