COMPMID-1918: Different qinfos support in NEConcatLayer.

Added support in NEDepthConcatenateLayerKernel and NEWidthConcatenateLayer for
different quantization info on the input tensors and the output tensor.

If the inputs' quantization infos are not homogeneous, the input values are
requantized using the output's quantization info.

Change-Id: I2daa638361947eb3ec848d5425d0a5bbfea1936d
Reviewed-on: https://review.mlplatform.org/627
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Isabella Gottardi <isabella.gottardi@arm.com>
diff --git a/tests/validation/fixtures/DepthConcatenateLayerFixture.h b/tests/validation/fixtures/DepthConcatenateLayerFixture.h
index 5fdfacb..edeefa2 100644
--- a/tests/validation/fixtures/DepthConcatenateLayerFixture.h
+++ b/tests/validation/fixtures/DepthConcatenateLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -53,9 +53,22 @@
         // Create input shapes
         std::mt19937                    gen(library->seed());
         std::uniform_int_distribution<> num_dis(2, 4);
-        const int                       num_tensors = num_dis(gen);
+        std::uniform_int_distribution<> offset_dis(0, 20);
 
-        std::vector<TensorShape>         shapes(num_tensors, shape);
+        const int num_tensors = num_dis(gen);
+
+        std::vector<TensorShape> shapes(num_tensors, shape);
+
+        // vector holding the quantization info:
+        //      the last element is the output quantization info
+        //      all other elements are the quantization info for the input tensors
+        std::vector<QuantizationInfo> qinfo(num_tensors + 1, QuantizationInfo());
+
+        for(auto &qi : qinfo)
+        {
+            qi = QuantizationInfo(1.f / 255.f, offset_dis(gen));
+        }
+
         std::uniform_int_distribution<>  depth_dis(1, 3);
         std::bernoulli_distribution      mutate_dis(0.5f);
         std::uniform_real_distribution<> change_dis(-0.25f, 0.f);
@@ -82,8 +95,8 @@
             }
         }
 
-        _target    = compute_target(shapes, data_type);
-        _reference = compute_reference(shapes, data_type);
+        _target    = compute_target(shapes, qinfo, data_type);
+        _reference = compute_reference(shapes, qinfo, data_type);
     }
 
 protected:
@@ -93,7 +106,7 @@
         library->fill_tensor_uniform(tensor, i);
     }
 
-    TensorType compute_target(std::vector<TensorShape> shapes, DataType data_type)
+    TensorType compute_target(std::vector<TensorShape> shapes, const std::vector<QuantizationInfo> &qinfo, DataType data_type)
     {
         std::vector<TensorType>    srcs;
         std::vector<ITensorType *> src_ptrs;
@@ -101,14 +114,14 @@
         // Create tensors
         srcs.reserve(shapes.size());
 
-        for(const auto &shape : shapes)
+        for(size_t j = 0; j < shapes.size(); ++j)
         {
-            srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1));
+            srcs.emplace_back(create_tensor<TensorType>(shapes[j], data_type, 1, qinfo[j]));
             src_ptrs.emplace_back(&srcs.back());
         }
 
         TensorShape dst_shape = misc::shape_calculator::calculate_depth_concatenate_shape(src_ptrs);
-        TensorType  dst       = create_tensor<TensorType>(dst_shape, data_type, 1);
+        TensorType  dst       = create_tensor<TensorType>(dst_shape, data_type, 1, qinfo[shapes.size()]);
 
         // Create and configure function
         FunctionType depth_concat;
@@ -144,19 +157,21 @@
         return dst;
     }
 
-    SimpleTensor<T> compute_reference(std::vector<TensorShape> shapes, DataType data_type)
+    SimpleTensor<T> compute_reference(std::vector<TensorShape> shapes, const std::vector<QuantizationInfo> &qinfo, DataType data_type)
     {
         std::vector<SimpleTensor<T>> srcs;
 
         // Create and fill tensors
-        int i = 0;
-        for(const auto &shape : shapes)
+        for(size_t j = 0; j < shapes.size(); ++j)
         {
-            srcs.emplace_back(shape, data_type, 1);
-            fill(srcs.back(), i++);
+            srcs.emplace_back(shapes[j], data_type, 1, qinfo[j]);
+            fill(srcs.back(), j);
         }
 
-        return reference::depthconcatenate_layer<T>(srcs);
+        const TensorShape dst_shape = calculate_depth_concatenate_shape(shapes);
+        SimpleTensor<T>   dst{ dst_shape, data_type, 1, qinfo[shapes.size()] };
+
+        return reference::depthconcatenate_layer<T>(srcs, dst);
     }
 
     TensorType      _target{};
diff --git a/tests/validation/fixtures/WidthConcatenateLayerFixture.h b/tests/validation/fixtures/WidthConcatenateLayerFixture.h
index 1f79210..47a03ed 100644
--- a/tests/validation/fixtures/WidthConcatenateLayerFixture.h
+++ b/tests/validation/fixtures/WidthConcatenateLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -53,9 +53,20 @@
         // Create input shapes
         std::mt19937                    gen(library->seed());
         std::uniform_int_distribution<> num_dis(2, 8);
-        const int                       num_tensors = num_dis(gen);
+        std::uniform_int_distribution<> offset_dis(0, 20);
 
-        std::vector<TensorShape>         shapes(num_tensors, shape);
+        const int num_tensors = num_dis(gen);
+
+        std::vector<TensorShape> shapes(num_tensors, shape);
+
+        // vector holding the quantization info:
+        //      the last element is the output quantization info
+        //      all other elements are the quantization info for the input tensors
+        std::vector<QuantizationInfo> qinfo(num_tensors + 1, QuantizationInfo());
+        for(auto &qi : qinfo)
+        {
+            qi = QuantizationInfo(1.f / 255.f, offset_dis(gen));
+        }
         std::bernoulli_distribution      mutate_dis(0.5f);
         std::uniform_real_distribution<> change_dis(-0.25f, 0.f);
 
@@ -71,8 +82,8 @@
             }
         }
 
-        _target    = compute_target(shapes, data_type);
-        _reference = compute_reference(shapes, data_type);
+        _target    = compute_target(shapes, qinfo, data_type);
+        _reference = compute_reference(shapes, qinfo, data_type);
     }
 
 protected:
@@ -82,7 +93,7 @@
         library->fill_tensor_uniform(tensor, i);
     }
 
-    TensorType compute_target(std::vector<TensorShape> shapes, DataType data_type)
+    TensorType compute_target(std::vector<TensorShape> shapes, const std::vector<QuantizationInfo> &qinfo, DataType data_type)
     {
         std::vector<TensorType>    srcs;
         std::vector<ITensorType *> src_ptrs;
@@ -90,14 +101,15 @@
         // Create tensors
         srcs.reserve(shapes.size());
 
-        for(const auto &shape : shapes)
+        for(size_t j = 0; j < shapes.size(); ++j)
         {
-            srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1));
+            srcs.emplace_back(create_tensor<TensorType>(shapes[j], data_type, 1, qinfo[j]));
             src_ptrs.emplace_back(&srcs.back());
         }
 
         TensorShape dst_shape = misc::shape_calculator::calculate_width_concatenate_shape(src_ptrs);
-        TensorType  dst       = create_tensor<TensorType>(dst_shape, data_type, 1);
+
+        TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, qinfo[shapes.size()]);
 
         // Create and configure function
         FunctionType width_concat;
@@ -133,19 +145,21 @@
         return dst;
     }
 
-    SimpleTensor<T> compute_reference(std::vector<TensorShape> shapes, DataType data_type)
+    SimpleTensor<T> compute_reference(std::vector<TensorShape> shapes, const std::vector<QuantizationInfo> &qinfo, DataType data_type)
     {
         std::vector<SimpleTensor<T>> srcs;
 
         // Create and fill tensors
-        int i = 0;
-        for(const auto &shape : shapes)
+        for(size_t j = 0; j < shapes.size(); ++j)
         {
-            srcs.emplace_back(shape, data_type, 1);
-            fill(srcs.back(), i++);
+            srcs.emplace_back(shapes[j], data_type, 1, qinfo[j]);
+            fill(srcs.back(), j);
         }
 
-        return reference::widthconcatenate_layer<T>(srcs);
+        const TensorShape dst_shape = calculate_width_concatenate_shape(shapes);
+        SimpleTensor<T>   dst{ dst_shape, data_type, 1, qinfo[shapes.size()] };
+
+        return reference::widthconcatenate_layer<T>(srcs, dst);
     }
 
     TensorType      _target{};
diff --git a/tests/validation/reference/DepthConcatenateLayer.cpp b/tests/validation/reference/DepthConcatenateLayer.cpp
index 90fbd91..6551f0c 100644
--- a/tests/validation/reference/DepthConcatenateLayer.cpp
+++ b/tests/validation/reference/DepthConcatenateLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -34,7 +34,7 @@
 namespace reference
 {
 template <typename T>
-SimpleTensor<T> depthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs)
+SimpleTensor<T> depthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs, SimpleTensor<T> &dst)
 {
     // Create reference
     std::vector<TensorShape> shapes;
@@ -44,10 +44,6 @@
         shapes.emplace_back(src.shape());
     }
 
-    DataType        dst_type  = srcs.empty() ? DataType::UNKNOWN : srcs[0].data_type();
-    TensorShape     dst_shape = calculate_depth_concatenate_shape(shapes);
-    SimpleTensor<T> dst(dst_shape, dst_type);
-
     // Compute reference
     int       depth_offset = 0;
     const int width_out    = dst.shape().x();
@@ -80,8 +76,20 @@
             {
                 for(int r = 0; r < height; ++r)
                 {
-                    std::copy(src_ptr, src_ptr + width, dst.data() + offset_to_first_element + d * out_stride_z + r * width_out);
-                    src_ptr += width;
+                    if(src.data_type() == DataType::QASYMM8 && src.quantization_info() != dst.quantization_info())
+                    {
+                        std::transform(src_ptr, src_ptr + width, dst.data() + offset_to_first_element + d * out_stride_z + r * width_out, [src, dst](T t)
+                        {
+                            const float dequantized_input = src.quantization_info().dequantize(t);
+                            return dst.quantization_info().quantize(dequantized_input, RoundingPolicy::TO_NEAREST_UP);
+                        });
+                        src_ptr += width;
+                    }
+                    else
+                    {
+                        std::copy(src_ptr, src_ptr + width, dst.data() + offset_to_first_element + d * out_stride_z + r * width_out);
+                        src_ptr += width;
+                    }
                 }
             }
         }
@@ -92,9 +100,9 @@
     return dst;
 }
 
-template SimpleTensor<uint8_t> depthconcatenate_layer(const std::vector<SimpleTensor<uint8_t>> &srcs);
-template SimpleTensor<float> depthconcatenate_layer(const std::vector<SimpleTensor<float>> &srcs);
-template SimpleTensor<half> depthconcatenate_layer(const std::vector<SimpleTensor<half>> &srcs);
+template SimpleTensor<uint8_t> depthconcatenate_layer(const std::vector<SimpleTensor<uint8_t>> &srcs, SimpleTensor<uint8_t> &dst);
+template SimpleTensor<float> depthconcatenate_layer(const std::vector<SimpleTensor<float>> &srcs, SimpleTensor<float> &dst);
+template SimpleTensor<half> depthconcatenate_layer(const std::vector<SimpleTensor<half>> &srcs, SimpleTensor<half> &dst);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/DepthConcatenateLayer.h b/tests/validation/reference/DepthConcatenateLayer.h
index 3c486a8..8a78441 100644
--- a/tests/validation/reference/DepthConcatenateLayer.h
+++ b/tests/validation/reference/DepthConcatenateLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -37,7 +37,7 @@
 namespace reference
 {
 template <typename T>
-SimpleTensor<T> depthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs);
+SimpleTensor<T> depthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs, SimpleTensor<T> &dst);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/WidthConcatenateLayer.cpp b/tests/validation/reference/WidthConcatenateLayer.cpp
index 6be171b..3854339 100644
--- a/tests/validation/reference/WidthConcatenateLayer.cpp
+++ b/tests/validation/reference/WidthConcatenateLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -34,7 +34,7 @@
 namespace reference
 {
 template <typename T>
-SimpleTensor<T> widthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs)
+SimpleTensor<T> widthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs, SimpleTensor<T> &dst)
 {
     // Create reference
     std::vector<TensorShape> shapes;
@@ -44,10 +44,6 @@
         shapes.emplace_back(src.shape());
     }
 
-    DataType        dst_type  = srcs.empty() ? DataType::UNKNOWN : srcs[0].data_type();
-    TensorShape     dst_shape = calculate_width_concatenate_shape(shapes);
-    SimpleTensor<T> dst(dst_shape, dst_type);
-
     // Compute reference
     int       width_offset = 0;
     const int width_out    = dst.shape().x();
@@ -74,21 +70,32 @@
                 for(int r = 0; r < height; ++r)
                 {
                     const int offset = u * height * depth + d * height + r;
-                    std::copy(src_ptr, src_ptr + width, dst_ptr + width_offset + offset * width_out);
-                    src_ptr += width;
+                    if(src.data_type() == DataType::QASYMM8 && src.quantization_info() != dst.quantization_info())
+                    {
+                        std::transform(src_ptr, src_ptr + width, dst_ptr + width_offset + offset * width_out, [src, dst](T t)
+                        {
+                            const float dequantized_input = src.quantization_info().dequantize(t);
+                            return dst.quantization_info().quantize(dequantized_input, RoundingPolicy::TO_NEAREST_UP);
+                        });
+                        src_ptr += width;
+                    }
+                    else
+                    {
+                        std::copy(src_ptr, src_ptr + width, dst_ptr + width_offset + offset * width_out);
+                        src_ptr += width;
+                    }
                 }
             }
         }
-
         width_offset += width;
     }
 
     return dst;
 }
 
-template SimpleTensor<float> widthconcatenate_layer(const std::vector<SimpleTensor<float>> &srcs);
-template SimpleTensor<half> widthconcatenate_layer(const std::vector<SimpleTensor<half>> &srcs);
-template SimpleTensor<uint8_t> widthconcatenate_layer(const std::vector<SimpleTensor<uint8_t>> &srcs);
+template SimpleTensor<float> widthconcatenate_layer(const std::vector<SimpleTensor<float>> &srcs, SimpleTensor<float> &dst);
+template SimpleTensor<half> widthconcatenate_layer(const std::vector<SimpleTensor<half>> &srcs, SimpleTensor<half> &dst);
+template SimpleTensor<uint8_t> widthconcatenate_layer(const std::vector<SimpleTensor<uint8_t>> &srcs, SimpleTensor<uint8_t> &dst);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/WidthConcatenateLayer.h b/tests/validation/reference/WidthConcatenateLayer.h
index 237e72b..0f1f428 100644
--- a/tests/validation/reference/WidthConcatenateLayer.h
+++ b/tests/validation/reference/WidthConcatenateLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -37,7 +37,7 @@
 namespace reference
 {
 template <typename T>
-SimpleTensor<T> widthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs);
+SimpleTensor<T> widthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs, SimpleTensor<T> &dst);
 } // namespace reference
 } // namespace validation
 } // namespace test