COMPMID-2547: CLSpaceToBatchLayer causes NN Test Failures on QUANT8_ASYMM Data Type

Add QASYMM8 validation tests for SpaceToBatchLayer on both the CL and NEON
backends, propagating the input QuantizationInfo through the test fixtures
and the reference implementation. For quantized data types the reference now
pads with the quantized representation of zero, i.e. the quantization offset,
instead of a literal 0.

Change-Id: I47c9d057e50fa624f9b9e3fd79724e4fa7d0fd82
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1713
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
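
Note on the pad value used by the reference: under the QASYMM8 affine scheme,
real_value = scale * (quantized - offset), so the quantized value that
represents a real zero is the offset itself, not 0. Below is a minimal
standalone C++ sketch (not part of this patch) illustrating the point, using
the scale 1/255 and offset 9 that appear in the new test dataset:

    #include <cstdint>
    #include <iostream>

    int main()
    {
        // Example parameters matching the new QASYMM8 test dataset: scale = 1/255, offset = 9.
        const float   scale  = 1.0f / 255.0f;
        const int32_t offset = 9;

        // Asymmetric affine quantization: q = round(real / scale) + offset.
        const auto quantize = [&](float real) {
            return static_cast<uint8_t>(static_cast<int32_t>(real / scale + 0.5f) + offset);
        };
        // Dequantization: real = scale * (q - offset).
        const auto dequantize = [&](uint8_t q) { return scale * (static_cast<int32_t>(q) - offset); };

        std::cout << "quantize(0.0f)        = " << static_cast<int>(quantize(0.0f)) << '\n'; // 9, the offset
        std::cout << "dequantize(literal 0) = " << dequantize(0) << '\n';                     // about -0.035, not 0
        return 0;
    }
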
diff --git a/tests/validation/CL/SpaceToBatchLayer.cpp b/tests/validation/CL/SpaceToBatchLayer.cpp
index 7fca9ec..3ddbcd8 100644
--- a/tests/validation/CL/SpaceToBatchLayer.cpp
+++ b/tests/validation/CL/SpaceToBatchLayer.cpp
@@ -105,15 +105,15 @@
 
 TEST_SUITE(Float)
 TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(Small, CLSpaceToBatchLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
-                                                                                                                    DataType::F32)),
+FIXTURE_DATA_TEST_CASE(Small, CLSpaceToBatchLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallSpaceToBatchLayerDataset(),
+                                                                                                                    framework::dataset::make("DataType", DataType::F32)),
                                                                                                             framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference);
 }
-FIXTURE_DATA_TEST_CASE(Large, CLSpaceToBatchLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
-                                                                                                                  DataType::F32)),
+FIXTURE_DATA_TEST_CASE(Large, CLSpaceToBatchLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeSpaceToBatchLayerDataset(),
+                                                                                                                  framework::dataset::make("DataType", DataType::F32)),
                                                                                                           framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
 {
     // Validate output
@@ -122,15 +122,15 @@
 TEST_SUITE_END() // FP32
 
 TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(Small, CLSpaceToBatchLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
-                                                                                                                   DataType::F16)),
+FIXTURE_DATA_TEST_CASE(Small, CLSpaceToBatchLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallSpaceToBatchLayerDataset(),
+                                                                                                                   framework::dataset::make("DataType", DataType::F16)),
                                                                                                            framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference);
 }
-FIXTURE_DATA_TEST_CASE(Large, CLSpaceToBatchLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
-                                                                                                                 DataType::F16)),
+FIXTURE_DATA_TEST_CASE(Large, CLSpaceToBatchLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeSpaceToBatchLayerDataset(),
+                                                                                                                 framework::dataset::make("DataType", DataType::F16)),
                                                                                                          framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
 {
     // Validate output
@@ -139,6 +139,29 @@
 TEST_SUITE_END() // FP16
 TEST_SUITE_END() // Float
 
+template <typename T>
+using CLSpaceToBatchLayerQuantizedFixture = SpaceToBatchLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLSpaceToBatchLayer, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(Small, CLSpaceToBatchLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallSpaceToBatchLayerDataset(),
+                                                                                                                       framework::dataset::make("DataType", DataType::QASYMM8)),
+                                                                                                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                                                                                                                       framework::dataset::make("QuantizationInfo", { 1.f / 255.f, 9.f })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(Large, CLSpaceToBatchLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeSpaceToBatchLayerDataset(),
+                                                                                                                     framework::dataset::make("DataType", DataType::QASYMM8)),
+                                                                                                                     framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                                                                                                                     framework::dataset::make("QuantizationInfo", { 1.f / 255.f, 9.f })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // QASYMM8
+TEST_SUITE_END() // Quantized
 TEST_SUITE_END() // SpaceToBatch
 TEST_SUITE_END() // CL
 } // namespace validation
diff --git a/tests/validation/NEON/SpaceToBatchLayer.cpp b/tests/validation/NEON/SpaceToBatchLayer.cpp
index 1d5ef06..fc8a800 100644
--- a/tests/validation/NEON/SpaceToBatchLayer.cpp
+++ b/tests/validation/NEON/SpaceToBatchLayer.cpp
@@ -121,15 +121,15 @@
 TEST_SUITE_END() // FP32
 
 TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(Small, NESpaceToBatchLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
-                                                                                                                   DataType::F16)),
+FIXTURE_DATA_TEST_CASE(Small, NESpaceToBatchLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallSpaceToBatchLayerDataset(),
+                                                                                                                   framework::dataset::make("DataType", DataType::F16)),
                                                                                                            framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
 {
     // Validate output
     validate(Accessor(_target), _reference);
 }
-FIXTURE_DATA_TEST_CASE(Large, NESpaceToBatchLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
-                                                                                                                 DataType::F16)),
+FIXTURE_DATA_TEST_CASE(Large, NESpaceToBatchLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeSpaceToBatchLayerDataset(),
+                                                                                                                 framework::dataset::make("DataType", DataType::F16)),
                                                                                                          framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
 {
     // Validate output
@@ -138,6 +138,29 @@
 TEST_SUITE_END() // FP16
 TEST_SUITE_END() // Float
 
+template <typename T>
+using NESpaceToBatchLayerQuantizedFixture = SpaceToBatchLayerValidationQuantizedFixture<Tensor, Accessor, NESpaceToBatchLayer, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(Small, NESpaceToBatchLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallSpaceToBatchLayerDataset(),
+                                                                                                                       framework::dataset::make("DataType", DataType::QASYMM8)),
+                                                                                                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                                                                                                                       framework::dataset::make("QuantizationInfo", { 1.f / 255.f, 9.f })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(Large, NESpaceToBatchLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeSpaceToBatchLayerDataset(),
+                                                                                                                     framework::dataset::make("DataType", DataType::QASYMM8)),
+                                                                                                                     framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                                                                                                                     framework::dataset::make("QuantizationInfo", { 1.f / 255.f, 9.f })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // QASYMM8
+TEST_SUITE_END() // Quantized
 TEST_SUITE_END() // SpaceToBatch
 TEST_SUITE_END() // NEON
 } // namespace validation
diff --git a/tests/validation/fixtures/SpaceToBatchFixture.h b/tests/validation/fixtures/SpaceToBatchFixture.h
index a304162..d88ecb9 100644
--- a/tests/validation/fixtures/SpaceToBatchFixture.h
+++ b/tests/validation/fixtures/SpaceToBatchFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -36,31 +36,32 @@
 namespace validation
 {
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class SpaceToBatchLayerValidationFixture : public framework::Fixture
+class SpaceToBatchLayerValidationGenericFixture : public framework::Fixture
 {
 public:
     template <typename...>
-    void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape paddings_shape, TensorShape output_shape, DataType data_type, DataLayout data_layout)
+    void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape paddings_shape, TensorShape output_shape,
+               DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info)
     {
-        _target    = compute_target(input_shape, block_shape_shape, paddings_shape, output_shape, data_type, data_layout);
-        _reference = compute_reference(input_shape, block_shape_shape, paddings_shape, output_shape, data_type);
+        _target    = compute_target(input_shape, block_shape_shape, paddings_shape, output_shape, data_type, data_layout, quantization_info);
+        _reference = compute_reference(input_shape, block_shape_shape, paddings_shape, output_shape, data_type, quantization_info);
     }
 
 protected:
     template <typename U>
     void fill(U &&tensor, int i)
     {
-        std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
-        library->fill(tensor, distribution, i);
+        library->fill_tensor_uniform(tensor, i);
     }
+
     template <typename U>
-    void fill_pad(U &&tensor, int i)
+    void fill_pad(U &&tensor)
     {
-        std::uniform_int_distribution<> distribution(0, 0);
-        library->fill(tensor, distribution, i);
+        library->fill_tensor_value(tensor, 0);
     }
+
     TensorType compute_target(TensorShape input_shape, const TensorShape &block_shape_shape, const TensorShape &paddings_shape, TensorShape output_shape,
-                              DataType data_type, DataLayout data_layout)
+                              DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info)
     {
         if(data_layout == DataLayout::NHWC)
         {
@@ -69,10 +70,10 @@
         }
 
         // Create tensors
-        TensorType input       = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
+        TensorType input       = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info, data_layout);
         TensorType block_shape = create_tensor<TensorType>(block_shape_shape, DataType::S32);
         TensorType paddings    = create_tensor<TensorType>(paddings_shape, DataType::S32);
-        TensorType output      = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);
+        TensorType output      = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info, data_layout);
 
         // Create and configure function
         FunctionType space_to_batch;
@@ -96,7 +97,7 @@
 
         // Fill tensors
         fill(AccessorType(input), 0);
-        fill_pad(AccessorType(paddings), 0);
+        fill_pad(AccessorType(paddings));
         {
             auto      block_shape_data = AccessorType(block_shape);
             const int idx_width        = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
@@ -112,16 +113,16 @@
     }
 
     SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &block_shape_shape, const TensorShape &paddings_shape,
-                                      const TensorShape &output_shape, DataType data_type)
+                                      const TensorShape &output_shape, DataType data_type, QuantizationInfo quantization_info)
     {
         // Create reference
-        SimpleTensor<T>       input{ input_shape, data_type };
+        SimpleTensor<T>       input{ input_shape, data_type, 1, quantization_info };
         SimpleTensor<int32_t> block_shape{ block_shape_shape, DataType::S32 };
         SimpleTensor<int32_t> paddings{ paddings_shape, DataType::S32 };
 
         // Fill reference
         fill(input, 0);
-        fill_pad(paddings, 0);
+        fill_pad(paddings);
         for(unsigned int i = 0; i < block_shape_shape.x(); ++i)
         {
             block_shape[i] = input_shape[i] / output_shape[i];
@@ -134,6 +135,30 @@
     TensorType      _target{};
     SimpleTensor<T> _reference{};
 };
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class SpaceToBatchLayerValidationFixture : public SpaceToBatchLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape paddings_shape, TensorShape output_shape,
+               DataType data_type, DataLayout data_layout)
+    {
+        SpaceToBatchLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, block_shape_shape, paddings_shape, output_shape, data_type, data_layout, QuantizationInfo());
+    }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class SpaceToBatchLayerValidationQuantizedFixture : public SpaceToBatchLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape paddings_shape, TensorShape output_shape,
+               DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info)
+    {
+        SpaceToBatchLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, block_shape_shape, paddings_shape, output_shape, data_type, data_layout, quantization_info);
+    }
+};
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/validation/reference/SpaceToBatch.cpp b/tests/validation/reference/SpaceToBatch.cpp
index c635d4a..8c25bb7 100644
--- a/tests/validation/reference/SpaceToBatch.cpp
+++ b/tests/validation/reference/SpaceToBatch.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -37,7 +37,7 @@
 template <typename T>
 SimpleTensor<T> space_to_batch(const SimpleTensor<T> &src, const SimpleTensor<int32_t> &block_shape, const SimpleTensor<int32_t> &paddings, const TensorShape &dst_shape)
 {
-    SimpleTensor<T> result(dst_shape, src.data_type());
+    SimpleTensor<T> result(dst_shape, src.data_type(), 1, src.quantization_info());
 
     const auto width_out  = static_cast<int>(dst_shape[0]);
     const auto height_out = static_cast<int>(dst_shape[1]);
@@ -55,6 +55,9 @@
     const auto padding_left = paddings[0];
     const auto padding_top  = paddings[2];
 
+    // Pad value must be a logical zero (the quantization offset for quantized data types)
+    const auto pad_value = is_data_type_quantized(src.data_type()) ? src.quantization_info().uniform().offset : 0;
+
     int out_pos = 0;
     for(int outB = 0; outB < batch_out; ++outB)
     {
@@ -74,7 +77,7 @@
                     if(outH * block_height + shift_h < padding_top || outH * block_height + shift_h >= padding_top + height_in || outW * block_width + shift_w < padding_left
                        || outW * block_width + shift_w >= padding_left + width_in)
                     {
-                        result[out_pos] = 0;
+                        result[out_pos] = pad_value;
                     }
                     else
                     {
@@ -90,6 +93,7 @@
 
 template SimpleTensor<float> space_to_batch(const SimpleTensor<float> &src, const SimpleTensor<int32_t> &block_shape, const SimpleTensor<int32_t> &paddings, const TensorShape &dst_shape);
 template SimpleTensor<half> space_to_batch(const SimpleTensor<half> &src, const SimpleTensor<int32_t> &block_shape, const SimpleTensor<int32_t> &paddings, const TensorShape &dst_shape);
+template SimpleTensor<uint8_t> space_to_batch(const SimpleTensor<uint8_t> &src, const SimpleTensor<int32_t> &block_shape, const SimpleTensor<int32_t> &paddings, const TensorShape &dst_shape);
 } // namespace reference
 } // namespace validation
 } // namespace test