COMPMID-1519: Add support for 3D input/output in CLGEMMLowpOutputStage

Change-Id: I637add70310d2da4d82b236a6352af9d33be17a1
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/149706
Reviewed-by: Isabella Gottardi <isabella.gottardi@arm.com>
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
Tested-by: bsgcomp <bsgcomp@arm.com>
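
As a usage illustration (not part of this patch; tensor names and
quantization parameters are made up), the new trailing parameter lets a
caller requantize a flattened GEMM result directly into a 3D output,
matching the configure() signature added below:

    // Sketch: quantize an S32 GEMM result of shape [W, H * D] down to
    // QASYMM8 and write it out reinterpreted as a 3D tensor [W, H, D].
    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel output_stage;
    output_stage.configure(&gemm_result_s32, &bias_s32, &output_qasymm8,
                           result_fixedpoint_multiplier, result_shift,
                           result_offset_after_shift,
                           /* min */ 0, /* max */ 0,
                           /* output_3d_depth */ D);
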
diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
index 49e19e3..1206206 100644
--- a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -67,21 +67,25 @@
      * @param[in]  min                          (Optional) Min value used to saturate down the output result before converting back to QASYMM8
      * @param[in]  max                          (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
      *                                          Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+     * @param[in]  output_3d_depth              (Optional) Depth of output in 3D (Defaults to 1)
      */
-    void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift, int min = 0, int max = 0);
+    void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift,
+                   int min = 0, int max = 0, unsigned int output_3d_depth = 1);
     /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel
      *
-     * @param[in] input  Input tensor. Data type supported: S32
-     * @param[in] bias   Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
-     *                   Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
-     * @param[in] output Output tensor. Data type supported: Data type supported: QASYMM8
-     * @param[in] min    (Optional) Min value used to saturate down the output result before converting back to QASYMM8
-     * @param[in] max    (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
-     *                   Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+     * @param[in] input           Input tensor. Data type supported: S32
+     * @param[in] bias            Biases tensor. Only shared biases are supported and it can be a nullptr if the biases addition is not required.
+     *                            Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+     * @param[in] output          Output tensor. Data type supported: QASYMM8
+     * @param[in] min             (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+     * @param[in] max             (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
+     *                            Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+     * @param[in] output_3d_depth (Optional) Depth of output in 3D (Defaults to 1)
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
+    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output,
+                           int min = 0, int max = 0, unsigned int output_3d_depth = 1);
 
     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
@@ -90,7 +94,7 @@
     const ICLTensor *_input;
     const ICLTensor *_bias;
     ICLTensor       *_output;
+    bool             _reinterpret_as_3d;
 };
 } // namespace arm_compute
-
 #endif /* __ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32TOUINT8SCALEBYFIXEDPOINTKERNEL_H__ */
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
index 6ebb515..8412fa2 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
@@ -72,24 +72,24 @@
      * @param[in]  min                          (Optional) Min value used to saturate down the output result before converting back to QASYMM8
      * @param[in]  max                          (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
      *                                          Along with @p min, this value can be used to implement "rectified linear unit" activation functions
-     * @param[in]  gemm_3d_depth                (Optional)     Depth of GEMM 3D (Defaults to 1)
+     * @param[in]  output_3d_depth              (Optional) Depth of output in 3D (Defaults to 1)
      */
     void configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift,
-                   int min = 0, int max = 0, unsigned int gemm_3d_depth = 1);
+                   int min = 0, int max = 0, unsigned int output_3d_depth = 1);
     /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel
      *
-     * @param[in] input         Input tensor. Data type supported: S32
-     * @param[in] bias          Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
-     *                          Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
-     * @param[in] output        Output tensor. Data type supported: Data type supported: QASYMM8
-     * @param[in] min           (Optional) Min value used to saturate down the output result before converting back to QASYMM8
-     * @param[in] max           (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
-     *                          Along with @p min, this value can be used to implement "rectified linear unit" activation functions
-     * @param[in] gemm_3d_depth (Optional)  Depth of GEMM 3D (Defaults to 1)
+     * @param[in] input           Input tensor. Data type supported: S32
+     * @param[in] bias            Biases tensor. Only shared biases are supported and it can be a nullptr if the biases addition is not required.
+     *                            Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+     * @param[in] output          Output tensor. Data type supported: QASYMM8
+     * @param[in] min             (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+     * @param[in] max             (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
+     *                            Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+     * @param[in] output_3d_depth (Optional) Depth of output in 3D (Defaults to 1)
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0, unsigned int gemm_3d_depth = 1);
+    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0, unsigned int output_3d_depth = 1);
 
     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
@@ -117,8 +117,7 @@
     int                     _result_offset_after_shift;
     int                     _min;
     int                     _max;
-    unsigned int            _gemm_3d_depth;
+    unsigned int            _output_3d_depth;
 };
 } // namespace arm_compute
-
 #endif /* __ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32TOUINT8SCALEBYFIXEDPOINTKERNEL_H__ */
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index b4ab10c..804ff3c 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -162,7 +162,7 @@
 {
     TensorShape shape_vector_sum_row{ a.tensor_shape() };
     shape_vector_sum_row.set(Window::DimX, a.dimension(1));
-    if(a.num_dimensions() > 1)
+    if(shape_vector_sum_row.num_dimensions() > 1)
     {
         shape_vector_sum_row.remove_dimension(1);
     }
@@ -513,13 +513,17 @@
     return output_shape;
 }
 
-inline TensorShape compute_output_stage_shape(const ITensorInfo &input, unsigned int gemm_3d_depth = 1)
+inline TensorShape compute_output_stage_shape(const ITensorInfo &input, unsigned int gemm_3d_depth = 1, bool batch_size_on_z = false)
 {
     ARM_COMPUTE_ERROR_ON(input.data_layout() != DataLayout::NHWC && gemm_3d_depth > 1);
 
     TensorShape output_shape = input.tensor_shape();
     if(gemm_3d_depth > 1)
     {
+        if(batch_size_on_z)
+        {
+            output_shape.shift_right(1);
+        }
         output_shape.set(0, input.tensor_shape().x());
         output_shape.set(1, input.tensor_shape().y() / gemm_3d_depth);
         output_shape.set(2, gemm_3d_depth);
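
A worked example of the shape computation above, assuming
TensorShape::shift_right(1) prepends a unit dimension (the shapes are
illustrative, not from this patch): an S32 GEMM result of shape
[W, H*D, B] with gemm_3d_depth = D and batch_size_on_z = true becomes
[1, W, H*D, B] after the shift and [W, H, D, B] after the three set()
calls, preserving the batch dimension; with batch_size_on_z = false, a
result of shape [W, H*D] maps to [W, H, D].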