COMPMID-439 - Refactored NEQuantizationLayer and NEQuantizationLayerKernel in order to support 3D input tensors

Change-Id: I03eac2108a30bed56d40dfd52e75577a35d492e0
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/85783
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
diff --git a/arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h
index 92cd142..617a2da 100644
--- a/arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h
@@ -30,7 +30,11 @@
 {
 class ITensor;
 
-/** Interface for the quantization layer kernel. */
+/** Interface for the quantization layer kernel.
+ *
+ * @note The implementation supports only 3D input tensors; any dimensions above the third are interpreted as batches.
+ *
+ */
 class NEQuantizationLayerKernel : public INEKernel
 {
 public:
@@ -48,12 +52,12 @@
     ~NEQuantizationLayerKernel() = default;
     /** Set the input, output, min and max.
      *
-     * @param[in]  input  Source tensor. Data types supported: F32.
-     * @param[out] output Destination tensor. Data types supported: U8.
-     * @param[in]  min    Pointer to the minimum value of the input tensor.
-     * @param[in]  max    Pointer to the maximum value of the input tensor.
+     * @param[in]  input   Source tensor with at least 3 dimensions. The dimensions over the third will be interpreted as batches. Data types supported: F32.
+     * @param[out] output  Destination tensor with the same dimensions as the input. Data types supported: U8.
+     * @param[in]  min_max Pointer to the tensor with shape [2, batches] which stores the minimum and maximum values for each 3D input tensor.
+     *                     The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32.
      */
-    void configure(const ITensor *input, ITensor *output, const float *min, const float *max);
+    void configure(const ITensor *input, ITensor *output, const ITensor *min_max);
 
     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
@@ -61,8 +65,7 @@
 private:
     const ITensor *_input;
     ITensor       *_output;
-    const float   *_min;
-    const float   *_max;
+    const ITensor *_min_max;
 };
 }
 #endif /*__ARM_COMPUTE_NEQUANTIZATIONLAYERKERNEL_H__ */
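
For reviewers, a minimal usage sketch of the new configure() overload. It is not part of the patch: the shapes, the helper function name and the scheduler split dimension are illustrative assumptions, based on the usual arm_compute Tensor / NEScheduler flow; only the (input, output, min_max) signature comes from the diff above.

    // Hypothetical example, not part of this change: quantize a batch of 3D F32
    // tensors with the refactored kernel, feeding one (min, max) pair per batch.
    #include "arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h"
    #include "arm_compute/runtime/NEON/NEScheduler.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void quantization_kernel_sketch()
    {
        constexpr unsigned int batches = 4;

        Tensor input, output, min_max;
        // 3D input (W x H x C) with a batch dimension appended; output has the same shape.
        input.allocator()->init(TensorInfo(TensorShape(16U, 16U, 3U, batches), 1, DataType::F32));
        output.allocator()->init(TensorInfo(TensorShape(16U, 16U, 3U, batches), 1, DataType::U8));
        // [2, batches] tensor holding the minimum and maximum value for each 3D input.
        min_max.allocator()->init(TensorInfo(TensorShape(2U, batches), 1, DataType::F32));

        NEQuantizationLayerKernel quant_kernel;
        quant_kernel.configure(&input, &output, &min_max);

        input.allocator()->allocate();
        output.allocator()->allocate();
        min_max.allocator()->allocate();

        // ... fill input and min_max here (e.g. min/max computed per batch beforehand) ...

        // Split dimension chosen for illustration only.
        NEScheduler::get().schedule(&quant_kernel, Window::DimY);
    }

In practice the function-level NEQuantizationLayer would normally be used instead of scheduling the kernel by hand; the sketch only shows how the min_max tensor replaces the previous float pointers.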