COMPMID-439 - Refactored NEQuantizationLayer and NEDequantizationLayer in order to support 3D input tensors

Change-Id: I03eac2108a30bed56d40dfd52e75577a35d492e0
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/85783
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
diff --git a/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp b/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp
index 3bf2b35..70984f0 100644
--- a/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp
@@ -23,9 +23,9 @@
  */
 #include "arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h"
 
+#include "arm_compute/core/AccessWindowStatic.h"
 #include "arm_compute/core/Error.h"
 #include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
@@ -35,16 +35,16 @@
 using namespace arm_compute;
 
 NEDequantizationLayerKernel::NEDequantizationLayerKernel()
-    : _input(nullptr), _output(nullptr), _min(nullptr), _max(nullptr)
+    : _input(nullptr), _output(nullptr), _min_max(nullptr)
 {
 }
 
-void NEDequantizationLayerKernel::configure(const ITensor *input, ITensor *output, const float *min, const float *max)
+void NEDequantizationLayerKernel::configure(const ITensor *input, ITensor *output, const ITensor *min_max)
 {
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
     ARM_COMPUTE_ERROR_ON_NULLPTR(output);
-    ARM_COMPUTE_ERROR_ON_NULLPTR(min);
-    ARM_COMPUTE_ERROR_ON_NULLPTR(max);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(min_max);
+    ARM_COMPUTE_ERROR_ON(input->info()->num_dimensions() < 3);
 
     // Output tensor auto initialization if not yet initialized
     auto_init_if_empty(*output->info(), input->info()->tensor_shape(), 1, DataType::F32, 0);
@@ -52,17 +52,20 @@
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F32);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
 
-    _input  = input;
-    _output = output;
-    _min    = min;
-    _max    = max;
+    _input   = input;
+    _output  = output;
+    _min_max = min_max;
 
     constexpr unsigned int num_elems_processed_per_iteration = 8;
 
     // Configure window
     Window                 win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
+    AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
     AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
-    update_window_and_padding(win, AccessWindowHorizontal(input->info(), 0, num_elems_processed_per_iteration), output_access);
+    AccessWindowStatic     min_max_access(min_max->info(), 0, 0, 2, min_max->info()->dimension(1));
+
+    // Update window and padding
+    update_window_and_padding(win, input_access, output_access, min_max_access);
     output_access.set_valid_region(win, input->info()->valid_region());
 
     INEKernel::configure(win);
@@ -74,31 +77,55 @@
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
     ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
 
-    Iterator input(_input, window);
-    Iterator output(_output, window);
+    Window window_input_output(window);
+    window_input_output.collapse_if_possible(INEKernel::window(), 3);
+    window_input_output.set(3, Window::Dimension(0, 1, 1));
 
-    const float32x4_t vmin    = vdupq_n_f32(*_min);
-    const float       range   = *_max - *_min;
-    const float32x4_t scaling = vdupq_n_f32(range / 255.0f);
+    Window window_min_max;
+    window_min_max.use_tensor_dimensions(_min_max->info()->tensor_shape());
+    window_min_max.set(Window::DimX, Window::Dimension(0, 1, 1));
+    window_min_max.collapse_if_possible(INEKernel::window(), 1);
 
-    // Uniformly map values to range 8bit integers, i.e. [min, max] -> [0, 255]
-    execute_window_loop(window, [&](const Coordinates & id)
+    Iterator input(_input, window_input_output);
+    Iterator output(_output, window_input_output);
+    Iterator min_max(_min_max, window_min_max);
+
+    execute_window_loop(window_min_max, [&](const Coordinates & id_batch)
     {
-        const uint8x8_t  val_u8       = vld1_u8(reinterpret_cast<uint8_t *>(input.ptr()));
-        const uint16x8_t val_u16      = vmovl_u8(val_u8);
-        const uint32x4_t val_u32_low  = vmovl_u16(vget_low_u16(val_u16));
-        const uint32x4_t val_u32_high = vmovl_u16(vget_high_u16(val_u16));
-        float32x4_t      val_low      = vcvtq_f32_u32(val_u32_low);
-        float32x4_t      val_high     = vcvtq_f32_u32(val_u32_high);
+        // Get the min and max
+        const float min = *(reinterpret_cast<const float *>(min_max.ptr()) + 0);
+        const float max = *(reinterpret_cast<const float *>(min_max.ptr()) + 1);
 
-        // Dequantize -> (q / 255.0 * range) + min
-        val_low  = vmulq_f32(val_low, scaling);
-        val_high = vmulq_f32(val_high, scaling);
-        val_low  = vaddq_f32(val_low, vmin);
-        val_high = vaddq_f32(val_high, vmin);
+        const float32x4_t vmin    = vdupq_n_f32(min);
+        const float       range   = max - min;
+        const float32x4_t scaling = vdupq_n_f32(range / 255.0f);
 
-        const float32x4x2_t dequantized = vuzpq_f32(val_low, val_high);
-        vst2q_f32(reinterpret_cast<float *>(output.ptr()), dequantized);
+        // Uniformly map values to range 8bit integers, i.e. [min, max] -> [0, 255]
+        execute_window_loop(window_input_output, [&](const Coordinates & id)
+        {
+            // Get the input values
+            const auto input_ptr = reinterpret_cast<const uint8_t *>(input.ptr() + id_batch[1] * _input->info()->strides_in_bytes()[3]);
+
+            const uint8x8_t  val_u8       = vld1_u8(input_ptr);
+            const uint16x8_t val_u16      = vmovl_u8(val_u8);
+            const uint32x4_t val_u32_low  = vmovl_u16(vget_low_u16(val_u16));
+            const uint32x4_t val_u32_high = vmovl_u16(vget_high_u16(val_u16));
+            float32x4_t      val_low      = vcvtq_f32_u32(val_u32_low);
+            float32x4_t      val_high     = vcvtq_f32_u32(val_u32_high);
+
+            // Dequantize -> (q / 255.0 * range) + min
+            val_low  = vmulq_f32(val_low, scaling);
+            val_high = vmulq_f32(val_high, scaling);
+            val_low  = vaddq_f32(val_low, vmin);
+            val_high = vaddq_f32(val_high, vmin);
+
+            const float32x4x2_t dequantized = vuzpq_f32(val_low, val_high);
+
+            // Store the dequantized values
+            auto output_ptr = reinterpret_cast<float *>(output.ptr() + id_batch[1] * _output->info()->strides_in_bytes()[3]);
+            vst2q_f32(output_ptr, dequantized);
+        },
+        input, output);
     },
-    input, output);
-}
+    min_max);
+}
\ No newline at end of file
diff --git a/src/core/NEON/kernels/NEMinMaxLayerKernel.cpp b/src/core/NEON/kernels/NEMinMaxLayerKernel.cpp
new file mode 100644
index 0000000..5e6c48f
--- /dev/null
+++ b/src/core/NEON/kernels/NEMinMaxLayerKernel.cpp
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/NEMinMaxLayerKernel.h"
+
+#include "arm_compute/core/Coordinates.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/IAccessWindow.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+
+#include <algorithm>
+#include <arm_neon.h>
+#include <climits>
+#include <cstddef>
+
+namespace arm_compute
+{
+NEMinMaxLayerKernel::NEMinMaxLayerKernel()
+    : _input(nullptr), _output(nullptr), _mtx()
+{
+}
+
+void NEMinMaxLayerKernel::configure(const ITensor *input, ITensor *output)
+{
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
+    ARM_COMPUTE_ERROR_ON(input->info()->num_dimensions() < 3);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    TensorShape output_shape{ input->info()->tensor_shape() };
+    output_shape.set(Window::DimX, 2);
+    output_shape.remove_dimension(1);
+    output_shape.remove_dimension(1);
+
+    // Output auto initialization if not yet initialized
+    auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), input->info()->fixed_point_position());
+
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->info()->tensor_shape(), output_shape);
+
+    _input  = input;
+    _output = output;
+
+    // Configure kernel window
+    constexpr unsigned int num_elems_processed_per_iteration = 1;
+
+    Window                 win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
+    AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
+    AccessWindowHorizontal output_access(output->info(), 0, 2);
+
+    update_window_and_padding(win, input_access, output_access);
+
+    output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
+
+    INEKernel::configure(win);
+}
+
+void NEMinMaxLayerKernel::run(const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+
+    const int x_start = window.x().start();
+    const int x_end   = window.x().end();
+
+    Window window_output;
+    window_output.use_tensor_dimensions(_output->info()->tensor_shape());
+    window_output.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    // Handle X dimension manually to split into two loops
+    // First one will use vector operations, second one processes the left over pixels
+    Window window_input(window);
+    window_input.set(Window::DimX, Window::Dimension(0, 1, 1));
+    window_input.collapse_if_possible(INEKernel::window(), 3);
+    window_input.set(3, Window::Dimension(0, 1, 1));
+
+    Iterator input(_input, window_input);
+    Iterator output(_output, window_output);
+
+    execute_window_loop(window_output, [&](const Coordinates & id_batch)
+    {
+        float32x2_t carry_min = vdup_n_f32(std::numeric_limits<float>::max());
+        float32x2_t carry_max = vdup_n_f32(std::numeric_limits<float>::lowest());
+
+        float carry_min_scalar = std::numeric_limits<float>::max();
+        float carry_max_scalar = std::numeric_limits<float>::lowest();
+
+        execute_window_loop(window_input, [&](const Coordinates & id)
+        {
+            int        x      = x_start;
+            const auto in_ptr = reinterpret_cast<const float *const>(input.ptr() + id_batch[1] * _input->info()->strides_in_bytes()[3]);
+
+            // Vector loop
+            for(; x <= x_end - 8; x += 8)
+            {
+                const float32x4x2_t pixels   = vld2q_f32(in_ptr + x);
+                const float32x4_t   tmp_min1 = vminq_f32(pixels.val[0], pixels.val[1]);
+                const float32x4_t   tmp_max1 = vmaxq_f32(pixels.val[0], pixels.val[1]);
+                const float32x2_t   tmp_min2 = vmin_f32(vget_high_f32(tmp_min1), vget_low_f32(tmp_min1));
+                const float32x2_t   tmp_max2 = vmax_f32(vget_high_f32(tmp_max1), vget_low_f32(tmp_max1));
+                carry_min                    = vmin_f32(tmp_min2, carry_min);
+                carry_max                    = vmax_f32(tmp_max2, carry_max);
+            }
+
+            // Process leftover pixels
+            for(; x < x_end; ++x)
+            {
+                const float pixel = in_ptr[x];
+                carry_min_scalar  = std::min(pixel, carry_min_scalar);
+                carry_max_scalar  = std::max(pixel, carry_max_scalar);
+            }
+        },
+        input);
+
+        // Reduce result
+        carry_min = vpmin_f32(carry_min, carry_min);
+        carry_max = vpmax_f32(carry_max, carry_max);
+        carry_min = vpmin_f32(carry_min, carry_min);
+        carry_max = vpmax_f32(carry_max, carry_max);
+
+        // Extract max/min values
+        const float min_i = std::min(vget_lane_f32(carry_min, 0), carry_min_scalar);
+        const float max_i = std::max(vget_lane_f32(carry_max, 0), carry_max_scalar);
+
+        auto out_ptr = reinterpret_cast<float *const>(output.ptr());
+
+        // Perform reduction of local min/max values
+        update_min_max(out_ptr, min_i, max_i);
+    },
+    output);
+}
+
+void NEMinMaxLayerKernel::reset()
+{
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+
+    // Seed lane 0 (min) with +FLT_MAX and lane 1 (max) with -FLT_MAX so any real value updates both.
+    // Note: lowest() is required here; min() is the smallest POSITIVE float and would make
+    // update_min_max() report a wrong (positive) maximum for all-negative inputs.
+    float32x2_t reset_values = vdup_n_f32(0.0f);
+    reset_values             = vset_lane_f32(std::numeric_limits<float>::max(), reset_values, 0);
+    reset_values             = vset_lane_f32(std::numeric_limits<float>::lowest(), reset_values, 1);
+
+    Window window_output;
+    window_output.use_tensor_dimensions(_output->info()->tensor_shape());
+    window_output.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    Iterator output(_output, window_output);
+
+    execute_window_loop(window_output, [&](const Coordinates & id)
+    {
+        vst1_f32(reinterpret_cast<float *const>(output.ptr()), reset_values);
+    },
+    output);
+}
+
+void NEMinMaxLayerKernel::update_min_max(float *out_ptr, float min, float max)
+{
+    std::lock_guard<std::mutex> lock(_mtx);
+
+    const float32x2_t old_min = vld1_dup_f32(out_ptr);
+    const float32x2_t old_max = vld1_dup_f32(out_ptr + 1);
+    const float32x2_t new_min = vmin_f32(vdup_n_f32(min), old_min);
+    const float32x2_t new_max = vmax_f32(vdup_n_f32(max), old_max);
+
+    vst1_f32(out_ptr, vzip_f32(new_min, new_max).val[0]);
+}
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/core/NEON/kernels/NEQuantizationLayerKernel.cpp b/src/core/NEON/kernels/NEQuantizationLayerKernel.cpp
index a596d83..bff79f0 100644
--- a/src/core/NEON/kernels/NEQuantizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEQuantizationLayerKernel.cpp
@@ -23,9 +23,9 @@
  */
 #include "arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h"
 
+#include "arm_compute/core/AccessWindowStatic.h"
 #include "arm_compute/core/Error.h"
 #include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
@@ -35,14 +35,15 @@
 using namespace arm_compute;
 
 NEQuantizationLayerKernel::NEQuantizationLayerKernel()
-    : _input(nullptr), _output(nullptr), _min(nullptr), _max(nullptr)
+    : _input(nullptr), _output(nullptr), _min_max(nullptr)
 {
 }
 
-void NEQuantizationLayerKernel::configure(const ITensor *input, ITensor *output, const float *min, const float *max)
+void NEQuantizationLayerKernel::configure(const ITensor *input, ITensor *output, const ITensor *min_max)
 {
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
     ARM_COMPUTE_ERROR_ON_NULLPTR(output);
+    ARM_COMPUTE_ERROR_ON(input->info()->num_dimensions() < 3);
 
     // Output tensor auto initialization if not yet initialized
     auto_init_if_empty(*output->info(), input->info()->tensor_shape(), 1, DataType::U8, 0);
@@ -50,17 +51,20 @@
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
 
-    _input  = input;
-    _output = output;
-    _min    = min;
-    _max    = max;
+    _input   = input;
+    _output  = output;
+    _min_max = min_max;
 
     constexpr unsigned int num_elems_processed_per_iteration = 8;
 
     // Configure window
     Window                 win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
+    AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
     AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
-    update_window_and_padding(win, AccessWindowHorizontal(input->info(), 0, num_elems_processed_per_iteration), output_access);
+    AccessWindowStatic     min_max_access(min_max->info(), 0, 0, 2, min_max->info()->dimension(1));
+
+    // Update window and padding
+    update_window_and_padding(win, input_access, output_access, min_max_access);
     output_access.set_valid_region(win, input->info()->valid_region());
 
     INEKernel::configure(win);
@@ -72,36 +76,67 @@
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
     ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
 
-    Iterator input(_input, window);
-    Iterator output(_output, window);
+    Window window_input_output(window);
+    window_input_output.collapse_if_possible(INEKernel::window(), 3);
+    window_input_output.set(3, Window::Dimension(0, 1, 1));
 
-    const float32x4_t vmin             = vdupq_n_f32(*_min);
-    const float32x4_t inv_range        = vdupq_n_f32(1.0f / (*_max - *_min));
-    const float32x4_t quantization_max = vdupq_n_f32(255.0f);
-    const float32x4_t quantization_mul = vdupq_n_f32(256.0f);
+    Window window_min_max;
+    window_min_max.use_tensor_dimensions(_min_max->info()->tensor_shape());
+    window_min_max.set(Window::DimX, Window::Dimension(0, 1, 1));
+    window_min_max.collapse_if_possible(INEKernel::window(), 1);
 
-    // Uniformly map values to range 8bit integers, i.e. [min, max] -> [0, 255]
-    execute_window_loop(window, [&](const Coordinates & id)
+    Iterator input(_input, window_input_output);
+    Iterator output(_output, window_input_output);
+    Iterator min_max(_min_max, window_min_max);
+
+    execute_window_loop(window_min_max, [&](const Coordinates & id_batch)
     {
-        float32x4x2_t val = vld2q_f32(reinterpret_cast<const float *>(input.ptr()));
-        // Map float values to range [0.0, 1.0]
-        val.val[0] = vsubq_f32(val.val[0], vmin);
-        val.val[1] = vsubq_f32(val.val[1], vmin);
-        val.val[0] = vmulq_f32(val.val[0], inv_range);
-        val.val[1] = vmulq_f32(val.val[1], inv_range);
+        // Get the min and max
+        float min = *(reinterpret_cast<const float *>(min_max.ptr()) + 0);
+        float max = *(reinterpret_cast<const float *>(min_max.ptr()) + 1);
 
-        // Quantize
-        val.val[0] = vmulq_f32(val.val[0], quantization_mul);
-        val.val[1] = vmulq_f32(val.val[1], quantization_mul);
-        val.val[0] = vminq_f32(val.val[0], quantization_max);
-        val.val[1] = vminq_f32(val.val[1], quantization_max);
+        // Saturate the result if min = max
+        if(min == max)
+        {
+            min = 0.0f;
+            max = 1.0f;
+        }
 
-        const uint32x4_t   val_u32_low  = vcvtq_u32_f32(val.val[0]);
-        const uint32x4_t   val_u32_high = vcvtq_u32_f32(val.val[1]);
-        const uint16x4x2_t val_u16      = vzip_u16(vmovn_u32(val_u32_low), vmovn_u32(val_u32_high));
+        const float32x4_t vmin             = vdupq_n_f32(min);
+        const float32x4_t inv_range        = vdupq_n_f32(1.0f / (max - min));
+        const float32x4_t quantization_max = vdupq_n_f32(255.0f);
+        const float32x4_t quantization_mul = vdupq_n_f32(256.0f);
 
-        const uint8x8_t quantized = vmovn_u16(vcombine_u16(val_u16.val[0], val_u16.val[1]));
-        vst1_u8(reinterpret_cast<uint8_t *>(output.ptr()), quantized);
+        // Uniformly map values to range 8bit integers, i.e. [min, max] -> [0, 255]
+        execute_window_loop(window_input_output, [&](const Coordinates & id)
+        {
+            // Get the input values
+            const auto    input_ptr = reinterpret_cast<const float *>(input.ptr() + id_batch[1] * _input->info()->strides_in_bytes()[3]);
+            float32x4x2_t val       = vld2q_f32(input_ptr);
+
+            // Map float values to range [0.0, 1.0]
+            val.val[0] = vsubq_f32(val.val[0], vmin);
+            val.val[1] = vsubq_f32(val.val[1], vmin);
+            val.val[0] = vmulq_f32(val.val[0], inv_range);
+            val.val[1] = vmulq_f32(val.val[1], inv_range);
+
+            // Quantize
+            val.val[0] = vmulq_f32(val.val[0], quantization_mul);
+            val.val[1] = vmulq_f32(val.val[1], quantization_mul);
+            val.val[0] = vminq_f32(val.val[0], quantization_max);
+            val.val[1] = vminq_f32(val.val[1], quantization_max);
+
+            const uint32x4_t   val_u32_low  = vcvtq_u32_f32(val.val[0]);
+            const uint32x4_t   val_u32_high = vcvtq_u32_f32(val.val[1]);
+            const uint16x4x2_t val_u16      = vzip_u16(vmovn_u32(val_u32_low), vmovn_u32(val_u32_high));
+
+            const uint8x8_t quantized = vmovn_u16(vcombine_u16(val_u16.val[0], val_u16.val[1]));
+
+            // Store the quantized values
+            auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr() + id_batch[1] * _output->info()->strides_in_bytes()[3]);
+            vst1_u8(output_ptr, quantized);
+        },
+        input, output);
     },
-    input, output);
+    min_max);
 }