COMPMID-1751: Remove output_3d_depth from NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint

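In NEGEMMLowpOffsetContributionKernel, the kernel now detects from the tensor
shapes whether mm_result is a 3D reinterpretation of the output (its y
dimension no longer matches the length of vector_sum_row) and folds the extra
dimension back when indexing the row sums, so the output depth no longer needs
to be passed in explicitly.

A minimal sketch of the index mapping used by the new templated
run_offset_contribution<is_gemm3d> (illustrative only, not part of the patch):

    // Output reinterpreted as (width, height, depth, batches): the execution
    // window's Z dimension spans depth * batches, so the batch and the row of
    // the original 2D GEMM output are recovered as:
    const int batch_id = id.z() / depth_input;
    const int row      = id.y() + (id.z() % depth_input) * height_input;
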
Change-Id: I1d5bc4d24059917f9ddef0873dd3043b1f2320a8
diff --git a/src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.cpp
index af84d02..33a5b4a 100644
--- a/src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.cpp
@@ -62,16 +62,24 @@
     if(b_offset != 0)
     {
         ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);
-        ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_row->dimension(0) != mm_result->dimension(1));
+
+        // Check if input is a 3D reinterpretation
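+        // (if so, vector_sum_row covers height * depth rows rather than just height)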
+        const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();
+
+        // Validate input
+        ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));
+        ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != mm_result->dimension(1));
 
         TensorShape output_shape = mm_result->tensor_shape();
         if(output_shape.num_dimensions() > 1)
         {
+            const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;
+
             TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
             vector_sum_row_shape.collapse_from(1);
-            output_shape.collapse_from(2);
+            output_shape.collapse_from(output_batch_idx);
 
-            ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[2],
+            ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
                                             "mm_result tensor must have the same number of batches of output tensor");
 
             if(a_offset != 0)
@@ -117,6 +125,217 @@
     Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
     return std::make_pair(err, win);
 }
+
+template <bool is_gemm3d>
+void run_offset_contribution(const Window &window,
+                             ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row,
+                             int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col)
+{
+    Window collapsed_window = window.collapse_if_possible(window, Window::DimZ);
+
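+    // For a 3D-reinterpreted output, the window's Z dimension spans depth * batches;
+    // height_input and depth_input are used below to recover the batch id and the 2D row.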
+    const int height_input = is_gemm3d ? mm_result->info()->dimension(1) : 0;
+    const int depth_input  = is_gemm3d ? mm_result->info()->dimension(2) : 1;
+
+    if((a_offset != 0) && (b_offset != 0) && (vector_sum_col != nullptr) && (vector_sum_row != nullptr)) // true, true
+    {
+        // Set window for vector_sum_col
+        Window win_vector_sum_col(collapsed_window);
+        win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
+        win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));
+
+        // Set window for vector_sum_row
+        Window win_vector_sum_row(collapsed_window);
+        win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
+        win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
+        win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0));
+
+        Iterator vector_sum_col_it(vector_sum_col, win_vector_sum_col);
+        Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row);
+        Iterator mm_result_it(mm_result, window);
+
+        const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();
+
+        // Offset in case vector_sum_col is batched
+        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;
+
+        execute_window_loop(collapsed_window, [&](const Coordinates & id)
+        {
+            const int  batch_id           = id.z() / depth_input;
+            const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
+
+            // Compute the leftover term due to a_offset.
+            int32x4x4_t a_offset_term_s32 =
+            {
+                {
+                    vld1q_s32(vector_sum_col_ptr + 0),
+                    vld1q_s32(vector_sum_col_ptr + 4),
+                    vld1q_s32(vector_sum_col_ptr + 8),
+                    vld1q_s32(vector_sum_col_ptr + 12)
+                }
+            };
+
+            a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset);
+            a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset);
+            a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset);
+            a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset);
+
+            // Compute the leftover term due to b_offset.
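+            // id.y() + (id.z() % depth_input) * height_input maps the (possibly 3D) window
+            // coordinate back to the row of the 2D GEMM output within the current batch.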
+            int32x4_t b_offset_term_s32 = vld1q_dup_s32(reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y) + id.y()
+                                                        + (id.z() % depth_input) * height_input);
+            b_offset_term_s32 = vmulq_n_s32(b_offset_term_s32, b_offset);
+
+            // Add a_offset_term_s32 and b_offset_term_s32
+            int32x4x4_t offset_term_s32 =
+            {
+                {
+                    vdupq_n_s32(k_offset),
+                    vdupq_n_s32(k_offset),
+                    vdupq_n_s32(k_offset),
+                    vdupq_n_s32(k_offset)
+                }
+            };
+
+            offset_term_s32.val[0] = vaddq_s32(offset_term_s32.val[0], vaddq_s32(a_offset_term_s32.val[0], b_offset_term_s32));
+            offset_term_s32.val[1] = vaddq_s32(offset_term_s32.val[1], vaddq_s32(a_offset_term_s32.val[1], b_offset_term_s32));
+            offset_term_s32.val[2] = vaddq_s32(offset_term_s32.val[2], vaddq_s32(a_offset_term_s32.val[2], b_offset_term_s32));
+            offset_term_s32.val[3] = vaddq_s32(offset_term_s32.val[3], vaddq_s32(a_offset_term_s32.val[3], b_offset_term_s32));
+
+            int32x4x4_t in_s32 =
+            {
+                {
+                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + 0),
+                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + 4),
+                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + 8),
+                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + 12)
+                }
+            };
+
+            // Add the offset terms to GEMM's result
+            in_s32.val[0] = vaddq_s32(in_s32.val[0], offset_term_s32.val[0]);
+            in_s32.val[1] = vaddq_s32(in_s32.val[1], offset_term_s32.val[1]);
+            in_s32.val[2] = vaddq_s32(in_s32.val[2], offset_term_s32.val[2]);
+            in_s32.val[3] = vaddq_s32(in_s32.val[3], offset_term_s32.val[3]);
+
+            // Store the result with the offset contribution
+            vst1q_s32(reinterpret_cast<int32_t *>(mm_result_it.ptr()) + 0, in_s32.val[0]);
+            vst1q_s32(reinterpret_cast<int32_t *>(mm_result_it.ptr()) + 4, in_s32.val[1]);
+            vst1q_s32(reinterpret_cast<int32_t *>(mm_result_it.ptr()) + 8, in_s32.val[2]);
+            vst1q_s32(reinterpret_cast<int32_t *>(mm_result_it.ptr()) + 12, in_s32.val[3]);
+        },
+        vector_sum_col_it, vector_sum_row_it, mm_result_it);
+    }
+    else if((a_offset == 0) && (b_offset != 0) && (vector_sum_row != nullptr)) // false, true
+    {
+        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);
+
+        // Set window for vector_sum_row
+        Window win_vector_sum_row(collapsed_window);
+        win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
+        win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
+        win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0));
+
+        Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row);
+        Iterator mm_result_it(mm_result, window);
+
+        const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();
+
+        execute_window_loop(window, [&](const Coordinates & id)
+        {
+            const int batch_id = id.z() / depth_input;
+
+            // Compute the leftover term due to b_offset.
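+            // Same row/batch index mapping as in the combined-offsets case above.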
+            int32x4_t b_offset_term_s32 = vld1q_dup_s32(reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y) + id.y()
+                                                        + (id.z() % depth_input) * height_input);
+            b_offset_term_s32 = vmulq_n_s32(b_offset_term_s32, b_offset);
+
+            int32x4x4_t in_s32 =
+            {
+                {
+                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + 0),
+                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + 4),
+                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + 8),
+                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + 12)
+                }
+            };
+
+            // Add the offset terms to GEMM's result
+            in_s32.val[0] = vaddq_s32(in_s32.val[0], b_offset_term_s32);
+            in_s32.val[1] = vaddq_s32(in_s32.val[1], b_offset_term_s32);
+            in_s32.val[2] = vaddq_s32(in_s32.val[2], b_offset_term_s32);
+            in_s32.val[3] = vaddq_s32(in_s32.val[3], b_offset_term_s32);
+
+            // Store the result with the offset contribution
+            vst1q_s32(reinterpret_cast<int32_t *>(mm_result_it.ptr()) + 0, in_s32.val[0]);
+            vst1q_s32(reinterpret_cast<int32_t *>(mm_result_it.ptr()) + 4, in_s32.val[1]);
+            vst1q_s32(reinterpret_cast<int32_t *>(mm_result_it.ptr()) + 8, in_s32.val[2]);
+            vst1q_s32(reinterpret_cast<int32_t *>(mm_result_it.ptr()) + 12, in_s32.val[3]);
+        },
+        vector_sum_row_it, mm_result_it);
+    }
+    else if((a_offset != 0) && (b_offset == 0) && (vector_sum_col != nullptr)) // true, false
+    {
+        // Set window for vector_sum_col
+        Window win_vector_sum_col(collapsed_window);
+        win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
+        win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));
+
+        Iterator vector_sum_col_it(vector_sum_col, win_vector_sum_col);
+        Iterator mm_result_it(mm_result, window);
+
+        // Offset in case vector_sum_col is batched
+        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;
+
+        execute_window_loop(window, [&](const Coordinates & id)
+        {
+            const int  batch_id           = id.z() / depth_input;
+            const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
+
+            // Compute the leftover term due to a_offset.
+            int32x4x4_t a_offset_term_s32 =
+            {
+                {
+                    vld1q_s32(vector_sum_col_ptr + 0),
+                    vld1q_s32(vector_sum_col_ptr + 4),
+                    vld1q_s32(vector_sum_col_ptr + 8),
+                    vld1q_s32(vector_sum_col_ptr + 12)
+                }
+            };
+
+            a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset);
+            a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset);
+            a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset);
+            a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset);
+
+            int32x4x4_t in_s32 =
+            {
+                {
+                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + 0),
+                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + 4),
+                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + 8),
+                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + 12)
+                }
+            };
+
+            // Add the offset terms to GEMM's result
+            in_s32.val[0] = vaddq_s32(in_s32.val[0], a_offset_term_s32.val[0]);
+            in_s32.val[1] = vaddq_s32(in_s32.val[1], a_offset_term_s32.val[1]);
+            in_s32.val[2] = vaddq_s32(in_s32.val[2], a_offset_term_s32.val[2]);
+            in_s32.val[3] = vaddq_s32(in_s32.val[3], a_offset_term_s32.val[3]);
+
+            // Store the result with the offset contribution
+            vst1q_s32(reinterpret_cast<int32_t *>(mm_result_it.ptr()) + 0, in_s32.val[0]);
+            vst1q_s32(reinterpret_cast<int32_t *>(mm_result_it.ptr()) + 4, in_s32.val[1]);
+            vst1q_s32(reinterpret_cast<int32_t *>(mm_result_it.ptr()) + 8, in_s32.val[2]);
+            vst1q_s32(reinterpret_cast<int32_t *>(mm_result_it.ptr()) + 12, in_s32.val[3]);
+        },
+        vector_sum_col_it, mm_result_it);
+    }
+    else // false, false
+    {
+        // No offset contribution from matrix A and matrix B
+        return;
+    }
+}
 } // namespace
 
 NEGEMMLowpOffsetContributionKernel::NEGEMMLowpOffsetContributionKernel()
@@ -177,193 +396,17 @@
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
     ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
 
-    Window collapsed_window = window.collapse_if_possible(IKernel::window(), Window::DimZ);
+    // Check if input is a 3D reinterpretation
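+    // (same heuristic as in validate_arguments(): the row sums then cover height * depth rows)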
+    const bool reinterpret_as_3d = _vector_sum_row != nullptr
+                                   && _mm_result->info()->num_dimensions() > 1
+                                   && _mm_result->info()->tensor_shape().y() != _vector_sum_row->info()->tensor_shape().x();
 
-    if(_a_offset != 0 && _b_offset != 0) // true, true
+    if(reinterpret_as_3d)
     {
-        // Set window for vector_sum_col
-        Window win_vector_sum_col(collapsed_window);
-        win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
-        if(!_slide_vector_sum_col)
-        {
-            win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));
-        }
-
-        // Set window for vector_sum_row
-        Window win_vector_sum_row(collapsed_window);
-        win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
-        win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
-        win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0));
-
-        Iterator vector_sum_col(_vector_sum_col, win_vector_sum_col);
-        Iterator vector_sum_row(_vector_sum_row, win_vector_sum_row);
-        Iterator mm_result(_mm_result, window);
-
-        const size_t sum_row_stride_y = _vector_sum_row->info()->strides_in_bytes().y();
-
-        execute_window_loop(collapsed_window, [&](const Coordinates & id)
-        {
-            // Compute the leftover term due to a_offset.
-            int32x4x4_t a_offset_term_s32 =
-            {
-                {
-                    vld1q_s32(reinterpret_cast<const int32_t *>(vector_sum_col.ptr()) + 0),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(vector_sum_col.ptr()) + 4),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(vector_sum_col.ptr()) + 8),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(vector_sum_col.ptr()) + 12)
-                }
-            };
-
-            a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], _a_offset);
-            a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], _a_offset);
-            a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], _a_offset);
-            a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], _a_offset);
-
-            // Compute the leftover term due to b_offset.
-            int32x4_t b_offset_term_s32 = vld1q_dup_s32(reinterpret_cast<const int32_t *>(vector_sum_row.ptr() + id.z() * sum_row_stride_y) + id.y());
-            b_offset_term_s32           = vmulq_n_s32(b_offset_term_s32, _b_offset);
-
-            // Add a_offset_term_s32 and b_offset_term_s32
-            int32x4x4_t offset_term_s32 =
-            {
-                {
-                    vdupq_n_s32(_k_offset),
-                    vdupq_n_s32(_k_offset),
-                    vdupq_n_s32(_k_offset),
-                    vdupq_n_s32(_k_offset)
-                }
-            };
-
-            offset_term_s32.val[0] = vaddq_s32(offset_term_s32.val[0], vaddq_s32(a_offset_term_s32.val[0], b_offset_term_s32));
-            offset_term_s32.val[1] = vaddq_s32(offset_term_s32.val[1], vaddq_s32(a_offset_term_s32.val[1], b_offset_term_s32));
-            offset_term_s32.val[2] = vaddq_s32(offset_term_s32.val[2], vaddq_s32(a_offset_term_s32.val[2], b_offset_term_s32));
-            offset_term_s32.val[3] = vaddq_s32(offset_term_s32.val[3], vaddq_s32(a_offset_term_s32.val[3], b_offset_term_s32));
-
-            int32x4x4_t in_s32 =
-            {
-                {
-                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 0),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 4),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 8),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 12)
-                }
-            };
-
-            // Add the offset terms to GEMM's result
-            in_s32.val[0] = vaddq_s32(in_s32.val[0], offset_term_s32.val[0]);
-            in_s32.val[1] = vaddq_s32(in_s32.val[1], offset_term_s32.val[1]);
-            in_s32.val[2] = vaddq_s32(in_s32.val[2], offset_term_s32.val[2]);
-            in_s32.val[3] = vaddq_s32(in_s32.val[3], offset_term_s32.val[3]);
-
-            // Store the result with the offset contribution
-            vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 0, in_s32.val[0]);
-            vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 4, in_s32.val[1]);
-            vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 8, in_s32.val[2]);
-            vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 12, in_s32.val[3]);
-        },
-        vector_sum_col, vector_sum_row, mm_result);
+        run_offset_contribution<true>(window, _mm_result, _vector_sum_col, _vector_sum_row, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col);
     }
-    else if((_a_offset == 0) && (_b_offset != 0)) // false, true
+    else
     {
-        // Set window for vector_sum_row
-        Window win_vector_sum_row(collapsed_window);
-        win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
-        win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
-        win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0));
-
-        Iterator vector_sum_row(_vector_sum_row, win_vector_sum_row);
-        Iterator mm_result(_mm_result, window);
-
-        const size_t sum_row_stride_y = _vector_sum_row->info()->strides_in_bytes().y();
-
-        execute_window_loop(window, [&](const Coordinates & id)
-        {
-            // Compute the leftover term due to b_offset.
-            int32x4_t b_offset_term_s32 = vld1q_dup_s32(reinterpret_cast<const int32_t *>(vector_sum_row.ptr() + id.z() * sum_row_stride_y) + id.y());
-            b_offset_term_s32           = vmulq_n_s32(b_offset_term_s32, _b_offset);
-
-            int32x4x4_t in_s32 =
-            {
-                {
-                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 0),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 4),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 8),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 12)
-                }
-            };
-
-            // Add the offset terms to GEMM's result
-            in_s32.val[0] = vaddq_s32(in_s32.val[0], b_offset_term_s32);
-            in_s32.val[1] = vaddq_s32(in_s32.val[1], b_offset_term_s32);
-            in_s32.val[2] = vaddq_s32(in_s32.val[2], b_offset_term_s32);
-            in_s32.val[3] = vaddq_s32(in_s32.val[3], b_offset_term_s32);
-
-            // Store the result with the offset contribution
-            vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 0, in_s32.val[0]);
-            vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 4, in_s32.val[1]);
-            vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 8, in_s32.val[2]);
-            vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 12, in_s32.val[3]);
-        },
-        vector_sum_row, mm_result);
+        run_offset_contribution<false>(window, _mm_result, _vector_sum_col, _vector_sum_row, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col);
     }
-    else if((_a_offset != 0) && (_b_offset == 0)) // true, false
-    {
-        // Set window for vector_sum_col
-        Window win_vector_sum_col(collapsed_window);
-        win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
-        if(!_slide_vector_sum_col)
-        {
-            win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));
-        }
-
-        Iterator vector_sum_col(_vector_sum_col, win_vector_sum_col);
-        Iterator mm_result(_mm_result, window);
-
-        execute_window_loop(window, [&](const Coordinates & id)
-        {
-            // Compute the leftover term due to a_offset.
-            int32x4x4_t a_offset_term_s32 =
-            {
-                {
-                    vld1q_s32(reinterpret_cast<const int32_t *>(vector_sum_col.ptr()) + 0),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(vector_sum_col.ptr()) + 4),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(vector_sum_col.ptr()) + 8),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(vector_sum_col.ptr()) + 12)
-                }
-            };
-
-            a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], _a_offset);
-            a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], _a_offset);
-            a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], _a_offset);
-            a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], _a_offset);
-
-            int32x4x4_t in_s32 =
-            {
-                {
-                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 0),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 4),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 8),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 12)
-                }
-            };
-
-            // Add the offset terms to GEMM's result
-            in_s32.val[0] = vaddq_s32(in_s32.val[0], a_offset_term_s32.val[0]);
-            in_s32.val[1] = vaddq_s32(in_s32.val[1], a_offset_term_s32.val[1]);
-            in_s32.val[2] = vaddq_s32(in_s32.val[2], a_offset_term_s32.val[2]);
-            in_s32.val[3] = vaddq_s32(in_s32.val[3], a_offset_term_s32.val[3]);
-
-            // Store the result with the offset contribution
-            vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 0, in_s32.val[0]);
-            vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 4, in_s32.val[1]);
-            vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 8, in_s32.val[2]);
-            vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 12, in_s32.val[3]);
-        },
-        vector_sum_col, mm_result);
-    }
-    else // false, false
-    {
-        // No offset contribution from matrix A and matrix B
-        return;
-    }
-}
+}
\ No newline at end of file