COMPMID-959: Add FP32 support to GLES GEMMConvolution

The following kernels were supposed to support FP32, but did not because
of bugs and missing shaders:

- GCCol2Im
- GCIm2Col
- GCWeightsReshape
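
A minimal usage sketch (illustrative only, not part of this change) of how the
FP32 path through these kernels is exercised via GCConvolutionLayer; the
shapes, the helper name and the fill step are placeholders:

    #include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h"
    #include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h"
    #include "arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h"

    using namespace arm_compute;

    // Hypothetical helper for illustration only.
    void run_fp32_convolution()
    {
        GCScheduler::get().default_init(); // set up the GLES compute context

        // NCHW shapes: 8x8 input with 16 channels, 3x3 kernels, 32 output maps
        GCTensor src, weights, biases, dst;
        src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 16U), 1, DataType::F32));
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U, 32U), 1, DataType::F32));
        biases.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(8U, 8U, 32U), 1, DataType::F32));

        GCConvolutionLayer conv;
        conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1)); // stride 1, pad 1

        src.allocator()->allocate();
        weights.allocator()->allocate();
        biases.allocator()->allocate();
        dst.allocator()->allocate();

        // ... fill src, weights and biases with FP32 data ...
        conv.run();
    }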

Change-Id: Ie6ea464db0612757c71c3d40874e7bb0d60f170a
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/127572
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
diff --git a/src/core/GLES_COMPUTE/cs_shaders/convolution_layer.cs b/src/core/GLES_COMPUTE/cs_shaders/convolution_layer.cs
index ad3f14d..40b5a2b 100644
--- a/src/core/GLES_COMPUTE/cs_shaders/convolution_layer.cs
+++ b/src/core/GLES_COMPUTE/cs_shaders/convolution_layer.cs
@@ -62,7 +62,56 @@
     uint total_filters;
 };
 
-#if defined(DATA_TYPE_FP16)
+#if defined(DATA_TYPE_FP32)
+
+TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
+TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
+#ifdef HAS_BIAS
+TENSOR_DECLARATION(3, biasesBuffer, float, biases_ptr, biases_shift, 2, readonly);
+#endif /* HAS_BIAS */
+
+void main()
+{
+    Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
+    ImageIterator    dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);
+#ifdef HAS_BIAS
+    VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
+#endif /* HAS_BIAS */
+
+    bool is_last_thread = (((int(gl_GlobalInvocationID.x)) == (int(gl_NumWorkGroups.x * gl_WorkGroupSize.x) - 1)) && ((int(gl_GlobalInvocationID.y)) == (int(gl_NumWorkGroups.y * gl_WorkGroupSize.y) - 1))
+                           && ((int(gl_GlobalInvocationID.z)) == (int(gl_NumWorkGroups.z * gl_WorkGroupSize.z) - 1)));
+    TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, ((uint(gl_GlobalInvocationID.x) * uint(dst_attrs.stride_y)) + (uint(gl_GlobalInvocationID.y) * uint(width) * uint(dst_attrs.stride_y)) + (uint(
+                                                    gl_GlobalInvocationID.z)
+                                                * uint(width) * uint(height) * uint(dst_attrs.stride_y))));
+    // Linearize convolution elements
+    if(is_last_thread)
+    {
+        for(uint i = 0u; i < uint(total_filters); ++i)
+        {
+            float s0 = LOAD_CURRENT_ITEM(src_ptr, src_iter);
+            STORE_CURRENT_ITEM(dst_ptr, dst_iter, s0);
+            TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, (depth * src_attrs.stride_z));
+#ifdef HAS_BIAS
+            float b = LOAD_CURRENT_ITEM(biases_ptr, biases_iter);
+            STORE(dst_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(dst_iter, dst_attrs.stride_y), b);
+            TENSOR_ITERATOR_ADVANCE_IN_BYTES(biases_iter, biases_attrs.stride_x);
+#endif /* HAS_BIAS */
+            TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, dst_attrs.stride_x);
+        }
+    }
+    else
+    {
+        for(uint i = 0u; i < uint(total_filters); ++i)
+        {
+            float s0 = LOAD_CURRENT_ITEM(src_ptr, src_iter);
+            STORE_CURRENT_ITEM(dst_ptr, dst_iter, s0);
+            TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, (depth * src_attrs.stride_z));
+            TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, dst_attrs.stride_x);
+        }
+    }
+}
+
+#elif defined(DATA_TYPE_FP16)
 
 TENSOR_DECLARATION(1, srcBuffer, uint, src_ptr, src_shift, 2, readonly);
 TENSOR_DECLARATION(2, dstBuffer, uint, dst_ptr, dst_shift, 2, writeonly);
@@ -72,10 +121,10 @@
 
 void main()
 {
-    Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
-    ImageIterator    dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);
+    Tensor3DIterator src_iter    = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
+    ImageIterator    dst_iter    = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);
 #ifdef HAS_BIAS
-    VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
+    VectorIterator   biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
 #endif /* BIAS */
 
     bool is_last_thread = (((int(gl_GlobalInvocationID.x)) == (int(gl_NumWorkGroups.x * gl_WorkGroupSize.x) - 1)) && ((int(gl_GlobalInvocationID.y)) == (int(gl_NumWorkGroups.y * gl_WorkGroupSize.y) - 1))
@@ -151,7 +200,7 @@
     }
 }
 
-#endif /* DATA_TYPE_FP16 */
+#endif /* DATA_TYPE_FP32 */
 #endif // RESHAPE_TO_COLUMNS
 
 #ifdef IM2COL_GENERIC
@@ -193,30 +242,31 @@
     Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(src_attrs, src_shift);
     ImageIterator    dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);
 
-    uint xc    = gl_GlobalInvocationID.x;                // x coordinate in the convolved tensor
-    uint yc    = gl_GlobalInvocationID.y;                // y coordinate in the convolved tensor
-    uint ch    = gl_GlobalInvocationID.z % KERNEL_DEPTH; // input feature map
-    uint batch = gl_GlobalInvocationID.z / KERNEL_DEPTH; // the batch
+    int xc    = int(gl_GlobalInvocationID.x);                // x coordinate in the convolved tensor
+    int yc    = int(gl_GlobalInvocationID.y);                // y coordinate in the convolved tensor
+    int ch    = int(gl_GlobalInvocationID.z) % KERNEL_DEPTH; // input feature map
+    int batch = int(gl_GlobalInvocationID.z) / KERNEL_DEPTH; // the batch
 
     // Calculate input indices
-    uint xi = xc * uint(STRIDE_X) - uint(PAD_LEFT);
-    uint yi = yc * uint(STRIDE_Y) - uint(PAD_TOP);
-    TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, (ch * src_attrs.stride_z) + (batch * src_stride_w));
+    int xi = xc * STRIDE_X - PAD_LEFT;
+    int yi = yc * STRIDE_Y - PAD_TOP;
+    TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, (ch * int(src_attrs.stride_z)) + (batch * int(src_stride_w)));
 
     // Calculate output indices
-    uint xo = ch * uint(KERNEL_WIDTH) * uint(KERNEL_HEIGHT);
-    uint yo = xc + yc * uint(CONVOLVED_WIDTH); // Index of the convolution
-    TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, (yo * dst_attrs.stride_y) + (batch * dst_stride_w) + xo);
+    int xo = ch * KERNEL_WIDTH * KERNEL_HEIGHT;
+    int yo = xc + yc * CONVOLVED_WIDTH; // Index of the convolution
+    // sizeof is not available in GLES, so we'll use stride_x
+    TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, (yo * int(dst_attrs.stride_y)) + (batch * int(dst_stride_w)) + xo * int(dst_attrs.stride_x));
 
     uint src_pos = 0u;
 
     // Linearize convolution elements
-    for(uint y = yi, y_e = yi + uint(KERNEL_HEIGHT) * uint(DILATION_Y); y < y_e; y += uint(DILATION_Y))
+    for(int y = yi, y_e = yi + KERNEL_HEIGHT * DILATION_Y; y < y_e; y += DILATION_Y)
     {
-        for(uint x = xi, x_e = xi + uint(KERNEL_WIDTH) * uint(DILATION_X); x < x_e; x += uint(DILATION_X), TENSOR_OFFSET_ADVANCE(dst_iter, 1u))
+        for(int x = xi, x_e = xi + KERNEL_WIDTH * DILATION_X; x < x_e; x += DILATION_X, TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, int(dst_attrs.stride_x)))
         {
 #if PAD_LEFT == 0 && PAD_TOP == 0 && PAD_RIGHT == 0 && PAD_BOTTOM == 0
-            src_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, x * src_attrs.stride_x + y * src_attrs.stride_y);
+            src_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, x * int(src_attrs.stride_x) + y * int(src_attrs.stride_y));
             STORE_CURRENT_ITEM(dst_ptr, dst_iter, LOAD(src_ptr, src_pos));
 #else  /* PAD_LEFT == 0 && PAD_TOP == 0 && PAD_RIGHT == 0 && PAD_BOTTOM == 0 */
             if(x < 0 || x >= SRC_WIDTH || y < 0 || y >= SRC_HEIGHT)
@@ -225,7 +275,7 @@
             }
             else
             {
-                src_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, x * src_attrs.stride_x + y * src_attrs.stride_y);
+                src_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, x * int(src_attrs.stride_x) + y * int(src_attrs.stride_y));
                 STORE_CURRENT_ITEM(dst_ptr, dst_iter, LOAD(src_ptr, src_pos));
             }
 #endif /* PAD_LEFT == 0 && PAD_TOP == 0 && PAD_RIGHT == 0 && PAD_BOTTOM == 0 */
@@ -233,7 +283,7 @@
     }
 
 #ifdef HAS_BIAS
-    if(ch == (uint(KERNEL_DEPTH) - 1))
+    if(ch == (KERNEL_DEPTH - 1))
     {
         STORE_CURRENT_ITEM(dst_ptr, dst_iter, 1.0f);
     }
@@ -661,6 +711,7 @@
 #endif /* DATA_TYPE_FP32 */
 #endif /* IM2COL_REDUCED */
 
+#ifdef COL2IM
 #ifdef WIDTH_OUTPUT
 
 /** This kernel performs a reshaping of the output of the convolution layer.
@@ -694,10 +745,9 @@
     Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
 
     uvec3 pos = uvec3(gl_GlobalInvocationID.xyz);
-    TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, pos.x * src_attrs.step_y + pos.y * WIDTH_OUTPUT * src_attrs.step_y + (pos.z % dst_depth) * src_attrs.stride_x + (pos.z / dst_depth) * (src_attrs.stride_z));
+    TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, pos.x * src_attrs.step_y + pos.y * uint(WIDTH_OUTPUT) * src_attrs.step_y + (pos.z % dst_depth) * src_attrs.stride_x + (pos.z / dst_depth) * dst_strideZ);
 
-    STORE_CURRENT_ITEM(dst_ptr, dst_iter,
-                       LOAD_CURRENT_ITEM(src_ptr, src_iter));
+    STORE_CURRENT_ITEM(dst_ptr, dst_iter, LOAD_CURRENT_ITEM(src_ptr, src_iter));
 }
 
 #elif defined(DATA_TYPE_FP16)
@@ -737,4 +787,5 @@
 #else /* DATA_TYPE_FP32 */
 #error Data type not supported
 #endif /* DATA_TYPE_FP32 */
+#endif /* WIDTH_OUTPUT */
 #endif /* COL2IM */
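
For reference, a plain C++ sketch of the addressing that the IM2COL_GENERIC
shader above implements (illustrative only, not part of the patch; it assumes
NCHW layout, zero as the padding value, and a trailing 1.0 bias column per
convolution position when HAS_BIAS is set):

    // Naive im2col reference (single batch). dst has one row per convolved
    // output position and depth * k_w * k_h (+1 with bias) columns.
    void im2col_reference(const float *src, float *dst,
                          int src_w, int src_h, int depth,
                          int k_w, int k_h,
                          int stride_x, int stride_y,
                          int pad_left, int pad_top,
                          int dilation_x, int dilation_y,
                          int conv_w, int conv_h, bool has_bias)
    {
        const int row_len = depth * k_w * k_h + (has_bias ? 1 : 0);
        for(int yc = 0; yc < conv_h; ++yc)
        {
            for(int xc = 0; xc < conv_w; ++xc)
            {
                const int row = xc + yc * conv_w; // yo in the shader
                int       col = 0;
                for(int ch = 0; ch < depth; ++ch) // xo = ch * k_w * k_h
                {
                    for(int ky = 0; ky < k_h; ++ky)
                    {
                        for(int kx = 0; kx < k_w; ++kx, ++col)
                        {
                            const int  x   = xc * stride_x - pad_left + kx * dilation_x;
                            const int  y   = yc * stride_y - pad_top + ky * dilation_y;
                            const bool pad = (x < 0 || x >= src_w || y < 0 || y >= src_h);
                            dst[row * row_len + col] = pad ? 0.f : src[(ch * src_h + y) * src_w + x];
                        }
                    }
                }
                if(has_bias)
                {
                    dst[row * row_len + col] = 1.f; // bias column, written for the last input channel in the shader
                }
            }
        }
    }
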
diff --git a/src/core/GLES_COMPUTE/kernels/GCCol2ImKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCCol2ImKernel.cpp
index af1e34e..1554a89 100644
--- a/src/core/GLES_COMPUTE/kernels/GCCol2ImKernel.cpp
+++ b/src/core/GLES_COMPUTE/kernels/GCCol2ImKernel.cpp
@@ -62,30 +62,32 @@
     _output         = output;
     _convolved_dims = convolved_dims;
 
-    unsigned int num_elems_processed_per_iteration = 1;
+    const DataType     dt         = input->info()->data_type();
+    const unsigned int local_size = 1;
 
     // Create kernel
     std::set<std::string> build_opts;
+    build_opts.emplace("#define COL2IM ");
     build_opts.emplace("#define WIDTH_OUTPUT " + support::cpp11::to_string(_convolved_dims.first));
-    std::string dt_name = (input->info()->data_type() == DataType::F32) ? "DATA_TYPE_FP32" : "DATA_TYPE_FP16";
+    const std::string dt_name = (dt == DataType::F32) ? "DATA_TYPE_FP32" : "DATA_TYPE_FP16";
     build_opts.emplace(("#define " + dt_name));
-    build_opts.emplace("#define LOCAL_SIZE_X " + support::cpp11::to_string(num_elems_processed_per_iteration));
-    build_opts.emplace("#define LOCAL_SIZE_Y " + support::cpp11::to_string(num_elems_processed_per_iteration));
-    build_opts.emplace("#define LOCAL_SIZE_Z " + support::cpp11::to_string(num_elems_processed_per_iteration));
+    build_opts.emplace("#define LOCAL_SIZE_X " + support::cpp11::to_string(local_size));
+    build_opts.emplace("#define LOCAL_SIZE_Y " + support::cpp11::to_string(local_size));
+    build_opts.emplace("#define LOCAL_SIZE_Z " + support::cpp11::to_string(local_size));
 
     _kernel = static_cast<GCKernel>(GCKernelLibrary::get().create_kernel("col2im", build_opts));
 
     // Configure window
-    unsigned int nums = 2;
-    Window       win  = calculate_max_window(*output->info(), Steps(nums));
+    const unsigned int num_elems_processed_per_iteration = (dt == DataType::F32) ? 1 : 2;
 
-    AccessWindowHorizontal output_access(output->info(), 0, 2);
+    Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration));
+
+    AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
     const int              input_padding = ceil_to_multiple(input->info()->dimension(0), 2) - input->info()->dimension(0);
 
     AccessWindowStatic input_access(input->info(), 0, 0, input->info()->dimension(0) + input_padding, input->info()->dimension(1) + 1);
 
-    update_window_and_padding(win, input_access,
-                              output_access);
+    update_window_and_padding(win, input_access, output_access);
 
     output_access.set_valid_region(win, output->info()->valid_region());
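
For reference, the col2im shader driven by this kernel copies each (x, y, z)
position of the convolved output from row x + y * WIDTH_OUTPUT, column z of the
GEMM result; a naive C++ sketch of that mapping (single batch, illustrative
names only) follows. The FP16 window step of two matches the shader's uint
buffers, which carry two packed half values per 32-bit word, whereas the FP32
path handles one float per invocation, hence the step of one.

    // Naive col2im reference (single batch): src is the GEMM output with one
    // column per output feature map and one row per spatial position of the
    // convolved output; dst is the NCHW output tensor.
    void col2im_reference(const float *src, float *dst,
                          int dst_w, int dst_h, int dst_depth)
    {
        for(int z = 0; z < dst_depth; ++z)
        {
            for(int y = 0; y < dst_h; ++y)
            {
                for(int x = 0; x < dst_w; ++x)
                {
                    const int row = x + y * dst_w; // WIDTH_OUTPUT == dst_w in the shader
                    dst[(z * dst_h + y) * dst_w + x] = src[row * dst_depth + z];
                }
            }
        }
    }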
 
diff --git a/src/core/GLES_COMPUTE/kernels/GCIm2ColKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCIm2ColKernel.cpp
index eb79047..6c89616 100644
--- a/src/core/GLES_COMPUTE/kernels/GCIm2ColKernel.cpp
+++ b/src/core/GLES_COMPUTE/kernels/GCIm2ColKernel.cpp
@@ -113,7 +113,7 @@
         _convolved_dims = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1),
                                             kernel_dims.width, kernel_dims.height,
                                             conv_info, dilation);
-        _num_elems_processed_per_iteration = 2;
+        _num_elems_processed_per_iteration = (input->info()->data_type() == DataType::F32) ? 1 : 2;
 
         build_opts.emplace("#define KERNEL_WIDTH " + support::cpp11::to_string(kernel_dims.width));
         build_opts.emplace("#define KERNEL_HEIGHT " + support::cpp11::to_string(kernel_dims.height));