Add uint8/int8 support to cpu conv3d

Add support for qasymm8/qasymm8_signed in cpu conv3d (NDHWC layout): a quantized Neon kernel, validation for S32 biases, operator-list documentation and precommit tests.
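
A minimal usage sketch of the quantized path (illustrative only: the shapes and
quantization parameters below are hypothetical and not taken from this patch;
it assumes the usual Tensor/TensorAllocator flow):

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/FunctionDescriptors.h"
    #include "arm_compute/runtime/NEON/functions/NEConv3D.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void run_quantized_conv3d()
    {
        // NDHWC tensors: src [Cin, W, H, D, N], weights [Cout, Cin, W, H, D], dst [Cout, W, H, D, N].
        TensorInfo src_info(TensorShape(16U, 8U, 8U, 4U, 1U), 1, DataType::QASYMM8, QuantizationInfo(0.1f, 10));
        TensorInfo wei_info(TensorShape(32U, 16U, 3U, 3U, 3U), 1, DataType::QASYMM8, QuantizationInfo(0.3f, 20));
        TensorInfo bia_info(TensorShape(32U), 1, DataType::S32); // quantized conv3d takes S32 biases
        TensorInfo dst_info(TensorShape(32U, 6U, 6U, 2U, 1U), 1, DataType::QASYMM8, QuantizationInfo(0.2f, 5));
        src_info.set_data_layout(DataLayout::NDHWC);
        wei_info.set_data_layout(DataLayout::NDHWC);
        dst_info.set_data_layout(DataLayout::NDHWC);

        Tensor src, weights, biases, dst;
        src.allocator()->init(src_info);
        weights.allocator()->init(wei_info);
        biases.allocator()->init(bia_info);
        dst.allocator()->init(dst_info);

        // Unit stride and dilation, no padding, no fused activation.
        const Conv3dInfo conv_info(Size3D(1U, 1U, 1U), Padding3D(0, 0, 0), ActivationLayerInfo(),
                                   Size3D(1U, 1U, 1U), DimensionRoundingType::FLOOR, false);

        NEConv3D conv;
        conv.configure(&src, &weights, &biases, &dst, conv_info);

        src.allocator()->allocate();
        weights.allocator()->allocate();
        biases.allocator()->allocate();
        dst.allocator()->allocate();
        // ... fill src/weights/biases ...

        conv.run();
    }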

Resolves: COMPMID-4665

Signed-off-by: Freddie Liardet <frederick.liardet@arm.com>
Change-Id: I2450bb6f24969745c8b936f4b657bd406b788c57
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6478
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/arm_compute/runtime/NEON/functions/NEConv3D.h b/arm_compute/runtime/NEON/functions/NEConv3D.h
index 2b3a45f..2a3c535 100644
--- a/arm_compute/runtime/NEON/functions/NEConv3D.h
+++ b/arm_compute/runtime/NEON/functions/NEConv3D.h
@@ -66,6 +66,8 @@
      * |:--------------|:------------------|:------|:--------------|
      * |F16            |F16                |F16    |F16            |
      * |F32            |F32                |F32    |F32            |
+     * |QASYMM8        |QASYMM8            |S32    |QASYMM8        |
+     * |QASYMM8_SIGNED |QASYMM8_SIGNED     |S32    |QASYMM8_SIGNED |
      *
      * @param[in]  input     Source tensor. 4 lower dimensions represent a single input [IFM, width, height, depth],
      *                       while every optional dimension from 5 and above represent a batch of inputs.
diff --git a/docs/user_guide/operator_list.dox b/docs/user_guide/operator_list.dox
index 55bfe38..1dfbdf6 100644
--- a/docs/user_guide/operator_list.dox
+++ b/docs/user_guide/operator_list.dox
@@ -617,6 +617,8 @@
     <tr><th>src0<th>src1<th>src2<th>dst
     <tr><td>F16<td>F16<td>F16<td>F16
     <tr><td>F32<td>F32<td>F32<td>F32
+    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
+    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
     </table>
 <tr>
   <td>CLConv3D
diff --git a/src/cpu/kernels/CpuDirectConv3dKernel.cpp b/src/cpu/kernels/CpuDirectConv3dKernel.cpp
index 595b5f1..4f47787 100644
--- a/src/cpu/kernels/CpuDirectConv3dKernel.cpp
+++ b/src/cpu/kernels/CpuDirectConv3dKernel.cpp
@@ -76,6 +76,16 @@
         "neon_fp32_directconv3d",
         [](const DirectConv3dSelectorData & data) { return data.dt == DataType::F32; },
         REGISTER_FP32_NEON(arm_compute::cpu::directconv3d_float_neon_ndhwc<float>)
+    },
+    {
+        "neon_qasymm8_directconv3d",
+        [](const DirectConv3dSelectorData & data) { return data.dt == DataType::QASYMM8; },
+        REGISTER_QASYMM8_NEON(arm_compute::cpu::directconv3d_quantized_neon_ndhwc<uint8_t>)
+    },
+    {
+        "neon_qasymm8_signed_directconv3d",
+        [](const DirectConv3dSelectorData & data) { return data.dt == DataType::QASYMM8_SIGNED; },
+        REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::directconv3d_quantized_neon_ndhwc<int8_t>)
     }
 };
 
@@ -105,7 +115,7 @@
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst);
     ARM_COMPUTE_RETURN_ERROR_ON(src0->data_layout() != DataLayout::NDHWC);
     ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src0);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::F16, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::F16, DataType::F32, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, src1);
 
     const DataLayout data_layout = src0->data_layout();
@@ -117,10 +127,16 @@
 
     if(src2 != nullptr)
     {
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src1, src2);
-        ARM_COMPUTE_RETURN_ERROR_ON_MSG(src2->dimension(0) != src1->dimension(0),
-                                        "biases size and number of output feature maps should match");
-        ARM_COMPUTE_RETURN_ERROR_ON_MSG(src2->num_dimensions() > 1, "biases should be one dimensional");
+        if(is_data_type_quantized(src0->data_type()))
+        {
+            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src2, 1, DataType::S32);
+        }
+        else
+        {
+            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src1, src2);
+        }
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(src2->dimension(0) != src1->dimension(0), "Biases size and number of dst feature maps should match");
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(src2->num_dimensions() > 1, "Biases should be one dimensional");
     }
 
     // Checks performed when output is configured
@@ -136,7 +152,7 @@
 
     return Status{};
 }
-}
+} // namespace
 
 void CpuDirectConv3dKernel::configure(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *src2, ITensorInfo *dst, const Conv3dInfo &conv_info)
 {
diff --git a/src/cpu/kernels/CpuDirectConv3dKernel.h b/src/cpu/kernels/CpuDirectConv3dKernel.h
index fc64e85..ff3b30f 100644
--- a/src/cpu/kernels/CpuDirectConv3dKernel.h
+++ b/src/cpu/kernels/CpuDirectConv3dKernel.h
@@ -46,6 +46,8 @@
      * |:--------------|:------------------|:------|:--------------|
      * |F16            |F16                |F16    |F16            |
      * |F32            |F32                |F32    |F32            |
+     * |QASYMM8        |QASYMM8            |S32    |QASYMM8        |
+     * |QASYMM8_SIGNED |QASYMM8_SIGNED     |S32    |QASYMM8_SIGNED |
      *
      * @param[in, out] src0      Input tensor info.
      * @param[in]      src1      Set of kernels to convolve the input volume.
diff --git a/src/cpu/kernels/conv3d/neon/list.h b/src/cpu/kernels/conv3d/neon/list.h
index b24785a..3e2db66 100644
--- a/src/cpu/kernels/conv3d/neon/list.h
+++ b/src/cpu/kernels/conv3d/neon/list.h
@@ -29,6 +29,7 @@
 #include "arm_compute/runtime/FunctionDescriptors.h"
 #include "src/core/NEON/wrapper/wrapper.h"
 #include "src/core/helpers/WindowHelpers.h"
+#include "src/cpu/kernels/conv3d/neon/quantized.h"
 
 namespace arm_compute
 {
@@ -171,6 +172,7 @@
     },
     out);
 }
+
 } // namespace cpu
 } // namespace arm_compute
 #endif // SRC_CORE_NEON_KERNELS_CONV3D_LIST_H
\ No newline at end of file
diff --git a/src/cpu/kernels/conv3d/neon/quantized.h b/src/cpu/kernels/conv3d/neon/quantized.h
new file mode 100644
index 0000000..2958cd6
--- /dev/null
+++ b/src/cpu/kernels/conv3d/neon/quantized.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_KERNELS_CONV3D_QUANTIZED_H
+#define SRC_CORE_NEON_KERNELS_CONV3D_QUANTIZED_H
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/Traits.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+#include "arm_compute/runtime/FunctionDescriptors.h"
+#include "src/core/NEON/NEAsymm.h"
+#include "src/core/NEON/wrapper/wrapper.h"
+#include "src/core/helpers/WindowHelpers.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+template <typename T>
+void directconv3d_quantized_neon_ndhwc(const ITensor *src0, const ITensor *src1, const ITensor *src2, ITensor *dst, const Conv3dInfo &conv_info, const Window &window)
+{
+    const ITensor *src     = src0;
+    const ITensor *weights = src1;
+    const ITensor *biases  = src2;
+
+    using vtype                                = wrapper::traits::neon_bitvector<T, wrapper::traits::BitWidth::W128>;
+    using vector_type                          = typename vtype::type;
+    using tag_type                             = typename vtype::tag_type;
+    constexpr int num_elems_read_per_iteration = 16 / sizeof(T);
+    using q16_t                                = typename wrapper::traits::promote_t<T>;
+    using q32_t                                = typename wrapper::traits::promote_t<q16_t>;
+    using q32x4_t                              = typename wrapper::traits::neon_vector<q32_t, 4>::type;
+
+    const int32_t input_offset   = -src->info()->quantization_info().uniform().offset;
+    const float   input_scale    = src->info()->quantization_info().uniform().scale;
+    const int32_t weights_offset = -weights->info()->quantization_info().uniform().offset;
+    const float   weights_scale  = weights->info()->quantization_info().uniform().scale;
+    const int32_t output_offset  = dst->info()->quantization_info().uniform().offset;
+    const float   output_scale   = dst->info()->quantization_info().uniform().scale;
+
+    int32_t     output_multiplier = 0;
+    int32_t     output_shift      = 0;
+    const float multiplier        = input_scale * weights_scale / output_scale;
+    arm_compute::quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
+
+    // Scalar quantities (N D H W Cin)
+    const int element_size   = src->info()->element_size();
+    const int input_stride_w = src->info()->strides_in_bytes().y() / element_size;
+    const int input_stride_h = src->info()->strides_in_bytes().z() / element_size;
+    const int input_stride_d = src->info()->strides_in_bytes()[3] / element_size;
+    const int input_stride_n = src->info()->strides_in_bytes()[4] / element_size;
+    const int input_dim_w    = src->info()->dimension(1);
+    const int input_dim_h    = src->info()->dimension(2);
+    const int input_dim_d    = src->info()->dimension(3);
+
+    // Kernel info (D H W Cin Cout)
+    const unsigned int kernel_stride_w = weights->info()->strides_in_bytes()[2] / element_size;
+    const unsigned int kernel_stride_h = weights->info()->strides_in_bytes()[3] / element_size;
+    const unsigned int kernel_stride_d = weights->info()->strides_in_bytes()[4] / element_size;
+    const int          kernel_dim_w    = weights->info()->dimension(2);
+    const int          kernel_dim_h    = weights->info()->dimension(3);
+    const int          kernel_dim_d    = weights->info()->dimension(4);
+
+    // Convolution padding and stride
+    const int conv_pad_top   = conv_info.padding.top;
+    const int conv_pad_left  = conv_info.padding.left;
+    const int conv_pad_front = conv_info.padding.front;
+    const int conv_stride_w  = conv_info.stride.width;
+    const int conv_stride_h  = conv_info.stride.height;
+    const int conv_stride_d  = conv_info.stride.depth;
+
+    // Setup input window for the output iterator
+    Window window_out = window;
+    window_out.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    // Setup input window for the weights iterator
+    Window window_w = calculate_max_window(*weights->info(), Steps());
+    window_w.set(Window::DimY, Window::Dimension(0, 1, 1));
+    window_w.set(Window::DimZ, Window::Dimension(0, 1, 1));
+    window_w.set(Window::DimW, Window::Dimension(0, 1, 1));
+    window_w.set(4, Window::Dimension(0, 1, 1));
+
+    Iterator out(dst, window_out);
+    Iterator wei(weights, window_w);
+
+    const int32_t *biases_ptr = nullptr;
+    if(biases != nullptr)
+    {
+        biases_ptr = reinterpret_cast<int32_t *>(biases->buffer() + biases->info()->offset_first_element_in_bytes());
+    }
+    execute_window_loop(window_out, [&](const Coordinates & id)
+    {
+        // We are computing the theoretical input starting points
+        const int in_w_start_t = static_cast<int>(id.y()) * conv_stride_w - conv_pad_left;
+        const int in_h_start_t = static_cast<int>(id.z()) * conv_stride_h - conv_pad_top;
+        const int in_d_start_t = static_cast<int>(id[3]) * conv_stride_d - conv_pad_front;
+        const int in_w_end_t   = in_w_start_t + kernel_dim_w;
+        const int in_h_end_t   = in_h_start_t + kernel_dim_h;
+        const int in_d_end_t   = in_d_start_t + kernel_dim_d;
+
+        // We are computing the valid initial and ending input points by checking the borders
+        const int in_w_start = std::max(in_w_start_t, 0);
+        const int in_h_start = std::max(in_h_start_t, 0);
+        const int in_d_start = std::max(in_d_start_t, 0);
+        const int in_w_end   = std::min(in_w_end_t, input_dim_w);
+        const int in_h_end   = std::min(in_h_end_t, input_dim_h);
+        const int in_d_end   = std::min(in_d_end_t, input_dim_d);
+
+        // We use the input points to select the valid weight points to use
+        const int wei_w_start = in_w_start - in_w_start_t;
+        const int wei_h_start = in_h_start - in_h_start_t;
+        const int wei_d_start = in_d_start - in_d_start_t;
+        const int wei_w_end   = kernel_dim_w - (in_w_end_t - in_w_end);
+        const int wei_h_end   = kernel_dim_h - (in_h_end_t - in_h_end);
+        const int wei_d_end   = kernel_dim_d - (in_d_end_t - in_d_end);
+
+        const int      index_c_out_end = weights->info()->dimension(0);
+        const int      index_c_in_end  = weights->info()->dimension(1);
+        const T *const in_ptr_start    = reinterpret_cast<const T *>(src->buffer() + src->info()->offset_first_element_in_bytes()) + id[4] * input_stride_n;
+
+        execute_window_loop(window_w, [&](const Coordinates & id_w)
+        {
+            /*
+            * This is the loop in the weights, and it goes along OFM (output feature map)
+            */
+            const auto weights_ptr_start = reinterpret_cast<const T *>(wei.ptr());
+            int32_t    acc               = static_cast<int32_t>(0);
+            T         *out_ptr           = reinterpret_cast<T *>(out.ptr());
+            for(int index_wei_d = wei_d_start, index_in_d = in_d_start; index_wei_d < wei_d_end; ++index_wei_d, ++index_in_d)
+            {
+                const auto in_ptr_d      = in_ptr_start + index_in_d * input_stride_d;
+                const auto weights_ptr_d = weights_ptr_start + index_wei_d * kernel_stride_d;
+                for(int index_wei_h = wei_h_start, index_in_h = in_h_start; index_wei_h < wei_h_end; ++index_wei_h, ++index_in_h)
+                {
+                    const T *const in_ptr_row      = in_ptr_d + index_in_h * input_stride_h;
+                    const T *const weights_ptr_row = weights_ptr_d + index_wei_h * kernel_stride_h;
+                    for(int index_wei_w = wei_w_start, index_in_w = in_w_start; index_wei_w < wei_w_end; ++index_wei_w, ++index_in_w)
+                    {
+                        const T    *in_ptr_mover      = in_ptr_row + index_in_w * input_stride_w;
+                        const T    *weights_ptr_mover = weights_ptr_row + index_wei_w * kernel_stride_w;
+                        int         index_c_in        = 0;
+                        vector_type w_vec             = wrapper::vdup_n(static_cast<T>(0), tag_type());
+
+                        q32x4_t acc_q32_0 = wrapper::vdup_n(static_cast<q32_t>(0), tag_type());
+                        q32x4_t acc_q32_1 = wrapper::vdup_n(static_cast<q32_t>(0), tag_type());
+                        q32x4_t acc_q32_2 = wrapper::vdup_n(static_cast<q32_t>(0), tag_type());
+                        q32x4_t acc_q32_3 = wrapper::vdup_n(static_cast<q32_t>(0), tag_type());
+
+                        for(; index_c_in <= index_c_in_end - num_elems_read_per_iteration;
+                            index_c_in += num_elems_read_per_iteration, in_ptr_mover += num_elems_read_per_iteration)
+                        {
+                            const auto src_vec = wrapper::vloadq(in_ptr_mover);
+                            //Load Cin weights
+                            for(unsigned int k = 0; k < num_elems_read_per_iteration; ++k, weights_ptr_mover += index_c_out_end)
+                            {
+                                w_vec = wrapper::vsetlane(*weights_ptr_mover, w_vec, k);
+                            }
+                            q32x4_t src_q32_0 = wrapper::vdup_n(static_cast<q32_t>(input_offset), tag_type());
+                            q32x4_t src_q32_1 = wrapper::vdup_n(static_cast<q32_t>(input_offset), tag_type());
+                            q32x4_t src_q32_2 = wrapper::vdup_n(static_cast<q32_t>(input_offset), tag_type());
+                            q32x4_t src_q32_3 = wrapper::vdup_n(static_cast<q32_t>(input_offset), tag_type());
+
+                            q32x4_t wei_q32_0 = wrapper::vdup_n(static_cast<q32_t>(weights_offset), tag_type());
+                            q32x4_t wei_q32_1 = wrapper::vdup_n(static_cast<q32_t>(weights_offset), tag_type());
+                            q32x4_t wei_q32_2 = wrapper::vdup_n(static_cast<q32_t>(weights_offset), tag_type());
+                            q32x4_t wei_q32_3 = wrapper::vdup_n(static_cast<q32_t>(weights_offset), tag_type());
+
+                            const auto src_q16_0 = wrapper::vmovl(wrapper::vgetlow(src_vec));
+                            const auto src_q16_1 = wrapper::vmovl(wrapper::vgethigh(src_vec));
+                            const auto wei_q16_0 = wrapper::vmovl(wrapper::vgetlow(w_vec));
+                            const auto wei_q16_1 = wrapper::vmovl(wrapper::vgethigh(w_vec));
+
+                            src_q32_0 = wrapper::vadd(src_q32_0, wrapper::vmovl(wrapper::vgetlow(src_q16_0)));
+                            src_q32_1 = wrapper::vadd(src_q32_1, wrapper::vmovl(wrapper::vgethigh(src_q16_0)));
+                            src_q32_2 = wrapper::vadd(src_q32_2, wrapper::vmovl(wrapper::vgetlow(src_q16_1)));
+                            src_q32_3 = wrapper::vadd(src_q32_3, wrapper::vmovl(wrapper::vgethigh(src_q16_1)));
+
+                            wei_q32_0 = wrapper::vadd(wei_q32_0, wrapper::vmovl(wrapper::vgetlow(wei_q16_0)));
+                            wei_q32_1 = wrapper::vadd(wei_q32_1, wrapper::vmovl(wrapper::vgethigh(wei_q16_0)));
+                            wei_q32_2 = wrapper::vadd(wei_q32_2, wrapper::vmovl(wrapper::vgetlow(wei_q16_1)));
+                            wei_q32_3 = wrapper::vadd(wei_q32_3, wrapper::vmovl(wrapper::vgethigh(wei_q16_1)));
+
+                            acc_q32_0 = wrapper::vmla(acc_q32_0, wei_q32_0, src_q32_0);
+                            acc_q32_1 = wrapper::vmla(acc_q32_1, wei_q32_1, src_q32_1);
+                            acc_q32_2 = wrapper::vmla(acc_q32_2, wei_q32_2, src_q32_2);
+                            acc_q32_3 = wrapper::vmla(acc_q32_3, wei_q32_3, src_q32_3);
+                        }
+#if defined(__aarch64__)
+                        acc += wrapper::vaddv(acc_q32_0);
+                        acc += wrapper::vaddv(acc_q32_1);
+                        acc += wrapper::vaddv(acc_q32_2);
+                        acc += wrapper::vaddv(acc_q32_3);
+#else // __aarch64__
+                        auto temp = wrapper::vpadd(wrapper::vgethigh(acc_q32_0), wrapper::vgetlow(acc_q32_0));
+                        temp      = wrapper::vpadd(temp, temp);
+                        acc       += wrapper::vgetlane(temp, 0);
+
+                        temp      = wrapper::vpadd(wrapper::vgethigh(acc_q32_1), wrapper::vgetlow(acc_q32_1));
+                        temp      = wrapper::vpadd(temp, temp);
+                        acc       += wrapper::vgetlane(temp, 0);
+
+                        temp      = wrapper::vpadd(wrapper::vgethigh(acc_q32_2), wrapper::vgetlow(acc_q32_2));
+                        temp      = wrapper::vpadd(temp, temp);
+                        acc       += wrapper::vgetlane(temp, 0);
+
+                        temp      = wrapper::vpadd(wrapper::vgethigh(acc_q32_3), wrapper::vgetlow(acc_q32_3));
+                        temp      = wrapper::vpadd(temp, temp);
+                        acc       += wrapper::vgetlane(temp, 0);
+
+#endif // __aarch64__
+
+                        for(; index_c_in < index_c_in_end; ++index_c_in, ++in_ptr_mover, weights_ptr_mover += index_c_out_end)
+                        {
+                            const auto src_val = *(in_ptr_mover) + input_offset;
+                            const auto w_val   = *(weights_ptr_mover) + weights_offset;
+                            acc += src_val * w_val;
+                        }
+                    }
+                }
+            }
+
+            if(biases)
+            {
+                acc += *reinterpret_cast<const int32_t *>(biases_ptr + id_w[0]);
+            }
+
+            T out_val                                   = finalize_quantization(acc, output_multiplier, output_shift, output_offset, T(0), T(0), false);
+            *(reinterpret_cast<T *>(out_ptr + id_w[0])) = out_val;
+        },
+        wei);
+    },
+    out);
+}
+} // namespace cpu
+} // namespace arm_compute
+#endif // SRC_CORE_NEON_KERNELS_CONV3D_QUANTIZED_H
\ No newline at end of file
diff --git a/src/cpu/operators/CpuDirectConv3d.h b/src/cpu/operators/CpuDirectConv3d.h
index f7c3099..cde01f0 100644
--- a/src/cpu/operators/CpuDirectConv3d.h
+++ b/src/cpu/operators/CpuDirectConv3d.h
@@ -65,6 +65,8 @@
      * |:--------------|:------------------|:------|:--------------|
      * |F16            |F16                |F16    |F16            |
      * |F32            |F32                |F32    |F32            |
+     * |QASYMM8        |QASYMM8            |S32    |QASYMM8        |
+     * |QASYMM8_SIGNED |QASYMM8_SIGNED     |S32    |QASYMM8_SIGNED |
      *
      * @param[in, out] src0      Input tensor info.
      * @param[in]      src1      Set of kernels to convolve the input volume.
diff --git a/tests/validation/NEON/Convolution3D.cpp b/tests/validation/NEON/Convolution3D.cpp
new file mode 100644
index 0000000..1bfac90
--- /dev/null
+++ b/tests/validation/NEON/Convolution3D.cpp
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEConv3D.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/DirectConvolution3DFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+const RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2f)); /**< Relative tolerance value for FP16 types */
+const AbsoluteTolerance<float>            abs_tolerance_f16(0.2f);                   /**< Absolute tolerance for FP16 types */
+constexpr float                           tolerance_num = 0.07f;                     /**< Tolerance number for the FP16 implementation */
+#endif                                                                               /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+constexpr AbsoluteTolerance<float>   tolerance_fp32(0.001f);                         /**< Tolerance for floating point tests */
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);                           /**< Tolerance for quantized tests */
+
+/** Activation function Dataset*/
+const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+    ActivationLayerInfo(),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
+});
+
+const auto data_precommit = combine(combine(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(
+                                                                                    datasets::SmallDirectConv3DShapes(),
+                                                                                    framework::dataset::make("StrideX", { 1, 5, 8 })),
+                                                                                framework::dataset::make("StrideY", { 1, 2, 3 })),
+                                                                            framework::dataset::make("StrideZ", { 1, 2, 1 })),
+                                                                        framework::dataset::make("PadX", { 0, 1, 2 })),
+                                                                    framework::dataset::make("PadY", { 0, 2, 1 })),
+                                                                framework::dataset::make("PadZ", { 0, 3, 5 })),
+                                                            framework::dataset::make("KernelWidth", { 3, 5, 9 })),
+                                                        framework::dataset::make("KernelHeight", { 2, 1, 3 })),
+                                                    framework::dataset::make("KernelDepth", { 1, 2, 3 })),
+                                                framework::dataset::make("NumKernels", { 2, 3, 8 })),
+                                            framework::dataset::make("HasBias", { true, false })),
+                                    ActivationFunctionsDataset);
+} // namespace
+
+TEST_SUITE(NEON)
+TEST_SUITE(Convolution3D)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
+        framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U, 4U), 1U, DataType::F32, DataLayout::NDHWC), // Mismatching data type input/weights
+                                                TensorInfo(TensorShape(27U, 13U, 2U, 4U), 1U, DataType::F32, DataLayout::NDHWC), // Mismatching input feature maps
+                                                TensorInfo(TensorShape(27U, 13U, 2U, 4U), 1U, DataType::F32, DataLayout::NDHWC), // Invalid weights dimensions
+                                                TensorInfo(TensorShape(27U, 13U, 2U, 4U), 1U, DataType::F32, DataLayout::NHWC), // Invalid data layout
+                                                TensorInfo(TensorShape(27U, 13U, 2U, 4U), 1U, DataType::F32, DataLayout::NDHWC), // Invalid biases size
+                                                TensorInfo(TensorShape(27U, 13U, 2U, 4U), 1U, DataType::F32, DataLayout::NDHWC), // Invalid biases dimensions
+                                                TensorInfo(TensorShape(27U, 13U, 2U, 4U), 1U, DataType::F32, DataLayout::NDHWC), // Invalid output size
+                                              }),
+        framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(4U, 3U, 3U, 3U, 2U), 1U, DataType::F16),
+                                                 TensorInfo(TensorShape(4U, 3U, 3U, 3U, 3U), 1U, DataType::F32),
+                                                 TensorInfo(TensorShape(4U, 3U, 3U, 3U, 2U, 3U), 1U, DataType::F32),
+                                                 TensorInfo(TensorShape(4U, 3U, 3U, 3U, 2U), 1U, DataType::F32),
+                                                 TensorInfo(TensorShape(4U, 3U, 3U, 3U, 2U), 1U, DataType::F32),
+                                                 TensorInfo(TensorShape(4U, 3U, 3U, 3U, 2U), 1U, DataType::F32),
+                                                 TensorInfo(TensorShape(4U, 3U, 3U, 3U, 2U), 1U, DataType::F32),
+                                              })),
+        framework::dataset::make("BiasesInfo",{ TensorInfo(TensorShape(4U), 1U, DataType::F32),
+                                                TensorInfo(TensorShape(4U), 1U, DataType::F32),
+                                                TensorInfo(TensorShape(4U), 1U, DataType::F32),
+                                                TensorInfo(TensorShape(4U), 1U, DataType::F32),
+                                                TensorInfo(TensorShape(3U), 1U, DataType::F32),
+                                                TensorInfo(TensorShape(4U, 2U), 1U, DataType::F32),
+                                                TensorInfo(TensorShape(4U), 1U, DataType::F32),
+                                              })),
+        framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 4U), 1U, DataType::F32),
+                                                TensorInfo(TensorShape(25U, 11U, 4U), 1U, DataType::F32),
+                                                TensorInfo(TensorShape(25U, 11U, 4U), 1U, DataType::F32),
+                                                TensorInfo(TensorShape(25U, 11U, 4U), 1U, DataType::F32),
+                                                TensorInfo(TensorShape(25U, 11U, 4U), 1U, DataType::F32),
+                                                TensorInfo(TensorShape(25U, 11U, 4U), 1U, DataType::F32),
+                                                TensorInfo(TensorShape(26U, 11U, 4U), 1U, DataType::F32),
+                                              })),
+        framework::dataset::make("Expected", { false, false, false, false, false, false, false })),
+        input_info, weights_info, biases_info, output_info, expected)
+{
+        const Conv3dInfo  conv3d_info(Size3D(1, 1, 1), Padding3D(0, 0, 0), ActivationLayerInfo(), Size3D(1U, 1U, 1U), DimensionRoundingType::FLOOR, false);
+        bool is_valid = bool(NEConv3D::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &biases_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv3d_info));
+        ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+template <typename T>
+using NEDirectConvolution3DFixture = DirectConvolution3DValidationFixture<Tensor, Accessor, NEConv3D, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectConvolution3DFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(data_precommit,
+                                                                                                                 framework::dataset::make("DataType", DataType::F32)),
+                                                                                                                 framework::dataset::make("DataLayout", { DataLayout::NDHWC })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_fp32);
+}
+TEST_SUITE_END() // FP32
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectConvolution3DFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(data_precommit,
+                                                                                                                        framework::dataset::make("DataType", DataType::F16)),
+                                                                                                                framework::dataset::make("DataLayout", { DataLayout::NDHWC })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
+}
+TEST_SUITE_END() // FP16
+#endif           /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+
+TEST_SUITE_END() // Float
+
+template <typename T>
+using NEDirectConvolution3DQuantizedFixture = DirectConvolution3DValidationQuantizedFixture<Tensor, Accessor, NEConv3D, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectConvolution3DQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(combine(combine(combine(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(
+                                                                                                                   framework::dataset::make("InputShape", { TensorShape(7U, 5U, 3U, 13U, 3U),
+                                                                                                                           TensorShape(15U, 7U, 11U, 7U),
+                                                                                                                           TensorShape(19U, 5U, 16U, 4U),
+                                                                                                                           TensorShape(13U, 5U, 17U, 2U)
+                                                                                                                                                          }),
+                                                                                                                   framework::dataset::make("StrideX", { 1, 3, 2, 1 })),
+                                                                                                               framework::dataset::make("StrideY", { 2, 1, 3, 1 })),
+                                                                                                           framework::dataset::make("StrideZ", { 3, 2, 1, 1 })),
+                                                                                                       framework::dataset::make("PadX", { 0, 2, 1, 0 })),
+                                                                                                   framework::dataset::make("PadY", { 1, 0, 2, 0 })),
+                                                                                               framework::dataset::make("PadZ", { 2, 1, 0, 0 })),
+                                                                                           framework::dataset::make("KernelWidth", { 3, 7, 5, 1 })),
+                                                                                       framework::dataset::make("KernelHeight", { 5, 3, 7, 1 })),
+                                                                                   framework::dataset::make("KernelDepth", { 7, 5, 3, 1 })),
+                                                                               framework::dataset::make("NumKernels", { 5, 3, 1, 11 })),
+                                                                           framework::dataset::make("HasBias", { true, true, true, false })),
+                                                                       framework::dataset::make("Activation", ActivationLayerInfo())),
+                                                               framework::dataset::make("DataType", DataType::QASYMM8)),
+                                                       framework::dataset::make("DataLayout", DataLayout::NDHWC)),
+                                               framework::dataset::make("SrcQuantizationInfo", QuantizationInfo(0.1f, 10))),
+                                       framework::dataset::make("WeightsQuantizationInfo", QuantizationInfo(0.3f, 20))),
+                               framework::dataset::make("DstQuantizationInfo", QuantizationInfo(0.2f, 5))))
+{
+    validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectConvolution3DQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(combine(combine(combine(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(
+                                                                                                                   framework::dataset::make("InputShape", { TensorShape(7U, 5U, 3U, 13U, 3U),
+                                                                                                                           TensorShape(15U, 7U, 11U, 7U),
+                                                                                                                           TensorShape(19U, 5U, 16U, 4U),
+                                                                                                                           TensorShape(13U, 5U, 17U, 2U)
+                                                                                                                                                          }),
+                                                                                                                   framework::dataset::make("StrideX", { 1, 3, 2, 1 })),
+                                                                                                               framework::dataset::make("StrideY", { 2, 1, 3, 1 })),
+                                                                                                           framework::dataset::make("StrideZ", { 3, 2, 1, 1 })),
+                                                                                                       framework::dataset::make("PadX", { 0, 2, 1, 0 })),
+                                                                                                   framework::dataset::make("PadY", { 1, 0, 2, 0 })),
+                                                                                               framework::dataset::make("PadZ", { 2, 1, 0, 0 })),
+                                                                                           framework::dataset::make("KernelWidth", { 3, 7, 5, 1 })),
+                                                                                       framework::dataset::make("KernelHeight", { 5, 3, 7, 1 })),
+                                                                                   framework::dataset::make("KernelDepth", { 7, 5, 3, 1 })),
+                                                                               framework::dataset::make("NumKernels", { 5, 3, 1, 11 })),
+                                                                           framework::dataset::make("HasBias", { true, true, true, false })),
+                                                                       framework::dataset::make("Activation", ActivationLayerInfo())),
+                                                               framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+                                                       framework::dataset::make("DataLayout", DataLayout::NDHWC)),
+                                               framework::dataset::make("SrcQuantizationInfo", QuantizationInfo(0.1f, 10))),
+                                       framework::dataset::make("WeightsQuantizationInfo", QuantizationInfo(0.3f, 20))),
+                               framework::dataset::make("DstQuantizationInfo", QuantizationInfo(0.2f, 5))))
+{
+    validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+
+TEST_SUITE_END() // QASYMM8_SIGNED
+TEST_SUITE_END() // Quantized
+
+TEST_SUITE_END() // Convolution3D
+TEST_SUITE_END() // Neon
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/NEON/DirectConvolution3D.cpp b/tests/validation/NEON/DirectConvolution3D.cpp
deleted file mode 100644
index 37c901d..0000000
--- a/tests/validation/NEON/DirectConvolution3D.cpp
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/functions/NEConv3D.h"
-#include "arm_compute/runtime/Tensor.h"
-#include "arm_compute/runtime/TensorAllocator.h"
-#include "tests/NEON/Accessor.h"
-#include "tests/PaddingCalculator.h"
-#include "tests/datasets/ShapeDatasets.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Macros.h"
-#include "tests/framework/datasets/Datasets.h"
-#include "tests/validation/Validation.h"
-#include "tests/validation/fixtures/DirectConvolution3DFixture.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace
-{
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-const RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2f)); /**< Relative tolerance value for FP16 types */
-const AbsoluteTolerance<float>            abs_tolerance_f16(0.2f);                   /**< Absolute tolerance for FP16 types */
-constexpr float                           tolerance_num = 0.07f;                     /**< Tolerance number for the FP16 implementation */
-#endif                                                                               /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f);                           /**< Tolerance for floating point tests */
-
-/** Activation function Dataset*/
-const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
-{
-    ActivationLayerInfo(),
-    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
-});
-
-const auto data_precommit = combine(combine(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(
-                                                                                    datasets::SmallDirectConv3DShapes(),
-                                                                                    framework::dataset::make("StrideX", { 1, 5, 8 })),
-                                                                                framework::dataset::make("StrideY", { 1, 2, 3 })),
-                                                                            framework::dataset::make("StrideZ", { 1, 2, 1 })),
-                                                                        framework::dataset::make("PadX", { 0, 1, 2 })),
-                                                                    framework::dataset::make("PadY", { 0, 2, 1 })),
-                                                                framework::dataset::make("PadZ", { 0, 3, 5 })),
-                                                            framework::dataset::make("KernelWidth", { 3, 5, 9 })),
-                                                        framework::dataset::make("KernelHeight", { 2, 1, 3 })),
-                                                    framework::dataset::make("KernelDepth", { 1, 2, 3 })),
-                                                framework::dataset::make("NumKernels", { 2, 3, 8 })),
-                                            framework::dataset::make("HasBias", { true, false })),
-                                    ActivationFunctionsDataset);
-} // namespace
-
-TEST_SUITE(NEON)
-TEST_SUITE(Convolution3D)
-
-// *INDENT-OFF*
-// clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
-        framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U, 4U), 1U, DataType::F32, DataLayout::NDHWC), // Mismatching data type input/weights
-                                                TensorInfo(TensorShape(27U, 13U, 2U, 4U), 1U, DataType::F32, DataLayout::NDHWC), // Mismatching input feature maps
-                                                TensorInfo(TensorShape(27U, 13U, 2U, 4U), 1U, DataType::F32, DataLayout::NDHWC), // Invalid weights dimensions
-                                                TensorInfo(TensorShape(27U, 13U, 2U, 4U), 1U, DataType::F32, DataLayout::NHWC), // Invalid data layout
-                                                TensorInfo(TensorShape(27U, 13U, 2U, 4U), 1U, DataType::F32, DataLayout::NDHWC), // Invalid biases size
-                                                TensorInfo(TensorShape(27U, 13U, 2U, 4U), 1U, DataType::F32, DataLayout::NDHWC), // Invalid biases dimensions
-                                                TensorInfo(TensorShape(27U, 13U, 2U, 4U), 1U, DataType::F32, DataLayout::NDHWC), // Invalid output size
-                                              }),
-        framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(4U, 3U, 3U, 3U, 2U), 1U, DataType::F16),
-                                                 TensorInfo(TensorShape(4U, 3U, 3U, 3U, 3U), 1U, DataType::F32),
-                                                 TensorInfo(TensorShape(4U, 3U, 3U, 3U, 2U, 3U), 1U, DataType::F32),
-                                                 TensorInfo(TensorShape(4U, 3U, 3U, 3U, 2U), 1U, DataType::F32),
-                                                 TensorInfo(TensorShape(4U, 3U, 3U, 3U, 2U), 1U, DataType::F32),
-                                                 TensorInfo(TensorShape(4U, 3U, 3U, 3U, 2U), 1U, DataType::F32),
-                                                 TensorInfo(TensorShape(4U, 3U, 3U, 3U, 2U), 1U, DataType::F32),
-                                              })),
-        framework::dataset::make("BiasesInfo",{ TensorInfo(TensorShape(4U), 1U, DataType::F32),
-                                                TensorInfo(TensorShape(4U), 1U, DataType::F32),
-                                                TensorInfo(TensorShape(4U), 1U, DataType::F32),
-                                                TensorInfo(TensorShape(4U), 1U, DataType::F32),
-                                                TensorInfo(TensorShape(3U), 1U, DataType::F32),
-                                                TensorInfo(TensorShape(4U, 2U), 1U, DataType::F32),
-                                                TensorInfo(TensorShape(4U), 1U, DataType::F32),
-                                              })),
-        framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 4U), 1U, DataType::F32),
-                                                TensorInfo(TensorShape(25U, 11U, 4U), 1U, DataType::F32),
-                                                TensorInfo(TensorShape(25U, 11U, 4U), 1U, DataType::F32),
-                                                TensorInfo(TensorShape(25U, 11U, 4U), 1U, DataType::F32),
-                                                TensorInfo(TensorShape(25U, 11U, 4U), 1U, DataType::F32),
-                                                TensorInfo(TensorShape(25U, 11U, 4U), 1U, DataType::F32),
-                                                TensorInfo(TensorShape(26U, 11U, 4U), 1U, DataType::F32),
-                                              })),
-        framework::dataset::make("Expected", { false, false, false, false, false, false, false })),
-        input_info, weights_info, biases_info, output_info, expected)
-{
-        const Conv3dInfo  conv3d_info(Size3D(1, 1, 1), Padding3D(0, 0, 0), ActivationLayerInfo(), Size3D(1U, 1U, 1U), DimensionRoundingType::FLOOR, false);
-        bool is_valid = bool(NEConv3D::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &biases_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv3d_info));
-        ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
-}
-// clang-format on
-// *INDENT-ON*
-
-template <typename T>
-using NEDirectConvolution3DFixture = DirectConvolution3DValidationFixture<Tensor, Accessor, NEConv3D, T>;
-
-TEST_SUITE(Float)
-TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectConvolution3DFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(data_precommit,
-                                                                                                                 framework::dataset::make("DataType", DataType::F32)),
-                                                                                                                 framework::dataset::make("DataLayout", { DataLayout::NDHWC })))
-{
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_fp32);
-}
-TEST_SUITE_END() // FP32
-
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectConvolution3DFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(data_precommit,
-                                                                                                                        framework::dataset::make("DataType", DataType::F16)),
-                                                                                                                framework::dataset::make("DataLayout", { DataLayout::NDHWC })))
-{
-    // Validate output
-    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
-}
-TEST_SUITE_END() // FP16
-#endif           /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-
-TEST_SUITE_END() // Float
-TEST_SUITE_END() // Convolution3D
-TEST_SUITE_END() // Neon
-} // namespace validation
-} // namespace test
-} // namespace arm_compute