COMPMID-2108: Fuse Activation Layer in CLDepthwiseConvolutionLayer3x3Kernels for F32

Change-Id: I39dd23696b6d8573e172a59b9e327b6a69886f08
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/973
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Usama Arif <usama.arif@arm.com>
Reviewed-by: Giuseppe Rossini <giuseppe.rossini@arm.com>
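
For context, a minimal sketch of how a caller can request the fused activation exercised by the fixture below. The argument order mirrors the configure() call in the diff (act_info before dilation); the runtime class, headers, activation choice and tensor setup are illustrative assumptions, not part of this patch:

    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"

    using namespace arm_compute;

    // Assumes the CL scheduler has been initialised and the tensors
    // (F32, NCHW or NHWC) have already been configured and allocated.
    void run_fused_dwc(CLTensor &src, CLTensor &weights, CLTensor &biases, CLTensor &dst)
    {
        // Activation to be fused into the depthwise kernel (illustrative choice).
        const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f);

        CLDepthwiseConvolutionLayer dwc;
        // Same parameter order as the fixture's FunctionType::configure():
        // pad/stride, depth multiplier, fused activation, dilation.
        dwc.configure(&src, &weights, &biases, &dst,
                      PadStrideInfo(1, 1, 1, 1), /* depth_multiplier */ 1,
                      act_info, Size2D(1U, 1U));

        dwc.run();
    }

When act_info is default-constructed (disabled), behaviour is unchanged; the reference path in the fixture below applies reference::activation_layer only when act_info.enabled() is true, matching that contract.
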
diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
index dd8bf23..9e6dd4b 100644
--- a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
@@ -33,6 +33,7 @@
 #include "tests/framework/Asserts.h"
 #include "tests/framework/Fixture.h"
 #include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ActivationLayer.h"
 #include "tests/validation/reference/DepthwiseConvolutionLayer.h"
 
 #include "utils/Utils.h"
@@ -56,7 +57,7 @@
 public:
     template <typename...>
     void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType data_type, QuantizationInfo quantization_info,
-               DataLayout data_layout)
+               DataLayout data_layout, ActivationLayerInfo act_info)
     {
         _quantization_info            = quantization_info;
         _data_type                    = data_type;
@@ -64,15 +65,15 @@
 
         TensorShape weights_shape(kernel_size.width, kernel_size.height);
 
-        const TensorInfo in_info(in_shape, 1, data_type);
-        const TensorInfo we_info(weights_shape, 1, data_type);
-        const TensorShape      out_shape = compute_depthwise_convolution_shape(in_info, we_info, pad_stride_info, depth_multiplier, dilation);
+        const TensorInfo  in_info(in_shape, 1, data_type);
+        const TensorInfo  we_info(weights_shape, 1, data_type);
+        const TensorShape out_shape = compute_depthwise_convolution_shape(in_info, we_info, pad_stride_info, depth_multiplier, dilation);
 
         weights_shape.set(2, out_shape.z());
         const TensorShape biases_shape(weights_shape[2]);
 
-        _target    = compute_target(in_shape, weights_shape, biases_shape, out_shape, pad_stride_info, dilation, depth_multiplier, data_type, bias_data_type, quantization_info, data_layout);
-        _reference = compute_reference(in_shape, weights_shape, biases_shape, out_shape, pad_stride_info, dilation, depth_multiplier, data_type, bias_data_type, quantization_info);
+        _target    = compute_target(in_shape, weights_shape, biases_shape, out_shape, pad_stride_info, dilation, depth_multiplier, data_type, bias_data_type, quantization_info, data_layout, act_info);
+        _reference = compute_reference(in_shape, weights_shape, biases_shape, out_shape, pad_stride_info, dilation, depth_multiplier, data_type, bias_data_type, quantization_info, act_info);
     }
 
 protected:
@@ -107,7 +108,7 @@
 
     TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape biases_shape, TensorShape output_shape, PadStrideInfo &pad_stride_info, Size2D dilation,
                               unsigned int   depth_multiplier,
-                              const DataType data_type, const DataType bias_data_type, const QuantizationInfo quantization_info, const DataLayout data_layout)
+                              const DataType data_type, const DataType bias_data_type, const QuantizationInfo quantization_info, const DataLayout data_layout, ActivationLayerInfo act_info)
     {
         if(data_layout == DataLayout::NHWC)
         {
@@ -124,7 +125,7 @@
 
         // Create Depthwise Convolution configure function
         FunctionType dwc;
-        dwc.configure(&src, &weights, &biases, &dst, pad_stride_info, depth_multiplier, ActivationLayerInfo(), dilation);
+        dwc.configure(&src, &weights, &biases, &dst, pad_stride_info, depth_multiplier, act_info, dilation);
 
         ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -155,7 +156,7 @@
 
     SimpleTensor<T> compute_reference(const TensorShape &in_shape, const TensorShape &weights_shape, const TensorShape &biases_shape, const TensorShape &out_shape, const PadStrideInfo &pad_stride_info,
                                       const Size2D &dilation, unsigned int depth_multiplier,
-                                      const DataType data_type, const DataType bias_data_type, const QuantizationInfo quantization_info)
+                                      const DataType data_type, const DataType bias_data_type, const QuantizationInfo quantization_info, ActivationLayerInfo act_info)
     {
         SimpleTensor<T>     src{ in_shape, data_type, 1, quantization_info };
         SimpleTensor<T>     weights{ weights_shape, data_type, 1, quantization_info };
@@ -165,7 +166,8 @@
         fill(weights, 1);
         fill(biases, 2);
 
-        return reference::depthwise_convolution(src, weights, biases, out_shape, pad_stride_info, depth_multiplier, dilation);
+        SimpleTensor<T> depth_out = reference::depthwise_convolution(src, weights, biases, out_shape, pad_stride_info, depth_multiplier, dilation);
+        return (act_info.enabled()) ? reference::activation_layer<T>(depth_out, act_info) : depth_out;
     }
 
     TensorType       _target{};
@@ -179,10 +181,11 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType data_type, DataLayout data_layout)
+    void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType data_type, DataLayout data_layout,
+               ActivationLayerInfo act_info)
     {
         DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier,
-                                                                                                            data_type, QuantizationInfo(), data_layout);
+                                                                                                            data_type, QuantizationInfo(), data_layout, act_info);
     }
 };
 
@@ -192,10 +195,10 @@
 public:
     template <typename...>
     void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType data_type, QuantizationInfo quantization_info,
-               DataLayout data_layout)
+               DataLayout data_layout, ActivationLayerInfo act_info)
     {
         DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier,
-                                                                                                            data_type, quantization_info, data_layout);
+                                                                                                            data_type, quantization_info, data_layout, act_info);
     }
 };
 } // namespace validation