Add PostOp support to GEMM and CLGEMM operators and functions (Part 2)

* Implement PostOp interface changes
* Remove spaces around "=" in TypePrinter

Partially resolves COMPMID-4435

Signed-off-by: SiCongLi <sicong.li@arm.com>
Change-Id: If1e2280554030a0f635e73339a2e86987f6dc41b
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6484
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Sheri Zhang <sheri.zhang@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
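
For context, a minimal usage sketch of the new API (a sketch only: it assumes
the experimental PostOpAct/PostOpEltwiseAdd helpers and the
PostOpList::push_back_op method from the IPostOp/PostOp headers introduced in
Part 1, and all tensor names are hypothetical):

    // Fuse an elementwise addition and a ReLU after the convolution.
    // PostOpEltwiseAdd's second argument is assumed to be the position of the
    // previous op's destination within this op's argument list.
    experimental::PostOpList<ICLTensor *> post_ops{};
    post_ops.push_back_op<experimental::PostOpEltwiseAdd<ICLTensor *>>(&addend, 1, ConvertPolicy::SATURATE);
    post_ops.push_back_op<experimental::PostOpAct<ICLTensor *>>(
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

    CLGEMMConvolutionLayer conv{};
    conv.configure(&src, &weights, &biases, &dst, conv_info, WeightsInfo(), Size2D(1U, 1U),
                   ActivationLayerInfo(), 1 /* num_groups */, post_ops);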
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index bfe85ea..bff672c 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -30,6 +30,7 @@
 #include "arm_compute/core/Size3D.h"
 #include "arm_compute/core/Strides.h"
 #include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/experimental/IPostOp.h"
 #include "arm_compute/core/utils/misc/Macros.h"
 #include "support/Bfloat16.h"
 #include "support/Half.h"
@@ -1963,6 +1964,7 @@
     bool         export_to_cl_image{ false }; /**< True if the reshaped rhs has to be exported to cl_image. n0 must be equal to 4 */
 };
 
+class ITensorInfo;
 /** GEMM information class. This class stores the necessary information to compute GEMM functions
  *
  * This object also contains the information about how matrix A and matrix B have been reshaped
@@ -1984,7 +1986,8 @@
           _fp_mixed_precision(false),
           _broadcast_bias(false),
           _pretranspose_B(true),
-          _activation_info()
+          _activation_info(),
+          _post_ops()
     {
     }
     /** Constructor
@@ -2002,10 +2005,11 @@
      * @param[in] fast_math                   (Optional) Use a data type of shorter width to improve performance
      * @param[in] broadcast_bias              (Optional) Broadcast the shape of the bias tensor from a vector to a matrix.
      * @param[in] activation_info             (Optional) Activation to apply after the matrix multiplication
+     * @param[in] post_ops                    (Optional) A sequence of post operations that are performed after the main operation.
      */
     GEMMInfo(bool is_a_reshaped, bool is_b_reshaped, bool reshape_b_only_on_first_run, int depth_output_gemm3d = 0, bool reinterpret_input_as_3d = false, bool retain_internal_weights = false,
              GEMMLowpOutputStageInfo gemmlowp_output_stage = GEMMLowpOutputStageInfo(), bool fp_mixed_precision = false, bool fast_math = false, bool broadcast_bias = false,
-             const ActivationLayerInfo &activation_info = ActivationLayerInfo()) noexcept
+             const ActivationLayerInfo &activation_info = ActivationLayerInfo(), const experimental::PostOpList<ITensorInfo *> &post_ops = experimental::PostOpList<ITensorInfo *>()) noexcept
         : _is_a_reshaped(is_a_reshaped),
           _is_b_reshaped(is_b_reshaped),
           _reshape_b_only_on_first_run(reshape_b_only_on_first_run),
@@ -2017,7 +2021,8 @@
           _fp_mixed_precision(fp_mixed_precision),
           _broadcast_bias(broadcast_bias),
           _pretranspose_B(reshape_b_only_on_first_run),
-          _activation_info(activation_info)
+          _activation_info(activation_info),
+          _post_ops(post_ops)
     {
     }
     /** Flag which specifies if the matrix A has been reshaped
@@ -2142,20 +2147,37 @@
     {
         _activation_info = activation_info;
     }
+    /** Post operations to apply after the matrix multiplication
+     *
+     * @return experimental::PostOpList object
+     */
+    const experimental::PostOpList<ITensorInfo *> &post_ops() const
+    {
+        return _post_ops;
+    }
+    /** Set post ops
+     *
+     * @param[in] post_ops experimental::PostOpList object to set
+     */
+    void set_post_ops(const experimental::PostOpList<ITensorInfo *> &post_ops)
+    {
+        _post_ops = post_ops;
+    }
 
 private:
-    bool                    _is_a_reshaped;
-    bool                    _is_b_reshaped;
-    bool                    _reshape_b_only_on_first_run;
-    int                     _depth_output_gemm3d;
-    bool                    _reinterpret_input_as_3d;
-    bool                    _retain_internal_weights;
-    GEMMLowpOutputStageInfo _gemmlowp_output_stage;
-    bool                    _fast_math;
-    bool                    _fp_mixed_precision;
-    bool                    _broadcast_bias;
-    bool                    _pretranspose_B;
-    ActivationLayerInfo     _activation_info;
+    bool                                    _is_a_reshaped;
+    bool                                    _is_b_reshaped;
+    bool                                    _reshape_b_only_on_first_run;
+    int                                     _depth_output_gemm3d;
+    bool                                    _reinterpret_input_as_3d;
+    bool                                    _retain_internal_weights;
+    GEMMLowpOutputStageInfo                 _gemmlowp_output_stage;
+    bool                                    _fast_math;
+    bool                                    _fp_mixed_precision;
+    bool                                    _broadcast_bias;
+    bool                                    _pretranspose_B;
+    ActivationLayerInfo                     _activation_info;
+    experimental::PostOpList<ITensorInfo *> _post_ops;
 };
 
 /** Winograd information */
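
A short sketch of the two ways GEMMInfo now receives post ops (sketch only;
addend_info is a hypothetical ITensorInfo, and push_back_op/PostOpEltwiseAdd
are assumed from the experimental PostOp headers):

    experimental::PostOpList<ITensorInfo *> post_ops{};
    post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo *>>(&addend_info, 1, ConvertPolicy::SATURATE);

    // Either set post ops after construction...
    GEMMInfo gemm_info{};
    gemm_info.set_post_ops(post_ops);

    // ...or pass them as the new trailing constructor argument:
    GEMMInfo gemm_info2(false, false, true, 0, false, false, GEMMLowpOutputStageInfo(), false, false, true,
                        ActivationLayerInfo(), post_ops);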
diff --git a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
index 0f092bd..38a4019 100644
--- a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
@@ -26,6 +26,7 @@
 
 #include "arm_compute/core/CL/CLCompileContext.h"
 #include "arm_compute/core/Types.h"
+#include "arm_compute/core/experimental/IPostOp.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
 #include "arm_compute/runtime/IFunction.h"
 #include "arm_compute/runtime/IMemoryManager.h"
@@ -118,9 +119,11 @@
      * @param[in]  enable_fast_math (Optional) Enable fast math computation. In case this flag were set, the function could dispatch the fastest implementation
      *                              available which may introduce a drop of accuracy as well. Default is false
      * @param[in]  num_groups       (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
+     * @param[in]  post_ops         (Optional) A sequence of post operations that are performed after the main operation.
      */
     void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo(),
-                   const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false, unsigned int num_groups = 1);
+                   const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false, unsigned int num_groups = 1,
+                   const experimental::PostOpList<ICLTensor *> &post_ops = experimental::PostOpList<ICLTensor *> {});
     /** Set the input and output tensors.
      *
      * @param[in]  compile_context  The compile context to be used.
@@ -140,10 +143,11 @@
      * @param[in]  enable_fast_math (Optional) Enable fast math computation. In case this flag were set, the function could dispatch the fastest implementation
      *                              available which may introduce a drop of accuracy as well. Default is false
      * @param[in]  num_groups       (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
+     * @param[in]  post_ops         (Optional) A sequence of post operations that are performed after the main operation.
      */
     void configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
                    const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false,
-                   unsigned int num_groups = 1);
+                   unsigned int num_groups = 1, const experimental::PostOpList<ICLTensor *> &post_ops = experimental::PostOpList<ICLTensor *> {});
     /** Static function to check if given info will lead to a valid configuration of @ref CLConvolutionLayer
      *
      * @param[in] input            Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
@@ -162,12 +166,13 @@
      * @param[in] enable_fast_math (Optional) Enable fast math computation. In case this flag were set, the function could dispatch the fastest implementation
      *                             available which may introduce a drop of accuracy as well. Default is false
      * @param[in] num_groups       (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
+     * @param[in] post_ops         (Optional) A sequence of post operations that are performed after the main operation.
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                            const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false,
-                           unsigned int num_groups = 1);
+                           unsigned int num_groups = 1, const experimental::PostOpList<ITensorInfo *> &post_ops = experimental::PostOpList<ITensorInfo *> {});
     /** Static function to check if given info will return the convolution called by @ref CLConvolutionLayer
      *
      * @param[in] input            Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
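
As the extended validate()/configure() pair above suggests, callers pass the
ITensorInfo-typed list to validate() and the ICLTensor-typed list to
configure(). A sketch (post_ops_info/post_ops built as in the earlier
sketches; tensor names hypothetical):

    Status status = CLConvolutionLayer::validate(src.info(), weights.info(), biases.info(), dst.info(),
                                                 conv_info, WeightsInfo(), Size2D(1U, 1U), act_info,
                                                 false /* enable_fast_math */, 1 /* num_groups */, post_ops_info);
    ARM_COMPUTE_ERROR_THROW_ON(status);

    CLConvolutionLayer conv{};
    conv.configure(&src, &weights, &biases, &dst, conv_info, WeightsInfo(), Size2D(1U, 1U), act_info,
                   false /* enable_fast_math */, 1 /* num_groups */, post_ops);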
diff --git a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
index d7a4e7f..9918a61 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
@@ -24,6 +24,7 @@
 #ifndef ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H
 #define ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H
 
+#include "arm_compute/core/experimental/IPostOp.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
 #include "arm_compute/runtime/CL/CLTypes.h"
 #include "arm_compute/runtime/IFunction.h"
@@ -93,9 +94,11 @@
      * @param[in]  dilation     (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
      * @param[in]  act_info     (Optional) Activation layer information in case of a fused activation.
      * @param[in]  num_groups   (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
+     * @param[in]  post_ops     (Optional) A sequence of post operations that are performed after the main operation.
      */
     void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo(),
-                   const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), unsigned int num_groups = 1);
+                   const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), unsigned int num_groups = 1,
+                   const experimental::PostOpList<ICLTensor *> &post_ops = experimental::PostOpList<ICLTensor *> {});
     /** Set the input and output tensors.
      *
      * @param[in]  compile_context The compile context to be used.
@@ -114,10 +117,12 @@
      * @param[in]  dilation        (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
      * @param[in]  act_info        (Optional) Activation layer information in case of a fused activation.
      * @param[in]  num_groups      (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
+     * @param[in]  post_ops        (Optional) A sequence of post operations that are performed after the main operation.
      */
     void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
                    const WeightsInfo &weights_info = WeightsInfo(),
-                   const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), unsigned int num_groups = 1);
+                   const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), unsigned int num_groups = 1,
+                   const experimental::PostOpList<ICLTensor *> &post_ops = experimental::PostOpList<ICLTensor *> {});
     /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMConvolutionLayer.
      *
      * @param[in]  input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
@@ -135,11 +140,13 @@
      * @param[in]  dilation     (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
      * @param[in]  act_info     (Optional) Activation layer information in case of a fused activation.
      * @param[in]  num_groups   (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
+     * @param[in]  post_ops     (Optional) A sequence of post operations that are performed after the main operation.
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
-                           const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), unsigned int num_groups = 1);
+                           const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), unsigned int num_groups = 1,
+                           const experimental::PostOpList<ITensorInfo *> &post_ops = experimental::PostOpList<ITensorInfo *> {});
 
     // Inherited methods overridden:
     void run() override;
diff --git a/arm_compute/runtime/FunctionDescriptors.h b/arm_compute/runtime/FunctionDescriptors.h
index 07a8f66..face8a6 100644
--- a/arm_compute/runtime/FunctionDescriptors.h
+++ b/arm_compute/runtime/FunctionDescriptors.h
@@ -57,20 +57,22 @@
 {
     Conv2dInfo() = default;
 
-    Conv2dInfo(const PadStrideInfo       &conv_info,
-               const Size2D              &dilation,
-               const ActivationLayerInfo &act_info,
-               bool                       enable_fast_math,
-               unsigned int               num_groups)
-        : conv_info(conv_info), dilation(dilation), act_info(act_info), enable_fast_math(enable_fast_math), num_groups(num_groups)
+    Conv2dInfo(const PadStrideInfo                           &conv_info,
+               const Size2D                                  &dilation,
+               const ActivationLayerInfo                     &act_info,
+               bool                                           enable_fast_math,
+               unsigned int                                   num_groups,
+               const experimental::PostOpList<ITensorInfo *> &post_ops = experimental::PostOpList<ITensorInfo *> {})
+        : conv_info(conv_info), dilation(dilation), act_info(act_info), enable_fast_math(enable_fast_math), num_groups(num_groups), post_ops(post_ops)
     {
     }
 
-    PadStrideInfo       conv_info{};
-    Size2D              dilation{ 1U, 1U };
-    ActivationLayerInfo act_info{};
-    bool                enable_fast_math{ false };
-    unsigned int        num_groups{ 1 };
+    PadStrideInfo                           conv_info{};
+    Size2D                                  dilation{ 1U, 1U };
+    ActivationLayerInfo                     act_info{};
+    bool                                    enable_fast_math{ false };
+    unsigned int                            num_groups{ 1 };
+    experimental::PostOpList<ITensorInfo *> post_ops{};
 };
 
 /** Descriptor used by the 3d Convolution function */
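
A sketch of constructing the extended descriptor directly (fields as declared
above; the pad/stride values are arbitrary):

    experimental::PostOpList<ITensorInfo *> post_ops{};
    const Conv2dInfo conv2d_info(PadStrideInfo(1, 1, 0, 0), Size2D(1U, 1U), ActivationLayerInfo(),
                                 false /* enable_fast_math */, 1 /* num_groups */, post_ops);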
diff --git a/src/gpu/cl/operators/ClConv2d.cpp b/src/gpu/cl/operators/ClConv2d.cpp
index 7fe0de7..d633c8f 100644
--- a/src/gpu/cl/operators/ClConv2d.cpp
+++ b/src/gpu/cl/operators/ClConv2d.cpp
@@ -92,6 +92,7 @@
         case ConvolutionMethod::WINOGRAD:
         {
             ARM_COMPUTE_ERROR_ON(conv2d_info.num_groups != 1);
+            ARM_COMPUTE_ERROR_ON(conv2d_info.post_ops.size() > 0);
             auto f = std::make_unique<ClWinogradConv2d>();
             f->configure(compile_context, src, weights, biases, dst, conv2d_info.conv_info, conv2d_info.act_info, conv2d_info.enable_fast_math);
             _operator = std::move(f);
@@ -100,6 +101,7 @@
         case ConvolutionMethod::DIRECT:
         {
             ARM_COMPUTE_ERROR_ON(conv2d_info.num_groups != 1);
+            ARM_COMPUTE_ERROR_ON(conv2d_info.post_ops.size() > 0);
             auto f = std::make_unique<ClDirectConv2d>();
             f->configure(compile_context, src, weights, biases, dst, conv2d_info.conv_info, conv2d_info.act_info);
             _operator = std::move(f);
@@ -133,6 +135,7 @@
         {
             //Validate Winograd
             ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv2d_info.num_groups != 1, "Grouping (num_groups != 1) with ClWinogradConv2d is not supported");
+            ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv2d_info.post_ops.size() > 0, "ClWinogradConv2d does not support PostOps");
             ARM_COMPUTE_RETURN_ON_ERROR(ClWinogradConv2d::validate(src, weights, biases, dst, conv2d_info.conv_info, conv2d_info.act_info, conv2d_info.enable_fast_math));
             break;
         }
@@ -140,6 +143,7 @@
         {
             // Validate direct convolution layer
             ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv2d_info.num_groups != 1, "Grouping (num_groups != 1) with ClDirectConv2d is not supported");
+            ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv2d_info.post_ops.size() > 0, "ClDirectConv2d does not support PostOps");
             ARM_COMPUTE_RETURN_ON_ERROR(ClDirectConv2d::validate(src, weights, biases, dst, conv2d_info.conv_info, conv2d_info.act_info));
             break;
         }
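
The guards added above follow one pattern: any dispatch path that cannot fuse
post ops rejects them in both configure() and validate(). A new backend case
would be expected to do the same (sketch; "MyConv2d" is hypothetical):

    // In configure():
    ARM_COMPUTE_ERROR_ON(conv2d_info.post_ops.size() > 0);
    // In validate():
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv2d_info.post_ops.size() > 0, "MyConv2d does not support PostOps");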
diff --git a/src/gpu/cl/operators/ClGemm.cpp b/src/gpu/cl/operators/ClGemm.cpp
index d2d0f8f..e05256e 100644
--- a/src/gpu/cl/operators/ClGemm.cpp
+++ b/src/gpu/cl/operators/ClGemm.cpp
@@ -38,6 +38,7 @@
 #include "arm_compute/runtime/CL/CLScheduler.h"
 #include "arm_compute/runtime/ITensorAllocator.h"
 
+#include "arm_compute/core/experimental/IPostOp.h"
 #include "src/core/helpers/AutoConfiguration.h"
 #include "src/core/helpers/MemoryHelpers.h"
 #include "src/core/utils/helpers/float_ops.h"
@@ -64,7 +65,7 @@
 {
 inline bool validate_gemm_kernel(CLGEMMKernelType kernel_type)
 {
-    return kernel_type == CLGEMMKernelType::NATIVE? false : true;
+    return kernel_type == CLGEMMKernelType::NATIVE ? false : true;
 }
 //Automatically select between mlgo (prioritized) and default heuristics for gemm kernel type
 inline CLGEMMKernelType auto_select_gemm_kernel(auto_heuristics::CommonQuery query, bool reshape_b_only_on_first_run, bool constant_weights)
@@ -203,6 +204,7 @@
 void ClGemm::configure_native(const CLCompileContext &compile_context, ITensorInfo *a, ITensorInfo *b, ITensorInfo *c, ITensorInfo *output, float alpha, float beta,
                               const GEMMInfo &gemm_info)
 {
+    ARM_COMPUTE_ERROR_ON_MSG(gemm_info.post_ops().size() > 0, "PostOps are not supported in this kernel");
     DataType           data_type               = a->data_type();
     bool               reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d();
     const unsigned int m                       = reinterpret_input_as_3d ? (a->dimension(1) * a->dimension(2)) : a->dimension(1);
@@ -252,6 +254,7 @@
     kernel_info.reinterpret_input_as_3d = false;
     kernel_info.broadcast_bias          = broadcast_bias;
     kernel_info.activation_info         = gemm_info.activation_info();
+    kernel_info.post_ops                = gemm_info.post_ops();
 
     // Set the target for the kernels
     _reshape_lhs_kernel->set_target(gpu_target);
@@ -278,6 +281,7 @@
 void ClGemm::configure_reshaped_only_rhs(const CLCompileContext &compile_context, ITensorInfo *a, ITensorInfo *b, ITensorInfo *c, ITensorInfo *output, float alpha, float beta,
                                          const GEMMInfo &gemm_info)
 {
+    ARM_COMPUTE_ERROR_ON_MSG(gemm_info.post_ops().size() > 0, "PostOps are not supported in this kernel");
     DataType           data_type               = a->data_type();
     bool               reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d();
     const unsigned int m                       = reinterpret_input_as_3d ? (a->dimension(1) * a->dimension(2)) : a->dimension(1);
@@ -330,6 +334,7 @@
 {
     ARM_COMPUTE_UNUSED(alpha);
     ARM_COMPUTE_UNUSED(output);
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.post_ops().size() > 0, "PostOps are not supported in this kernel");
 
     // Get the GPU target
     const GPUTarget    gpu_target              = CLScheduler::get().target();
@@ -386,6 +391,7 @@
     kernel_info.reinterpret_input_as_3d = false;
     kernel_info.broadcast_bias          = broadcast_bias;
     kernel_info.activation_info         = gemm_info.activation_info();
+    kernel_info.post_ops                = gemm_info.post_ops();
 
     GEMMLHSMatrixInfo lhs_info;
     GEMMRHSMatrixInfo rhs_info;
@@ -412,6 +418,7 @@
 {
     ARM_COMPUTE_UNUSED(alpha);
     ARM_COMPUTE_UNUSED(output);
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.post_ops().size() > 0, "PostOps are not supported in this kernel");
 
     TensorInfo tmp_b_info{};
 
@@ -588,8 +595,10 @@
                 ITensorPack reshape_rhs_pack{ { ACL_SRC, rhs }, { ACL_DST, rhs_reshaped.get() } };
                 CLScheduler::get().enqueue_op(*_reshape_rhs_kernel, reshape_rhs_pack, false);
             }
-
-            ITensorPack gemm_reshaped_pack{ { ACL_SRC_0, lhs_reshaped.get() }, { ACL_SRC_1, rhs_reshaped.get() }, { ACL_SRC_2, src2 }, { ACL_DST, dst } };
+            // Copy original tensor pack and overwrite lhs and rhs with reshaped counterparts
+            ITensorPack gemm_reshaped_pack(tensors);
+            gemm_reshaped_pack.add_const_tensor(ACL_SRC_0, lhs_reshaped.get());
+            gemm_reshaped_pack.add_const_tensor(ACL_SRC_1, rhs_reshaped.get());
 
             if(_gemm_kernel_type == CLGEMMKernelType::RESHAPED)
             {
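
The pack copy above is what keeps post ops working through the reshaped path:
the caller's pack already carries the post-op argument tensors in the
experimental slots, so only the lhs/rhs entries are overwritten. A sketch of
the resulting pack (assuming the first post-op argument maps to ACL_SRC_3 via
experimental::get_post_op_arg_type):

    // Caller's pack: { ACL_SRC_0: lhs, ACL_SRC_1: rhs, ACL_SRC_2: bias,
    //                  ACL_SRC_3: post_op_arg0, ACL_DST: dst }
    ITensorPack gemm_reshaped_pack(tensors);                            // bias, post-op args and dst survive
    gemm_reshaped_pack.add_const_tensor(ACL_SRC_0, lhs_reshaped.get()); // overwrite lhs only
    gemm_reshaped_pack.add_const_tensor(ACL_SRC_1, rhs_reshaped.get()); // overwrite rhs only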
diff --git a/src/gpu/cl/operators/ClGemm.h b/src/gpu/cl/operators/ClGemm.h
index fd53648..e084e53 100644
--- a/src/gpu/cl/operators/ClGemm.h
+++ b/src/gpu/cl/operators/ClGemm.h
@@ -81,8 +81,8 @@
      * @param[in]  alpha           Weight of the matrix product
      * @param[in]  beta            Weight of matrix C
      * @param[in]  gemm_info       (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
-     *                       if the reshape of matrix B should happen only for the first run. GEMMInfo also contains information about the reshaping
-     *                       in case matrix A and matrix B have been already transformed.
+     *                             if the reshape of matrix B should happen only for the first run. GEMMInfo also contains information about the reshaping
+     *                             in case matrix A and matrix B have been already transformed.
      */
     void configure(const CLCompileContext &compile_context, ITensorInfo *a, ITensorInfo *b, ITensorInfo *c, ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info);
     /** Static function to check if given info will lead to a valid configuration
diff --git a/src/gpu/cl/operators/ClGemmConv2d.cpp b/src/gpu/cl/operators/ClGemmConv2d.cpp
index 785f1f1..7db5fa0 100644
--- a/src/gpu/cl/operators/ClGemmConv2d.cpp
+++ b/src/gpu/cl/operators/ClGemmConv2d.cpp
@@ -54,14 +54,14 @@
 {
 ClGemmConv2d::ClGemmConv2d()
     : _weights_reshape_kernel(nullptr), _im2col_kernel(nullptr), _mm_gemm(nullptr), _mm_gemmlowp(nullptr), _col2im_kernel(nullptr), _activation_kernel(nullptr), _im2col_output(), _weights_reshaped(),
-      _gemm_output(), _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _fuse_activation(true), _append_bias(false), _is_prepared(false), _aux_mem(AuxTensorIdx::Count)
+      _gemm_output(), _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _fuse_activation(true), _append_bias(false), _is_prepared(false), _use_post_ops(false), _aux_mem(AuxTensorIdx::Count)
 {
 }
 ClGemmConv2d::~ClGemmConv2d() = default;
 
 void ClGemmConv2d::configure_mm(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
                                 const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
-                                int gemm_3d_depth, const ActivationLayerInfo &act_info)
+                                int gemm_3d_depth, const ActivationLayerInfo &act_info, const experimental::PostOpList<ITensorInfo *> &post_ops)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights);
     ARM_COMPUTE_ERROR_THROW_ON(validate_mm(src, weights, biases, dst, gemmlowp_output_stage, gemm_3d_depth, _skip_im2col, act_info));
@@ -76,11 +76,14 @@
                                          false,                 // fast_math
                                          false,                 // fp_mixed_precision
                                          true,                  // broadcast_bias
-                                         act_info);             // activation_info
+                                         act_info,              // activation_info
+                                         post_ops               // post ops
+                                        );
 
     TensorInfo tmp_src{ *src };
     if(_is_quantized)
     {
+        ARM_COMPUTE_ERROR_ON_MSG(post_ops.size() > 0, "ClGemmConv2d quantized types do not support post ops");
         // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
         // Extract and negate input and weights offset
         const QuantizationInfo input_quantization_info   = src->quantization_info();
@@ -115,7 +118,7 @@
 }
 
 Status ClGemmConv2d::validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
-                                 const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info)
+                                 const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info, const experimental::PostOpList<ITensorInfo *> &post_ops)
 {
     const bool is_quantized = is_data_type_quantized_asymmetric(src->data_type());
 
@@ -129,10 +132,13 @@
                                          false,                 // fast_math
                                          false,                 // fp_mixed_precision
                                          true,                  // broadcast_bias
-                                         act_info);             // activation_info
+                                         act_info,              // activation_info
+                                         post_ops               // post ops
+                                        );
 
     if(is_quantized)
     {
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(post_ops.size() > 0, "ClGemmConv2d quantized types do not support post ops");
         // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
         // Extract and negate input and weights offset
         const QuantizationInfo input_quantization_info   = src->quantization_info();
@@ -183,6 +189,7 @@
 
     // Only for quantize there are few cases where we cannot fuse the activation function in GEMM
     _fuse_activation = true;
+    _use_post_ops    = conv2d_info.post_ops.size() > 0;
 
     const ITensorInfo *gemm_input_to_use  = src;
     ITensorInfo       *gemm_output_to_use = dst;
@@ -311,10 +318,11 @@
     // In case of NHWC, we need to run GEMM3D (gemm_3d_depth != 0) in order to avoid reshaping the output matrix
     const unsigned int gemm_3d_depth = (data_layout == DataLayout::NHWC) ? conv_h : 0;
 
-    configure_mm(compile_context, gemm_input_to_use, &_weights_reshaped, biases_to_use, gemm_output_to_use, gemmlowp_output_stage, gemm_3d_depth, conv2d_info.act_info);
+    configure_mm(compile_context, gemm_input_to_use, &_weights_reshaped, biases_to_use, gemm_output_to_use, gemmlowp_output_stage, gemm_3d_depth, conv2d_info.act_info, conv2d_info.post_ops);
 
     if(!_skip_col2im)
     {
+        ARM_COMPUTE_ERROR_ON_MSG(conv2d_info.post_ops.size() > 0, "ClGemmConv2d does not support post ops with col2im operation"); // Post ops must be performed after every other op
         // Set the GPU target for col2im
         _col2im_kernel = std::make_unique<opencl::kernels::ClCol2ImKernel>();
         _col2im_kernel->set_target(CLScheduler::get().target());
@@ -326,7 +334,8 @@
     ARM_COMPUTE_ERROR_ON_MSG((dst->dimension(idx_width) != conv_w) || (dst->dimension(idx_height) != conv_h),
                              "Output shape does not match the expected one");
 
-    if(!_fuse_activation)
+    // Disable running of activation kernel if post ops are used
+    if(!_fuse_activation && !_use_post_ops)
     {
         _activation_kernel = std::make_unique<opencl::kernels::ClActivationKernel>();
         _activation_kernel->configure(compile_context, dst, nullptr, conv2d_info.act_info);
@@ -376,6 +385,7 @@
                                              && conv2d_info.conv_info.stride().second == 1);
     const bool skip_col2im     = data_layout == DataLayout::NHWC;
     bool       fuse_activation = true;
+    bool       use_post_ops    = conv2d_info.post_ops.size() > 0;
 
     ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(idx_channel) * conv2d_info.num_groups) != src->dimension(idx_channel));
     ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);
@@ -507,16 +517,19 @@
     // In case of NHWC, we need to run GEMM3D (gemm_3d_depth != 0) in order to avoid reshaping the output matrix
     const unsigned int gemm_3d_depth = (data_layout == DataLayout::NHWC) ? conv_h : 0;
 
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, biases_to_use, gemm_output_to_use, gemmlowp_output_stage, gemm_3d_depth, skip_im2col, conv2d_info.act_info));
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, biases_to_use, gemm_output_to_use, gemmlowp_output_stage, gemm_3d_depth, skip_im2col, conv2d_info.act_info,
+                                            conv2d_info.post_ops));
 
     // Validate Col2Im
     if(!skip_col2im)
     {
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv2d_info.post_ops.size() > 0, "ClGemmConv2d does not support post ops with col2im operation"); // Post ops must be performed after every other op
         ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClCol2ImKernel::validate(gemm_output_to_use, dst, Size2D(conv_w, conv_h), conv2d_info.num_groups));
     }
 
-    //Validate Activation Layer
-    if(!fuse_activation)
+    // Validate Activation Layer
+    // Disable running (and thus validation) of the activation kernel if post ops are used
+    if(!fuse_activation && !use_post_ops)
     {
         ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClActivationKernel::validate(dst, nullptr, conv2d_info.act_info));
     }
@@ -585,7 +598,8 @@
     }
 
     //Run Activation Layer if we cannot fuse in GEMM
-    if(!_fuse_activation)
+    // Disable running of activation kernel if post ops are used
+    if(!_fuse_activation && !_use_post_ops)
     {
         ITensorPack pack =
         {
diff --git a/src/gpu/cl/operators/ClGemmConv2d.h b/src/gpu/cl/operators/ClGemmConv2d.h
index 9a5e381..afde7c5 100644
--- a/src/gpu/cl/operators/ClGemmConv2d.h
+++ b/src/gpu/cl/operators/ClGemmConv2d.h
@@ -26,6 +26,7 @@
 
 #include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/core/Types.h"
+#include "arm_compute/core/experimental/IPostOp.h"
 #include "arm_compute/runtime/FunctionDescriptors.h"
 #include "src/gpu/cl/ClCompileContext.h"
 #include "src/gpu/cl/IClOperator.h"
@@ -132,7 +133,7 @@
      */
     void configure_mm(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
                       const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
-                      int gemm_3d_depth, const ActivationLayerInfo &act_info);
+                      int gemm_3d_depth, const ActivationLayerInfo &act_info, const experimental::PostOpList<ITensorInfo *> &post_ops = experimental::PostOpList<ITensorInfo *> {});
     /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMConvolutionLayer matrix multiply routines
      *
      * @param[in] src                   Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
@@ -149,7 +150,7 @@
      * @return a status
      */
     static Status validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
-                              int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info);
+                              int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info, const experimental::PostOpList<ITensorInfo *> &post_ops = experimental::PostOpList<ITensorInfo *> {});
 
     enum AuxTensorIdx
     {
@@ -177,6 +178,7 @@
     bool _fuse_activation;
     bool _append_bias;
     bool _is_prepared;
+    bool _use_post_ops;
 
     experimental::MemoryRequirements _aux_mem;
 };
diff --git a/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp b/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp
index 234a0df..e8affc0 100644
--- a/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp
+++ b/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp
@@ -29,10 +29,11 @@
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/common/utils/Log.h"
 
 #include "src/core/CL/kernels/CLBatchNormalizationLayerKernel.h"
 
+#include "src/common/utils/Log.h"
+
 namespace arm_compute
 {
 CLBatchNormalizationLayer::CLBatchNormalizationLayer()
diff --git a/src/runtime/CL/functions/CLConvolutionLayer.cpp b/src/runtime/CL/functions/CLConvolutionLayer.cpp
index eaca6ee..d75f54f 100644
--- a/src/runtime/CL/functions/CLConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLConvolutionLayer.cpp
@@ -60,21 +60,26 @@
 CLConvolutionLayer::~CLConvolutionLayer() = default;
 
 void CLConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
-                                   const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
+                                   const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups, const experimental::PostOpList<ICLTensor *> &post_ops)
 {
-    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);
+    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups, post_ops);
 }
 
 void CLConvolutionLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
                                    const WeightsInfo &weights_info,
-                                   const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
+                                   const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups, const experimental::PostOpList<ICLTensor *> &post_ops)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
     ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayer::validate(input->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), output->info(), conv_info, weights_info, dilation, act_info,
                                                             enable_fast_math, num_groups));
-    ARM_COMPUTE_LOG_PARAMS(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);
+    ARM_COMPUTE_LOG_PARAMS(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups, post_ops);
 
-    const Conv2dInfo conv2d_info = Conv2dInfo(conv_info, dilation, act_info, enable_fast_math, num_groups);
+    // Convert post op arguments to ITensorInfo
+    auto transformed_post_ops = experimental::transform_post_op_list_arguments<ICLTensor *, ITensorInfo *>(post_ops, [](auto tensor)
+    {
+        return tensor->info();
+    });
+    const Conv2dInfo conv2d_info = Conv2dInfo(conv_info, dilation, act_info, enable_fast_math, num_groups, transformed_post_ops);
 
     switch(opencl::ClConv2d::get_convolution_method(input->info(), weights->info(), output->info(), conv2d_info,
                                                     weights_info, CLScheduler::get().target()))
@@ -90,6 +95,7 @@
         }
         case ConvolutionMethod::FFT:
         {
+            ARM_COMPUTE_ERROR_ON_MSG(post_ops.size() > 0, "CLFFTConvolutionLayer does not support post ops");
             auto f = std::make_unique<CLFFTConvolutionLayer>(_impl->memory_manager);
             f->configure(compile_context, input, weights, biases, output, conv_info, act_info, enable_fast_math);
             _impl->func = std::move(f);
@@ -102,22 +108,30 @@
 
     if(_impl->op)
     {
-        _impl->memory_group = MemoryGroup(std::move(_impl->memory_manager));
-        _impl->aux_mem_req  = _impl->op->workspace();
-        _impl->run_pack     = { { ACL_SRC_0, input }, { ACL_SRC_1, weights }, { ACL_SRC_2, biases }, { ACL_DST, output } };
-        _impl->prep_pack    = { { ACL_SRC_1, weights }, { ACL_SRC_2, biases } };
-        _impl->workspace    = manage_workspace<CLTensor>(_impl->aux_mem_req, _impl->memory_group, _impl->run_pack, _impl->prep_pack);
+        _impl->memory_group         = MemoryGroup(std::move(_impl->memory_manager));
+        _impl->aux_mem_req          = _impl->op->workspace();
+        _impl->run_pack             = { { ACL_SRC_0, input }, { ACL_SRC_1, weights }, { ACL_SRC_2, biases }, { ACL_DST, output } };
+        size_t post_op_tensor_index = 0;
+        for(const auto &op : post_ops.get_list())
+        {
+            for(auto &tensor : op->arguments())
+            {
+                _impl->run_pack.add_const_tensor(experimental::get_post_op_arg_type(post_op_tensor_index++), *tensor);
+            }
+        }
+        _impl->prep_pack = { { ACL_SRC_1, weights }, { ACL_SRC_2, biases } };
+        _impl->workspace = manage_workspace<CLTensor>(_impl->aux_mem_req, _impl->memory_group, _impl->run_pack, _impl->prep_pack);
     }
 }
 
 Status CLConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
-                                    const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
+                                    const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups, const experimental::PostOpList<ITensorInfo *> &post_ops)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_layout() != DataLayout::NCHW), "Grouping (num_groups != 1) with NHWC data layout is not supported");
 
     const GPUTarget  gpu_target  = CLScheduler::get().target();
-    const Conv2dInfo conv2d_info = Conv2dInfo(conv_info, dilation, act_info, enable_fast_math, num_groups);
+    const Conv2dInfo conv2d_info = Conv2dInfo(conv_info, dilation, act_info, enable_fast_math, num_groups, post_ops);
 
     switch(opencl::ClConv2d::get_convolution_method(input, weights, output, conv2d_info, weights_info, gpu_target))
     {
@@ -131,6 +145,7 @@
         case ConvolutionMethod::FFT:
         {
             // Validate FFT-based convolution layer
+            ARM_COMPUTE_RETURN_ERROR_ON_MSG(post_ops.size() > 0, "CLFFTConvolutionLayer does not support post ops");
             ARM_COMPUTE_RETURN_ON_ERROR(CLFFTConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math));
             break;
         }
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index 837527b..1eabee6 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -31,6 +31,7 @@
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
+#include "src/core/experimental/PostOp.h"
 #include "src/core/helpers/MemoryHelpers.h"
 #include "src/gpu/cl/operators/ClGemmConv2d.h"
 #include "support/Cast.h"
@@ -68,19 +69,24 @@
 CLGEMMConvolutionLayer::~CLGEMMConvolutionLayer() = default;
 
 void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
-                                       const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
+                                       const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups, const experimental::PostOpList<ICLTensor *> &post_ops)
 {
-    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, weights_info, dilation, act_info, num_groups);
+    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, weights_info, dilation, act_info, num_groups, post_ops);
 }
 
 void CLGEMMConvolutionLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                        const PadStrideInfo &conv_info,
-                                       const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
+                                       const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups, const experimental::PostOpList<ICLTensor *> &post_ops)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
-    _impl->weights               = weights;
-    _impl->op                    = std::make_unique<opencl::ClGemmConv2d>();
-    const Conv2dInfo conv2d_info = Conv2dInfo(conv_info, dilation, act_info, false, num_groups);
+    _impl->weights = weights;
+    _impl->op      = std::make_unique<opencl::ClGemmConv2d>();
+    // Convert post op arguments to ITensorInfo
+    auto transformed_post_ops = experimental::transform_post_op_list_arguments<ICLTensor *, ITensorInfo *>(post_ops, [](auto tensor)
+    {
+        return tensor->info();
+    });
+    const Conv2dInfo conv2d_info = Conv2dInfo(conv_info, dilation, act_info, false, num_groups, transformed_post_ops);
     _impl->op->configure(compile_context, input->info(), weights->info(), (biases != nullptr ? biases->info() : nullptr), output->info(), conv2d_info, weights_info);
 
     _impl->run_pack =
@@ -90,6 +96,15 @@
         { TensorType::ACL_SRC_2, biases },
         { TensorType::ACL_DST, output }
     };
+    // Add post op tensors
+    size_t post_op_tensor_index = 0;
+    for(const auto &op : post_ops.get_list())
+    {
+        for(auto &tensor : op->arguments())
+        {
+            _impl->run_pack.add_const_tensor(experimental::get_post_op_arg_type(post_op_tensor_index++), *tensor);
+        }
+    }
     _impl->prep_pack =
     {
         { TensorType::ACL_SRC_1, weights },
@@ -100,9 +115,9 @@
 }
 
 Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
-                                        const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
+                                        const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups, const experimental::PostOpList<ITensorInfo *> &post_ops)
 {
-    const Conv2dInfo conv2d_info = Conv2dInfo(conv_info, dilation, act_info, false, num_groups);
+    const Conv2dInfo conv2d_info = Conv2dInfo(conv_info, dilation, act_info, false, num_groups, post_ops);
     return opencl::ClGemmConv2d::validate(input, weights, biases, output, conv2d_info, weights_info);
 }
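
The flattening loop above appears in both CLConvolutionLayer and
CLGEMMConvolutionLayer; the consuming kernel is expected to look the tensors
back up by the same slot sequence. A sketch of that lookup (assumptions: the
slot mapping comes from experimental::get_post_op_arg_type, and arguments()
yields pointers to the op's tensor arguments as in the loops above):

    size_t idx = 0;
    for(const auto &op : post_ops.get_list())
    {
        for(size_t i = 0; i < op->arguments().size(); ++i)
        {
            const ITensor *arg = tensors.get_const_tensor(experimental::get_post_op_arg_type(idx++));
            // ... bind 'arg' to the kernel argument for this post op
        }
    }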
 
diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h
index 30ba667..950d322 100644
--- a/utils/TypePrinter.h
+++ b/utils/TypePrinter.h
@@ -363,18 +363,18 @@
  */
 inline ::std::ostream &operator<<(::std::ostream &os, const GEMMKernelInfo &gemm_info)
 {
-    os << "( m= " << gemm_info.m;
-    os << " n= " << gemm_info.n;
-    os << " k= " << gemm_info.k;
-    os << " depth_output_gemm3d= " << gemm_info.depth_output_gemm3d;
-    os << " reinterpret_input_as_3d= " << gemm_info.reinterpret_input_as_3d;
-    os << " broadcast_bias= " << gemm_info.broadcast_bias;
-    os << " fp_mixed_precision= " << gemm_info.fp_mixed_precision;
-    os << " mult_transpose1xW_width= " << gemm_info.mult_transpose1xW_width;
-    os << " mult_interleave4x4_height= " << gemm_info.mult_interleave4x4_height;
-    os << " a_offset= " << gemm_info.a_offset;
-    os << " b_offset= " << gemm_info.b_offset;
-    os << "post_ops= " << gemm_info.post_ops;
+    os << "( m=" << gemm_info.m;
+    os << " n=" << gemm_info.n;
+    os << " k=" << gemm_info.k;
+    os << " depth_output_gemm3d=" << gemm_info.depth_output_gemm3d;
+    os << " reinterpret_input_as_3d=" << gemm_info.reinterpret_input_as_3d;
+    os << " broadcast_bias=" << gemm_info.broadcast_bias;
+    os << " fp_mixed_precision=" << gemm_info.fp_mixed_precision;
+    os << " mult_transpose1xW_width=" << gemm_info.mult_transpose1xW_width;
+    os << " mult_interleave4x4_height=" << gemm_info.mult_interleave4x4_height;
+    os << " a_offset=" << gemm_info.a_offset;
+    os << " b_offset=" << gemm_info.b_offset;
+    os << "post_ops=" << gemm_info.post_ops;
     os << ")";
     return os;
 }
@@ -388,7 +388,7 @@
  */
 inline ::std::ostream &operator<<(::std::ostream &os, const GEMMLHSMatrixInfo &gemm_info)
 {
-    os << "( m0= " << (unsigned int)gemm_info.m0 << " k0= " << gemm_info.k0 << "  v0= " << gemm_info.v0 << "  trans= " << gemm_info.transpose << "  inter= " << gemm_info.interleave << "})";
+    os << "( m0=" << (unsigned int)gemm_info.m0 << " k0=" << gemm_info.k0 << "  v0=" << gemm_info.v0 << "  trans=" << gemm_info.transpose << "  inter=" << gemm_info.interleave << "})";
     return os;
 }
 
@@ -401,7 +401,7 @@
  */
 inline ::std::ostream &operator<<(::std::ostream &os, const GEMMRHSMatrixInfo &gemm_info)
 {
-    os << "( n0= " << (unsigned int)gemm_info.n0 << " k0= " << gemm_info.k0 << "  h0= " << gemm_info.h0 << "  trans= " << gemm_info.transpose << "  inter= " << gemm_info.interleave << " exp_img=" <<
+    os << "( n0=" << (unsigned int)gemm_info.n0 << " k0=" << gemm_info.k0 << "  h0=" << gemm_info.h0 << "  trans=" << gemm_info.transpose << "  inter=" << gemm_info.interleave << " exp_img=" <<
        gemm_info.export_to_cl_image << "})";
     return os;
 }
@@ -455,7 +455,7 @@
 inline ::std::ostream &operator<<(::std::ostream &os, const BoundingBoxTransformInfo &bbox_info)
 {
     auto weights = bbox_info.weights();
-    os << "(" << bbox_info.img_width() << "x" << bbox_info.img_height() << ")~" << bbox_info.scale() << "(weights = {" << weights[0] << ", " << weights[1] << ", " << weights[2] << ", " << weights[3] <<
+    os << "(" << bbox_info.img_width() << "x" << bbox_info.img_height() << ")~" << bbox_info.scale() << "(weights={" << weights[0] << ", " << weights[1] << ", " << weights[2] << ", " << weights[3] <<
        "})";
     return os;
 }
@@ -1495,6 +1495,7 @@
     os << "fp_mixed_precision=" << info.fp_mixed_precision() << ",";
     os << "broadcast_bias=" << info.broadcast_bias() << ",";
     os << "pretranspose_B=" << info.pretranspose_B() << ",";
+    os << "post_ops=" << info.post_ops() << "}";
 
     return os;
 }
@@ -2540,10 +2541,10 @@
  */
 inline ::std::ostream &operator<<(::std::ostream &os, const ConvolutionInfo &conv_info)
 {
-    os << "{PadStrideInfo = " << conv_info.pad_stride_info << ", "
-       << "depth_multiplier = " << conv_info.depth_multiplier << ", "
-       << "act_info = " << to_string(conv_info.act_info) << ", "
-       << "dilation = " << conv_info.dilation << "}";
+    os << "{PadStrideInfo=" << conv_info.pad_stride_info << ", "
+       << "depth_multiplier=" << conv_info.depth_multiplier << ", "
+       << "act_info=" << to_string(conv_info.act_info) << ", "
+       << "dilation=" << conv_info.dilation << "}";
     return os;
 }
 
@@ -2569,13 +2570,13 @@
  */
 inline ::std::ostream &operator<<(::std::ostream &os, const FullyConnectedLayerInfo &layer_info)
 {
-    os << "{activation_info = " << to_string(layer_info.activation_info) << ", "
-       << "weights_trained_layout = " << layer_info.weights_trained_layout << ", "
-       << "transpose_weights = " << layer_info.transpose_weights << ", "
-       << "are_weights_reshaped = " << layer_info.are_weights_reshaped << ", "
-       << "retain_internal_weights = " << layer_info.retain_internal_weights << ", "
-       << "constant_weights = " << layer_info.transpose_weights << ", "
-       << "fp_mixed_precision = " << layer_info.fp_mixed_precision << "}";
+    os << "{activation_info=" << to_string(layer_info.activation_info) << ", "
+       << "weights_trained_layout=" << layer_info.weights_trained_layout << ", "
+       << "transpose_weights=" << layer_info.transpose_weights << ", "
+       << "are_weights_reshaped=" << layer_info.are_weights_reshaped << ", "
+       << "retain_internal_weights=" << layer_info.retain_internal_weights << ", "
+       << "constant_weights=" << layer_info.transpose_weights << ", "
+       << "fp_mixed_precision=" << layer_info.fp_mixed_precision << "}";
     return os;
 }
 
@@ -2643,17 +2644,17 @@
  */
 inline ::std::ostream &operator<<(::std::ostream &os, const GEMMLowpOutputStageInfo &gemm_info)
 {
-    os << "{type = " << gemm_info.type << ", "
-       << "gemlowp_offset = " << gemm_info.gemmlowp_offset << ", "
-       << "gemmlowp_multiplier" << gemm_info.gemmlowp_multiplier << ", "
-       << "gemmlowp_shift = " << gemm_info.gemmlowp_shift << ", "
-       << "gemmlowp_min_bound = " << gemm_info.gemmlowp_min_bound << ", "
-       << "gemmlowp_max_bound = " << gemm_info.gemmlowp_max_bound << ", "
-       << "gemmlowp_multipliers = " << gemm_info.gemmlowp_multiplier << ", "
-       << "gemmlowp_shifts = " << gemm_info.gemmlowp_shift << ", "
-       << "gemmlowp_real_multiplier = " << gemm_info.gemmlowp_real_multiplier << ", "
-       << "is_quantized_per_channel = " << gemm_info.is_quantized_per_channel << ", "
-       << "output_data_type = " << gemm_info.output_data_type << "}";
+    os << "{type=" << gemm_info.type << ", "
+       << "gemlowp_offset=" << gemm_info.gemmlowp_offset << ", "
+       << "gemmlowp_multiplier=" << gemm_info.gemmlowp_multiplier << ", "
+       << "gemmlowp_shift=" << gemm_info.gemmlowp_shift << ", "
+       << "gemmlowp_min_bound=" << gemm_info.gemmlowp_min_bound << ", "
+       << "gemmlowp_max_bound=" << gemm_info.gemmlowp_max_bound << ", "
+       << "gemmlowp_multipliers=" << gemm_info.gemmlowp_multiplier << ", "
+       << "gemmlowp_shifts=" << gemm_info.gemmlowp_shift << ", "
+       << "gemmlowp_real_multiplier=" << gemm_info.gemmlowp_real_multiplier << ", "
+       << "is_quantized_per_channel=" << gemm_info.is_quantized_per_channel << ", "
+       << "output_data_type=" << gemm_info.output_data_type << "}";
     return os;
 }
 
@@ -2679,11 +2680,12 @@
  */
 inline ::std::ostream &operator<<(::std::ostream &os, const Conv2dInfo &conv_info)
 {
-    os << "{conv_info = " << conv_info.conv_info << ", "
-       << "dilation = " << conv_info.dilation << ", "
-       << "act_info = " << to_string(conv_info.act_info) << ", "
-       << "enable_fast_math = " << conv_info.enable_fast_math << ", "
-       << "num_groups = " << conv_info.num_groups << "}";
+    os << "{conv_info=" << conv_info.conv_info << ", "
+       << "dilation=" << conv_info.dilation << ", "
+       << "act_info=" << to_string(conv_info.act_info) << ", "
+       << "enable_fast_math=" << conv_info.enable_fast_math << ", "
+       << "num_groups=" << conv_info.num_groups << ","
+       << "post_ops=" << conv_info.post_ops << "}";
     return os;
 }
 
@@ -2709,7 +2711,7 @@
  */
 inline ::std::ostream &operator<<(::std::ostream &os, const PixelValue &pixel_value)
 {
-    os << "{value.u64= " << pixel_value.get<uint64_t>() << "}";
+    os << "{value.u64=" << pixel_value.get<uint64_t>() << "}";
     return os;
 }
 
@@ -2735,13 +2737,13 @@
  */
 inline ::std::ostream &operator<<(::std::ostream &os, const ScaleKernelInfo &scale_info)
 {
-    os << "{interpolation_policy = " << scale_info.interpolation_policy << ", "
-       << "BorderMode = " << scale_info.border_mode << ", "
-       << "PixelValue = " << scale_info.constant_border_value << ", "
-       << "SamplingPolicy = " << scale_info.sampling_policy << ", "
-       << "use_padding = " << scale_info.use_padding << ", "
-       << "align_corners = " << scale_info.align_corners << ", "
-       << "data_layout = " << scale_info.data_layout << "}";
+    os << "{interpolation_policy=" << scale_info.interpolation_policy << ", "
+       << "BorderMode=" << scale_info.border_mode << ", "
+       << "PixelValue=" << scale_info.constant_border_value << ", "
+       << "SamplingPolicy=" << scale_info.sampling_policy << ", "
+       << "use_padding=" << scale_info.use_padding << ", "
+       << "align_corners=" << scale_info.align_corners << ", "
+       << "data_layout=" << scale_info.data_layout << "}";
     return os;
 }
 
@@ -2803,8 +2805,8 @@
  */
 inline ::std::ostream &operator<<(::std::ostream &os, const FFT1DInfo &fft1d_info)
 {
-    os << "{axis = " << fft1d_info.axis << ", "
-       << "direction = " << fft1d_info.direction << "}";
+    os << "{axis=" << fft1d_info.axis << ", "
+       << "direction=" << fft1d_info.direction << "}";
     return os;
 }
 
@@ -2830,9 +2832,9 @@
  */
 inline ::std::ostream &operator<<(::std::ostream &os, const FFT2DInfo &fft2d_info)
 {
-    os << "{axis = " << fft2d_info.axis0 << ", "
-       << "axis = " << fft2d_info.axis1 << ", "
-       << "direction = " << fft2d_info.direction << "}";
+    os << "{axis=" << fft2d_info.axis0 << ", "
+       << "axis=" << fft2d_info.axis1 << ", "
+       << "direction=" << fft2d_info.direction << "}";
     return os;
 }
 
@@ -2858,8 +2860,8 @@
  */
 inline ::std::ostream &operator<<(::std::ostream &os, const Coordinates2D &coord_2d)
 {
-    os << "{x = " << coord_2d.x << ", "
-       << "y = " << coord_2d.y << "}";
+    os << "{x=" << coord_2d.x << ", "
+       << "y=" << coord_2d.y << "}";
     return os;
 }
 
@@ -2921,10 +2923,10 @@
  */
 inline ::std::ostream &operator<<(::std::ostream &os, const SoftmaxKernelInfo &info)
 {
-    os << "{beta = " << info.beta << ", "
-       << "is_log = " << info.is_log << ", "
-       << "input_data_type = " << info.input_data_type << ", "
-       << "axis = " << info.axis << "}";
+    os << "{beta=" << info.beta << ", "
+       << "is_log=" << info.is_log << ", "
+       << "input_data_type=" << info.input_data_type << ", "
+       << "axis=" << info.axis << "}";
     return os;
 }
 
@@ -3052,15 +3054,15 @@
  */
 inline ::std::ostream &operator<<(::std::ostream &os, const BoxNMSLimitInfo &info)
 {
-    os << "{score_thresh = " << info.score_thresh() << ", "
-       << "nms = " << info.nms() << ", "
-       << "detections_per_im = " << info.detections_per_im() << ", "
-       << "soft_nms_enabled = " << info.soft_nms_enabled() << ", "
-       << "soft_nms_min_score_thres = " << info.soft_nms_min_score_thres() << ", "
-       << "suppress_size = " << info.suppress_size() << ", "
-       << "min_size = " << info.min_size() << ", "
-       << "im_width = " << info.im_width() << ", "
-       << "im_height = " << info.im_height() << "}";
+    os << "{score_thresh=" << info.score_thresh() << ", "
+       << "nms=" << info.nms() << ", "
+       << "detections_per_im=" << info.detections_per_im() << ", "
+       << "soft_nms_enabled=" << info.soft_nms_enabled() << ", "
+       << "soft_nms_min_score_thres=" << info.soft_nms_min_score_thres() << ", "
+       << "suppress_size=" << info.suppress_size() << ", "
+       << "min_size=" << info.min_size() << ", "
+       << "im_width=" << info.im_width() << ", "
+       << "im_height=" << info.im_height() << "}";
     return os;
 }
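
For completeness, the descriptors extended in this patch remain directly
loggable through these operators (sketch; the exact post_ops text depends on
the PostOpList printer, which is not shown in this diff):

    Conv2dInfo info{};
    std::cout << info << std::endl; // e.g. {conv_info=..., num_groups=1, post_ops=<list>}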