Fix dst "widening" validation

* Auto-initialize the dst tensor before checking for PostOp shape
compliance, so that we catch the invalid case where post ops would
"widen" the dst tensor shape

* Rework the post op validation test cases to be more readable

Partially resolves: COMPMID-4435
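
For context, a minimal sketch (not part of this patch) of the two cases that
PostOpCLKernelUtils::are_post_op_shapes_compliant rejects, and which the kernels
can now report at configure() time because dst is auto-initialized before
validation. Shapes, data types and the prev_dst_pos value are illustrative only,
not taken from the reworked test cases:

    #include "arm_compute/core/TensorInfo.h"
    #include "src/core/CL/CLUtils.h"
    #include "src/core/experimental/PostOp.h"

    using namespace arm_compute;

    bool widening_is_rejected()
    {
        // dst of shape (X=17, Y=1); an eltwise-add post op argument of shape (17, 32)
        // would "widen" dst through broadcasting, so the compliance check is expected
        // to return false. The same function also rejects the unsupported X-only
        // broadcast (arg of shape X=1, Y=M) documented in CLUtils.cpp.
        TensorInfo dst(TensorShape(17U, 1U), 1, DataType::F32);
        TensorInfo widening_addend(TensorShape(17U, 32U), 1, DataType::F32);

        experimental::PostOpList<ITensorInfo *> post_ops{};
        post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo *>>(&widening_addend, 1, ConvertPolicy::SATURATE);

        // Expected: false, since broadcast_shape(dst, addend) = (17, 32) != dst shape (17, 1)
        return !PostOpCLKernelUtils::are_post_op_shapes_compliant(&dst, post_ops);
    }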

Change-Id: I79943994182942f962e4d59a7fa0d6f017ae9ac7
Signed-off-by: SiCongLi <sicong.li@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6548
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/core/CL/CLUtils.cpp b/src/core/CL/CLUtils.cpp
index 1da970e..748b0f5 100644
--- a/src/core/CL/CLUtils.cpp
+++ b/src/core/CL/CLUtils.cpp
@@ -85,16 +85,24 @@
 
 bool PostOpCLKernelUtils::are_post_op_shapes_compliant(const ITensorInfo *dst, const experimental::PostOpList<ITensorInfo *> &post_ops)
 {
-    // All post ops must be elementwise and must not alter the shape of the original dst tensor after broadcasting
     for(const auto &op : post_ops.get_list())
     {
         for(const auto &tensor : op->arguments())
         {
             const TensorShape &out_shape = TensorShape::broadcast_shape(dst->tensor_shape(), (*tensor)->tensor_shape());
+            // All post ops must be elementwise and must not alter the shape of the original dst tensor after broadcasting
             if(detail::have_different_dimensions(out_shape, dst->tensor_shape(), 0))
             {
                 return false;
             }
+            // NOTE: Kernel limitation: currently only the following broadcasting types are supported:
+            //  1. Post op arg is scalar, broadcast in both X and Y
+            //  2. Post op arg is of shape: Y=1, X=N, broadcast only in Y
+            //  This means that the case: post op arg of shape Y=M, X=1 (broadcast only in X) is NOT supported
+            if(dst->dimension(0) > 1 && dst->dimension(1) > 1 && (*tensor)->dimension(0) == 1 && (*tensor)->dimension(1) > 1)
+            {
+                return false;
+            }
         }
     }
     return true;
diff --git a/src/core/experimental/PostOp.h b/src/core/experimental/PostOp.h
index 64414d2..7d62bd9 100644
--- a/src/core/experimental/PostOp.h
+++ b/src/core/experimental/PostOp.h
@@ -79,9 +79,9 @@
 struct PostOpEltwiseAdd : public IPostOp<TensorRelatedT>
 {
 public:
-    PostOpEltwiseAdd(TensorRelatedT addend, int prev_op_arg_pos, ConvertPolicy policy)
+    PostOpEltwiseAdd(TensorRelatedT addend, int prev_dst_pos, ConvertPolicy policy)
         : _addend{ addend },
-          _prev_op_arg_pos{ prev_op_arg_pos },
+          _prev_dst_pos{ prev_dst_pos },
           _policy{ policy }
     {
     }
@@ -93,7 +93,7 @@
     PostOpEltwiseAdd &operator=(PostOpEltwiseAdd &&) = default;
     int               prev_dst_pos() const override
     {
-        return _prev_op_arg_pos;
+        return _prev_dst_pos;
     }
     PostOpType type() const override
     {
@@ -112,7 +112,7 @@
         return std::make_unique<PostOpEltwiseAdd<TensorRelatedT>>(*this);
     }
     TensorRelatedT _addend;
-    int            _prev_op_arg_pos;
+    int            _prev_dst_pos;
     ConvertPolicy  _policy;
 };
 
@@ -135,7 +135,7 @@
             case PostOpType::Eltwise_Add:
             {
                 const auto _post_op = utils::cast::polymorphic_downcast<const PostOpEltwiseAdd<FromTensorT> *>(post_op.get());
-                transformed_post_ops.template push_back_op<PostOpEltwiseAdd<ToTensorT>>(transform_arg(_post_op->_addend), _post_op->_prev_op_arg_pos, _post_op->_policy);
+                transformed_post_ops.template push_back_op<PostOpEltwiseAdd<ToTensorT>>(transform_arg(_post_op->_addend), _post_op->_prev_dst_pos, _post_op->_policy);
                 break;
             }
             default:
diff --git a/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp b/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp
index 4b28e2b..8ee72d3 100644
--- a/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp
+++ b/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp
@@ -182,11 +182,11 @@
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(src0, src1, dst);
 
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src0, src1, src2, dst, alpha, beta, lhs_info, rhs_info, gemm_info));
-
     // dst tensor auto initialization if not yet initialized
     auto_init_if_empty(*dst, src0->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info)));
 
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src0, src1, src2, dst, alpha, beta, lhs_info, rhs_info, gemm_info));
+
     auto padding_info         = get_padding_info({ src0, src1, src2, dst });
     _reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
     _use_dummy_work_items     = preferred_dummy_work_items_support(CLKernelLibrary::get().get_device());