IVGCVSW-7455 Workaround to allow CLBatchMatMul to parse some 4D models

 * Added the ability to reduce the number of dimensions when calling
   BuildArmComputeTensorInfo or BuildArmComputeTensorShapes: this attempts to
   remove leading 1s in order to squeeze the number of dimensions while keeping
   the overall tensor size (element count) the same (see the sketch after this
   list).
 * Changed ClBatchMatMulWorkload to attempt to squeeze the number of dimensions
   down to 3, as the CL Gemm kernel only supports up to 3 dimensions.
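 For illustration only (not part of this change): a minimal sketch of the
 leading-1 squeeze described above; the helper name SqueezeLeadingOnes and the
 targetDimensions parameter are hypothetical.

     #include <armnn/Tensor.hpp>
     #include <vector>

     // Drop size-1 dimensions from the front until the shape has at most
     // targetDimensions dimensions; the total element count is unchanged.
     armnn::TensorShape SqueezeLeadingOnes(const armnn::TensorShape& shape,
                                           unsigned int targetDimensions)
     {
         unsigned int toDrop = shape.GetNumDimensions() > targetDimensions
                             ? shape.GetNumDimensions() - targetDimensions
                             : 0u;
         std::vector<unsigned int> dims;
         for (unsigned int i = 0; i < shape.GetNumDimensions(); ++i)
         {
             if (toDrop > 0u && shape[i] == 1u)
             {
                 --toDrop;    // a leading 1 can be removed without losing data
                 continue;
             }
             toDrop = 0u;     // stop dropping at the first dimension that is not 1
             dims.push_back(shape[i]);
         }
         return armnn::TensorShape(static_cast<unsigned int>(dims.size()), dims.data());
     }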

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I6b3d0886c5b97fdb686838fc3dc292833ddc4643
diff --git a/include/armnnUtils/TensorUtils.hpp b/include/armnnUtils/TensorUtils.hpp
index 2d6ec2f..a2aa9b0 100644
--- a/include/armnnUtils/TensorUtils.hpp
+++ b/include/armnnUtils/TensorUtils.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -8,6 +8,7 @@
 #include <armnn/TypesUtils.hpp>
 #include <armnn/Tensor.hpp>
 #include <armnn/Types.hpp>
+#include <armnnUtils/TensorUtils.hpp>
 #include <utility>
 #include <vector>
 
@@ -41,6 +42,10 @@
 
 std::pair<float, float> FindMinMax(armnn::ITensorHandle* tensorHandle);
 
+armnn::TensorShape ReduceDims(const armnn::TensorShape& tensorShape, unsigned int dimensions);
+
+armnn::TensorInfo ReduceDims(const armnn::TensorInfo& tensorInfo, unsigned int dimensions);
+
 armnn::TensorShape ExpandDims(const armnn::TensorShape& tensorShape, int axis);
 
 std::vector<unsigned int> SqueezeDims(const armnn::TensorShape& tensorShape);
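
A hypothetical usage sketch of the ReduceDims overloads declared above. The
armnnUtils namespace and the unchanged-shape fallback are assumptions based on
the surrounding header and the commit message, not confirmed by this patch.

    #include <armnnUtils/TensorUtils.hpp>
    #include <armnn/Tensor.hpp>

    // Try to squeeze a 4D batch-matmul shape down to the 3 dimensions the
    // CL Gemm kernel supports; e.g. [1, 2, 3, 4] would become [2, 3, 4],
    // while [2, 2, 3, 4] has no leading 1 and would presumably be returned
    // unchanged.
    armnn::TensorShape ToGemmShape(const armnn::TensorShape& shape)
    {
        return armnnUtils::ReduceDims(shape, 3u);
    }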