COMPMID-3887: NEGEMMConvolutionLayer hangs up on num_threads>18

When storing the tensor shape information, the size was clamped to 32 bits, which caused the allocated memory to be too small.

Signed-off-by: Sheri Zhang <sheri.zhang@arm.com>
Change-Id: I9f7dfcd5595a143b0ed4f6973e20bcd9d776b673
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4331
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
diff --git a/arm_compute/core/TensorShape.h b/arm_compute/core/TensorShape.h
index 2187743..b455a07 100644
--- a/arm_compute/core/TensorShape.h
+++ b/arm_compute/core/TensorShape.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019 Arm Limited.
+ * Copyright (c) 2016-2020 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -36,7 +36,7 @@
 namespace arm_compute
 {
 /** Shape of a tensor */
-class TensorShape : public Dimensions<uint32_t>
+class TensorShape : public Dimensions<size_t>
 {
 public:
     /** Constructor to initialize the tensor shape.
diff --git a/arm_compute/core/Window.inl b/arm_compute/core/Window.inl
index 14a432a..6100d09 100644
--- a/arm_compute/core/Window.inl
+++ b/arm_compute/core/Window.inl
@@ -197,15 +197,15 @@
     {
         if(d == dimension)
         {
-            int start        = _dims[d].start();
-            int end          = _dims[d].end();
-            const int step   = _dims[d].step();
+            int       start = _dims[d].start();
+            int       end   = _dims[d].end();
+            const int step  = _dims[d].step();
 
             const int num_it = num_iterations(d);
             const int rem    = num_it % total;
-            int work         = num_it / total;
+            int       work   = num_it / total;
 
-            int it_start     = work * id;
+            int it_start = work * id;
 
             if(int(id) < rem)
             {
@@ -277,7 +277,7 @@
 {
     for(unsigned int n = first_dimension; n < shape.num_dimensions(); ++n)
     {
-        set(n, Window::Dimension(0, std::max(shape[n], static_cast<uint32_t>(1))));
+        set(n, Window::Dimension(0, std::max(shape[n], static_cast<size_t>(1))));
     }
 }