IVGCVSW-5300 Remove some boost::numeric_cast from armnn/backends

 * Replaced with armnn/utility/NumericCast.hpp
 * Some exclusions in reference backend
 * Excluded because they require a float implementation in NumericCast.hpp

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I9e4e9cd502c865452128fa04415fd6f250baa855
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index c7650dc..07ce14b 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -7,6 +7,7 @@
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <armnnUtils/DataLayoutIndexed.hpp>
 #include <armnnUtils/TensorUtils.hpp>
+#include <armnn/utility/NumericCast.hpp>
 
 #include <algorithm>
 #include <iomanip>
@@ -14,7 +15,6 @@
 #include <sstream>
 
 #include <boost/format.hpp>
-#include <boost/numeric/conversion/cast.hpp>
 
 using namespace armnnUtils;
 
@@ -306,7 +306,7 @@
         }
         outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
     }
-    TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
+    TensorShape broadcastShape = TensorShape(armnn::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
     if (broadcastShape != output.GetShape())
     {
         throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
@@ -2306,7 +2306,7 @@
     else
     {
         unsigned int outputDim =
-            inputTensorInfo.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
+            inputTensorInfo.GetNumDimensions() - armnn::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
         ValidateTensorNumDimensions(outputTensorInfo,
                                     descriptorName,
                                     outputDim > 0 ? outputDim : 1,
diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index 37915cf..5886630 100644
--- a/src/backends/backendsCommon/WorkloadUtils.cpp
+++ b/src/backends/backendsCommon/WorkloadUtils.cpp
@@ -6,8 +6,7 @@
 #include <backendsCommon/WorkloadUtils.hpp>
 
 #include <armnn/Utils.hpp>
-
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnn/utility/NumericCast.hpp>
 
 namespace armnn
 {
@@ -194,12 +193,12 @@
 int32_t ConvertMaskToACLFormat(int32_t mask, int32_t numDim)
 {
     int32_t reversedMask = 0;
-    for (unsigned int i = 0; i < boost::numeric_cast<unsigned int>(numDim); ++i)
+    for (unsigned int i = 0; i < armnn::numeric_cast<unsigned int>(numDim); ++i)
     {
         // Check if bit set in mask for each dimension
         int32_t bit = (mask & 1 << i) != 0;
         // Increment the new mask with the bits reversed
-        reversedMask += (bit << std::max(numDim-(boost::numeric_cast<int>(i)+1), 0));
+        reversedMask += (bit << std::max(numDim-(armnn::numeric_cast<int>(i)+1), 0));
     }
 
     return reversedMask;
diff --git a/src/backends/backendsCommon/test/ActivationFixture.hpp b/src/backends/backendsCommon/test/ActivationFixture.hpp
index 8ff77f6..d28174d 100644
--- a/src/backends/backendsCommon/test/ActivationFixture.hpp
+++ b/src/backends/backendsCommon/test/ActivationFixture.hpp
@@ -7,9 +7,10 @@
 #include "TensorCopyUtils.hpp"
 #include "WorkloadTestUtils.hpp"
 
+#include <armnn/utility/NumericCast.hpp>
+
 #include <test/TensorHelpers.hpp>
 
-#include <boost/numeric/conversion/cast.hpp>
 #include <boost/multi_array.hpp>
 
 struct ActivationFixture
@@ -17,10 +18,10 @@
     ActivationFixture()
     {
         auto boostArrayExtents = boost::extents
-            [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
-            [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
-            [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
-            [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
+            [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
+            [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
+            [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
+            [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
         output.resize(boostArrayExtents);
         outputExpected.resize(boostArrayExtents);
         input.resize(boostArrayExtents);
diff --git a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
index dc53b7b..c705f87 100644
--- a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
@@ -10,6 +10,8 @@
 
 #include <armnn/INetwork.hpp>
 
+#include <armnn/utility/NumericCast.hpp>
+
 #include <boost/test/unit_test.hpp>
 
 #include <vector>
@@ -34,7 +36,7 @@
     for (unsigned int i = 0; i < inputShapes.size(); ++i)
     {
         TensorInfo inputTensorInfo(inputShapes[i], ArmnnTypeInput, qScale, qOffset);
-        IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast<LayerBindingId>(i));
+        IConnectableLayer* input = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(i));
         Connect(input, comparisonLayer, inputTensorInfo, 0, i);
     }
 
diff --git a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
index ded3857..58a1f39 100644
--- a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
@@ -10,6 +10,8 @@
 
 #include <armnn/INetwork.hpp>
 
+#include <armnn/utility/NumericCast.hpp>
+
 #include <boost/test/unit_test.hpp>
 
 #include <vector>
@@ -38,7 +40,7 @@
     for (unsigned int i = 0; i < inputShapes.size(); ++i)
     {
         TensorInfo inputTensorInfo(inputShapes[i], DataType, qScale, qOffset);
-        IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast<LayerBindingId>(i));
+        IConnectableLayer* input = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(i));
         Connect(input, concat, inputTensorInfo, 0, i);
     }
 
diff --git a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
index 4c93735..5fedaa2 100644
--- a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
@@ -10,6 +10,8 @@
 
 #include <armnn/INetwork.hpp>
 
+#include <armnn/utility/NumericCast.hpp>
+
 #include <boost/test/unit_test.hpp>
 
 #include <vector>
@@ -32,7 +34,7 @@
     IConnectableLayer* elementwiseUnaryLayer = net->AddElementwiseUnaryLayer(descriptor, "elementwiseUnary");
 
     TensorInfo inputTensorInfo(inputShape, ArmnnTypeInput, qScale, qOffset);
-    IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast<LayerBindingId>(0));
+    IConnectableLayer* input = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(0));
     Connect(input, elementwiseUnaryLayer, inputTensorInfo, 0, 0);
 
     TensorInfo outputTensorInfo(outputShape, ArmnnTypeInput, qScale, qOffset);
diff --git a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
index b06b30c..404a412 100644
--- a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
@@ -13,6 +13,8 @@
 #include <armnn/INetwork.hpp>
 #include <armnn/QuantizedLstmParams.hpp>
 
+#include <armnn/utility/NumericCast.hpp>
+
 #include <test/TensorHelpers.hpp>
 
 #include <boost/test/unit_test.hpp>
@@ -27,9 +29,9 @@
 armnn::INetworkPtr CreateQuantizedLstmNetwork(MultiArray input,
                                               MultiArray expectedOutput)
 {
-    auto batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
-    auto inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
-    auto outputSize = boost::numeric_cast<unsigned int>(expectedOutput.shape()[1]);
+    auto batchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
+    auto inputSize = armnn::numeric_cast<unsigned int>(input.shape()[1]);
+    auto outputSize = armnn::numeric_cast<unsigned int>(expectedOutput.shape()[1]);
 
     float inputOutputScale = 0.0078125f;
     int32_t inputOutputOffset = 128;
diff --git a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
index 6c4c177..257a81b 100644
--- a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
@@ -8,6 +8,8 @@
 
 #include <armnn/INetwork.hpp>
 
+#include <armnn/utility/NumericCast.hpp>
+
 #include <backendsCommon/test/CommonTestUtils.hpp>
 
 #include <boost/test/unit_test.hpp>
@@ -63,7 +65,7 @@
     for (unsigned int i = 0; i < outputShapes.size(); ++i)
     {
         TensorInfo outputTensorInfo(outputShapes[i], DataType, qScale, qOffset);
-        IConnectableLayer* output = net->AddOutputLayer(boost::numeric_cast<LayerBindingId>(i));
+        IConnectableLayer* output = net->AddOutputLayer(armnn::numeric_cast<LayerBindingId>(i));
         Connect(splitter, output, outputTensorInfo, i, 0);
     }
 
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index 543ea77..6d83b1c 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -13,6 +13,8 @@
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 #include <reference/test/RefWorkloadFactoryHelper.hpp>
 
+#include <armnn/utility/NumericCast.hpp>
+
 #include <test/TensorHelpers.hpp>
 
 #include <boost/multi_array.hpp>
@@ -1261,10 +1263,10 @@
 
     LayerTestResult<T,4> ret(outputTensorInfo);
     auto boostArrayExtents = boost::extents
-        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
-    [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
-    [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
-    [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
+        [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
+        [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
+        [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
+        [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
     ret.output.resize(boostArrayExtents);
     ret.outputExpected.resize(boostArrayExtents);
 
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index e99a26e..690d1cd 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -9,6 +9,7 @@
 #include <armnnUtils/TensorUtils.hpp>
 
 #include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/NumericCast.hpp>
 #include <armnnUtils/DataLayoutIndexed.hpp>
 #include <armnnUtils/Permute.hpp>
 
@@ -219,20 +220,20 @@
     uint32_t dilationY = 1)
 {
     armnn::IgnoreUnused(memoryManager);
-    unsigned int inputHeight   = boost::numeric_cast<unsigned int>(originalInput.shape()[2]);
-    unsigned int inputWidth    = boost::numeric_cast<unsigned int>(originalInput.shape()[3]);
-    unsigned int inputChannels = boost::numeric_cast<unsigned int>(originalInput.shape()[1]);
-    unsigned int inputNum      = boost::numeric_cast<unsigned int>(originalInput.shape()[0]);
+    unsigned int inputHeight   = armnn::numeric_cast<unsigned int>(originalInput.shape()[2]);
+    unsigned int inputWidth    = armnn::numeric_cast<unsigned int>(originalInput.shape()[3]);
+    unsigned int inputChannels = armnn::numeric_cast<unsigned int>(originalInput.shape()[1]);
+    unsigned int inputNum      = armnn::numeric_cast<unsigned int>(originalInput.shape()[0]);
 
-    unsigned int outputHeight   = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[2]);
-    unsigned int outputWidth    = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[3]);
-    unsigned int outputChannels = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[1]);
-    unsigned int outputNum      = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[0]);
+    unsigned int outputHeight   = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[2]);
+    unsigned int outputWidth    = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[3]);
+    unsigned int outputChannels = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[1]);
+    unsigned int outputNum      = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[0]);
 
-    unsigned int kernelHeight = boost::numeric_cast<unsigned int>(originalKernel.shape()[2]);
-    unsigned int kernelWidth = boost::numeric_cast<unsigned int>(originalKernel.shape()[3]);
-    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(originalKernel.shape()[1]);
-    unsigned int kernelDepthMul = boost::numeric_cast<unsigned int>(originalKernel.shape()[0]);
+    unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(originalKernel.shape()[2]);
+    unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(originalKernel.shape()[3]);
+    unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(originalKernel.shape()[1]);
+    unsigned int kernelDepthMul = armnn::numeric_cast<unsigned int>(originalKernel.shape()[0]);
 
     bool biasEnabled = bias.size() > 0;
 
@@ -385,20 +386,20 @@
     uint32_t strideY  = 1)
 {
     armnn::IgnoreUnused(qScale, qOffset);
-    unsigned int inputNum       = boost::numeric_cast<unsigned int>(input.shape()[0]);
-    unsigned int inputChannels  = boost::numeric_cast<unsigned int>(input.shape()[3]);
-    unsigned int inputHeight    = boost::numeric_cast<unsigned int>(input.shape()[1]);
-    unsigned int inputWidth     = boost::numeric_cast<unsigned int>(input.shape()[2]);
+    unsigned int inputNum       = armnn::numeric_cast<unsigned int>(input.shape()[0]);
+    unsigned int inputChannels  = armnn::numeric_cast<unsigned int>(input.shape()[3]);
+    unsigned int inputHeight    = armnn::numeric_cast<unsigned int>(input.shape()[1]);
+    unsigned int inputWidth     = armnn::numeric_cast<unsigned int>(input.shape()[2]);
 
-    unsigned int kernelChanMul  = boost::numeric_cast<unsigned int>(kernel.shape()[0]);
-    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[3]);
-    unsigned int kernelHeight   = boost::numeric_cast<unsigned int>(kernel.shape()[1]);
-    unsigned int kernelWidth    = boost::numeric_cast<unsigned int>(kernel.shape()[2]);
+    unsigned int kernelChanMul  = armnn::numeric_cast<unsigned int>(kernel.shape()[0]);
+    unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(kernel.shape()[3]);
+    unsigned int kernelHeight   = armnn::numeric_cast<unsigned int>(kernel.shape()[1]);
+    unsigned int kernelWidth    = armnn::numeric_cast<unsigned int>(kernel.shape()[2]);
 
-    unsigned int outputNum      = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
-    unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);
-    unsigned int outputHeight   = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
-    unsigned int outputWidth    = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);
+    unsigned int outputNum      = armnn::numeric_cast<unsigned int>(outputExpected.shape()[0]);
+    unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpected.shape()[3]);
+    unsigned int outputHeight   = armnn::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+    unsigned int outputWidth    = armnn::numeric_cast<unsigned int>(outputExpected.shape()[2]);
 
     bool biasEnabled = bias.size() > 0;
 
@@ -1643,18 +1644,18 @@
     uint32_t strideX = 1,
     uint32_t strideY = 1)
 {
-    unsigned int inputNum       = boost::numeric_cast<unsigned int>(input.shape()[0]);
-    unsigned int inputChannels  = boost::numeric_cast<unsigned int>(input.shape()[1]);
-    unsigned int inputHeight    = boost::numeric_cast<unsigned int>(input.shape()[2]);
-    unsigned int inputWidth     = boost::numeric_cast<unsigned int>(input.shape()[3]);
-    unsigned int kernelChanMul  = boost::numeric_cast<unsigned int>(kernel.shape()[0]);
-    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[1]);
-    unsigned int kernelHeight   = boost::numeric_cast<unsigned int>(kernel.shape()[2]);
-    unsigned int kernelWidth    = boost::numeric_cast<unsigned int>(kernel.shape()[3]);
-    unsigned int outputNum      = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
-    unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
-    unsigned int outputHeight   = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);
-    unsigned int outputWidth    = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);
+    unsigned int inputNum       = armnn::numeric_cast<unsigned int>(input.shape()[0]);
+    unsigned int inputChannels  = armnn::numeric_cast<unsigned int>(input.shape()[1]);
+    unsigned int inputHeight    = armnn::numeric_cast<unsigned int>(input.shape()[2]);
+    unsigned int inputWidth     = armnn::numeric_cast<unsigned int>(input.shape()[3]);
+    unsigned int kernelChanMul  = armnn::numeric_cast<unsigned int>(kernel.shape()[0]);
+    unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(kernel.shape()[1]);
+    unsigned int kernelHeight   = armnn::numeric_cast<unsigned int>(kernel.shape()[2]);
+    unsigned int kernelWidth    = armnn::numeric_cast<unsigned int>(kernel.shape()[3]);
+    unsigned int outputNum      = armnn::numeric_cast<unsigned int>(outputExpected.shape()[0]);
+    unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+    unsigned int outputHeight   = armnn::numeric_cast<unsigned int>(outputExpected.shape()[2]);
+    unsigned int outputWidth    = armnn::numeric_cast<unsigned int>(outputExpected.shape()[3]);
 
     // If a bias is used, its size must equal the number of output channels.
     bool biasEnabled = bias.size() > 0;
@@ -2151,20 +2152,20 @@
     uint32_t dilationX = 1,
     uint32_t dilationY = 1)
 {
-    unsigned int inputHeight   = boost::numeric_cast<unsigned int>(originalInput.shape()[2]);
-    unsigned int inputWidth    = boost::numeric_cast<unsigned int>(originalInput.shape()[3]);
-    unsigned int inputChannels = boost::numeric_cast<unsigned int>(originalInput.shape()[1]);
-    unsigned int inputNum      = boost::numeric_cast<unsigned int>(originalInput.shape()[0]);
+    unsigned int inputHeight   = armnn::numeric_cast<unsigned int>(originalInput.shape()[2]);
+    unsigned int inputWidth    = armnn::numeric_cast<unsigned int>(originalInput.shape()[3]);
+    unsigned int inputChannels = armnn::numeric_cast<unsigned int>(originalInput.shape()[1]);
+    unsigned int inputNum      = armnn::numeric_cast<unsigned int>(originalInput.shape()[0]);
 
-    unsigned int outputHeight   = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[2]);
-    unsigned int outputWidth    = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[3]);
-    unsigned int outputChannels = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[1]);
-    unsigned int outputNum      = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[0]);
+    unsigned int outputHeight   = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[2]);
+    unsigned int outputWidth    = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[3]);
+    unsigned int outputChannels = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[1]);
+    unsigned int outputNum      = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[0]);
 
-    unsigned int kernelHeight = boost::numeric_cast<unsigned int>(originalKernel.shape()[2]);
-    unsigned int kernelWidth = boost::numeric_cast<unsigned int>(originalKernel.shape()[3]);
-    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(originalKernel.shape()[1]);
-    unsigned int kernelDepthMul = boost::numeric_cast<unsigned int>(originalKernel.shape()[0]);
+    unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(originalKernel.shape()[2]);
+    unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(originalKernel.shape()[3]);
+    unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(originalKernel.shape()[1]);
+    unsigned int kernelDepthMul = armnn::numeric_cast<unsigned int>(originalKernel.shape()[0]);
 
     bool biasEnabled = bias.size() > 0;
 
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index 8f39f42..088ca3b 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -7,6 +7,7 @@
 
 #include <QuantizeHelper.hpp>
 
+#include <armnn/utility/NumericCast.hpp>
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 
@@ -144,9 +145,9 @@
         armnn::DataType constantDataType = armnn::DataType::Float32)
 {
     IgnoreUnused(memoryManager);
-    unsigned int batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
-    unsigned int inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
-    unsigned int outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+    unsigned int batchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
+    unsigned int inputSize = armnn::numeric_cast<unsigned int>(input.shape()[1]);
+    unsigned int outputSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[1]);
     // cellSize and outputSize have the same size when there is no projection.
     unsigned numUnits = outputSize;
 
@@ -1069,10 +1070,10 @@
     bool peepholeEnabled = true;
     bool projectionEnabled = false;
     // These are not the input and the output of Lstm yet
-    unsigned int batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
-    unsigned int inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
+    unsigned int batchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
+    unsigned int inputSize = armnn::numeric_cast<unsigned int>(input.shape()[1]);
 
-    unsigned int outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+    unsigned int outputSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[1]);
 
     const unsigned int cellSize = outputSize;
 
@@ -1560,9 +1561,9 @@
     const boost::multi_array<uint8_t, 2>& outputExpected)
 {
     IgnoreUnused(memoryManager);
-    auto numBatches = boost::numeric_cast<unsigned int>(input.shape()[0]);
-    auto inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
-    auto outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+    auto numBatches = armnn::numeric_cast<unsigned int>(input.shape()[0]);
+    auto inputSize = armnn::numeric_cast<unsigned int>(input.shape()[1]);
+    auto outputSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[1]);
 
     // Scale/Offset for input/output, cellState In/Out, weights, bias
     float inputOutputScale = 0.0078125f;
diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
index b42b180..2e8e16f 100644
--- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
@@ -8,6 +8,8 @@
 #include <armnn/Exceptions.hpp>
 #include <armnn/LayerSupport.hpp>
 
+#include <armnn/utility/NumericCast.hpp>
+
 #include <backendsCommon/CpuTensorHandle.hpp>
 
 #include <backendsCommon/test/TensorCopyUtils.hpp>
@@ -102,7 +104,7 @@
                     // pow((kappa + (accumulatedScale * alpha)), beta)
                     // ...where accumulatedScale is the sum of every element squared.
                     float divisor[inputNum];
-                    for(int i = 0; i < boost::numeric_cast<int>(inputNum); i++)
+                    for(int i = 0; i < armnn::numeric_cast<int>(inputNum); i++)
                     {
                         float accumulatedScale = input[i][0][0][0]*input[i][0][0][0] +
                                                  input[i][0][0][1]*input[i][0][0][1] +
@@ -129,11 +131,11 @@
                     // ...where adjacent channels means within half the normSize for the channel
                     // The test data has only one channel, so this is simplified below.
                     std::vector<float> outputVector;
-                    for (int n = 0; n < boost::numeric_cast<int>(inputNum); ++n)
+                    for (int n = 0; n < armnn::numeric_cast<int>(inputNum); ++n)
                     {
-                        for (int h = 0; h < boost::numeric_cast<int>(inputHeight); ++h)
+                        for (int h = 0; h < armnn::numeric_cast<int>(inputHeight); ++h)
                         {
-                            for (int w = 0; w < boost::numeric_cast<int>(inputWidth); ++w)
+                            for (int w = 0; w < armnn::numeric_cast<int>(inputWidth); ++w)
                             {
                                 float accumulatedScale = input[n][0][h][w]*input[n][0][h][w];
                                 float scale = powf((kappa + accumulatedScale * alpha), -beta);
diff --git a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
index a4f87ff..70e2e61 100644
--- a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
@@ -15,6 +15,7 @@
 #include <armnnUtils/Permute.hpp>
 
 #include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/NumericCast.hpp>
 
 #include <backendsCommon/WorkloadInfo.hpp>
 
@@ -48,15 +49,15 @@
     auto widthIndex = dimensionIndices.GetWidthIndex();
     auto channelsIndex = dimensionIndices.GetChannelsIndex();
 
-    unsigned int inputHeight     = boost::numeric_cast<unsigned int>(input.shape()[heightIndex]);
-    unsigned int inputWidth      = boost::numeric_cast<unsigned int>(input.shape()[widthIndex]);
-    unsigned int inputChannels   = boost::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
-    unsigned int inputBatchSize  = boost::numeric_cast<unsigned int>(input.shape()[0]);
+    unsigned int inputHeight     = armnn::numeric_cast<unsigned int>(input.shape()[heightIndex]);
+    unsigned int inputWidth      = armnn::numeric_cast<unsigned int>(input.shape()[widthIndex]);
+    unsigned int inputChannels   = armnn::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
+    unsigned int inputBatchSize  = armnn::numeric_cast<unsigned int>(input.shape()[0]);
 
-    unsigned int outputHeight    = boost::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
-    unsigned int outputWidth     = boost::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
-    unsigned int outputChannels  = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
-    unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
+    unsigned int outputHeight    = armnn::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
+    unsigned int outputWidth     = armnn::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
+    unsigned int outputChannels  = armnn::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
+    unsigned int outputBatchSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[0]);
 
     armnn::TensorInfo inputTensorInfo  = armnnUtils::GetTensorInfo(
         inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);