IVGCVSW-5302 Remove some boost::numeric_cast from parsers

 * Replaced with armnn/utility/NumericCast.hpp
 * Exclusions in armnnCaffeParser
 * Three excluded as they require a float implementation in NumericCast.hpp

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: Ib468b606238694334a8319d0ed5db381ce37a915
diff --git a/src/armnnCaffeParser/CaffeParser.cpp b/src/armnnCaffeParser/CaffeParser.cpp
index b8ce470..d50846a 100644
--- a/src/armnnCaffeParser/CaffeParser.cpp
+++ b/src/armnnCaffeParser/CaffeParser.cpp
@@ -14,6 +14,7 @@
 #include "VerificationHelpers.hpp"
 
 #include <armnn/utility/Assert.hpp>
+#include <armnn/utility/NumericCast.hpp>
 
 #include <boost/numeric/conversion/cast.hpp>
 #include <boost/format.hpp>
@@ -66,7 +67,7 @@
 const float* GetArrayPtrFromBlob(const LayerParameter& layerParam, unsigned int blobIndex)
 {
     auto nBlobs = layerParam.blobs_size();
-    if (blobIndex >= boost::numeric_cast<unsigned int>(nBlobs))
+    if (blobIndex >= armnn::numeric_cast<unsigned int>(nBlobs))
     {
         throw ParseException(
             boost::str(
@@ -78,7 +79,7 @@
                     CHECK_LOCATION().AsString()));
     }
 
-    const BlobProto& blob = layerParam.blobs(boost::numeric_cast<int>(blobIndex));
+    const BlobProto& blob = layerParam.blobs(armnn::numeric_cast<int>(blobIndex));
 
     const float* arrayPtr = blob.data().data();
     return arrayPtr;
@@ -87,7 +88,7 @@
 void GetDataFromBlob(const LayerParameter& layerParam, vector<float>& outData, unsigned int blobIndex)
 {
     auto nBlobs = layerParam.blobs_size();
-    if (blobIndex >= boost::numeric_cast<unsigned int>(nBlobs))
+    if (blobIndex >= armnn::numeric_cast<unsigned int>(nBlobs))
     {
         throw ParseException(
             boost::str(
@@ -98,9 +99,9 @@
                     CHECK_LOCATION().AsString()));
     }
 
-    const BlobProto& blob = layerParam.blobs(boost::numeric_cast<int>(blobIndex));
+    const BlobProto& blob = layerParam.blobs(armnn::numeric_cast<int>(blobIndex));
 
-    size_t blobSize = boost::numeric_cast<size_t>(blob.data_size());
+    size_t blobSize = armnn::numeric_cast<size_t>(blob.data_size());
     if (blobSize != outData.size())
     {
         throw ParseException(
@@ -115,7 +116,7 @@
                     CHECK_LOCATION().AsString()));
     }
 
-    int outSizeInt = boost::numeric_cast<int>(outData.size());
+    int outSizeInt = armnn::numeric_cast<int>(outData.size());
     for (int i = 0; i < outSizeInt; ++i)
     {
         outData[static_cast<size_t>(i)] = blob.data(i);
@@ -133,7 +134,7 @@
                               unsigned int                 numOutputs)
 {
     int numInputsActual = layerParameter.bottom_size();
-    if (numInputs != boost::numeric_cast<unsigned int>(numInputsActual))
+    if (numInputs != armnn::numeric_cast<unsigned int>(numInputsActual))
     {
         throw ParseException(
             boost::str(
@@ -146,7 +147,7 @@
     }
 
     int numOutputsActual = layerParameter.top_size();
-    if (numOutputs != boost::numeric_cast<unsigned int>(numOutputsActual))
+    if (numOutputs != armnn::numeric_cast<unsigned int>(numOutputsActual))
     {
         throw ParseException(
             boost::str(
@@ -320,7 +321,7 @@
         shape.push_back(static_cast<unsigned int>(blobShape.dim(j)));
     }
 
-    return TensorInfo(boost::numeric_cast<unsigned int>(shape.size()), shape.data(), DataType::Float32);
+    return TensorInfo(armnn::numeric_cast<unsigned int>(shape.size()), shape.data(), DataType::Float32);
 }
 
 BlobShape TensorDescToBlobShape(const TensorInfo& desc)
@@ -329,7 +330,7 @@
     for (unsigned int i = 0; i < desc.GetNumDimensions(); ++i)
     {
         ret.add_dim(i);
-        ret.set_dim(boost::numeric_cast<int>(i), desc.GetShape()[i]);
+        ret.set_dim(armnn::numeric_cast<int>(i), desc.GetShape()[i]);
     }
 
     return ret;
@@ -340,7 +341,7 @@
 vector<const LayerParameter*> CaffeParserBase::GetInputs(const LayerParameter& layerParam)
 {
     std::vector<const caffe::LayerParameter*> ret;
-    ret.reserve(boost::numeric_cast<size_t>(layerParam.bottom_size()));
+    ret.reserve(armnn::numeric_cast<size_t>(layerParam.bottom_size()));
     for (int j = 0; j < layerParam.bottom_size(); ++j)
     {
         std::string inputName = layerParam.bottom(j);
@@ -369,7 +370,7 @@
 
     const InputParameter& param = layerParam.input_param();
 
-    const armnn::LayerBindingId inputId = boost::numeric_cast<armnn::LayerBindingId>(
+    const armnn::LayerBindingId inputId = armnn::numeric_cast<armnn::LayerBindingId>(
         m_NetworkInputsBindingInfo.size());
     armnn::IConnectableLayer* const inputLayer = m_Network->AddInputLayer(inputId, layerParam.name().c_str());
 
@@ -504,7 +505,7 @@
                 static_cast<float>(desc.m_StrideX)) + 1));
 
     // Load the weight data for ALL groups
-    vector<float> weightData(boost::numeric_cast<size_t>(numGroups *
+    vector<float> weightData(armnn::numeric_cast<size_t>(numGroups *
                                                          inputShape.dim(1) *  // number of input channels
                                                          outputShape.dim(1) * // number of output channels
                                                          kernelH *
@@ -522,15 +523,15 @@
 
     if (desc.m_BiasEnabled)
     {
-        biasData.resize(boost::numeric_cast<size_t>(numGroups * outputShape.dim(1)), 1.f);
+        biasData.resize(armnn::numeric_cast<size_t>(numGroups * outputShape.dim(1)), 1.f);
         GetDataFromBlob(layerParam, biasData, 1);
 
         const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
         biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
     }
 
-    const unsigned int numWeightsPerGroup = boost::numeric_cast<unsigned int>(weightData.size()) / numGroups;
-    const unsigned int numBiasesPerGroup  = boost::numeric_cast<unsigned int>(biasData.size()) / numGroups;
+    const unsigned int numWeightsPerGroup = armnn::numeric_cast<unsigned int>(weightData.size()) / numGroups;
+    const unsigned int numBiasesPerGroup  = armnn::numeric_cast<unsigned int>(biasData.size()) / numGroups;
 
     for (unsigned int g = 0; g < numGroups; ++g)
     {
@@ -648,7 +649,7 @@
                 static_cast<float>(desc.m_StrideX)) + 1));
 
     // Load the weight data
-    size_t allWeightsSize = boost::numeric_cast<size_t>(inputShape.dim(1) * kernelH * kernelW);
+    size_t allWeightsSize = armnn::numeric_cast<size_t>(inputShape.dim(1) * kernelH * kernelW);
     vector<float> weightData(allWeightsSize);
 
     GetDataFromBlob(layerParam, weightData, 0);
@@ -668,7 +669,7 @@
     {
         TensorInfo biasInfo;
 
-        biasData.resize(boost::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
+        biasData.resize(armnn::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
         GetDataFromBlob(layerParam, biasData, 1);
 
         const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
@@ -824,7 +825,7 @@
                 static_cast<float>(strideW)) + 1));
 
     // Load the weight data for ALL groups
-    vector<float> weightData(boost::numeric_cast<size_t>(inputShape.dim(1) *
+    vector<float> weightData(armnn::numeric_cast<size_t>(inputShape.dim(1) *
                                                          outputShape.dim(1) *
                                                          kernelH *
                                                          kernelW));
@@ -846,7 +847,7 @@
     {
         TensorInfo biasInfo;
 
-        biasData.resize(boost::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
+        biasData.resize(armnn::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
         GetDataFromBlob(layerParam, biasData, 1);
 
         const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
@@ -1290,7 +1291,7 @@
     for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
     {
         const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(
-            layerParam.bottom(boost::numeric_cast<int>(viewIndex))).GetTensorInfo();
+            layerParam.bottom(armnn::numeric_cast<int>(viewIndex))).GetTensorInfo();
         // Checks whether the dimensions of the input tensors are actually 4.
         if (inputInfo.GetNumDimensions()!=4)
         {
@@ -1328,7 +1329,7 @@
     armnn::IConnectableLayer* concatlayer = m_Network->AddConcatLayer(concatDescriptor, layerParam.name().c_str());
     for (unsigned int i = 0; i < numInputs; ++i)
     {
-        armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(boost::numeric_cast<int>(i)));
+        armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(armnn::numeric_cast<int>(i)));
         outputSlot.Connect(concatlayer->GetInputSlot(i));
     }
 
@@ -1375,8 +1376,8 @@
     GetDataFromBlob(layerParam, varianceData, 1);
 
     // Reads moving average factor and applies scaling (if required).
-    const BlobProto& blob = layerParam.blobs(boost::numeric_cast<int>(2));
-    const float movingAverageFactor = blob.data(boost::numeric_cast<int>(0));
+    const BlobProto& blob = layerParam.blobs(armnn::numeric_cast<int>(2));
+    const float movingAverageFactor = blob.data(armnn::numeric_cast<int>(0));
     if(movingAverageFactor != 0.0f)
     {
         const float scaleFactor = 1.0f / movingAverageFactor;
@@ -1722,7 +1723,7 @@
     {
         armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(requestedOutput);
 
-        const armnn::LayerBindingId outputId = boost::numeric_cast<armnn::LayerBindingId>(
+        const armnn::LayerBindingId outputId = armnn::numeric_cast<armnn::LayerBindingId>(
             m_NetworkOutputsBindingInfo.size());
         armnn::IConnectableLayer* const outputLayer = m_Network->AddOutputLayer(outputId, requestedOutput.c_str());
         outputSlot.Connect(outputLayer->GetInputSlot(0));
diff --git a/src/armnnCaffeParser/RecordByRecordCaffeParser.cpp b/src/armnnCaffeParser/RecordByRecordCaffeParser.cpp
index cb79436..a59725c 100644
--- a/src/armnnCaffeParser/RecordByRecordCaffeParser.cpp
+++ b/src/armnnCaffeParser/RecordByRecordCaffeParser.cpp
@@ -7,12 +7,10 @@
 
 #include "armnn/Exceptions.hpp"
 #include "armnn/Utils.hpp"
-
+#include <armnn/utility/NumericCast.hpp>
 
 #include "GraphTopologicalSort.hpp"
 
-#include <boost/numeric/conversion/cast.hpp>
-
 // Caffe
 #include <google/protobuf/wire_format.h>
 
@@ -282,7 +280,7 @@
     std::unique_ptr<char[]> ptr(new char[dataInfo.SizeOfData()]);
     ifs.clear();
     ifs.seekg(dataInfo.PositionOfData(), std::ios_base::beg);
-    ifs.read(ptr.get(), boost::numeric_cast<std::streamsize>(dataInfo.SizeOfData()));
+    ifs.read(ptr.get(), armnn::numeric_cast<std::streamsize>(dataInfo.SizeOfData()));
     return ptr;
 }
 
@@ -299,12 +297,12 @@
     // on the platform in which I am currently compiling std::streamoff is signed long int and
     // size_t is unsigned long int so there is no way this error condition can fire but this stuff
     // is supposed to be portable so the check remains in place
-    if (boost::numeric_cast<size_t>(sizeOfLayer) > SIZE_MAX) {
+    if (armnn::numeric_cast<size_t>(sizeOfLayer) > SIZE_MAX) {
         std::stringstream ss;
         ss << "layer is greater than " << SIZE_MAX << " in size cannot process. layer size = [" << sizeOfLayer << "]";
         throw armnn::ParseException(ss.str());
     }
-    LayerParameterInfo info(bufferStart, boost::numeric_cast<size_t>(sizeOfLayer));
+    LayerParameterInfo info(bufferStart, armnn::numeric_cast<size_t>(sizeOfLayer));
     return info;
 }
 
@@ -314,7 +312,7 @@
     ifs.clear();
     ifs.seekg(layerInfo.PositionOfData(), std::ios_base::beg);
     std::streamoff endOfLayer = layerInfo.PositionOfData() +
-        boost::numeric_cast<std::streamoff>(layerInfo.SizeOfData());
+        armnn::numeric_cast<std::streamoff>(layerInfo.SizeOfData());
     while(true)
     {
         // check to see if we have reached the end of the record
@@ -342,7 +340,7 @@
             {
                 int size = ReadBase128(ifs);
                 std::streamoff posStartOfData = ifs.tellg();
-                VarLenDataInfo dataInfo(posStartOfData, boost::numeric_cast<size_t>(size));
+                VarLenDataInfo dataInfo(posStartOfData, armnn::numeric_cast<size_t>(size));
                 //optional string name = 1; // the layer name
                 //optional string type = 2; // the layer type
                 //repeated string bottom = 3; // the name of each bottom blob
@@ -684,7 +682,7 @@
             char *buffer = new char[info->SizeOfData()];
             ifs.clear();
             ifs.seekg(info->PositionOfData(), std::ios_base::beg);
-            ifs.read(buffer, boost::numeric_cast<std::streamsize>(info->SizeOfData()));
+            ifs.read(buffer, armnn::numeric_cast<std::streamsize>(info->SizeOfData()));
             bool bRet = layer.ParseFromArray(buffer, static_cast<int>(info->SizeOfData()));
             delete[] buffer;
             if (!bRet)
@@ -719,7 +717,7 @@
     {
         armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(requestedOutput);
 
-        const armnn::LayerBindingId outputId = boost::numeric_cast<armnn::LayerBindingId>(
+        const armnn::LayerBindingId outputId = armnn::numeric_cast<armnn::LayerBindingId>(
             m_NetworkOutputsBindingInfo.size());
         armnn::IConnectableLayer* const outputLayer = m_Network->AddOutputLayer(outputId, requestedOutput.c_str());
         outputSlot.Connect(outputLayer->GetInputSlot(0));
diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp
index a07a899..01ad124 100644
--- a/src/armnnOnnxParser/OnnxParser.cpp
+++ b/src/armnnOnnxParser/OnnxParser.cpp
@@ -6,10 +6,10 @@
 
 #include <armnn/Descriptors.hpp>
 #include <armnn/utility/Assert.hpp>
+#include <armnn/utility/NumericCast.hpp>
 #include <VerificationHelpers.hpp>
 
 #include <boost/format.hpp>
-#include <boost/numeric/conversion/cast.hpp>
 
 #include <google/protobuf/text_format.h>
 #include <google/protobuf/io/zero_copy_stream_impl.h>
@@ -350,7 +350,7 @@
                               % CHECK_LOCATION().AsString()));
         }
 
-        auto targetNumElements = boost::numeric_cast<unsigned int>(std::accumulate(targetDims.begin(), targetDims.end(),
+        auto targetNumElements = armnn::numeric_cast<unsigned int>(std::accumulate(targetDims.begin(), targetDims.end(),
             -1, std::multiplies<int32_t>()));
         auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
         outDims[stretchIndex] = inShape.GetNumElements() / targetNumElements;
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 8bc4753..109c2c2 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -28,7 +28,6 @@
 #include <flatbuffers/flexbuffers.h>
 
 #include <boost/format.hpp>
-#include <boost/numeric/conversion/cast.hpp>
 
 #include <fstream>
 #include <algorithm>
@@ -388,10 +387,10 @@
             {
                 // NOTE: we lose precision here when converting from 64 bit to 32
                 //       but this is what we support at the moment in ArmNN
-                quantizationOffset = boost::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
+                quantizationOffset = armnn::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
             }
 
-            TensorShape tensorShape(boost::numeric_cast<unsigned int>(safeShape.size()),
+            TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
                                     safeShape.data());
             if (isDynamic)
             {
@@ -414,7 +413,7 @@
                       std::back_inserter(quantizationScales));
 
             // QSymmS8 Per-axis
-            TensorShape tensorShape(boost::numeric_cast<unsigned int>(safeShape.size()),
+            TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
                                     safeShape.data());
             if (isDynamic)
             {
@@ -423,14 +422,14 @@
             armnn::TensorInfo result(tensorShape,
                                      type,
                                      quantizationScales,
-                                     dimensionMappings[boost::numeric_cast<unsigned int>(
+                                     dimensionMappings[armnn::numeric_cast<unsigned int>(
                                          tensorPtr->quantization->quantized_dimension)]);
             return result;
         }
     }
     else
     {
-        TensorShape tensorShape(boost::numeric_cast<unsigned int>(safeShape.size()),
+        TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
                                 safeShape.data());
         if (isDynamic)
         {
@@ -866,8 +865,8 @@
     auto inputs  = GetInputs(m_Model, subgraphIndex, operatorIndex);
     auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
 
-    const unsigned int numInputs  = boost::numeric_cast<unsigned int>(inputs.size());
-    const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputs.size());
+    const unsigned int numInputs  = armnn::numeric_cast<unsigned int>(inputs.size());
+    const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());
 
     StandInDescriptor descriptor(numInputs, numOutputs);
     auto layerName = boost::str(boost::format("StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);
@@ -2144,7 +2143,7 @@
         }
 
         auto targetNumElements =
-            boost::numeric_cast<unsigned int>(
+            armnn::numeric_cast<unsigned int>(
                 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
 
         auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
@@ -2899,14 +2898,14 @@
     // Check for inferred Axis
     if (numInferred == 0)
     {
-        if (splitSum != numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
+        if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
         {
             throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
         }
     }
     else if (numInferred == 1)
     {
-        splitsData[inferIdx] = numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
+        splitsData[inferIdx] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
     }
     else
     {
@@ -2922,7 +2921,7 @@
     unsigned int accumSplit = 0;
     for (unsigned int j = 0; j < numSplits; ++j)
     {
-        unsigned int splitSize = numeric_cast<unsigned int>(splitsData[j]);
+        unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsData[j]);
 
         // Set the size of the views.
         for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
diff --git a/src/armnnTfLiteParser/test/Unsupported.cpp b/src/armnnTfLiteParser/test/Unsupported.cpp
index dd77bca..b0ac2d6 100644
--- a/src/armnnTfLiteParser/test/Unsupported.cpp
+++ b/src/armnnTfLiteParser/test/Unsupported.cpp
@@ -8,6 +8,7 @@
 
 #include <armnn/LayerVisitorBase.hpp>
 #include <armnn/utility/Assert.hpp>
+#include <armnn/utility/NumericCast.hpp>
 #include <armnn/utility/PolymorphicDowncast.hpp>
 
 #include <layers/StandInLayer.hpp>
@@ -39,11 +40,11 @@
                            const StandInDescriptor& descriptor,
                            const char*) override
     {
-        unsigned int numInputs = boost::numeric_cast<unsigned int>(m_InputInfos.size());
+        unsigned int numInputs = armnn::numeric_cast<unsigned int>(m_InputInfos.size());
         BOOST_CHECK(descriptor.m_NumInputs    == numInputs);
         BOOST_CHECK(layer->GetNumInputSlots() == numInputs);
 
-        unsigned int numOutputs = boost::numeric_cast<unsigned int>(m_OutputInfos.size());
+        unsigned int numOutputs = armnn::numeric_cast<unsigned int>(m_OutputInfos.size());
         BOOST_CHECK(descriptor.m_NumOutputs    == numOutputs);
         BOOST_CHECK(layer->GetNumOutputSlots() == numOutputs);
 
@@ -77,10 +78,10 @@
         : ParserFlatbuffersFixture()
         , m_StandInLayerVerifier(inputInfos, outputInfos)
     {
-        const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputInfos.size());
+        const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputInfos.size());
         ARMNN_ASSERT(numInputs > 0);
 
-        const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputInfos.size());
+        const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputInfos.size());
         ARMNN_ASSERT(numOutputs > 0);
 
         m_JsonString = R"(
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 0d7c371..8046a55 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -12,6 +12,7 @@
 #include <armnnUtils/DataLayoutIndexed.hpp>
 #include <armnnUtils/Transpose.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/NumericCast.hpp>
 #include <armnn/utility/PolymorphicDowncast.hpp>
 
 #include <GraphTopologicalSort.hpp>
@@ -23,7 +24,6 @@
 #include <tensorflow/core/framework/graph.pb.h>
 
 #include <boost/format.hpp>
-#include <boost/numeric/conversion/cast.hpp>
 #include <fmt/core.h>
 #include <numeric>
 
@@ -250,7 +250,7 @@
         }
 
         auto targetNumElements =
-            boost::numeric_cast<unsigned int>(
+            armnn::numeric_cast<unsigned int>(
                 std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
         auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
         outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
@@ -563,7 +563,7 @@
         return ret;
     }
 
-    ret.reserve(boost::numeric_cast<size_t>(nodeDef.input_size()));
+    ret.reserve(armnn::numeric_cast<size_t>(nodeDef.input_size()));
     for (int j = 0; j < nodeDef.input_size(); ++j)
     {
         OutputId outputId = ParseOutputId(nodeDef.input(j));
@@ -1480,7 +1480,7 @@
                         % CHECK_LOCATION().AsString()));
     }
 
-    std::int32_t inputDimSize = boost::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
+    std::int32_t inputDimSize = armnn::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
     std::vector<uint32_t> outputDims;
 
     // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
@@ -1503,7 +1503,7 @@
         // and insert 1 dimension at index 'expandDim'
         if (expandDim < 0)
         {
-            int outputDimSize = boost::numeric_cast<int>(outputDims.size() + 1);
+            int outputDimSize = armnn::numeric_cast<int>(outputDims.size() + 1);
             auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
             outputDims.insert(getPosition, 1);
         }
@@ -2766,7 +2766,7 @@
 
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
 
-    const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
+    const LayerBindingId layerId = armnn::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
 
     auto it = m_InputShapes.find(nodeDef.name());
     if (it == m_InputShapes.end())
@@ -3524,7 +3524,7 @@
             m_RequestedOutputs.end())
         {
             auto outId = ParseOutputId(nodeDef.name());
-            const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
+            const LayerBindingId layerId = armnn::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
             IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
 
             TensorInfo tensorInfo = prevSlot.GetTensorInfo();