IVGCVSW-4482 Remove boost::ignore_unused

!referencetests:229377

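Replaces every boost::ignore_unused call with armnn::IgnoreUnused and
swaps the <boost/core/ignore_unused.hpp> include for the project's own
<armnn/utility/IgnoreUnused.hpp>, trimming a Boost usage from armnn
code. Call sites change only in name: single arguments
(IgnoreUnused(name)), argument lists (IgnoreUnused(id, name)) and
parameter-pack expansions (IgnoreUnused(params...)) all keep working.

For reference, a minimal sketch of the replacement utility, assuming
the usual variadic no-op form (header path and body here are
illustrative, not quoted from the tree):

    // armnn/utility/IgnoreUnused.hpp (sketch)
    #pragma once

    namespace armnn
    {

    // Accepts any number of arguments of any type and does nothing
    // with them; calls silence unused-parameter/unused-variable
    // warnings and compile away entirely.
    template<typename ... Ts>
    inline void IgnoreUnused(Ts&&...) {}

    } // namespace armnn

Code inside namespace armnn can call it unqualified, which is why most
hunks below use IgnoreUnused(...) while files outside the namespace,
such as OptionalTest.cpp and ProfilerTests.cpp, write
armnn::IgnoreUnused(...).
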
Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Ia9b360b4a057fe7bbce5b268092627c09a0dba82
diff --git a/src/armnn/DynamicQuantizationVisitor.cpp b/src/armnn/DynamicQuantizationVisitor.cpp
index 4b1dce0..862a926 100644
--- a/src/armnn/DynamicQuantizationVisitor.cpp
+++ b/src/armnn/DynamicQuantizationVisitor.cpp
@@ -6,7 +6,7 @@
 #include "DynamicQuantizationVisitor.hpp"
 #include "NetworkUtils.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/Descriptors.hpp>
 #include <armnn/Types.hpp>
 
@@ -85,7 +85,7 @@
 
 void DynamicQuantizationVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
     SetRange(layer, 0, -20.f, 20.f);
     AddToCalibratedLayers(layer);
 }
@@ -98,12 +98,12 @@
                                                               const ConstTensor& gamma,
                                                               const char* name)
 {
-    boost::ignore_unused(desc);
-    boost::ignore_unused(mean);
-    boost::ignore_unused(variance);
-    boost::ignore_unused(beta);
-    boost::ignore_unused(gamma);
-    boost::ignore_unused(name);
+    IgnoreUnused(desc);
+    IgnoreUnused(mean);
+    IgnoreUnused(variance);
+    IgnoreUnused(beta);
+    IgnoreUnused(gamma);
+    IgnoreUnused(name);
     SetRange(layer, 0, -15.0f, 15.0f);
     AddToCalibratedLayers(layer);
 }
@@ -114,10 +114,10 @@
                                                          const Optional<ConstTensor>& biases,
                                                          const char* name)
 {
-    boost::ignore_unused(convolution2dDescriptor);
-    boost::ignore_unused(weights);
-    boost::ignore_unused(biases);
-    boost::ignore_unused(name);
+    IgnoreUnused(convolution2dDescriptor);
+    IgnoreUnused(weights);
+    IgnoreUnused(biases);
+    IgnoreUnused(name);
     SetRange(layer, 0, -15.0f, 15.0f);
     AddToCalibratedLayers(layer);
 }
@@ -128,10 +128,10 @@
                                                                   const Optional<ConstTensor>& biases,
                                                                   const char* name)
 {
-    boost::ignore_unused(desc);
-    boost::ignore_unused(weights);
-    boost::ignore_unused(biases);
-    boost::ignore_unused(name);
+    IgnoreUnused(desc);
+    IgnoreUnused(weights);
+    IgnoreUnused(biases);
+    IgnoreUnused(name);
     SetRange(layer, 0, -15.0f, 15.0f);
     AddToCalibratedLayers(layer);
 }
@@ -140,7 +140,7 @@
                                                       const ActivationDescriptor& activationDescriptor,
                                                       const char* name)
 {
-    boost::ignore_unused(name, activationDescriptor);
+    IgnoreUnused(name, activationDescriptor);
     switch (activationDescriptor.m_Function)
     {
         // Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
@@ -172,10 +172,10 @@
                                                           const Optional<ConstTensor>& biases,
                                                           const char *name)
 {
-    boost::ignore_unused(desc);
-    boost::ignore_unused(weights);
-    boost::ignore_unused(biases);
-    boost::ignore_unused(name);
+    IgnoreUnused(desc);
+    IgnoreUnused(weights);
+    IgnoreUnused(biases);
+    IgnoreUnused(name);
     SetRange(layer, 0, -15.0f, 15.0f);
     AddToCalibratedLayers(layer);
 }
@@ -184,8 +184,8 @@
                                                    const PermuteDescriptor& permuteDescriptor,
                                                    const char* name)
 {
-    boost::ignore_unused(permuteDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(permuteDescriptor);
+    IgnoreUnused(name);
     AddToNonCalibratedLayers(layer);
 }
 
@@ -193,8 +193,8 @@
                                                           const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                           const char* name)
 {
-    boost::ignore_unused(spaceToBatchNdDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(spaceToBatchNdDescriptor);
+    IgnoreUnused(name);
     AddToNonCalibratedLayers(layer);
 }
 
@@ -202,8 +202,8 @@
                                                      const Pooling2dDescriptor& pooling2dDescriptor,
                                                      const char* name)
 {
-    boost::ignore_unused(pooling2dDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(pooling2dDescriptor);
+    IgnoreUnused(name);
     AddToNonCalibratedLayers(layer);
 }
 
@@ -211,8 +211,8 @@
                                                    const SoftmaxDescriptor& softmaxDescriptor,
                                                    const char* name)
 {
-    boost::ignore_unused(softmaxDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(softmaxDescriptor);
+    IgnoreUnused(name);
     SetRange(layer, 0, 0.f, 1.f);
     AddToCalibratedLayers(layer);
 }
@@ -221,7 +221,7 @@
                                                     const ConstTensor& input,
                                                     const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     if (input.GetDataType() != DataType::Float32)
     {
@@ -249,8 +249,8 @@
                                                   const ConcatDescriptor& originsDescriptor,
                                                   const char* name)
 {
-    boost::ignore_unused(name);
-    boost::ignore_unused(originsDescriptor);
+    IgnoreUnused(name);
+    IgnoreUnused(originsDescriptor);
     float min = std::numeric_limits<float>::max();
     float max = std::numeric_limits<float>::lowest();
     for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
@@ -270,8 +270,8 @@
                                                    const ReshapeDescriptor& reshapeDescriptor,
                                                    const char* name)
 {
-    boost::ignore_unused(reshapeDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(reshapeDescriptor);
+    IgnoreUnused(name);
     AddToNonCalibratedLayers(layer);
 }
 
@@ -279,8 +279,8 @@
                                                     const SplitterDescriptor& splitterDescriptor,
                                                     const char* name)
 {
-    boost::ignore_unused(splitterDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(splitterDescriptor);
+    IgnoreUnused(name);
     AddToNonCalibratedLayers(layer);
 }
 
@@ -288,8 +288,8 @@
                                                           const ResizeBilinearDescriptor& resizeDesc,
                                                           const char* name)
 {
-    boost::ignore_unused(resizeDesc);
-    boost::ignore_unused(name);
+    IgnoreUnused(resizeDesc);
+    IgnoreUnused(name);
     AddToNonCalibratedLayers(layer);
 }
 
@@ -297,8 +297,8 @@
                                                         const StridedSliceDescriptor& stridedSliceDescriptor,
                                                         const char* name)
 {
-    boost::ignore_unused(stridedSliceDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(stridedSliceDescriptor);
+    IgnoreUnused(name);
     AddToNonCalibratedLayers(layer);
 }
 
@@ -306,23 +306,23 @@
                                                           const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                                           const char* name)
 {
-    boost::ignore_unused(batchToSpaceNdDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(batchToSpaceNdDescriptor);
+    IgnoreUnused(name);
     AddToNonCalibratedLayers(layer);
 }
 
 void DynamicQuantizationVisitor::VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
 {
-    boost::ignore_unused(id);
-    boost::ignore_unused(name);
+    IgnoreUnused(id);
+    IgnoreUnused(name);
     SetRange(layer, 0, -0.0f, 0.0f);
     AddToCalibratedLayers(layer);
 }
 
 void DynamicQuantizationVisitor::VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
 {
-    boost::ignore_unused(id);
-    boost::ignore_unused(name);
+    IgnoreUnused(id);
+    IgnoreUnused(name);
     AddToNonCalibratedLayers(layer);
     m_OutputLayers.push_back(id);
 }
diff --git a/src/armnn/ExecutionFrame.cpp b/src/armnn/ExecutionFrame.cpp
index 58005e9..92a7990 100644
--- a/src/armnn/ExecutionFrame.cpp
+++ b/src/armnn/ExecutionFrame.cpp
@@ -13,7 +13,7 @@
 
 IExecutionFrame* ExecutionFrame::ExecuteWorkloads(IExecutionFrame* previousFrame)
 {
-    boost::ignore_unused(previousFrame);
+    IgnoreUnused(previousFrame);
     for (auto& workload: m_WorkloadQueue)
     {
         workload->Execute();
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index 8e7f75b..0d326ad 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -435,7 +435,7 @@
     const SubgraphView::Layers& substituteSubgraphLayers = substituteSubgraph.GetLayers();
     std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](Layer* layer)
     {
-        boost::ignore_unused(layer);
+        IgnoreUnused(layer);
         BOOST_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
                          "Substitute layer is not a member of graph");
     });
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index c65f12b..63bc8d0 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -297,7 +297,7 @@
         graph.m_Layers.erase(layerIt);
 
         const size_t numErased = graph.m_PosInGraphMap.erase(this);
-        boost::ignore_unused(numErased);
+        IgnoreUnused(numErased);
         BOOST_ASSERT(numErased == 1);
     }
 
@@ -355,7 +355,7 @@
     ~LayerInGraph() override
     {
         const size_t numErased = m_Graph->m_InputIds.erase(GetBindingId());
-        boost::ignore_unused(numErased);
+        IgnoreUnused(numErased);
         BOOST_ASSERT(numErased == 1);
     }
 };
@@ -381,7 +381,7 @@
     ~LayerInGraph() override
     {
         const size_t numErased = m_Graph->m_OutputIds.erase(GetBindingId());
-        boost::ignore_unused(numErased);
+        IgnoreUnused(numErased);
         BOOST_ASSERT(numErased == 1);
     }
 };
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index 1f63d6e..9de812c 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -196,7 +196,7 @@
 , m_BackendHint(EmptyOptional())
 , m_Guid(profiling::ProfilingService::Instance().NextGuid())
 {
-    boost::ignore_unused(layout);
+    IgnoreUnused(layout);
     m_InputSlots.reserve(numInputSlots);
     for (unsigned int i = 0; i < numInputSlots; ++i)
     {
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index 5ad38f0..ec35d71 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -17,6 +17,7 @@
 #include <armnn/Types.hpp>
 #include <armnn/Tensor.hpp>
 #include <armnn/INetwork.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <algorithm>
 #include <memory>
@@ -27,7 +28,6 @@
 #include <list>
 
 #include <boost/numeric/conversion/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
 #include <boost/cast.hpp>
 
 namespace armnn
diff --git a/src/armnn/LayerSupportCommon.hpp b/src/armnn/LayerSupportCommon.hpp
index e0c6b80..9252b3b 100644
--- a/src/armnn/LayerSupportCommon.hpp
+++ b/src/armnn/LayerSupportCommon.hpp
@@ -4,13 +4,12 @@
 //
 #pragma once
 
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/DescriptorsFwd.hpp>
 #include <armnn/Types.hpp>
 #include <armnn/Tensor.hpp>
 #include <armnn/Optional.hpp>
 
-#include <boost/core/ignore_unused.hpp>
-
 namespace armnn
 {
 
@@ -54,23 +53,23 @@
 template<typename ... Params>
 bool TrueFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(reasonIfUnsupported);
-    boost::ignore_unused(params...);
+    IgnoreUnused(reasonIfUnsupported);
+    IgnoreUnused(params...);
     return true;
 }
 
 template<typename ... Params>
 bool FalseFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(reasonIfUnsupported);
-    boost::ignore_unused(params...);
+    IgnoreUnused(reasonIfUnsupported);
+    IgnoreUnused(params...);
     return false;
 }
 
 template<typename ... Params>
 bool FalseFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(params...);
+    IgnoreUnused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type");
     return false;
 }
@@ -78,7 +77,7 @@
 template<typename ... Params>
 bool FalseFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(params...);
+    IgnoreUnused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type");
     return false;
 }
@@ -86,7 +85,7 @@
 template<typename ... Params>
 bool FalseFuncU8(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(params...);
+    IgnoreUnused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with 8-bit data type");
     return false;
 }
@@ -94,7 +93,7 @@
 template<typename ... Params>
 bool FalseFuncI32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(params...);
+    IgnoreUnused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with int32 data type");
     return false;
 }
@@ -102,7 +101,7 @@
 template<typename ... Params>
 bool FalseInputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(params...);
+    IgnoreUnused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type input");
     return false;
 }
@@ -110,7 +109,7 @@
 template<typename ... Params>
 bool FalseInputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(params...);
+    IgnoreUnused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type input");
     return false;
 }
@@ -118,7 +117,7 @@
 template<typename ... Params>
 bool FalseOutputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(params...);
+    IgnoreUnused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type output");
     return false;
 }
@@ -126,7 +125,7 @@
 template<typename ... Params>
 bool FalseOutputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(params...);
+    IgnoreUnused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type output");
     return false;
 }
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 2e95dd8..69e42ba 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -311,7 +311,7 @@
     std::string reasonIfUnsupported;
     BOOST_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(layer, {}, reasonIfUnsupported),
         "Factory does not support layer");
-    boost::ignore_unused(reasonIfUnsupported);
+    IgnoreUnused(reasonIfUnsupported);
     return *workloadFactory;
 }
 
diff --git a/src/armnn/Logging.cpp b/src/armnn/Logging.cpp
index 2c07751..ba40123 100644
--- a/src/armnn/Logging.cpp
+++ b/src/armnn/Logging.cpp
@@ -2,9 +2,9 @@
 // Copyright © 2019 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
 #include <armnn/Logging.hpp>
-
-
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/Utils.hpp>
 
 #if defined(_MSC_VER)
@@ -20,7 +20,6 @@
 #endif
 
 #include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
 #include <iostream>
 
 namespace armnn
@@ -107,14 +106,14 @@
 public:
     void Consume(const std::string& s) override
     {
-        boost::ignore_unused(s);
+        IgnoreUnused(s);
 #if defined(_MSC_VER)
         OutputDebugString(s.c_str());
         OutputDebugString("\n");
 #elif defined(__ANDROID__)
         __android_log_write(ANDROID_LOG_DEBUG, "armnn", s.c_str());
 #else
-        boost::ignore_unused(s);
+        IgnoreUnused(s);
 #endif
     }
 };
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 50a7df6..3663727 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -22,6 +22,7 @@
 #include <armnn/TypesUtils.hpp>
 #include <armnn/BackendRegistry.hpp>
 #include <armnn/Logging.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <ProfilingService.hpp>
 
@@ -628,7 +629,7 @@
                                                             OutputSlot& slot,
                                                             TensorHandleFactoryRegistry& registry)
 {
-    boost::ignore_unused(backends, slot, registry);
+    IgnoreUnused(backends, slot, registry);
     return ITensorHandleFactory::DeferredFactoryId;
 }
 
diff --git a/src/armnn/OverrideInputRangeVisitor.cpp b/src/armnn/OverrideInputRangeVisitor.cpp
index d047c5b..d0453fe 100644
--- a/src/armnn/OverrideInputRangeVisitor.cpp
+++ b/src/armnn/OverrideInputRangeVisitor.cpp
@@ -7,8 +7,9 @@
 #include "NetworkQuantizerUtils.hpp"
 #include "Layer.hpp"
 
+#include <armnn/utility/IgnoreUnused.hpp>
+
 #include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
 
 namespace armnn
 {
@@ -23,7 +24,7 @@
 
 void OverrideInputRangeVisitor::VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
     if (m_LayerId == id)
     {
         m_Ranges.SetRange(layer, 0, m_MinMaxRange.first, m_MinMaxRange.second);
diff --git a/src/armnn/Profiling.cpp b/src/armnn/Profiling.cpp
index 1cd21ab..b1aedaa 100644
--- a/src/armnn/Profiling.cpp
+++ b/src/armnn/Profiling.cpp
@@ -5,6 +5,7 @@
 #include "Profiling.hpp"
 
 #include <armnn/BackendId.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include "JsonPrinter.hpp"
 
@@ -20,7 +21,7 @@
 #include <stack>
 
 #include <boost/algorithm/string.hpp>
-#include <boost/core/ignore_unused.hpp>
+
 namespace armnn
 {
 
@@ -223,7 +224,7 @@
     m_Parents.pop();
 
     Event* parent = m_Parents.empty() ? nullptr : m_Parents.top();
-    boost::ignore_unused(parent);
+    IgnoreUnused(parent);
     BOOST_ASSERT(event->GetParentEvent() == parent);
 
 #if ARMNN_STREAMLINE_ENABLED
diff --git a/src/armnn/Profiling.hpp b/src/armnn/Profiling.hpp
index 4afd691..e6ea090 100644
--- a/src/armnn/Profiling.hpp
+++ b/src/armnn/Profiling.hpp
@@ -6,6 +6,7 @@
 
 #include "ProfilingEvent.hpp"
 
+#include <armnn/utility/IgnoreUnused.hpp>
 #include "armnn/IProfiler.hpp"
 
 #include "WallClockTimer.hpp"
@@ -17,8 +18,6 @@
 #include <stack>
 #include <map>
 
-#include <boost/core/ignore_unused.hpp>
-
 namespace armnn
 {
 
@@ -141,7 +140,7 @@
 
     void ConstructNextInVector(std::vector<InstrumentPtr>& instruments)
     {
-        boost::ignore_unused(instruments);
+        IgnoreUnused(instruments);
     }
 
     template<typename Arg, typename... Args>
diff --git a/src/armnn/StaticRangeVisitor.cpp b/src/armnn/StaticRangeVisitor.cpp
index 81428c1..0e820c3 100644
--- a/src/armnn/StaticRangeVisitor.cpp
+++ b/src/armnn/StaticRangeVisitor.cpp
@@ -5,7 +5,7 @@
 
 #include "StaticRangeVisitor.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/Descriptors.hpp>
 #include <armnn/Types.hpp>
 
@@ -31,7 +31,7 @@
 
 void StaticRangeVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
     SetRange(layer, 0, -20.f, 20.f);
 }
 
@@ -43,12 +43,12 @@
                                                       const ConstTensor& gamma,
                                                       const char* name)
 {
-    boost::ignore_unused(desc);
-    boost::ignore_unused(mean);
-    boost::ignore_unused(variance);
-    boost::ignore_unused(beta);
-    boost::ignore_unused(gamma);
-    boost::ignore_unused(name);
+    IgnoreUnused(desc);
+    IgnoreUnused(mean);
+    IgnoreUnused(variance);
+    IgnoreUnused(beta);
+    IgnoreUnused(gamma);
+    IgnoreUnused(name);
     SetRange(layer, 0, -15.0f, 15.0f);
 }
 
@@ -58,10 +58,10 @@
                                                  const Optional<ConstTensor>& biases,
                                                  const char* name)
 {
-    boost::ignore_unused(convolution2dDescriptor);
-    boost::ignore_unused(weights);
-    boost::ignore_unused(biases);
-    boost::ignore_unused(name);
+    IgnoreUnused(convolution2dDescriptor);
+    IgnoreUnused(weights);
+    IgnoreUnused(biases);
+    IgnoreUnused(name);
     SetRange(layer, 0, -15.0f, 15.0f);
 }
 
@@ -71,10 +71,10 @@
                                                           const Optional<ConstTensor>& biases,
                                                           const char* name)
 {
-    boost::ignore_unused(desc);
-    boost::ignore_unused(weights);
-    boost::ignore_unused(biases);
-    boost::ignore_unused(name);
+    IgnoreUnused(desc);
+    IgnoreUnused(weights);
+    IgnoreUnused(biases);
+    IgnoreUnused(name);
     SetRange(layer, 0, -15.0f, 15.0f);
 }
 
@@ -82,7 +82,7 @@
                                               const ActivationDescriptor& activationDescriptor,
                                               const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
     switch (activationDescriptor.m_Function)
     {
         // Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
@@ -113,10 +113,10 @@
                                                   const Optional<ConstTensor>& biases,
                                                   const char *name)
 {
-    boost::ignore_unused(desc);
-    boost::ignore_unused(weights);
-    boost::ignore_unused(biases);
-    boost::ignore_unused(name);
+    IgnoreUnused(desc);
+    IgnoreUnused(weights);
+    IgnoreUnused(biases);
+    IgnoreUnused(name);
     SetRange(layer, 0, -15.0f, 15.0f);
 }
 
@@ -124,8 +124,8 @@
                                            const PermuteDescriptor& permuteDescriptor,
                                            const char* name)
 {
-    boost::ignore_unused(permuteDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(permuteDescriptor);
+    IgnoreUnused(name);
     ForwardParentParameters(layer);
 }
 
@@ -133,8 +133,8 @@
                                                   const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                   const char* name)
 {
-    boost::ignore_unused(spaceToBatchNdDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(spaceToBatchNdDescriptor);
+    IgnoreUnused(name);
     ForwardParentParameters(layer);
 }
 
@@ -142,8 +142,8 @@
                                              const Pooling2dDescriptor& pooling2dDescriptor,
                                              const char* name)
 {
-    boost::ignore_unused(pooling2dDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(pooling2dDescriptor);
+    IgnoreUnused(name);
     ForwardParentParameters(layer);
 }
 
@@ -151,8 +151,8 @@
                                            const SoftmaxDescriptor& softmaxDescriptor,
                                            const char* name)
 {
-    boost::ignore_unused(softmaxDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(softmaxDescriptor);
+    IgnoreUnused(name);
     SetRange(layer, 0, 0.f, 1.f);
 }
 
@@ -160,8 +160,8 @@
                                           const OriginsDescriptor& originsDescriptor,
                                           const char* name)
 {
-    boost::ignore_unused(originsDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(originsDescriptor);
+    IgnoreUnused(name);
     float min = std::numeric_limits<float>::max();
     float max = std::numeric_limits<float>::lowest();
     for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
@@ -180,7 +180,7 @@
                                             const ConstTensor& input,
                                             const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     if (input.GetDataType() != DataType::Float32)
     {
@@ -208,8 +208,8 @@
                                            const ReshapeDescriptor& reshapeDescriptor,
                                            const char* name)
 {
-    boost::ignore_unused(reshapeDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(reshapeDescriptor);
+    IgnoreUnused(name);
     ForwardParentParameters(layer);
 }
 
@@ -217,8 +217,8 @@
                                             const SplitterDescriptor& splitterDescriptor,
                                             const char* name)
 {
-    boost::ignore_unused(splitterDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(splitterDescriptor);
+    IgnoreUnused(name);
     ForwardParentParameters(layer);
 }
 
@@ -226,8 +226,8 @@
                                                   const ResizeBilinearDescriptor& resizeDesc,
                                                   const char* name)
 {
-    boost::ignore_unused(resizeDesc);
-    boost::ignore_unused(name);
+    IgnoreUnused(resizeDesc);
+    IgnoreUnused(name);
     ForwardParentParameters(layer);
 }
 
@@ -235,8 +235,8 @@
                                           const ResizeDescriptor& resizeDescriptor,
                                           const char* name)
 {
-    boost::ignore_unused(resizeDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(resizeDescriptor);
+    IgnoreUnused(name);
     ForwardParentParameters(layer);
 }
 
@@ -244,8 +244,8 @@
                                                 const StridedSliceDescriptor& stridedSliceDescriptor,
                                                 const char* name)
 {
-    boost::ignore_unused(stridedSliceDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(stridedSliceDescriptor);
+    IgnoreUnused(name);
     ForwardParentParameters(layer);
 }
 
@@ -253,8 +253,8 @@
                                                   const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                                   const char* name)
 {
-    boost::ignore_unused(batchToSpaceNdDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(batchToSpaceNdDescriptor);
+    IgnoreUnused(name);
     ForwardParentParameters(layer);
 }
 
diff --git a/src/armnn/SubgraphView.cpp b/src/armnn/SubgraphView.cpp
index a87cc9b..7705e68 100644
--- a/src/armnn/SubgraphView.cpp
+++ b/src/armnn/SubgraphView.cpp
@@ -6,8 +6,9 @@
 #include "SubgraphView.hpp"
 #include "Graph.hpp"
 
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
+#include <boost/numeric/conversion/cast.hpp>
 #include <utility>
 
 namespace armnn
@@ -24,7 +25,7 @@
     std::for_each(container.begin(), container.end(), [&duplicateSet, &errorMessage](const T& i)
     {
         // Ignore unused for release builds
-        boost::ignore_unused(errorMessage);
+        IgnoreUnused(errorMessage);
 
         // Check if the item is valid
         BOOST_ASSERT_MSG(i, errorMessage.c_str());
diff --git a/src/armnn/SubgraphViewSelector.cpp b/src/armnn/SubgraphViewSelector.cpp
index 8798b72..02b7bda 100644
--- a/src/armnn/SubgraphViewSelector.cpp
+++ b/src/armnn/SubgraphViewSelector.cpp
@@ -5,6 +5,9 @@
 
 #include "SubgraphViewSelector.hpp"
 #include "Graph.hpp"
+
+#include <armnn/utility/IgnoreUnused.hpp>
+
 #include <boost/assert.hpp>
 #include <algorithm>
 #include <map>
@@ -78,14 +81,14 @@
             {
                 size_t numErased = a->m_Dependants.erase(this);
                 BOOST_ASSERT(numErased == 1);
-                boost::ignore_unused(numErased);
+                IgnoreUnused(numErased);
                 a->m_Dependants.insert(m_Parent);
             }
             for (PartialSubgraph* a : m_Dependants)
             {
                 size_t numErased = a->m_Antecedents.erase(this);
                 BOOST_ASSERT(numErased == 1);
-                boost::ignore_unused(numErased);
+                IgnoreUnused(numErased);
                 a->m_Antecedents.insert(m_Parent);
             }
 
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 317d61f..f4024af 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -130,7 +130,7 @@
                                       const IWorkloadFactory& workloadFactory,
                                       const bool IsMemoryManaged)
 {
-    boost::ignore_unused(IsMemoryManaged);
+    IgnoreUnused(IsMemoryManaged);
     OutputSlot& slot = GetOutputSlot(0);
     ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
 
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index 026e8de..7873c94 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -48,7 +48,7 @@
 {
     // these conversion layers are only inserted by the
     // optimizer and so will never be in an input graph.
-    boost::ignore_unused(visitor);
+    IgnoreUnused(visitor);
     throw armnn::Exception("ConvertFp16ToFp32Layer should never appear in an input graph");
 }
 
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index 90bd894..bbf4dbf 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -47,7 +47,7 @@
 {
     // These conversion layers are only inserted by the
     // optimizer and so will never be in an input graph.
-    boost::ignore_unused(visitor);
+    IgnoreUnused(visitor);
     throw armnn::Exception("ConvertFp32ToFp16Layer should never appear in an input graph");
 }
 
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index d0e0f03..76d33f2 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -8,8 +8,7 @@
 
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
-
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 namespace armnn
 {
@@ -53,7 +52,7 @@
 void DebugLayer::Accept(ILayerVisitor& visitor) const
 {
     // by design debug layers are never in input graphs
-    boost::ignore_unused(visitor);
+    IgnoreUnused(visitor);
     throw armnn::Exception("DebugLayer should never appear in an input graph");
 }
 
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index 90f8445..8611b9b 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -45,7 +45,7 @@
 
 void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const
 {
-    boost::ignore_unused(visitor);
+    IgnoreUnused(visitor);
     throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
 }
 
diff --git a/src/armnn/layers/InputLayer.cpp b/src/armnn/layers/InputLayer.cpp
index e0c2544..84cc43c 100644
--- a/src/armnn/layers/InputLayer.cpp
+++ b/src/armnn/layers/InputLayer.cpp
@@ -19,7 +19,7 @@
 
 std::unique_ptr<IWorkload> InputLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
-    boost::ignore_unused(factory);
+    IgnoreUnused(factory);
     return nullptr;
 }
 
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index 231b285..cf69c17 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -26,7 +26,7 @@
 
 std::unique_ptr<IWorkload> MemCopyLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
-    boost::ignore_unused(factory);
+    IgnoreUnused(factory);
     MemCopyQueueDescriptor descriptor;
 
     //This is different from other workloads. Does not get created by the workload factory.
@@ -49,7 +49,7 @@
 
 void MemCopyLayer::Accept(ILayerVisitor& visitor) const
 {
-    boost::ignore_unused(visitor);
+    IgnoreUnused(visitor);
     throw armnn::Exception("MemCopyLayer should not appear in an input graph");
 }
 
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index 3b0e6d2..80f9fda 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -26,7 +26,7 @@
 
 std::unique_ptr<IWorkload> MemImportLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
-    boost::ignore_unused(factory);
+    IgnoreUnused(factory);
     MemImportQueueDescriptor descriptor;
 
     //This is different from other workloads. Does not get created by the workload factory.
@@ -49,7 +49,7 @@
 
 void MemImportLayer::Accept(ILayerVisitor& visitor) const
 {
-    boost::ignore_unused(visitor);
+    IgnoreUnused(visitor);
     throw armnn::Exception("MemImportLayer should not appear in an input graph");
 }
 
diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp
index ce75950..f2fd29f 100644
--- a/src/armnn/layers/MergeLayer.cpp
+++ b/src/armnn/layers/MergeLayer.cpp
@@ -18,7 +18,7 @@
 
 std::unique_ptr<IWorkload> MergeLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
-    boost::ignore_unused(factory);
+    IgnoreUnused(factory);
     return nullptr;
 }
 
diff --git a/src/armnn/layers/OutputLayer.cpp b/src/armnn/layers/OutputLayer.cpp
index 4239323..f00e0a5 100644
--- a/src/armnn/layers/OutputLayer.cpp
+++ b/src/armnn/layers/OutputLayer.cpp
@@ -6,11 +6,10 @@
 
 #include "LayerCloneBase.hpp"
 
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
-#include <boost/core/ignore_unused.hpp>
-
 namespace armnn
 {
 
@@ -21,7 +20,7 @@
 
 std::unique_ptr<IWorkload> OutputLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
-    boost::ignore_unused(factory);
+    IgnoreUnused(factory);
     return nullptr;
 }
 
diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp
index 8994556..89bcfd6 100644
--- a/src/armnn/layers/OutputLayer.hpp
+++ b/src/armnn/layers/OutputLayer.hpp
@@ -28,7 +28,7 @@
                                      const IWorkloadFactory& factory,
                                      const bool IsMemoryManaged = true) override
     {
-        boost::ignore_unused(registry, factory, IsMemoryManaged);
+        IgnoreUnused(registry, factory, IsMemoryManaged);
     }
 
     /// Creates a dynamically-allocated copy of this layer.
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index 00a316c..3444afc 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -48,7 +48,7 @@
 
 void PreCompiledLayer::Accept(ILayerVisitor& visitor) const
 {
-    boost::ignore_unused(visitor);
+    IgnoreUnused(visitor);
     throw armnn::Exception("PreCompiledLayer should not appear in an input graph");
 }
 
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index 3a95258..fbf3eaa 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -6,6 +6,7 @@
 
 #include "LayerCloneBase.hpp"
 
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/TypesUtils.hpp>
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
@@ -31,7 +32,7 @@
 
 std::vector<TensorShape> ReshapeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    boost::ignore_unused(inputShapes);
+    IgnoreUnused(inputShapes);
     return std::vector<TensorShape>({ m_Param.m_TargetShape });
 }
 
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index e39caa5..ec82082 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -50,7 +50,7 @@
 
 std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    boost::ignore_unused(inputShapes);
+    IgnoreUnused(inputShapes);
     BOOST_ASSERT(inputShapes.size() == 1);
 
     TensorShape outputShape(boost::numeric_cast<unsigned int>(m_Param.m_Size.size()), m_Param.m_Size.data());
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index d38187c..ec724ba 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -35,7 +35,7 @@
 
 SpaceToBatchNdLayer* SpaceToBatchNdLayer::Clone(Graph& graph) const
 {
-    boost::ignore_unused(graph);
+    IgnoreUnused(graph);
     return CloneBase<SpaceToBatchNdLayer>(graph, m_Param, GetName());
 }
 
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index f8a6eb3..8aa0c9f 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -7,7 +7,7 @@
 #include "LayerCloneBase.hpp"
 
 #include <armnn/TypesUtils.hpp>
-
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnnUtils/DataLayoutIndexed.hpp>
 
 #include <backendsCommon/WorkloadData.hpp>
@@ -15,8 +15,6 @@
 
 #include <numeric>
 
-#include <boost/core/ignore_unused.hpp>
-
 using namespace armnnUtils;
 
 namespace armnn
@@ -37,7 +35,7 @@
 
 SpaceToDepthLayer* SpaceToDepthLayer::Clone(Graph& graph) const
 {
-    boost::ignore_unused(graph);
+    IgnoreUnused(graph);
     return CloneBase<SpaceToDepthLayer>(graph, m_Param, GetName());
 }
 
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 84a598c..f655e71 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -104,7 +104,7 @@
                                         const IWorkloadFactory& workloadFactory,
                                         const bool IsMemoryManaged)
 {
-    boost::ignore_unused(IsMemoryManaged);
+    IgnoreUnused(IsMemoryManaged);
     OutputSlot& slot = GetOutputSlot(0);
     ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
 
@@ -127,7 +127,7 @@
 
 std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    boost::ignore_unused(inputShapes);
+    IgnoreUnused(inputShapes);
     BOOST_ASSERT(inputShapes.size() ==  m_Param.GetNumViews());
     std::vector<TensorShape> outShapes;
     //Output shapes must match View shapes.
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index 1a060f9..6f793ca 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -32,7 +32,7 @@
 
 std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    boost::ignore_unused(inputShapes);
+    IgnoreUnused(inputShapes);
 
     const TensorShape& inputShape = m_Param.m_InputShape;
     const unsigned int inputNumDimensions = inputShape.GetNumDimensions();
diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp
index d0fc325..d23d1d0 100644
--- a/src/armnn/layers/StandInLayer.cpp
+++ b/src/armnn/layers/StandInLayer.cpp
@@ -16,7 +16,7 @@
 
 std::unique_ptr<IWorkload> StandInLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
-    boost::ignore_unused(factory);
+    IgnoreUnused(factory);
     // This throws in the event that it's called. We would expect that any backend that
     // "claims" to support the StandInLayer type would actually substitute it with a PrecompiledLayer
     // during graph optimization. There is no interface on the IWorkloadFactory to create a StandInWorkload.
@@ -30,7 +30,7 @@
 
 std::vector<TensorShape> StandInLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    boost::ignore_unused(inputShapes);
+    IgnoreUnused(inputShapes);
     throw Exception("Stand in layer does not support inferring output shapes");
 }
 
diff --git a/src/armnn/optimizations/ConvertConstants.hpp b/src/armnn/optimizations/ConvertConstants.hpp
index b3842e3..5e19c7b 100644
--- a/src/armnn/optimizations/ConvertConstants.hpp
+++ b/src/armnn/optimizations/ConvertConstants.hpp
@@ -11,7 +11,7 @@
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <Half.hpp>
 
@@ -72,7 +72,7 @@
 
     void Run(Graph& graph, Layer& layer) const override
     {
-        boost::ignore_unused(graph);
+        IgnoreUnused(graph);
         if (Predicate::Test(layer))
         {
             layer.OperateOnConstantTensors(Converter::Func);
diff --git a/src/armnn/optimizations/OptimizeInverseConversions.hpp b/src/armnn/optimizations/OptimizeInverseConversions.hpp
index f0d11ce..3ea4a5b 100644
--- a/src/armnn/optimizations/OptimizeInverseConversions.hpp
+++ b/src/armnn/optimizations/OptimizeInverseConversions.hpp
@@ -6,7 +6,7 @@
 
 #include "Optimization.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 namespace armnn
 {
@@ -20,7 +20,7 @@
     /// Fp16ToFp32 followed by Fp32ToFp16 or vice-versa.
     void Run(Graph& graph, InputSlot& connection) const
     {
-        boost::ignore_unused(graph);
+        IgnoreUnused(graph);
         Layer& base  = connection.GetConnectedOutputSlot()->GetOwningLayer();
         Layer& child = connection.GetOwningLayer();
 
diff --git a/src/armnn/optimizations/OptimizeInversePermutes.hpp b/src/armnn/optimizations/OptimizeInversePermutes.hpp
index 77d62a5..98e87c3 100644
--- a/src/armnn/optimizations/OptimizeInversePermutes.hpp
+++ b/src/armnn/optimizations/OptimizeInversePermutes.hpp
@@ -6,7 +6,7 @@
 
 #include "Optimization.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 namespace armnn
 {
@@ -21,7 +21,7 @@
     /// Bypasses both layers for that connection if one is the inverse of the other.
     void Run(Graph& graph, InputSlot& connection) const
     {
-        boost::ignore_unused(graph);
+        IgnoreUnused(graph);
         Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
         auto child = boost::polymorphic_downcast<PermuteType*>(&connection.GetOwningLayer());
 
diff --git a/src/armnn/optimizations/SquashEqualSiblings.hpp b/src/armnn/optimizations/SquashEqualSiblings.hpp
index d5a8a5d..bac27c0 100644
--- a/src/armnn/optimizations/SquashEqualSiblings.hpp
+++ b/src/armnn/optimizations/SquashEqualSiblings.hpp
@@ -6,7 +6,7 @@
 
 #include "Optimization.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 namespace armnn
 {
@@ -23,7 +23,7 @@
     /// the child layer, so the siblings are left unconnected (and later removed).
     void Run(Graph& graph, InputSlot& connection) const
     {
-        boost::ignore_unused(graph);
+        IgnoreUnused(graph);
         auto& child = connection.GetOwningLayer();
 
         if (!child.IsOutputUnconnected())
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 4782c43..72ad9d4 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -11,6 +11,7 @@
 #include <ResolveType.hpp>
 
 #include <armnnUtils/DataLayoutIndexed.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
@@ -18,7 +19,6 @@
 
 #include <boost/test/unit_test.hpp>
 #include <boost/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
 
 #include <utility>
 
@@ -1298,7 +1298,7 @@
     armnn::Graph& graph,
     bool biasEnabled = false)
 {
-    boost::ignore_unused(graph);
+    IgnoreUnused(graph);
 
     // To create a PreCompiled layer, create a network and Optimize it.
     armnn::Network net;
diff --git a/src/armnn/test/DebugCallbackTest.cpp b/src/armnn/test/DebugCallbackTest.cpp
index bd8bdd5..c89da83 100644
--- a/src/armnn/test/DebugCallbackTest.cpp
+++ b/src/armnn/test/DebugCallbackTest.cpp
@@ -60,7 +60,7 @@
     std::vector<unsigned int> slotIndexes;
     auto mockCallback = [&](LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensor)
     {
-        boost::ignore_unused(guid);
+        IgnoreUnused(guid);
         slotIndexes.push_back(slotIndex);
         tensorShapes.push_back(tensor->GetShape());
         callCount++;
diff --git a/src/armnn/test/EndToEndTest.cpp b/src/armnn/test/EndToEndTest.cpp
index df84be4..a8192a6 100644
--- a/src/armnn/test/EndToEndTest.cpp
+++ b/src/armnn/test/EndToEndTest.cpp
@@ -6,8 +6,8 @@
 #include <armnn/Descriptors.hpp>
 #include <armnn/IRuntime.hpp>
 #include <armnn/INetwork.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/core/ignore_unused.hpp>
 #include <boost/test/unit_test.hpp>
 
 #include <set>
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 0ca4fc4..56032ad 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -691,7 +691,7 @@
                              LayerBindingId id,
                              const char* name = nullptr) override
         {
-            boost::ignore_unused(id, name);
+            IgnoreUnused(id, name);
             auto inputLayer = boost::polymorphic_downcast<const InputLayer*>(layer);
             BOOST_TEST((inputLayer->GetBackendId() == "MockBackend"));
         }
@@ -700,7 +700,7 @@
                               LayerBindingId id,
                               const char* name = nullptr) override
         {
-            boost::ignore_unused(id, name);
+            IgnoreUnused(id, name);
             auto outputLayer = boost::polymorphic_downcast<const OutputLayer*>(layer);
             BOOST_TEST((outputLayer->GetBackendId() == "MockBackend"));
         }
@@ -709,7 +709,7 @@
                                   const ActivationDescriptor& activationDescriptor,
                                   const char* name = nullptr) override
         {
-            boost::ignore_unused(activationDescriptor, name);
+            IgnoreUnused(activationDescriptor, name);
             auto activation = boost::polymorphic_downcast<const ActivationLayer*>(layer);
             BOOST_TEST((activation->GetBackendId() == "CustomBackend"));
         }
diff --git a/src/armnn/test/OptionalTest.cpp b/src/armnn/test/OptionalTest.cpp
index fd13643..73c9643 100644
--- a/src/armnn/test/OptionalTest.cpp
+++ b/src/armnn/test/OptionalTest.cpp
@@ -7,19 +7,19 @@
 #include <armnn/Optional.hpp>
 #include <string>
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 namespace
 {
 
 void PassStringRef(armnn::Optional<std::string&> value)
 {
-    boost::ignore_unused(value);
+    armnn::IgnoreUnused(value);
 }
 
 void PassStringRefWithDefault(armnn::Optional<std::string&> value = armnn::EmptyOptional())
 {
-    boost::ignore_unused(value);
+    armnn::IgnoreUnused(value);
 }
 
 } // namespace <anonymous>
diff --git a/src/armnn/test/ProfilerTests.cpp b/src/armnn/test/ProfilerTests.cpp
index a052862..9376fa4 100644
--- a/src/armnn/test/ProfilerTests.cpp
+++ b/src/armnn/test/ProfilerTests.cpp
@@ -5,6 +5,7 @@
 
 #include <armnn/IRuntime.hpp>
 #include <armnn/TypesUtils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <boost/test/unit_test.hpp>
 #include <boost/test/tools/output_test_stream.hpp>
@@ -309,7 +310,7 @@
     profiler->Print(json);
 
     std::string output = buffer.str();
-    boost::ignore_unused(output);
+    armnn::IgnoreUnused(output);
 
     // Disable profiling here to not print out anything on stdout.
     profiler->EnableProfiling(false);
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 2dc054a..ef9b2da 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -3,15 +3,6 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <armnn/INetwork.hpp>
-#include <armnn/LayerVisitorBase.hpp>
-#include <armnn/Tensor.hpp>
-#include <armnn/Types.hpp>
-
-#include <armnnQuantizer/INetworkQuantizer.hpp>
-
-#include <QuantizeHelper.hpp>
-
 #include "../Graph.hpp"
 #include "../Network.hpp"
 #include "../NetworkQuantizerUtils.hpp"
@@ -19,7 +10,14 @@
 #include "../RangeTracker.hpp"
 #include "../../armnnQuantizer/CommandLineProcessor.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/INetwork.hpp>
+#include <armnn/LayerVisitorBase.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/Types.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnnQuantizer/INetworkQuantizer.hpp>
+#include <QuantizeHelper.hpp>
+
 #include <boost/test/unit_test.hpp>
 
 #include <unordered_map>
@@ -58,7 +56,7 @@
                          LayerBindingId id,
                          const char* name = nullptr) override
     {
-        boost::ignore_unused(id, name);
+        IgnoreUnused(id, name);
         const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
         BOOST_TEST(m_InputShape == info.GetShape());
         // Based off current default [-15.0f, 15.0f]
@@ -72,7 +70,7 @@
                           LayerBindingId id,
                           const char* name = nullptr) override
     {
-        boost::ignore_unused(id, name);
+        IgnoreUnused(id, name);
         const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
         BOOST_TEST(m_OutputShape == info.GetShape());
     }
@@ -116,7 +114,7 @@
                                         const OffsetScalePair& params,
                                         DataType dataType = DataType::QAsymmU8)
     {
-        boost::ignore_unused(dataType);
+        IgnoreUnused(dataType);
         TestQuantizationParamsImpl(info, dataType, params.first, params.second);
     }
 
@@ -212,7 +210,7 @@
     void VisitAdditionLayer(const IConnectableLayer* layer,
                             const char* name = nullptr) override
     {
-        boost::ignore_unused(name);
+        IgnoreUnused(name);
         TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
         // Based off default static range [-20.0f, 20.0f]
@@ -282,7 +280,7 @@
                               const ActivationDescriptor& descriptor,
                               const char* name = nullptr) override
     {
-        boost::ignore_unused(descriptor, name);
+        IgnoreUnused(descriptor, name);
 
         TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
@@ -385,7 +383,7 @@
                                       LayerBindingId id,
                                       const char* name = nullptr) override
         {
-            boost::ignore_unused(id, name);
+            IgnoreUnused(id, name);
             const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
             BOOST_CHECK_MESSAGE(info.GetDataType() == m_DataType,
                                 std::string(armnn::GetDataTypeName(info.GetDataType()))
@@ -543,7 +541,7 @@
                                   const ActivationDescriptor& descriptor,
                                   const char* name = nullptr) override
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off default static range [0.0f, 3.5f]
@@ -599,7 +597,7 @@
                                   const ActivationDescriptor& descriptor,
                                   const char* name = nullptr) override
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off default static range [-1.0f, 1.0f]
@@ -654,7 +652,7 @@
                               const ActivationDescriptor& descriptor,
                               const char* name = nullptr) override
     {
-        boost::ignore_unused(descriptor, name);
+        IgnoreUnused(descriptor, name);
         TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
         // Based off default static range [-5.0f, 15.0f]
@@ -725,7 +723,7 @@
                                   const ActivationDescriptor& descriptor,
                                   const char* name = nullptr) override
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off default static range [-15.0f, 15.0f]
@@ -779,7 +777,7 @@
                                   const ActivationDescriptor& descriptor,
                                   const char* name = nullptr) override
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off default static range [-15.0f, 15.0f]
@@ -839,7 +837,7 @@
                                           const ConstTensor& gamma,
                                           const char* name = nullptr) override
         {
-            boost::ignore_unused(desc, name);
+            IgnoreUnused(desc, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off default static range [-15.0f, 15.0f]
@@ -924,7 +922,7 @@
                                             const DepthToSpaceDescriptor& desc,
                                             const char* name = nullptr)
         {
-            boost::ignore_unused(desc, name);
+            IgnoreUnused(desc, name);
             const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
 
             const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -1116,7 +1114,7 @@
                                       const Optional<ConstTensor>& biases,
                                       const char* name = nullptr) override
         {
-            boost::ignore_unused(desc, name);
+            IgnoreUnused(desc, name);
             TestQuantizationOnLayersWithBiases(layer, weights, biases);
         }
     };
@@ -1173,7 +1171,7 @@
                                      const Optional<ConstTensor>& biases,
                                      const char *name = nullptr) override
         {
-            boost::ignore_unused(convolution2dDescriptor, name);
+            IgnoreUnused(convolution2dDescriptor, name);
             TestQuantizationOnLayersWithBiases(layer, weights, biases);
         }
     };
@@ -1259,7 +1257,7 @@
                                               const Optional<ConstTensor>& biases,
                                               const char *name = nullptr) override
         {
-            boost::ignore_unused(convolution2dDescriptor, name);
+            IgnoreUnused(convolution2dDescriptor, name);
             TestQuantizationOnLayersWithBiases(layer, weights, biases);
         }
     };
@@ -1343,7 +1341,7 @@
                                                      const InstanceNormalizationDescriptor& descriptor,
                                                      const char* name = nullptr)
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
 
             const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -1411,7 +1409,7 @@
                                   const SoftmaxDescriptor& descriptor,
                                   const char* name = nullptr) override
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -1503,7 +1501,7 @@
                                const SoftmaxDescriptor& descriptor,
                                const char* name = nullptr) override
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off default static range [0.0f, 1.0f]
@@ -1636,7 +1634,7 @@
                                const PermuteDescriptor& desc,
                                const char* name = nullptr) override
         {
-            boost::ignore_unused(desc, name);
+            IgnoreUnused(desc, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -1691,7 +1689,7 @@
                                       const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                       const char* name = nullptr) override
         {
-            boost::ignore_unused(spaceToBatchNdDescriptor, name);
+            IgnoreUnused(spaceToBatchNdDescriptor, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -1804,7 +1802,7 @@
                                  const Pooling2dDescriptor& desc,
                                  const char* name = nullptr) override
         {
-            boost::ignore_unused(desc, name);
+            IgnoreUnused(desc, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -1873,7 +1871,7 @@
                                 const ConstTensor& input,
                                 const char* name = nullptr) override
         {
-            boost::ignore_unused(input, name);
+            IgnoreUnused(input, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off the range of values in the const tensor used for the test: [-2.0f, 6.0f]
@@ -1946,20 +1944,20 @@
                              LayerBindingId id,
                              const char* name = nullptr) override
         {
-            boost::ignore_unused(layer, id, name);
+            IgnoreUnused(layer, id, name);
         }
 
         void VisitOutputLayer(const IConnectableLayer* layer,
                               LayerBindingId id,
                               const char* name = nullptr) override
         {
-            boost::ignore_unused(layer, id, name);
+            IgnoreUnused(layer, id, name);
         }
         void VisitArgMinMaxLayer(const IConnectableLayer* layer,
                                  const ArgMinMaxDescriptor& argMinMaxDescriptor,
                                  const char* name = nullptr) override
         {
-                boost::ignore_unused(argMinMaxDescriptor, name);
+                IgnoreUnused(argMinMaxDescriptor, name);
                 TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
 
                 TestQuantizationParams(outputInfo,
@@ -2034,7 +2032,7 @@
                                   const ComparisonDescriptor& descriptor,
                                   const char* name = nullptr) override
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -2106,19 +2104,19 @@
                              LayerBindingId id,
                              const char* name = nullptr) override
         {
-            boost::ignore_unused(layer, id, name);
+            IgnoreUnused(layer, id, name);
         }
         void VisitOutputLayer(const IConnectableLayer* layer,
                               LayerBindingId id,
                               const char* name = nullptr) override
         {
-            boost::ignore_unused(layer, id, name);
+            IgnoreUnused(layer, id, name);
         }
         void VisitConcatLayer(const IConnectableLayer* layer,
                               const OriginsDescriptor& originsDescriptor,
                               const char* name = nullptr) override
         {
-            boost::ignore_unused(originsDescriptor, name);
+            IgnoreUnused(originsDescriptor, name);
             TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
             TestQuantizationParams(
                 outputInfo, {60.8f / g_AsymmU8QuantizationBase, 65},
@@ -2214,7 +2212,7 @@
                                        const ReshapeDescriptor& reshapeDescriptor,
                                        const char* name = nullptr) override
         {
-            boost::ignore_unused(reshapeDescriptor, name);
+            IgnoreUnused(reshapeDescriptor, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -2269,7 +2267,7 @@
                                         const SplitterDescriptor& desc,
                                         const char* name = nullptr)
         {
-            boost::ignore_unused(desc, name);
+            IgnoreUnused(desc, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -2325,7 +2323,7 @@
                                       const ResizeDescriptor& resizeDescriptor,
                                       const char* name = nullptr) override
         {
-            boost::ignore_unused(resizeDescriptor, name);
+            IgnoreUnused(resizeDescriptor, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -2382,7 +2380,7 @@
                                             const StridedSliceDescriptor& desc,
                                             const char* name = nullptr)
         {
-            boost::ignore_unused(desc, name);
+            IgnoreUnused(desc, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -2437,7 +2435,7 @@
                                       const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                       const char* name = nullptr) override
         {
-            boost::ignore_unused(batchToSpaceNdDescriptor, name);
+            IgnoreUnused(batchToSpaceNdDescriptor, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -2499,7 +2497,7 @@
                              LayerBindingId id,
                              const char* name = nullptr) override
         {
-            boost::ignore_unused(id, name);
+            IgnoreUnused(id, name);
             const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
 
             switch (id)
@@ -2526,7 +2524,7 @@
                               LayerBindingId id,
                               const char* name = nullptr) override
         {
-            boost::ignore_unused(id, name);
+            IgnoreUnused(id, name);
             const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
             BOOST_TEST(m_OutputShape == info.GetShape());
         }
@@ -2534,7 +2532,7 @@
         void VisitPreluLayer(const IConnectableLayer* layer,
                              const char* name = nullptr) override
         {
-            boost::ignore_unused(name);
+            IgnoreUnused(name);
             const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
             TestQuantizationParams(info,
                                    { 30.0f / g_AsymmU8QuantizationBase, 128 }, // QASymmU8
@@ -2617,7 +2615,7 @@
                                               const Optional<ConstTensor>& biases,
                                               const char *name = nullptr) override
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             TestQuantizationOnLayersWithBiases(layer, weights, biases);
         }
     };
@@ -2704,20 +2702,20 @@
                              LayerBindingId id,
                              const char* name = nullptr) override
         {
-            boost::ignore_unused(layer, id, name);
+            IgnoreUnused(layer, id, name);
         }
         void VisitOutputLayer(const IConnectableLayer* layer,
                               LayerBindingId id,
                               const char* name = nullptr) override
         {
-            boost::ignore_unused(layer, id, name);
+            IgnoreUnused(layer, id, name);
         }
 
         void VisitStackLayer(const IConnectableLayer* layer,
                              const StackDescriptor& descriptor,
                              const char* name = nullptr) override
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
 
             TestQuantizationParams(outputInfo,
@@ -2784,7 +2782,7 @@
                                      const SliceDescriptor& desc,
                                      const char* name = nullptr)
         {
-            boost::ignore_unused(desc, name);
+            IgnoreUnused(desc, name);
             const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
 
             const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -2876,7 +2874,7 @@
                          LayerBindingId id,
                          const char* name = nullptr) override
     {
-        boost::ignore_unused(id, name);
+        IgnoreUnused(id, name);
         const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
         BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType));
         BOOST_TEST(m_InputShape == info.GetShape());
@@ -2886,7 +2884,7 @@
                           LayerBindingId id,
                           const char* name = nullptr) override
     {
-        boost::ignore_unused(id, name);
+        IgnoreUnused(id, name);
         const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
         BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType));
         BOOST_TEST(m_OutputShape == info.GetShape());
@@ -2895,14 +2893,14 @@
     void VisitQuantizeLayer(const IConnectableLayer* layer,
                             const char* name = nullptr) override
     {
-        boost::ignore_unused(layer, name);
+        IgnoreUnused(layer, name);
         m_VisitedQuantizeLayer = true;
     }
 
     void VisitDequantizeLayer(const IConnectableLayer* layer,
                               const char* name = nullptr) override
     {
-        boost::ignore_unused(layer, name);
+        IgnoreUnused(layer, name);
         m_VisitedDequantizeLayer = true;
     }
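
For reviewers unfamiliar with the new utility: the replacement is a trivial
variadic no-op, so every boost::ignore_unused call site maps across 1:1. A
minimal sketch of what armnn/utility/IgnoreUnused.hpp is assumed to provide
(not a verbatim copy of the header):

    // IgnoreUnused.hpp (sketch)
    #pragma once

    namespace armnn
    {

    // Empty variadic inline function: Ts&&... binds to lvalues, rvalues and
    // const references alike, and the empty body is optimised away entirely,
    // so multi-argument calls such as IgnoreUnused(layer, id, name) cost
    // nothing at runtime, exactly like the Boost version they replace.
    template <typename... Ts>
    inline void IgnoreUnused(Ts&&...) {}

    } // namespace armnn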
 
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index d9ed18b..e3cbe03 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -158,8 +158,8 @@
 
     // These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to its parameters:
     // the variables are written to but not otherwise read, so the compiler still flags them as unused.
-    boost::ignore_unused(dubious);
-    boost::ignore_unused(suppressed);
+    IgnoreUnused(dubious);
+    IgnoreUnused(suppressed);
 }
 #endif // WITH_VALGRIND
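
The RuntimeTests hunk above is the one non-mechanical case in this change:
the warning comes from variables a macro writes to, not from unused function
parameters. A standalone sketch of the pattern (the names leaked and
reachable and the surrounding function are assumptions; only dubious and
suppressed appear in the hunk):

    #include <valgrind/memcheck.h>
    #include <armnn/utility/IgnoreUnused.hpp>

    void CountLeaks()
    {
        unsigned long leaked = 0, dubious = 0, reachable = 0, suppressed = 0;

        // The macro expands to code that assigns all four out-parameters...
        VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);

        // ...but any counter not checked afterwards would trigger
        // -Wunused-but-set-variable, hence the explicit "use":
        armnn::IgnoreUnused(dubious, suppressed);
    }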
 
diff --git a/src/armnn/test/TensorHandleStrategyTest.cpp b/src/armnn/test/TensorHandleStrategyTest.cpp
index 3e59c0b..976e58e 100644
--- a/src/armnn/test/TensorHandleStrategyTest.cpp
+++ b/src/armnn/test/TensorHandleStrategyTest.cpp
@@ -16,10 +16,11 @@
 
 #include <Network.hpp>
 
+#include <armnn/utility/IgnoreUnused.hpp>
+
 #include <vector>
 #include <string>
 
-#include <boost/core/ignore_unused.hpp>
 
 using namespace armnn;
 
@@ -44,20 +45,20 @@
                                                          TensorShape const& subTensorShape,
                                                          unsigned int const* subTensorOrigin) const override
     {
-        boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+        IgnoreUnused(parent, subTensorShape, subTensorOrigin);
         return nullptr;
     }
 
     std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override
     {
-        boost::ignore_unused(tensorInfo);
+        IgnoreUnused(tensorInfo);
         return nullptr;
     }
 
     std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                       DataLayout dataLayout) const override
     {
-        boost::ignore_unused(tensorInfo, dataLayout);
+        IgnoreUnused(tensorInfo, dataLayout);
         return nullptr;
     }
 
@@ -85,20 +86,20 @@
                                                          TensorShape const& subTensorShape,
                                                          unsigned int const* subTensorOrigin) const override
     {
-        boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+        IgnoreUnused(parent, subTensorShape, subTensorOrigin);
         return nullptr;
     }
 
     std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override
     {
-        boost::ignore_unused(tensorInfo);
+        IgnoreUnused(tensorInfo);
         return nullptr;
     }
 
     std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                       DataLayout dataLayout) const override
     {
-        boost::ignore_unused(tensorInfo, dataLayout);
+        IgnoreUnused(tensorInfo, dataLayout);
         return nullptr;
     }
 
@@ -123,7 +124,7 @@
 
     IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
     {
-        boost::ignore_unused(memoryManager);
+        IgnoreUnused(memoryManager);
         return IWorkloadFactoryPtr{};
     }
 
@@ -164,7 +165,7 @@
 
     IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
     {
-        boost::ignore_unused(memoryManager);
+        IgnoreUnused(memoryManager);
         return IWorkloadFactoryPtr{};
     }
 
@@ -202,7 +203,7 @@
 
     IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
     {
-        boost::ignore_unused(memoryManager);
+        IgnoreUnused(memoryManager);
         return IWorkloadFactoryPtr{};
     }
 
@@ -239,7 +240,7 @@
 
     IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
     {
-        boost::ignore_unused(memoryManager);
+        IgnoreUnused(memoryManager);
         return IWorkloadFactoryPtr{};
     }
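
A closing note on the mechanics, since the rest of this change is purely
mechanical: the variadic utility also supersedes the classic cast idiom.
A hypothetical stub for illustration (not code from this patch):

    #include <armnn/utility/IgnoreUnused.hpp>

    void Stub(int tensorInfo, int dataLayout)
    {
        // Pre-change:  boost::ignore_unused(tensorInfo, dataLayout);
        // Cast idiom:  (void)tensorInfo; (void)dataLayout;  // one cast per variable
        armnn::IgnoreUnused(tensorInfo, dataLayout);         // one call, any arity
    }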