IVGCVSW-4482 Remove boost::ignore_unused
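
This change swaps every use of boost::ignore_unused for armnn's own
IgnoreUnused utility and replaces the <boost/core/ignore_unused.hpp>
includes with <armnn/utility/IgnoreUnused.hpp>, as one step towards
reducing the dependency on Boost. The typical pattern being served is
a value consumed only by an assert (e.g. numErased in Graph.hpp) or a
parameter kept for interface compatibility, either of which would
otherwise trigger unused warnings in release builds.

For reference, the replacement is presumably a minimal variadic no-op
along these lines (a sketch only; the actual
include/armnn/utility/IgnoreUnused.hpp is not part of this patch):

    #pragma once

    namespace armnn
    {
    // Sketch of the assumed utility: accepts any number of arguments
    // of any type and does nothing, marking them as "used" so the
    // compiler emits no unused-parameter/unused-variable warnings.
    template<typename... Ts>
    inline void IgnoreUnused(Ts&&...) {}
    } // namespace armnn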

!referencetests:229377

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Ia9b360b4a057fe7bbce5b268092627c09a0dba82
diff --git a/src/armnn/DynamicQuantizationVisitor.cpp b/src/armnn/DynamicQuantizationVisitor.cpp
index 4b1dce0..862a926 100644
--- a/src/armnn/DynamicQuantizationVisitor.cpp
+++ b/src/armnn/DynamicQuantizationVisitor.cpp
@@ -6,7 +6,7 @@
 #include "DynamicQuantizationVisitor.hpp"
 #include "NetworkUtils.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/Descriptors.hpp>
 #include <armnn/Types.hpp>
 
@@ -85,7 +85,7 @@
 
 void DynamicQuantizationVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
     SetRange(layer, 0, -20.f, 20.f);
     AddToCalibratedLayers(layer);
 }
@@ -98,12 +98,12 @@
                                                               const ConstTensor& gamma,
                                                               const char* name)
 {
-    boost::ignore_unused(desc);
-    boost::ignore_unused(mean);
-    boost::ignore_unused(variance);
-    boost::ignore_unused(beta);
-    boost::ignore_unused(gamma);
-    boost::ignore_unused(name);
+    IgnoreUnused(desc);
+    IgnoreUnused(mean);
+    IgnoreUnused(variance);
+    IgnoreUnused(beta);
+    IgnoreUnused(gamma);
+    IgnoreUnused(name);
     SetRange(layer, 0, -15.0f, 15.0f);
     AddToCalibratedLayers(layer);
 }
@@ -114,10 +114,10 @@
                                                          const Optional<ConstTensor>& biases,
                                                          const char* name)
 {
-    boost::ignore_unused(convolution2dDescriptor);
-    boost::ignore_unused(weights);
-    boost::ignore_unused(biases);
-    boost::ignore_unused(name);
+    IgnoreUnused(convolution2dDescriptor);
+    IgnoreUnused(weights);
+    IgnoreUnused(biases);
+    IgnoreUnused(name);
     SetRange(layer, 0, -15.0f, 15.0f);
     AddToCalibratedLayers(layer);
 }
@@ -128,10 +128,10 @@
                                                                   const Optional<ConstTensor>& biases,
                                                                   const char* name)
 {
-    boost::ignore_unused(desc);
-    boost::ignore_unused(weights);
-    boost::ignore_unused(biases);
-    boost::ignore_unused(name);
+    IgnoreUnused(desc);
+    IgnoreUnused(weights);
+    IgnoreUnused(biases);
+    IgnoreUnused(name);
     SetRange(layer, 0, -15.0f, 15.0f);
     AddToCalibratedLayers(layer);
 }
@@ -140,7 +140,7 @@
                                                       const ActivationDescriptor& activationDescriptor,
                                                       const char* name)
 {
-    boost::ignore_unused(name, activationDescriptor);
+    IgnoreUnused(name, activationDescriptor);
     switch (activationDescriptor.m_Function)
     {
         // Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
@@ -172,10 +172,10 @@
                                                           const Optional<ConstTensor>& biases,
                                                           const char *name)
 {
-    boost::ignore_unused(desc);
-    boost::ignore_unused(weights);
-    boost::ignore_unused(biases);
-    boost::ignore_unused(name);
+    IgnoreUnused(desc);
+    IgnoreUnused(weights);
+    IgnoreUnused(biases);
+    IgnoreUnused(name);
     SetRange(layer, 0, -15.0f, 15.0f);
     AddToCalibratedLayers(layer);
 }
@@ -184,8 +184,8 @@
                                                    const PermuteDescriptor& permuteDescriptor,
                                                    const char* name)
 {
-    boost::ignore_unused(permuteDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(permuteDescriptor);
+    IgnoreUnused(name);
     AddToNonCalibratedLayers(layer);
 }
 
@@ -193,8 +193,8 @@
                                                           const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                           const char* name)
 {
-    boost::ignore_unused(spaceToBatchNdDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(spaceToBatchNdDescriptor);
+    IgnoreUnused(name);
     AddToNonCalibratedLayers(layer);
 }
 
@@ -202,8 +202,8 @@
                                                      const Pooling2dDescriptor& pooling2dDescriptor,
                                                      const char* name)
 {
-    boost::ignore_unused(pooling2dDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(pooling2dDescriptor);
+    IgnoreUnused(name);
     AddToNonCalibratedLayers(layer);
 }
 
@@ -211,8 +211,8 @@
                                                    const SoftmaxDescriptor& softmaxDescriptor,
                                                    const char* name)
 {
-    boost::ignore_unused(softmaxDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(softmaxDescriptor);
+    IgnoreUnused(name);
     SetRange(layer, 0, 0.f, 1.f);
     AddToCalibratedLayers(layer);
 }
@@ -221,7 +221,7 @@
                                                     const ConstTensor& input,
                                                     const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     if (input.GetDataType() != DataType::Float32)
     {
@@ -249,8 +249,8 @@
                                                   const ConcatDescriptor& originsDescriptor,
                                                   const char* name)
 {
-    boost::ignore_unused(name);
-    boost::ignore_unused(originsDescriptor);
+    IgnoreUnused(name);
+    IgnoreUnused(originsDescriptor);
     float min = std::numeric_limits<float>::max();
     float max = std::numeric_limits<float>::lowest();
     for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
@@ -270,8 +270,8 @@
                                                    const ReshapeDescriptor& reshapeDescriptor,
                                                    const char* name)
 {
-    boost::ignore_unused(reshapeDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(reshapeDescriptor);
+    IgnoreUnused(name);
     AddToNonCalibratedLayers(layer);
 }
 
@@ -279,8 +279,8 @@
                                                     const SplitterDescriptor& splitterDescriptor,
                                                     const char* name)
 {
-    boost::ignore_unused(splitterDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(splitterDescriptor);
+    IgnoreUnused(name);
     AddToNonCalibratedLayers(layer);
 }
 
@@ -288,8 +288,8 @@
                                                           const ResizeBilinearDescriptor& resizeDesc,
                                                           const char* name)
 {
-    boost::ignore_unused(resizeDesc);
-    boost::ignore_unused(name);
+    IgnoreUnused(resizeDesc);
+    IgnoreUnused(name);
     AddToNonCalibratedLayers(layer);
 }
 
@@ -297,8 +297,8 @@
                                                         const StridedSliceDescriptor& stridedSliceDescriptor,
                                                         const char* name)
 {
-    boost::ignore_unused(stridedSliceDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(stridedSliceDescriptor);
+    IgnoreUnused(name);
     AddToNonCalibratedLayers(layer);
 }
 
@@ -306,23 +306,23 @@
                                                           const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                                           const char* name)
 {
-    boost::ignore_unused(batchToSpaceNdDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(batchToSpaceNdDescriptor);
+    IgnoreUnused(name);
     AddToNonCalibratedLayers(layer);
 }
 
 void DynamicQuantizationVisitor::VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
 {
-    boost::ignore_unused(id);
-    boost::ignore_unused(name);
+    IgnoreUnused(id);
+    IgnoreUnused(name);
     SetRange(layer, 0, -0.0f, 0.0f);
     AddToCalibratedLayers(layer);
 }
 
 void DynamicQuantizationVisitor::VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
 {
-    boost::ignore_unused(id);
-    boost::ignore_unused(name);
+    IgnoreUnused(id);
+    IgnoreUnused(name);
     AddToNonCalibratedLayers(layer);
     m_OutputLayers.push_back(id);
 }
diff --git a/src/armnn/ExecutionFrame.cpp b/src/armnn/ExecutionFrame.cpp
index 58005e9..92a7990 100644
--- a/src/armnn/ExecutionFrame.cpp
+++ b/src/armnn/ExecutionFrame.cpp
@@ -13,7 +13,7 @@
 
 IExecutionFrame* ExecutionFrame::ExecuteWorkloads(IExecutionFrame* previousFrame)
 {
-    boost::ignore_unused(previousFrame);
+    IgnoreUnused(previousFrame);
     for (auto& workload: m_WorkloadQueue)
     {
         workload->Execute();
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index 8e7f75b..0d326ad 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -435,7 +435,7 @@
     const SubgraphView::Layers& substituteSubgraphLayers = substituteSubgraph.GetLayers();
     std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](Layer* layer)
     {
-        boost::ignore_unused(layer);
+        IgnoreUnused(layer);
         BOOST_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
                          "Substitute layer is not a member of graph");
     });
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index c65f12b..63bc8d0 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -297,7 +297,7 @@
         graph.m_Layers.erase(layerIt);
 
         const size_t numErased = graph.m_PosInGraphMap.erase(this);
-        boost::ignore_unused(numErased);
+        IgnoreUnused(numErased);
         BOOST_ASSERT(numErased == 1);
     }
 
@@ -355,7 +355,7 @@
     ~LayerInGraph() override
     {
         const size_t numErased = m_Graph->m_InputIds.erase(GetBindingId());
-        boost::ignore_unused(numErased);
+        IgnoreUnused(numErased);
         BOOST_ASSERT(numErased == 1);
     }
 };
@@ -381,7 +381,7 @@
     ~LayerInGraph() override
     {
         const size_t numErased = m_Graph->m_OutputIds.erase(GetBindingId());
-        boost::ignore_unused(numErased);
+        IgnoreUnused(numErased);
         BOOST_ASSERT(numErased == 1);
     }
 };
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index 1f63d6e..9de812c 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -196,7 +196,7 @@
 , m_BackendHint(EmptyOptional())
 , m_Guid(profiling::ProfilingService::Instance().NextGuid())
 {
-    boost::ignore_unused(layout);
+    IgnoreUnused(layout);
     m_InputSlots.reserve(numInputSlots);
     for (unsigned int i = 0; i < numInputSlots; ++i)
     {
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index 5ad38f0..ec35d71 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -17,6 +17,7 @@
 #include <armnn/Types.hpp>
 #include <armnn/Tensor.hpp>
 #include <armnn/INetwork.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <algorithm>
 #include <memory>
@@ -27,7 +28,6 @@
 #include <list>
 
 #include <boost/numeric/conversion/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
 #include <boost/cast.hpp>
 
 namespace armnn
diff --git a/src/armnn/LayerSupportCommon.hpp b/src/armnn/LayerSupportCommon.hpp
index e0c6b80..9252b3b 100644
--- a/src/armnn/LayerSupportCommon.hpp
+++ b/src/armnn/LayerSupportCommon.hpp
@@ -4,13 +4,12 @@
 //
 #pragma once
 
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/DescriptorsFwd.hpp>
 #include <armnn/Types.hpp>
 #include <armnn/Tensor.hpp>
 #include <armnn/Optional.hpp>
 
-#include <boost/core/ignore_unused.hpp>
-
 namespace armnn
 {
 
@@ -54,23 +53,23 @@
 template<typename ... Params>
 bool TrueFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(reasonIfUnsupported);
-    boost::ignore_unused(params...);
+    IgnoreUnused(reasonIfUnsupported);
+    IgnoreUnused(params...);
     return true;
 }
 
 template<typename ... Params>
 bool FalseFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(reasonIfUnsupported);
-    boost::ignore_unused(params...);
+    IgnoreUnused(reasonIfUnsupported);
+    IgnoreUnused(params...);
     return false;
 }
 
 template<typename ... Params>
 bool FalseFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(params...);
+    IgnoreUnused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type");
     return false;
 }
@@ -78,7 +77,7 @@
 template<typename ... Params>
 bool FalseFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(params...);
+    IgnoreUnused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type");
     return false;
 }
@@ -86,7 +85,7 @@
 template<typename ... Params>
 bool FalseFuncU8(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(params...);
+    IgnoreUnused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with 8-bit data type");
     return false;
 }
@@ -94,7 +93,7 @@
 template<typename ... Params>
 bool FalseFuncI32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(params...);
+    IgnoreUnused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with int32 data type");
     return false;
 }
@@ -102,7 +101,7 @@
 template<typename ... Params>
 bool FalseInputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(params...);
+    IgnoreUnused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type input");
     return false;
 }
@@ -110,7 +109,7 @@
 template<typename ... Params>
 bool FalseInputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(params...);
+    IgnoreUnused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type input");
     return false;
 }
@@ -118,7 +117,7 @@
 template<typename ... Params>
 bool FalseOutputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(params...);
+    IgnoreUnused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type output");
     return false;
 }
@@ -126,7 +125,7 @@
 template<typename ... Params>
 bool FalseOutputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
-    boost::ignore_unused(params...);
+    IgnoreUnused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type output");
     return false;
 }
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 2e95dd8..69e42ba 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -311,7 +311,7 @@
     std::string reasonIfUnsupported;
     BOOST_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(layer, {}, reasonIfUnsupported),
         "Factory does not support layer");
-    boost::ignore_unused(reasonIfUnsupported);
+    IgnoreUnused(reasonIfUnsupported);
     return *workloadFactory;
 }
 
diff --git a/src/armnn/Logging.cpp b/src/armnn/Logging.cpp
index 2c07751..ba40123 100644
--- a/src/armnn/Logging.cpp
+++ b/src/armnn/Logging.cpp
@@ -2,9 +2,9 @@
 // Copyright © 2019 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
 #include <armnn/Logging.hpp>
-
-
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/Utils.hpp>
 
 #if defined(_MSC_VER)
@@ -20,7 +20,6 @@
 #endif
 
 #include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
 #include <iostream>
 
 namespace armnn
@@ -107,14 +106,14 @@
 public:
     void Consume(const std::string& s) override
     {
-        boost::ignore_unused(s);
+        IgnoreUnused(s);
 #if defined(_MSC_VER)
         OutputDebugString(s.c_str());
         OutputDebugString("\n");
 #elif defined(__ANDROID__)
         __android_log_write(ANDROID_LOG_DEBUG, "armnn", s.c_str());
 #else
-        boost::ignore_unused(s);
+        IgnoreUnused(s);
 #endif
     }
 };
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 50a7df6..3663727 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -22,6 +22,7 @@
 #include <armnn/TypesUtils.hpp>
 #include <armnn/BackendRegistry.hpp>
 #include <armnn/Logging.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <ProfilingService.hpp>
 
@@ -628,7 +629,7 @@
                                                             OutputSlot& slot,
                                                             TensorHandleFactoryRegistry& registry)
 {
-    boost::ignore_unused(backends, slot, registry);
+    IgnoreUnused(backends, slot, registry);
     return ITensorHandleFactory::DeferredFactoryId;
 }
 
diff --git a/src/armnn/OverrideInputRangeVisitor.cpp b/src/armnn/OverrideInputRangeVisitor.cpp
index d047c5b..d0453fe 100644
--- a/src/armnn/OverrideInputRangeVisitor.cpp
+++ b/src/armnn/OverrideInputRangeVisitor.cpp
@@ -7,8 +7,9 @@
 #include "NetworkQuantizerUtils.hpp"
 #include "Layer.hpp"
 
+#include <armnn/utility/IgnoreUnused.hpp>
+
 #include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
 
 namespace armnn
 {
@@ -23,7 +24,7 @@
 
 void OverrideInputRangeVisitor::VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
     if (m_LayerId == id)
     {
         m_Ranges.SetRange(layer, 0, m_MinMaxRange.first, m_MinMaxRange.second);
diff --git a/src/armnn/Profiling.cpp b/src/armnn/Profiling.cpp
index 1cd21ab..b1aedaa 100644
--- a/src/armnn/Profiling.cpp
+++ b/src/armnn/Profiling.cpp
@@ -5,6 +5,7 @@
 #include "Profiling.hpp"
 
 #include <armnn/BackendId.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include "JsonPrinter.hpp"
 
@@ -20,7 +21,7 @@
 #include <stack>
 
 #include <boost/algorithm/string.hpp>
-#include <boost/core/ignore_unused.hpp>
+
 namespace armnn
 {
 
@@ -223,7 +224,7 @@
     m_Parents.pop();
 
     Event* parent = m_Parents.empty() ? nullptr : m_Parents.top();
-    boost::ignore_unused(parent);
+    IgnoreUnused(parent);
     BOOST_ASSERT(event->GetParentEvent() == parent);
 
 #if ARMNN_STREAMLINE_ENABLED
diff --git a/src/armnn/Profiling.hpp b/src/armnn/Profiling.hpp
index 4afd691..e6ea090 100644
--- a/src/armnn/Profiling.hpp
+++ b/src/armnn/Profiling.hpp
@@ -6,6 +6,7 @@
 
 #include "ProfilingEvent.hpp"
 
+#include <armnn/utility/IgnoreUnused.hpp>
 #include "armnn/IProfiler.hpp"
 
 #include "WallClockTimer.hpp"
@@ -17,8 +18,6 @@
 #include <stack>
 #include <map>
 
-#include <boost/core/ignore_unused.hpp>
-
 namespace armnn
 {
 
@@ -141,7 +140,7 @@
 
     void ConstructNextInVector(std::vector<InstrumentPtr>& instruments)
     {
-        boost::ignore_unused(instruments);
+        IgnoreUnused(instruments);
     }
 
     template<typename Arg, typename... Args>
diff --git a/src/armnn/StaticRangeVisitor.cpp b/src/armnn/StaticRangeVisitor.cpp
index 81428c1..0e820c3 100644
--- a/src/armnn/StaticRangeVisitor.cpp
+++ b/src/armnn/StaticRangeVisitor.cpp
@@ -5,7 +5,7 @@
 
 #include "StaticRangeVisitor.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/Descriptors.hpp>
 #include <armnn/Types.hpp>
 
@@ -31,7 +31,7 @@
 
 void StaticRangeVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
     SetRange(layer, 0, -20.f, 20.f);
 }
 
@@ -43,12 +43,12 @@
                                                       const ConstTensor& gamma,
                                                       const char* name)
 {
-    boost::ignore_unused(desc);
-    boost::ignore_unused(mean);
-    boost::ignore_unused(variance);
-    boost::ignore_unused(beta);
-    boost::ignore_unused(gamma);
-    boost::ignore_unused(name);
+    IgnoreUnused(desc);
+    IgnoreUnused(mean);
+    IgnoreUnused(variance);
+    IgnoreUnused(beta);
+    IgnoreUnused(gamma);
+    IgnoreUnused(name);
     SetRange(layer, 0, -15.0f, 15.0f);
 }
 
@@ -58,10 +58,10 @@
                                                  const Optional<ConstTensor>& biases,
                                                  const char* name)
 {
-    boost::ignore_unused(convolution2dDescriptor);
-    boost::ignore_unused(weights);
-    boost::ignore_unused(biases);
-    boost::ignore_unused(name);
+    IgnoreUnused(convolution2dDescriptor);
+    IgnoreUnused(weights);
+    IgnoreUnused(biases);
+    IgnoreUnused(name);
     SetRange(layer, 0, -15.0f, 15.0f);
 }
 
@@ -71,10 +71,10 @@
                                                           const Optional<ConstTensor>& biases,
                                                           const char* name)
 {
-    boost::ignore_unused(desc);
-    boost::ignore_unused(weights);
-    boost::ignore_unused(biases);
-    boost::ignore_unused(name);
+    IgnoreUnused(desc);
+    IgnoreUnused(weights);
+    IgnoreUnused(biases);
+    IgnoreUnused(name);
     SetRange(layer, 0, -15.0f, 15.0f);
 }
 
@@ -82,7 +82,7 @@
                                               const ActivationDescriptor& activationDescriptor,
                                               const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
     switch (activationDescriptor.m_Function)
     {
         // Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
@@ -113,10 +113,10 @@
                                                   const Optional<ConstTensor>& biases,
                                                   const char *name)
 {
-    boost::ignore_unused(desc);
-    boost::ignore_unused(weights);
-    boost::ignore_unused(biases);
-    boost::ignore_unused(name);
+    IgnoreUnused(desc);
+    IgnoreUnused(weights);
+    IgnoreUnused(biases);
+    IgnoreUnused(name);
     SetRange(layer, 0, -15.0f, 15.0f);
 }
 
@@ -124,8 +124,8 @@
                                            const PermuteDescriptor& permuteDescriptor,
                                            const char* name)
 {
-    boost::ignore_unused(permuteDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(permuteDescriptor);
+    IgnoreUnused(name);
     ForwardParentParameters(layer);
 }
 
@@ -133,8 +133,8 @@
                                                   const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                   const char* name)
 {
-    boost::ignore_unused(spaceToBatchNdDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(spaceToBatchNdDescriptor);
+    IgnoreUnused(name);
     ForwardParentParameters(layer);
 }
 
@@ -142,8 +142,8 @@
                                              const Pooling2dDescriptor& pooling2dDescriptor,
                                              const char* name)
 {
-    boost::ignore_unused(pooling2dDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(pooling2dDescriptor);
+    IgnoreUnused(name);
     ForwardParentParameters(layer);
 }
 
@@ -151,8 +151,8 @@
                                            const SoftmaxDescriptor& softmaxDescriptor,
                                            const char* name)
 {
-    boost::ignore_unused(softmaxDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(softmaxDescriptor);
+    IgnoreUnused(name);
     SetRange(layer, 0, 0.f, 1.f);
 }
 
@@ -160,8 +160,8 @@
                                           const OriginsDescriptor& originsDescriptor,
                                           const char* name)
 {
-    boost::ignore_unused(originsDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(originsDescriptor);
+    IgnoreUnused(name);
     float min = std::numeric_limits<float>::max();
     float max = std::numeric_limits<float>::lowest();
     for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
@@ -180,7 +180,7 @@
                                             const ConstTensor& input,
                                             const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     if (input.GetDataType() != DataType::Float32)
     {
@@ -208,8 +208,8 @@
                                            const ReshapeDescriptor& reshapeDescriptor,
                                            const char* name)
 {
-    boost::ignore_unused(reshapeDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(reshapeDescriptor);
+    IgnoreUnused(name);
     ForwardParentParameters(layer);
 }
 
@@ -217,8 +217,8 @@
                                             const SplitterDescriptor& splitterDescriptor,
                                             const char* name)
 {
-    boost::ignore_unused(splitterDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(splitterDescriptor);
+    IgnoreUnused(name);
     ForwardParentParameters(layer);
 }
 
@@ -226,8 +226,8 @@
                                                   const ResizeBilinearDescriptor& resizeDesc,
                                                   const char* name)
 {
-    boost::ignore_unused(resizeDesc);
-    boost::ignore_unused(name);
+    IgnoreUnused(resizeDesc);
+    IgnoreUnused(name);
     ForwardParentParameters(layer);
 }
 
@@ -235,8 +235,8 @@
                                           const ResizeDescriptor& resizeDescriptor,
                                           const char* name)
 {
-    boost::ignore_unused(resizeDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(resizeDescriptor);
+    IgnoreUnused(name);
     ForwardParentParameters(layer);
 }
 
@@ -244,8 +244,8 @@
                                                 const StridedSliceDescriptor& stridedSliceDescriptor,
                                                 const char* name)
 {
-    boost::ignore_unused(stridedSliceDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(stridedSliceDescriptor);
+    IgnoreUnused(name);
     ForwardParentParameters(layer);
 }
 
@@ -253,8 +253,8 @@
                                                   const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                                   const char* name)
 {
-    boost::ignore_unused(batchToSpaceNdDescriptor);
-    boost::ignore_unused(name);
+    IgnoreUnused(batchToSpaceNdDescriptor);
+    IgnoreUnused(name);
     ForwardParentParameters(layer);
 }
 
diff --git a/src/armnn/SubgraphView.cpp b/src/armnn/SubgraphView.cpp
index a87cc9b..7705e68 100644
--- a/src/armnn/SubgraphView.cpp
+++ b/src/armnn/SubgraphView.cpp
@@ -6,8 +6,9 @@
 #include "SubgraphView.hpp"
 #include "Graph.hpp"
 
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
+#include <boost/numeric/conversion/cast.hpp>
 #include <utility>
 
 namespace armnn
@@ -24,7 +25,7 @@
     std::for_each(container.begin(), container.end(), [&duplicateSet, &errorMessage](const T& i)
     {
         // Ignore unused for release builds
-        boost::ignore_unused(errorMessage);
+        IgnoreUnused(errorMessage);
 
         // Check if the item is valid
         BOOST_ASSERT_MSG(i, errorMessage.c_str());
diff --git a/src/armnn/SubgraphViewSelector.cpp b/src/armnn/SubgraphViewSelector.cpp
index 8798b72..02b7bda 100644
--- a/src/armnn/SubgraphViewSelector.cpp
+++ b/src/armnn/SubgraphViewSelector.cpp
@@ -5,6 +5,9 @@
 
 #include "SubgraphViewSelector.hpp"
 #include "Graph.hpp"
+
+#include <armnn/utility/IgnoreUnused.hpp>
+
 #include <boost/assert.hpp>
 #include <algorithm>
 #include <map>
@@ -78,14 +81,14 @@
             {
                 size_t numErased = a->m_Dependants.erase(this);
                 BOOST_ASSERT(numErased == 1);
-                boost::ignore_unused(numErased);
+                IgnoreUnused(numErased);
                 a->m_Dependants.insert(m_Parent);
             }
             for (PartialSubgraph* a : m_Dependants)
             {
                 size_t numErased = a->m_Antecedents.erase(this);
                 BOOST_ASSERT(numErased == 1);
-                boost::ignore_unused(numErased);
+                IgnoreUnused(numErased);
                 a->m_Antecedents.insert(m_Parent);
             }
 
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 317d61f..f4024af 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -130,7 +130,7 @@
                                       const IWorkloadFactory& workloadFactory,
                                       const bool IsMemoryManaged)
 {
-    boost::ignore_unused(IsMemoryManaged);
+    IgnoreUnused(IsMemoryManaged);
     OutputSlot& slot = GetOutputSlot(0);
     ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
 
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index 026e8de..7873c94 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -48,7 +48,7 @@
 {
     // these conversion layers are only inserted by the
     // optimizer and so will never be in an input graph.
-    boost::ignore_unused(visitor);
+    IgnoreUnused(visitor);
     throw armnn::Exception("ConvertFp16ToFp32Layer should never appear in an input graph");
 }
 
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index 90bd894..bbf4dbf 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -47,7 +47,7 @@
 {
     // These conversion layers are only inserted by the
     // optimizer and so will never be in an input graph.
-    boost::ignore_unused(visitor);
+    IgnoreUnused(visitor);
     throw armnn::Exception("ConvertFp32ToFp16Layer should never appear in an input graph");
 }
 
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index d0e0f03..76d33f2 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -8,8 +8,7 @@
 
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
-
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 namespace armnn
 {
@@ -53,7 +52,7 @@
 void DebugLayer::Accept(ILayerVisitor& visitor) const
 {
     // by design debug layers are never in input graphs
-    boost::ignore_unused(visitor);
+    IgnoreUnused(visitor);
     throw armnn::Exception("DebugLayer should never appear in an input graph");
 }
 
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index 90f8445..8611b9b 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -45,7 +45,7 @@
 
 void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const
 {
-    boost::ignore_unused(visitor);
+    IgnoreUnused(visitor);
     throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
 }
 
diff --git a/src/armnn/layers/InputLayer.cpp b/src/armnn/layers/InputLayer.cpp
index e0c2544..84cc43c 100644
--- a/src/armnn/layers/InputLayer.cpp
+++ b/src/armnn/layers/InputLayer.cpp
@@ -19,7 +19,7 @@
 
 std::unique_ptr<IWorkload> InputLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
-    boost::ignore_unused(factory);
+    IgnoreUnused(factory);
     return nullptr;
 }
 
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index 231b285..cf69c17 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -26,7 +26,7 @@
 
 std::unique_ptr<IWorkload> MemCopyLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
-    boost::ignore_unused(factory);
+    IgnoreUnused(factory);
     MemCopyQueueDescriptor descriptor;
 
     //This is different from other workloads. Does not get created by the workload factory.
@@ -49,7 +49,7 @@
 
 void MemCopyLayer::Accept(ILayerVisitor& visitor) const
 {
-    boost::ignore_unused(visitor);
+    IgnoreUnused(visitor);
     throw armnn::Exception("MemCopyLayer should not appear in an input graph");
 }
 
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index 3b0e6d2..80f9fda 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -26,7 +26,7 @@
 
 std::unique_ptr<IWorkload> MemImportLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
-    boost::ignore_unused(factory);
+    IgnoreUnused(factory);
     MemImportQueueDescriptor descriptor;
 
     //This is different from other workloads. Does not get created by the workload factory.
@@ -49,7 +49,7 @@
 
 void MemImportLayer::Accept(ILayerVisitor& visitor) const
 {
-    boost::ignore_unused(visitor);
+    IgnoreUnused(visitor);
     throw armnn::Exception("MemImportLayer should not appear in an input graph");
 }
 
diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp
index ce75950..f2fd29f 100644
--- a/src/armnn/layers/MergeLayer.cpp
+++ b/src/armnn/layers/MergeLayer.cpp
@@ -18,7 +18,7 @@
 
 std::unique_ptr<IWorkload> MergeLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
-    boost::ignore_unused(factory);
+    IgnoreUnused(factory);
     return nullptr;
 }
 
diff --git a/src/armnn/layers/OutputLayer.cpp b/src/armnn/layers/OutputLayer.cpp
index 4239323..f00e0a5 100644
--- a/src/armnn/layers/OutputLayer.cpp
+++ b/src/armnn/layers/OutputLayer.cpp
@@ -6,11 +6,10 @@
 
 #include "LayerCloneBase.hpp"
 
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
-#include <boost/core/ignore_unused.hpp>
-
 namespace armnn
 {
 
@@ -21,7 +20,7 @@
 
 std::unique_ptr<IWorkload> OutputLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
-    boost::ignore_unused(factory);
+    IgnoreUnused(factory);
     return nullptr;
 }
 
diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp
index 8994556..89bcfd6 100644
--- a/src/armnn/layers/OutputLayer.hpp
+++ b/src/armnn/layers/OutputLayer.hpp
@@ -28,7 +28,7 @@
                                      const IWorkloadFactory& factory,
                                      const bool IsMemoryManaged = true) override
     {
-        boost::ignore_unused(registry, factory, IsMemoryManaged);
+        IgnoreUnused(registry, factory, IsMemoryManaged);
     }
 
     /// Creates a dynamically-allocated copy of this layer.
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index 00a316c..3444afc 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -48,7 +48,7 @@
 
 void PreCompiledLayer::Accept(ILayerVisitor& visitor) const
 {
-    boost::ignore_unused(visitor);
+    IgnoreUnused(visitor);
     throw armnn::Exception("PreCompiledLayer should not appear in an input graph");
 }
 
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index 3a95258..fbf3eaa 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -6,6 +6,7 @@
 
 #include "LayerCloneBase.hpp"
 
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/TypesUtils.hpp>
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
@@ -31,7 +32,7 @@
 
 std::vector<TensorShape> ReshapeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    boost::ignore_unused(inputShapes);
+    IgnoreUnused(inputShapes);
     return std::vector<TensorShape>({ m_Param.m_TargetShape });
 }
 
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index e39caa5..ec82082 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -50,7 +50,7 @@
 
 std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    boost::ignore_unused(inputShapes);
+    IgnoreUnused(inputShapes);
     BOOST_ASSERT(inputShapes.size() == 1);
 
     TensorShape outputShape(boost::numeric_cast<unsigned int>(m_Param.m_Size.size()), m_Param.m_Size.data());
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index d38187c..ec724ba 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -35,7 +35,7 @@
 
 SpaceToBatchNdLayer* SpaceToBatchNdLayer::Clone(Graph& graph) const
 {
-    boost::ignore_unused(graph);
+    IgnoreUnused(graph);
     return CloneBase<SpaceToBatchNdLayer>(graph, m_Param, GetName());
 }
 
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index f8a6eb3..8aa0c9f 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -7,7 +7,7 @@
 #include "LayerCloneBase.hpp"
 
 #include <armnn/TypesUtils.hpp>
-
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnnUtils/DataLayoutIndexed.hpp>
 
 #include <backendsCommon/WorkloadData.hpp>
@@ -15,8 +15,6 @@
 
 #include <numeric>
 
-#include <boost/core/ignore_unused.hpp>
-
 using namespace armnnUtils;
 
 namespace armnn
@@ -37,7 +35,7 @@
 
 SpaceToDepthLayer* SpaceToDepthLayer::Clone(Graph& graph) const
 {
-    boost::ignore_unused(graph);
+    IgnoreUnused(graph);
     return CloneBase<SpaceToDepthLayer>(graph, m_Param, GetName());
 }
 
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 84a598c..f655e71 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -104,7 +104,7 @@
                                         const IWorkloadFactory& workloadFactory,
                                         const bool IsMemoryManaged)
 {
-    boost::ignore_unused(IsMemoryManaged);
+    IgnoreUnused(IsMemoryManaged);
     OutputSlot& slot = GetOutputSlot(0);
     ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
 
@@ -127,7 +127,7 @@
 
 std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    boost::ignore_unused(inputShapes);
+    IgnoreUnused(inputShapes);
     BOOST_ASSERT(inputShapes.size() ==  m_Param.GetNumViews());
     std::vector<TensorShape> outShapes;
     //Output shapes must match View shapes.
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index 1a060f9..6f793ca 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -32,7 +32,7 @@
 
 std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    boost::ignore_unused(inputShapes);
+    IgnoreUnused(inputShapes);
 
     const TensorShape& inputShape = m_Param.m_InputShape;
     const unsigned int inputNumDimensions = inputShape.GetNumDimensions();
diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp
index d0fc325..d23d1d0 100644
--- a/src/armnn/layers/StandInLayer.cpp
+++ b/src/armnn/layers/StandInLayer.cpp
@@ -16,7 +16,7 @@
 
 std::unique_ptr<IWorkload> StandInLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
-    boost::ignore_unused(factory);
+    IgnoreUnused(factory);
     // This throws in the event that it's called. We would expect that any backend that
     // "claims" to support the StandInLayer type would actually substitute it with a PrecompiledLayer
     // during graph optimization. There is no interface on the IWorkloadFactory to create a StandInWorkload.
@@ -30,7 +30,7 @@
 
 std::vector<TensorShape> StandInLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    boost::ignore_unused(inputShapes);
+    IgnoreUnused(inputShapes);
     throw Exception("Stand in layer does not support infering output shapes");
 }
 
diff --git a/src/armnn/optimizations/ConvertConstants.hpp b/src/armnn/optimizations/ConvertConstants.hpp
index b3842e3..5e19c7b 100644
--- a/src/armnn/optimizations/ConvertConstants.hpp
+++ b/src/armnn/optimizations/ConvertConstants.hpp
@@ -11,7 +11,7 @@
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <Half.hpp>
 
@@ -72,7 +72,7 @@
 
     void Run(Graph& graph, Layer& layer) const override
     {
-        boost::ignore_unused(graph);
+        IgnoreUnused(graph);
         if (Predicate::Test(layer))
         {
             layer.OperateOnConstantTensors(Converter::Func);
diff --git a/src/armnn/optimizations/OptimizeInverseConversions.hpp b/src/armnn/optimizations/OptimizeInverseConversions.hpp
index f0d11ce..3ea4a5b 100644
--- a/src/armnn/optimizations/OptimizeInverseConversions.hpp
+++ b/src/armnn/optimizations/OptimizeInverseConversions.hpp
@@ -6,7 +6,7 @@
 
 #include "Optimization.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 namespace armnn
 {
@@ -20,7 +20,7 @@
     /// Fp16ToFp32 followed by Fp32ToFp16 or vice-versa.
     void Run(Graph& graph, InputSlot& connection) const
     {
-        boost::ignore_unused(graph);
+        IgnoreUnused(graph);
         Layer& base  = connection.GetConnectedOutputSlot()->GetOwningLayer();
         Layer& child = connection.GetOwningLayer();
 
diff --git a/src/armnn/optimizations/OptimizeInversePermutes.hpp b/src/armnn/optimizations/OptimizeInversePermutes.hpp
index 77d62a5..98e87c3 100644
--- a/src/armnn/optimizations/OptimizeInversePermutes.hpp
+++ b/src/armnn/optimizations/OptimizeInversePermutes.hpp
@@ -6,7 +6,7 @@
 
 #include "Optimization.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 namespace armnn
 {
@@ -21,7 +21,7 @@
     /// Bypasses both layers for that connection if one is the inverse of the other.
     void Run(Graph& graph, InputSlot& connection) const
     {
-        boost::ignore_unused(graph);
+        IgnoreUnused(graph);
         Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
         auto child = boost::polymorphic_downcast<PermuteType*>(&connection.GetOwningLayer());
 
diff --git a/src/armnn/optimizations/SquashEqualSiblings.hpp b/src/armnn/optimizations/SquashEqualSiblings.hpp
index d5a8a5d..bac27c0 100644
--- a/src/armnn/optimizations/SquashEqualSiblings.hpp
+++ b/src/armnn/optimizations/SquashEqualSiblings.hpp
@@ -6,7 +6,7 @@
 
 #include "Optimization.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 namespace armnn
 {
@@ -23,7 +23,7 @@
     /// the child layer, so the siblings are left unconnected (and later removed).
     void Run(Graph& graph, InputSlot& connection) const
     {
-        boost::ignore_unused(graph);
+        IgnoreUnused(graph);
         auto& child = connection.GetOwningLayer();
 
         if (!child.IsOutputUnconnected())
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 4782c43..72ad9d4 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -11,6 +11,7 @@
 #include <ResolveType.hpp>
 
 #include <armnnUtils/DataLayoutIndexed.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
@@ -18,7 +19,6 @@
 
 #include <boost/test/unit_test.hpp>
 #include <boost/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
 
 #include <utility>
 
@@ -1298,7 +1298,7 @@
     armnn::Graph& graph,
     bool biasEnabled = false)
 {
-    boost::ignore_unused(graph);
+    IgnoreUnused(graph);
 
     // To create a PreCompiled layer, create a network and Optimize it.
     armnn::Network net;
diff --git a/src/armnn/test/DebugCallbackTest.cpp b/src/armnn/test/DebugCallbackTest.cpp
index bd8bdd5..c89da83 100644
--- a/src/armnn/test/DebugCallbackTest.cpp
+++ b/src/armnn/test/DebugCallbackTest.cpp
@@ -60,7 +60,7 @@
     std::vector<unsigned int> slotIndexes;
     auto mockCallback = [&](LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensor)
     {
-        boost::ignore_unused(guid);
+        IgnoreUnused(guid);
         slotIndexes.push_back(slotIndex);
         tensorShapes.push_back(tensor->GetShape());
         callCount++;
diff --git a/src/armnn/test/EndToEndTest.cpp b/src/armnn/test/EndToEndTest.cpp
index df84be4..a8192a6 100644
--- a/src/armnn/test/EndToEndTest.cpp
+++ b/src/armnn/test/EndToEndTest.cpp
@@ -6,8 +6,8 @@
 #include <armnn/Descriptors.hpp>
 #include <armnn/IRuntime.hpp>
 #include <armnn/INetwork.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/core/ignore_unused.hpp>
 #include <boost/test/unit_test.hpp>
 
 #include <set>
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 0ca4fc4..56032ad 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -691,7 +691,7 @@
                              LayerBindingId id,
                              const char* name = nullptr) override
         {
-            boost::ignore_unused(id, name);
+            IgnoreUnused(id, name);
             auto inputLayer = boost::polymorphic_downcast<const InputLayer*>(layer);
             BOOST_TEST((inputLayer->GetBackendId() == "MockBackend"));
         }
@@ -700,7 +700,7 @@
                               LayerBindingId id,
                               const char* name = nullptr) override
         {
-            boost::ignore_unused(id, name);
+            IgnoreUnused(id, name);
             auto outputLayer = boost::polymorphic_downcast<const OutputLayer*>(layer);
             BOOST_TEST((outputLayer->GetBackendId() == "MockBackend"));
         }
@@ -709,7 +709,7 @@
                                   const ActivationDescriptor& activationDescriptor,
                                   const char* name = nullptr) override
         {
-            boost::ignore_unused(activationDescriptor, name);
+            IgnoreUnused(activationDescriptor, name);
             auto activation = boost::polymorphic_downcast<const ActivationLayer*>(layer);
             BOOST_TEST((activation->GetBackendId() == "CustomBackend"));
         }
diff --git a/src/armnn/test/OptionalTest.cpp b/src/armnn/test/OptionalTest.cpp
index fd13643..73c9643 100644
--- a/src/armnn/test/OptionalTest.cpp
+++ b/src/armnn/test/OptionalTest.cpp
@@ -7,19 +7,19 @@
 #include <armnn/Optional.hpp>
 #include <string>
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 namespace
 {
 
 void PassStringRef(armnn::Optional<std::string&> value)
 {
-    boost::ignore_unused(value);
+    armnn::IgnoreUnused(value);
 }
 
 void PassStringRefWithDefault(armnn::Optional<std::string&> value = armnn::EmptyOptional())
 {
-    boost::ignore_unused(value);
+    armnn::IgnoreUnused(value);
 }
 
 } // namespace <anonymous>
diff --git a/src/armnn/test/ProfilerTests.cpp b/src/armnn/test/ProfilerTests.cpp
index a052862..9376fa4 100644
--- a/src/armnn/test/ProfilerTests.cpp
+++ b/src/armnn/test/ProfilerTests.cpp
@@ -5,6 +5,7 @@
 
 #include <armnn/IRuntime.hpp>
 #include <armnn/TypesUtils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <boost/test/unit_test.hpp>
 #include <boost/test/tools/output_test_stream.hpp>
@@ -309,7 +310,7 @@
     profiler->Print(json);
 
     std::string output = buffer.str();
-    boost::ignore_unused(output);
+    armnn::IgnoreUnused(output);
 
     // Disable profiling here to not print out anything on stdout.
     profiler->EnableProfiling(false);
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 2dc054a..ef9b2da 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -3,15 +3,6 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <armnn/INetwork.hpp>
-#include <armnn/LayerVisitorBase.hpp>
-#include <armnn/Tensor.hpp>
-#include <armnn/Types.hpp>
-
-#include <armnnQuantizer/INetworkQuantizer.hpp>
-
-#include <QuantizeHelper.hpp>
-
 #include "../Graph.hpp"
 #include "../Network.hpp"
 #include "../NetworkQuantizerUtils.hpp"
@@ -19,7 +10,14 @@
 #include "../RangeTracker.hpp"
 #include "../../armnnQuantizer/CommandLineProcessor.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/INetwork.hpp>
+#include <armnn/LayerVisitorBase.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/Types.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnnQuantizer/INetworkQuantizer.hpp>
+#include <QuantizeHelper.hpp>
+
 #include <boost/test/unit_test.hpp>
 
 #include <unordered_map>
@@ -58,7 +56,7 @@
                          LayerBindingId id,
                          const char* name = nullptr) override
     {
-        boost::ignore_unused(id, name);
+        IgnoreUnused(id, name);
         const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
         BOOST_TEST(m_InputShape == info.GetShape());
         // Based off current default [-15.0f, 15.0f]
@@ -72,7 +70,7 @@
                           LayerBindingId id,
                           const char* name = nullptr) override
     {
-        boost::ignore_unused(id, name);
+        IgnoreUnused(id, name);
         const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
         BOOST_TEST(m_OutputShape == info.GetShape());
     }
@@ -116,7 +114,7 @@
                                         const OffsetScalePair& params,
                                         DataType dataType = DataType::QAsymmU8)
     {
-        boost::ignore_unused(dataType);
+        IgnoreUnused(dataType);
         TestQuantizationParamsImpl(info, dataType, params.first, params.second);
     }
 
@@ -212,7 +210,7 @@
     void VisitAdditionLayer(const IConnectableLayer* layer,
                             const char* name = nullptr) override
     {
-        boost::ignore_unused(name);
+        IgnoreUnused(name);
         TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
         // Based off default static range [-20.0f, 20.0f]
@@ -282,7 +280,7 @@
                               const ActivationDescriptor& descriptor,
                               const char* name = nullptr) override
     {
-        boost::ignore_unused(descriptor, name);
+        IgnoreUnused(descriptor, name);
 
         TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
@@ -385,7 +383,7 @@
                                       LayerBindingId id,
                                       const char* name = nullptr) override
         {
-            boost::ignore_unused(id, name);
+            IgnoreUnused(id, name);
             const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
             BOOST_CHECK_MESSAGE(info.GetDataType() == m_DataType,
                                 std::string(armnn::GetDataTypeName(info.GetDataType()))
@@ -543,7 +541,7 @@
                                   const ActivationDescriptor& descriptor,
                                   const char* name = nullptr) override
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off default static range [0.0f, 3.5f]
@@ -599,7 +597,7 @@
                                   const ActivationDescriptor& descriptor,
                                   const char* name = nullptr) override
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off default static range [-1.0f, 1.0f]
@@ -654,7 +652,7 @@
                               const ActivationDescriptor& descriptor,
                               const char* name = nullptr) override
     {
-        boost::ignore_unused(descriptor, name);
+        IgnoreUnused(descriptor, name);
         TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
         // Based off default static range [-5.0f, 15.0f]
@@ -725,7 +723,7 @@
                                   const ActivationDescriptor& descriptor,
                                   const char* name = nullptr) override
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off default static range [-15.0f, 15.0f]
@@ -779,7 +777,7 @@
                                   const ActivationDescriptor& descriptor,
                                   const char* name = nullptr) override
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off default static range [-15.0f, 15.0f]
@@ -839,7 +837,7 @@
                                           const ConstTensor& gamma,
                                           const char* name = nullptr) override
         {
-            boost::ignore_unused(desc, name);
+            IgnoreUnused(desc, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off default static range [-15.0f, 15.0f]
@@ -924,7 +922,7 @@
                                             const DepthToSpaceDescriptor& desc,
                                             const char* name = nullptr)
         {
-            boost::ignore_unused(desc, name);
+            IgnoreUnused(desc, name);
             const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
 
             const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -1116,7 +1114,7 @@
                                       const Optional<ConstTensor>& biases,
                                       const char* name = nullptr) override
         {
-            boost::ignore_unused(desc, name);
+            IgnoreUnused(desc, name);
             TestQuantizationOnLayersWithBiases(layer, weights, biases);
         }
     };
@@ -1173,7 +1171,7 @@
                                      const Optional<ConstTensor>& biases,
                                      const char *name = nullptr) override
         {
-            boost::ignore_unused(convolution2dDescriptor, name);
+            IgnoreUnused(convolution2dDescriptor, name);
             TestQuantizationOnLayersWithBiases(layer, weights, biases);
         }
     };
@@ -1259,7 +1257,7 @@
                                               const Optional<ConstTensor>& biases,
                                               const char *name = nullptr) override
         {
-            boost::ignore_unused(convolution2dDescriptor, name);
+            IgnoreUnused(convolution2dDescriptor, name);
             TestQuantizationOnLayersWithBiases(layer, weights, biases);
         }
     };
@@ -1343,7 +1341,7 @@
                                                      const InstanceNormalizationDescriptor& descriptor,
                                                      const char* name = nullptr)
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
 
             const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -1411,7 +1409,7 @@
                                   const SoftmaxDescriptor& descriptor,
                                   const char* name = nullptr) override
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -1503,7 +1501,7 @@
                                const SoftmaxDescriptor& descriptor,
                                const char* name = nullptr) override
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off default static range [0.0f, 1.0f]
@@ -1636,7 +1634,7 @@
                                const PermuteDescriptor& desc,
                                const char* name = nullptr) override
         {
-            boost::ignore_unused(desc, name);
+            IgnoreUnused(desc, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -1691,7 +1689,7 @@
                                       const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                       const char* name = nullptr) override
         {
-            boost::ignore_unused(spaceToBatchNdDescriptor, name);
+            IgnoreUnused(spaceToBatchNdDescriptor, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -1804,7 +1802,7 @@
                                  const Pooling2dDescriptor& desc,
                                  const char* name = nullptr) override
         {
-            boost::ignore_unused(desc, name);
+            IgnoreUnused(desc, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -1873,7 +1871,7 @@
                                 const ConstTensor& input,
                                 const char* name = nullptr) override
         {
-            boost::ignore_unused(input, name);
+            IgnoreUnused(input, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off the range of values in the const tensor used for the test: [-2.0f, 6.0f]
@@ -1946,20 +1944,20 @@
                              LayerBindingId id,
                              const char* name = nullptr) override
         {
-            boost::ignore_unused(layer, id, name);
+            IgnoreUnused(layer, id, name);
         }
 
         void VisitOutputLayer(const IConnectableLayer* layer,
                               LayerBindingId id,
                               const char* name = nullptr) override
         {
-            boost::ignore_unused(layer, id, name);
+            IgnoreUnused(layer, id, name);
         }
         void VisitArgMinMaxLayer(const IConnectableLayer* layer,
                                  const ArgMinMaxDescriptor& argMinMaxDescriptor,
                                  const char* name = nullptr) override
         {
-                boost::ignore_unused(argMinMaxDescriptor, name);
+                IgnoreUnused(argMinMaxDescriptor, name);
                 TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
 
                 TestQuantizationParams(outputInfo,
@@ -2034,7 +2032,7 @@
                                   const ComparisonDescriptor& descriptor,
                                   const char* name = nullptr) override
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -2106,19 +2104,19 @@
                              LayerBindingId id,
                              const char* name = nullptr) override
         {
-            boost::ignore_unused(layer, id, name);
+            IgnoreUnused(layer, id, name);
         }
         void VisitOutputLayer(const IConnectableLayer* layer,
                               LayerBindingId id,
                               const char* name = nullptr) override
         {
-            boost::ignore_unused(layer, id, name);
+            IgnoreUnused(layer, id, name);
         }
         void VisitConcatLayer(const IConnectableLayer* layer,
                               const OriginsDescriptor& originsDescriptor,
                               const char* name = nullptr) override
         {
-            boost::ignore_unused(originsDescriptor, name);
+            IgnoreUnused(originsDescriptor, name);
             TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
             TestQuantizationParams(
                 outputInfo, {60.8f / g_AsymmU8QuantizationBase, 65},
@@ -2214,7 +2212,7 @@
                                        const ReshapeDescriptor& reshapeDescriptor,
                                        const char* name = nullptr) override
         {
-            boost::ignore_unused(reshapeDescriptor, name);
+            IgnoreUnused(reshapeDescriptor, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -2269,7 +2267,7 @@
                                         const SplitterDescriptor& desc,
                                         const char* name = nullptr)
         {
-            boost::ignore_unused(desc, name);
+            IgnoreUnused(desc, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -2325,7 +2323,7 @@
                                       const ResizeDescriptor& resizeDescriptor,
                                       const char* name = nullptr) override
         {
-            boost::ignore_unused(resizeDescriptor, name);
+            IgnoreUnused(resizeDescriptor, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -2382,7 +2380,7 @@
                                             const StridedSliceDescriptor& desc,
                                             const char* name = nullptr)
         {
-            boost::ignore_unused(desc, name);
+            IgnoreUnused(desc, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -2437,7 +2435,7 @@
                                       const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                       const char* name = nullptr) override
         {
-            boost::ignore_unused(batchToSpaceNdDescriptor, name);
+            IgnoreUnused(batchToSpaceNdDescriptor, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -2499,7 +2497,7 @@
                              LayerBindingId id,
                              const char* name = nullptr) override
         {
-            boost::ignore_unused(id, name);
+            IgnoreUnused(id, name);
             const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
 
             switch (id)
@@ -2526,7 +2524,7 @@
                               LayerBindingId id,
                               const char* name = nullptr) override
         {
-            boost::ignore_unused(id, name);
+            IgnoreUnused(id, name);
             const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
             BOOST_TEST(m_OutputShape == info.GetShape());
         }
@@ -2534,7 +2532,7 @@
         void VisitPreluLayer(const IConnectableLayer* layer,
                              const char* name = nullptr) override
         {
-            boost::ignore_unused(name);
+            IgnoreUnused(name);
             const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
             TestQuantizationParams(info,
                                    { 30.0f / g_AsymmU8QuantizationBase, 128 }, // QASymmU8
@@ -2617,7 +2615,7 @@
                                               const Optional<ConstTensor>& biases,
                                               const char *name = nullptr) override
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             TestQuantizationOnLayersWithBiases(layer, weights, biases);
         }
     };
@@ -2704,20 +2702,20 @@
                              LayerBindingId id,
                              const char* name = nullptr) override
         {
-            boost::ignore_unused(layer, id, name);
+            IgnoreUnused(layer, id, name);
         }
         void VisitOutputLayer(const IConnectableLayer* layer,
                               LayerBindingId id,
                               const char* name = nullptr) override
         {
-            boost::ignore_unused(layer, id, name);
+            IgnoreUnused(layer, id, name);
         }
 
         void VisitStackLayer(const IConnectableLayer* layer,
                              const StackDescriptor& descriptor,
                              const char* name = nullptr) override
         {
-            boost::ignore_unused(descriptor, name);
+            IgnoreUnused(descriptor, name);
             TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
 
             TestQuantizationParams(outputInfo,
@@ -2784,7 +2782,7 @@
                                      const SliceDescriptor& desc,
                                      const char* name = nullptr)
         {
-            boost::ignore_unused(desc, name);
+            IgnoreUnused(desc, name);
             const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
 
             const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -2876,7 +2874,7 @@
                          LayerBindingId id,
                          const char* name = nullptr) override
     {
-        boost::ignore_unused(id, name);
+        IgnoreUnused(id, name);
         const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
         BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType));
         BOOST_TEST(m_InputShape == info.GetShape());
@@ -2886,7 +2884,7 @@
                           LayerBindingId id,
                           const char* name = nullptr) override
     {
-        boost::ignore_unused(id, name);
+        IgnoreUnused(id, name);
         const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
         BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType));
         BOOST_TEST(m_OutputShape == info.GetShape());
@@ -2895,14 +2893,14 @@
     void VisitQuantizeLayer(const IConnectableLayer* layer,
                             const char* name = nullptr) override
     {
-        boost::ignore_unused(layer, name);
+        IgnoreUnused(layer, name);
         m_VisitedQuantizeLayer = true;
     }
 
     void VisitDequantizeLayer(const IConnectableLayer* layer,
                               const char* name = nullptr) override
     {
-        boost::ignore_unused(layer, name);
+        IgnoreUnused(layer, name);
         m_VisitedDequantizeLayer = true;
     }
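
Every replacement in this patch resolves to the new utility header. A minimal sketch of armnn/utility/IgnoreUnused.hpp, assuming it is the usual variadic no-op (the shipped header may differ in details such as constexpr or noexcept):

#pragma once

namespace armnn
{
// Accepts any number of arguments of any type and does nothing with them;
// the call itself counts as a "use", silencing unused-parameter warnings.
template <typename... Ts>
inline void IgnoreUnused(Ts&&...) {}
} // namespace armnn
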
 
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index d9ed18b..e3cbe03 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -158,8 +158,8 @@
 
     // These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to its parameters:
     // they are written but never read, which would otherwise trigger an unused-variable warning.
-    boost::ignore_unused(dubious);
-    boost::ignore_unused(suppressed);
+    IgnoreUnused(dubious);
+    IgnoreUnused(suppressed);
 }
 #endif // WITH_VALGRIND
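
For reference, the usage being silenced looks roughly like this (a sketch; the local variable names are assumed, but VALGRIND_COUNT_LEAKS is the real Valgrind macro):

#include <valgrind/memcheck.h>
#include <armnn/utility/IgnoreUnused.hpp>

void CountLeaks()
{
    unsigned long leaked = 0, dubious = 0, reachable = 0, suppressed = 0;
    VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed); // macro assigns all four
    armnn::IgnoreUnused(dubious, suppressed); // written by the macro but never read afterwards
    // leaked and reachable are the values the test actually inspects
}
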
 
diff --git a/src/armnn/test/TensorHandleStrategyTest.cpp b/src/armnn/test/TensorHandleStrategyTest.cpp
index 3e59c0b..976e58e 100644
--- a/src/armnn/test/TensorHandleStrategyTest.cpp
+++ b/src/armnn/test/TensorHandleStrategyTest.cpp
@@ -16,10 +16,11 @@
 
 #include <Network.hpp>
 
+#include <armnn/utility/IgnoreUnused.hpp>
+
 #include <vector>
 #include <string>
 
-#include <boost/core/ignore_unused.hpp>
 
 using namespace armnn;
 
@@ -44,20 +45,20 @@
                                                          TensorShape const& subTensorShape,
                                                          unsigned int const* subTensorOrigin) const override
     {
-        boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+        IgnoreUnused(parent, subTensorShape, subTensorOrigin);
         return nullptr;
     }
 
     std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override
     {
-        boost::ignore_unused(tensorInfo);
+        IgnoreUnused(tensorInfo);
         return nullptr;
     }
 
     std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                       DataLayout dataLayout) const override
     {
-        boost::ignore_unused(tensorInfo, dataLayout);
+        IgnoreUnused(tensorInfo, dataLayout);
         return nullptr;
     }
 
@@ -85,20 +86,20 @@
                                                          TensorShape const& subTensorShape,
                                                          unsigned int const* subTensorOrigin) const override
     {
-        boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+        IgnoreUnused(parent, subTensorShape, subTensorOrigin);
         return nullptr;
     }
 
     std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override
     {
-        boost::ignore_unused(tensorInfo);
+        IgnoreUnused(tensorInfo);
         return nullptr;
     }
 
     std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                       DataLayout dataLayout) const override
     {
-        boost::ignore_unused(tensorInfo, dataLayout);
+        IgnoreUnused(tensorInfo, dataLayout);
         return nullptr;
     }
 
@@ -123,7 +124,7 @@
 
     IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
     {
-        boost::ignore_unused(memoryManager);
+        IgnoreUnused(memoryManager);
         return IWorkloadFactoryPtr{};
     }
 
@@ -164,7 +165,7 @@
 
     IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
     {
-        boost::ignore_unused(memoryManager);
+        IgnoreUnused(memoryManager);
         return IWorkloadFactoryPtr{};
     }
 
@@ -202,7 +203,7 @@
 
     IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
     {
-        boost::ignore_unused(memoryManager);
+        IgnoreUnused(memoryManager);
         return IWorkloadFactoryPtr{};
     }
 
@@ -239,7 +240,7 @@
 
     IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
     {
-        boost::ignore_unused(memoryManager);
+        IgnoreUnused(memoryManager);
         return IWorkloadFactoryPtr{};
     }
 
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index ed4605b..1f7c360 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -13,13 +13,13 @@
 
 #include <armnnUtils/Permute.hpp>
 #include <armnnUtils/Transpose.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <ParserHelper.hpp>
 #include <VerificationHelpers.hpp>
 
 #include <boost/filesystem.hpp>
 #include <boost/format.hpp>
-#include <boost/core/ignore_unused.hpp>
 #include <boost/assert.hpp>
 #include <boost/format.hpp>
@@ -743,7 +743,7 @@
 BindingPointInfo Deserializer::GetNetworkInputBindingInfo(unsigned int layerIndex,
                                                           const std::string& name) const
 {
-    boost::ignore_unused(layerIndex);
+    IgnoreUnused(layerIndex);
     for (auto inputBinding : m_InputBindings)
     {
         if (inputBinding.first == name)
@@ -761,7 +761,7 @@
 BindingPointInfo Deserializer::GetNetworkOutputBindingInfo(unsigned int layerIndex,
                                                                 const std::string& name) const
 {
-    boost::ignore_unused(layerIndex);
+    IgnoreUnused(layerIndex);
     for (auto outputBinding : m_OutputBindings)
     {
         if (outputBinding.first == name)
@@ -1805,7 +1805,7 @@
 armnn::Pooling2dDescriptor Deserializer::GetPoolingDescriptor(Deserializer::PoolingDescriptor pooling2dDesc,
                                                               unsigned int layerIndex)
 {
-    boost::ignore_unused(layerIndex);
+    IgnoreUnused(layerIndex);
     armnn::Pooling2dDescriptor desc;
 
     switch (pooling2dDesc->poolType())
@@ -2157,7 +2157,7 @@
     Deserializer::NormalizationDescriptorPtr normalizationDescriptor,
     unsigned int layerIndex)
 {
-    boost::ignore_unused(layerIndex);
+    IgnoreUnused(layerIndex);
     armnn::NormalizationDescriptor desc;
 
     switch (normalizationDescriptor->normChannelType())
diff --git a/src/armnnDeserializer/test/DeserializeAdd.cpp b/src/armnnDeserializer/test/DeserializeAdd.cpp
index 325bb6e..4f29189 100644
--- a/src/armnnDeserializer/test/DeserializeAdd.cpp
+++ b/src/armnnDeserializer/test/DeserializeAdd.cpp
@@ -7,7 +7,7 @@
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include "../Deserializer.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <string>
 #include <iostream>
@@ -22,7 +22,7 @@
                         const std::string & dataType,
                         const std::string & activation="NONE")
     {
-        boost::ignore_unused(activation);
+        armnn::IgnoreUnused(activation);
         m_JsonString = R"(
         {
                 inputIds: [0, 1],
diff --git a/src/armnnDeserializer/test/DeserializeMultiplication.cpp b/src/armnnDeserializer/test/DeserializeMultiplication.cpp
index c0bb13e..8198001 100644
--- a/src/armnnDeserializer/test/DeserializeMultiplication.cpp
+++ b/src/armnnDeserializer/test/DeserializeMultiplication.cpp
@@ -3,11 +3,12 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include "../Deserializer.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <boost/test/unit_test.hpp>
 
 #include <string>
 #include <iostream>
@@ -22,7 +23,7 @@
                                    const std::string & dataType,
                                    const std::string & activation="NONE")
     {
-        boost::ignore_unused(activation);
+        armnn::IgnoreUnused(activation);
         m_JsonString = R"(
         {
                 inputIds: [0, 1],
diff --git a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
index de7fe5c..91d07f3 100644
--- a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
+++ b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
@@ -6,21 +6,20 @@
 #pragma once
 
 #include "SchemaSerialize.hpp"
-
-#include <armnn/IRuntime.hpp>
-#include <armnnDeserializer/IDeserializer.hpp>
-
-#include <boost/core/ignore_unused.hpp>
-#include <boost/assert.hpp>
-#include <boost/format.hpp>
-
-#include <ResolveType.hpp>
 #include "test/TensorHelpers.hpp"
 
 #include "flatbuffers/idl.h"
 #include "flatbuffers/util.h"
 
 #include <ArmnnSchema_generated.h>
+#include <armnn/IRuntime.hpp>
+#include <armnnDeserializer/IDeserializer.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <ResolveType.hpp>
+
+#include <boost/assert.hpp>
+#include <boost/format.hpp>
+
 
 using armnnDeserializer::IDeserializer;
 using TensorRawPtr = armnnSerializer::TensorInfo*;
@@ -155,7 +154,7 @@
                       armnnSerializer::TensorInfo tensorType, const std::string& name,
                       const float scale, const int64_t zeroPoint)
     {
-        boost::ignore_unused(name);
+        armnn::IgnoreUnused(name);
         BOOST_CHECK_EQUAL(shapeSize, tensors->dimensions()->size());
         BOOST_CHECK_EQUAL_COLLECTIONS(shape.begin(), shape.end(),
                                       tensors->dimensions()->begin(), tensors->dimensions()->end());
diff --git a/src/armnnQuantizer/QuantizationDataSet.cpp b/src/armnnQuantizer/QuantizationDataSet.cpp
index 9694342..7042d74 100644
--- a/src/armnnQuantizer/QuantizationDataSet.cpp
+++ b/src/armnnQuantizer/QuantizationDataSet.cpp
@@ -8,7 +8,8 @@
 
 #define BOOST_FILESYSTEM_NO_DEPRECATED
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
 #include <boost/filesystem/operations.hpp>
 #include <boost/filesystem/path.hpp>
 
@@ -52,7 +53,7 @@
                                         armnn::LayerBindingId id,
                                         const char* name)
 {
-    boost::ignore_unused(name);
+    armnn::IgnoreUnused(name);
     m_TensorInfos.emplace(id, layer->GetOutputSlot(0).GetTensorInfo());
 }
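
Note the armnn:: qualification here and in the deserializer tests: name is a const char*, so argument-dependent lookup cannot find armnn::IgnoreUnused, and this file presumably has no using-directive in scope. A sketch of the lookup situations appearing across this patch:

#include <armnn/Tensor.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

void ViaAdl(const armnn::TensorInfo& info)
{
    IgnoreUnused(info);        // found by ADL: the argument type lives in namespace armnn
}

void NoArmnnArgument(const char* name)
{
    armnn::IgnoreUnused(name); // const char* carries no namespace, so qualify explicitly
}                              // (or rely on a using-directive, as some files above do)
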
 
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 39df0c2..47b5d05 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -8,10 +8,10 @@
 #include <armnn/Descriptors.hpp>
 #include <armnn/LstmParams.hpp>
 #include <armnn/QuantizedLstmParams.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <iostream>
 
-#include <boost/core/ignore_unused.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 #include <flatbuffers/util.h>
 
@@ -86,7 +86,7 @@
 // Build FlatBuffer for Input Layer
 void SerializerVisitor::VisitInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     // Create FlatBuffer BaseLayer
     auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);
@@ -108,7 +108,7 @@
 // Build FlatBuffer for Output Layer
 void SerializerVisitor::VisitOutputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     // Create FlatBuffer BaseLayer
     auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);
@@ -128,7 +128,7 @@
 
 void SerializerVisitor::VisitAbsLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Abs);
     auto flatBufferAbsLayer  = serializer::CreateAbsLayer(m_flatBufferBuilder, flatBufferBaseLayer);
 
@@ -140,7 +140,7 @@
                                              const armnn::ActivationDescriptor& descriptor,
                                              const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     // Create FlatBuffer BaseLayer
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Activation);
@@ -163,7 +163,7 @@
 // Build FlatBuffer for Addition Layer
 void SerializerVisitor::VisitAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     // Create FlatBuffer BaseLayer
     auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);
@@ -180,7 +180,7 @@
                                             const armnn::ArgMinMaxDescriptor& descriptor,
                                             const char *name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     // Create FlatBuffer BaseLayer
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ArgMinMax);
@@ -203,7 +203,7 @@
                                                  const armnn::BatchToSpaceNdDescriptor& descriptor,
                                                  const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     // Create FlatBuffer BaseLayer
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchToSpaceNd);
@@ -237,7 +237,7 @@
                                                      const armnn::ConstTensor& gamma,
                                                      const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbBatchNormalizationBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
     auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
@@ -264,7 +264,7 @@
                                              const armnn::ComparisonDescriptor& descriptor,
                                              const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Comparison);
     auto fbDescriptor = serializer::CreateComparisonDescriptor(
@@ -280,7 +280,7 @@
                                            const armnn::ConstTensor& input,
                                            const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     // Create FlatBuffer BaseLayer
     auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);
@@ -303,7 +303,7 @@
                                                 const armnn::Optional<armnn::ConstTensor>& biases,
                                                 const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     // Create FlatBuffer BaseLayer
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
@@ -342,7 +342,7 @@
                                                const armnn::DepthToSpaceDescriptor& descriptor,
                                                const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthToSpace);
     auto fbDescriptor = CreateDepthToSpaceDescriptor(m_flatBufferBuilder,
@@ -360,7 +360,7 @@
                                                          const armnn::Optional<armnn::ConstTensor>& biases,
                                                          const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthwiseConvolution2d);
     auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
@@ -394,7 +394,7 @@
 void SerializerVisitor::VisitDequantizeLayer(const armnn::IConnectableLayer* layer,
                                              const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
     auto fbDequantizeLayer     = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);
@@ -407,7 +407,7 @@
                                                        const armnn::ConstTensor& anchors,
                                                        const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_DetectionPostProcess);
     auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
@@ -435,7 +435,7 @@
 
 void SerializerVisitor::VisitDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbDivisionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Division);
     auto fbDivisionLayer     = serializer::CreateDivisionLayer(m_flatBufferBuilder, fbDivisionBaseLayer);
@@ -447,7 +447,7 @@
                                                    const armnn::ElementwiseUnaryDescriptor& descriptor,
                                                    const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseUnary);
     auto fbDescriptor = serializer::CreateElementwiseUnaryDescriptor(
@@ -460,7 +460,7 @@
 
 void SerializerVisitor::VisitEqualLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Equal);
     auto fbEqualLayer = serializer::CreateEqualLayer(m_flatBufferBuilder, fbBaseLayer);
@@ -470,7 +470,7 @@
 
 void SerializerVisitor::VisitFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto flatBufferFloorBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Floor);
     auto flatBufferFloorLayer = serializer::CreateFloorLayer(m_flatBufferBuilder, flatBufferFloorBaseLayer);
@@ -480,7 +480,7 @@
 
 void SerializerVisitor::VisitGatherLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbGatherBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Gather);
     auto flatBufferLayer   = serializer::CreateGatherLayer(m_flatBufferBuilder, fbGatherBaseLayer);
@@ -490,7 +490,7 @@
 
 void SerializerVisitor::VisitGreaterLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbGreaterBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Greater);
     auto fbGreaterLayer = serializer::CreateGreaterLayer(m_flatBufferBuilder, fbGreaterBaseLayer);
@@ -503,7 +503,7 @@
     const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
     const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbDescriptor = serializer::CreateInstanceNormalizationDescriptor(
             m_flatBufferBuilder,
@@ -522,7 +522,7 @@
                                                   const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
                                                   const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     // Create FlatBuffer BaseLayer
     auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_L2Normalization);
@@ -543,7 +543,7 @@
                                              const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
                                              const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     // Create FlatBuffer BaseLayer
     auto flatBufferLogSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogSoftmax);
@@ -568,7 +568,7 @@
                                        const armnn::LstmInputParams& params,
                                        const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Lstm);
 
@@ -673,7 +673,7 @@
 
 void SerializerVisitor::VisitMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbMaximumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Maximum);
     auto fbMaximumLayer     = serializer::CreateMaximumLayer(m_flatBufferBuilder, fbMaximumBaseLayer);
@@ -685,7 +685,7 @@
                                        const armnn::MeanDescriptor& descriptor,
                                        const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbMeanBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Mean);
     auto fbMeanDescriptor = serializer::CreateMeanDescriptor(m_flatBufferBuilder,
@@ -701,7 +701,7 @@
 
 void SerializerVisitor::VisitMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbMinimumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Minimum);
     auto fbMinimumLayer     = serializer::CreateMinimumLayer(m_flatBufferBuilder, fbMinimumBaseLayer);
@@ -711,7 +711,7 @@
 
 void SerializerVisitor::VisitMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbMergeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merge);
     auto fbMergeLayer     = serializer::CreateMergeLayer(m_flatBufferBuilder, fbMergeBaseLayer);
@@ -730,7 +730,7 @@
                                          const armnn::ConcatDescriptor& concatDescriptor,
                                          const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat);
 
@@ -763,7 +763,7 @@
 
 void SerializerVisitor::VisitMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
     auto fbMultiplicationLayer     = serializer::CreateMultiplicationLayer(m_flatBufferBuilder,
@@ -776,7 +776,7 @@
                                       const armnn::PadDescriptor& padDescriptor,
                                       const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pad);
 
@@ -802,7 +802,7 @@
                                           const armnn::PermuteDescriptor& permuteDescriptor,
                                           const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     // Create FlatBuffer BaseLayer
     auto flatBufferPermuteBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Permute);
@@ -830,7 +830,7 @@
                                           const armnn::ReshapeDescriptor& reshapeDescriptor,
                                           const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     // Create FlatBuffer BaseLayer
     auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);
@@ -856,7 +856,7 @@
                                                  const armnn::ResizeBilinearDescriptor& resizeDescriptor,
                                                  const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ResizeBilinear);
 
@@ -877,7 +877,7 @@
                                          const armnn::ResizeDescriptor& resizeDescriptor,
                                          const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Resize);
 
@@ -897,7 +897,7 @@
 
 void SerializerVisitor::VisitRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbRsqrtBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rsqrt);
     auto fbRsqrtLayer     = serializer::CreateRsqrtLayer(m_flatBufferBuilder, fbRsqrtBaseLayer);
@@ -909,7 +909,7 @@
                                         const armnn::SliceDescriptor& sliceDescriptor,
                                         const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbSliceBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Slice);
     auto fbSliceDescriptor = CreateSliceDescriptor(m_flatBufferBuilder,
@@ -926,7 +926,7 @@
                                           const armnn::SoftmaxDescriptor& softmaxDescriptor,
                                           const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     // Create FlatBuffer BaseLayer
     auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax);
@@ -948,7 +948,7 @@
                                             const armnn::Pooling2dDescriptor& pooling2dDescriptor,
                                             const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbPooling2dBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
     auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
@@ -976,7 +976,7 @@
 void SerializerVisitor::VisitPreluLayer(const armnn::IConnectableLayer* layer,
                                         const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     // Create FlatBuffer BaseLayer
     auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);
@@ -990,7 +990,7 @@
 
 void SerializerVisitor::VisitQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
     auto fbQuantizeLayer = serializer::CreateQuantizeLayer(m_flatBufferBuilder,
@@ -1005,7 +1005,7 @@
                                                  const armnn::Optional<armnn::ConstTensor>& biases,
                                                  const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     // Create FlatBuffer BaseLayer
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);
@@ -1042,7 +1042,7 @@
                                                  const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                  const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     // Create FlatBuffer BaseLayer
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToBatchNd);
@@ -1073,7 +1073,7 @@
                                                const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
                                                const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto flatBufferBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToDepth);
     auto flatBufferDescriptor =
@@ -1093,7 +1093,7 @@
                                            const armnn::ViewsDescriptor& viewsDescriptor,
                                            const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     // Create FlatBuffer ViewOrigins
     std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
@@ -1159,7 +1159,7 @@
                                                 const armnn::NormalizationDescriptor& descriptor,
                                                 const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbNormalizationBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Normalization);
 
@@ -1184,7 +1184,7 @@
                                         const armnn::StackDescriptor& stackDescriptor,
                                         const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto stackBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Stack);
 
@@ -1207,7 +1207,7 @@
                                           const armnn::StandInDescriptor& standInDescriptor,
                                           const char *name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbDescriptor = serializer::CreateStandInDescriptor(m_flatBufferBuilder,
                                                             standInDescriptor.m_NumInputs,
@@ -1223,7 +1223,7 @@
                                                const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
                                                const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StridedSlice);
 
@@ -1248,7 +1248,7 @@
 
 void SerializerVisitor::VisitSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
     auto fbSubtractionLayer = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);
@@ -1258,7 +1258,7 @@
 
 void SerializerVisitor::VisitSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbSwitchBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Switch);
     auto fbSwitchLayer = serializer::CreateSwitchLayer(m_flatBufferBuilder, fbSwitchBaseLayer);
@@ -1273,7 +1273,7 @@
     const armnn::Optional<armnn::ConstTensor>& biases,
     const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
     auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
@@ -1307,7 +1307,7 @@
                                             const armnn::TransposeDescriptor& descriptor,
                                             const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     // Create FlatBuffer BaseLayer
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Transpose);
@@ -1334,7 +1334,7 @@
                                                 const armnn::QuantizedLstmInputParams& params,
                                                 const char* name)
 {
-    boost::ignore_unused(name);
+    IgnoreUnused(name);
 
     auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);
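
The pattern repeated across these serializer hunks is uniform: the name parameter is redundant because CreateLayerBase presumably reads the same string from layer->GetName(), so every visitor silences it. A hypothetical visitor in the same shape (FooLayer, LayerType_Foo and Layer_FooLayer are illustrative names, not part of the real schema):

void SerializerVisitor::VisitFooLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    IgnoreUnused(name); // CreateLayerBase takes the name from layer->GetName() instead

    auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Foo);
    auto fbFooLayer  = serializer::CreateFooLayer(m_flatBufferBuilder, fbBaseLayer);

    CreateAnyLayer(fbFooLayer.o, serializer::Layer::Layer_FooLayer);
}
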
 
diff --git a/src/armnnSerializer/test/ActivationSerializationTests.cpp b/src/armnnSerializer/test/ActivationSerializationTests.cpp
index 0362412..abc63ae 100644
--- a/src/armnnSerializer/test/ActivationSerializationTests.cpp
+++ b/src/armnnSerializer/test/ActivationSerializationTests.cpp
@@ -3,18 +3,18 @@
 // SPDX-License-Identifier: MIT
 //
 
+#include "../Serializer.hpp"
+
 #include <armnn/Descriptors.hpp>
 #include <armnn/INetwork.hpp>
 #include <armnn/IRuntime.hpp>
 #include <armnnDeserializer/IDeserializer.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
-#include "../Serializer.hpp"
+#include <boost/test/unit_test.hpp>
 
 #include <sstream>
 
-#include <boost/core/ignore_unused.hpp>
-#include <boost/test/unit_test.hpp>
-
 BOOST_AUTO_TEST_SUITE(SerializerTests)
 
 class VerifyActivationName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
@@ -24,7 +24,7 @@
                               const armnn::ActivationDescriptor& activationDescriptor,
                               const char* name) override
     {
-        boost::ignore_unused(layer, activationDescriptor);
+        IgnoreUnused(layer, activationDescriptor);
         BOOST_TEST(name == "activation");
     }
 };
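
The verification idiom above generalises to any layer type: inherit the no-op visitor, override the one Visit method of interest, and check only the name. A hypothetical variant for a Constant layer (the expected name "constant" is illustrative):

class VerifyConstantName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
public:
    void VisitConstantLayer(const armnn::IConnectableLayer* layer,
                            const armnn::ConstTensor& input,
                            const char* name) override
    {
        IgnoreUnused(layer, input); // found by ADL: both argument types live in namespace armnn
        BOOST_TEST(name == "constant");
    }
};
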
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 5a5274a..eab9f4e 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -9,7 +9,7 @@
 #include <armnn/Exceptions.hpp>
 #include <armnn/Logging.hpp>
 #include <armnn/TypesUtils.hpp>
-#include <boost/filesystem.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 // armnnUtils:
 #include <armnnUtils/Permute.hpp>
@@ -22,10 +22,10 @@
 
 #include <flatbuffers/flexbuffers.h>
 
-#include <boost/core/ignore_unused.hpp>
 #include <boost/assert.hpp>
 #include <boost/format.hpp>
 #include <boost/numeric/conversion/cast.hpp>
+#include <boost/filesystem.hpp>
 
 #include <fstream>
 #include <algorithm>
@@ -426,7 +426,7 @@
                       armnn::TensorInfo& tensorInfo,
                       armnn::Optional<armnn::PermutationVector&> permutationVector)
 {
-    boost::ignore_unused(tensorPtr);
+    IgnoreUnused(tensorPtr);
     BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
     BOOST_ASSERT_MSG(bufferPtr != nullptr,
         boost::str(
@@ -1827,7 +1827,7 @@
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
     const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
-    boost::ignore_unused(operatorPtr);
+    IgnoreUnused(operatorPtr);
 
     auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(inputs.size(), 1);
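
The TfLiteParser change above pairs IgnoreUnused with an assertion for a reason: BOOST_ASSERT_MSG expands to nothing when NDEBUG is defined, which would otherwise leave tensorPtr unused in release builds. A sketch of the pattern:

#include <boost/assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

void Check(const void* tensorPtr)
{
    armnn::IgnoreUnused(tensorPtr); // keeps release builds warning-free
    BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null"); // compiled out with NDEBUG
}
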
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 1383331..793bd0e 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -11,6 +11,7 @@
 #include <armnnUtils/Permute.hpp>
 #include <armnnUtils/DataLayoutIndexed.hpp>
 #include <armnnUtils/Transpose.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <GraphTopologicalSort.hpp>
 #include <ParserHelper.hpp>
@@ -21,7 +22,6 @@
 #include <tensorflow/core/framework/graph.pb.h>
 
 #include <boost/format.hpp>
-#include <boost/core/ignore_unused.hpp>
 #include <boost/format.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 #include <boost/polymorphic_cast.hpp>
@@ -732,7 +732,7 @@
 
 ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
     if (numberOfInputs < 2)
     {
@@ -812,7 +812,7 @@
 
 ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
 
     // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
@@ -842,7 +842,7 @@
 
 ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     return AddAdditionLayer(nodeDef, true);
 }
 
@@ -873,7 +873,7 @@
 
 ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
     // Any requests for the output slots of this node should be forwarded to the node connected as input.
     return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
@@ -1067,7 +1067,7 @@
 
 ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     BOOST_ASSERT(nodeDef.op() == "Const");
 
     if (nodeDef.attr().count("value") == 0)
@@ -1204,7 +1204,7 @@
 ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
     TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
@@ -1346,7 +1346,7 @@
 ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
                                                     const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
     TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
@@ -1542,7 +1542,7 @@
 
 ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
 
     IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
@@ -1563,7 +1563,7 @@
 ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
                                                    const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
 
     if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
@@ -1712,7 +1712,7 @@
 ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
                                             const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     if (inputs.size() != 2)
     {
@@ -1850,7 +1850,7 @@
 ParsedTfOperationPtr TfParser::ParseGather(const tensorflow::NodeDef& nodeDef,
                                            const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
     IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
@@ -1887,7 +1887,7 @@
 ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
                                             const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
     IOutputSlot* input0Slot = inputLayers.first;
     IOutputSlot* input1Slot = inputLayers.second;
@@ -1901,7 +1901,7 @@
 ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
     IOutputSlot* input0Slot = inputLayers.first;
     IOutputSlot* input1Slot = inputLayers.second;
@@ -1915,7 +1915,7 @@
 ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
                                             const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
     IOutputSlot* input0Slot = inputLayers.first;
     IOutputSlot* input1Slot = inputLayers.second;
@@ -1927,7 +1927,7 @@
 
 ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
 
     IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
@@ -1967,7 +1967,7 @@
 
 ParsedTfOperationPtr TfParser::ParseStack(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
 
     unsigned int numInputs = static_cast<unsigned int>(nodes.size());
@@ -2058,7 +2058,7 @@
 
 ParsedTfOperationPtr TfParser::ParseTranspose(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
 
     auto inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     const auto inputCount = inputs.size();
@@ -2157,7 +2157,7 @@
 ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
                                         const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     // input consists of:
     // input[0] the tensor which will be padded
     // input[1] the tensor holding the padding values
@@ -2232,7 +2232,7 @@
 ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
                                            const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
 
     // In TensorFlow, the last input of the Concat layer is the axis of concatenation.
@@ -2318,7 +2318,7 @@
 ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     // Note: the Shape layer is handled in a special way, because:
     //        1. ARMNN doesn't support the int32 tensors it would output.
     //        2. ARMNN works with statically shaped tensors, which are known at parse time.
@@ -2361,7 +2361,7 @@
 ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
 
@@ -2400,7 +2400,7 @@
 ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
 
     if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
@@ -2539,7 +2539,7 @@
 
 ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
 
     IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
@@ -2559,7 +2559,7 @@
 
 ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
 
     NormalizationDescriptor normalizationDescriptor;
@@ -2605,7 +2605,7 @@
 
 ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
 
     // Defers the creation of the layer (see ParsedMatMulTfOperation).
     return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
@@ -2613,7 +2613,7 @@
 
 ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
     TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
@@ -2688,7 +2688,7 @@
 
 ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
 
     return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
 }
@@ -2696,7 +2696,7 @@
 ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
 
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
 
@@ -2725,14 +2725,14 @@
 
 ParsedTfOperationPtr TfParser::ParseRealDiv(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
     return AddRealDivLayer(nodeDef);
 }
 
 ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
 
     ActivationDescriptor activationDesc;
     activationDesc.m_Function = ActivationFunction::ReLu;
@@ -2742,7 +2742,7 @@
 ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
 
     ActivationDescriptor activationDesc;
     activationDesc.m_Function = ActivationFunction::BoundedReLu;
@@ -2755,7 +2755,7 @@
 ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
 
     ActivationDescriptor activationDesc;
     activationDesc.m_Function = ActivationFunction::Sigmoid;
@@ -2766,7 +2766,7 @@
 ParsedTfOperationPtr TfParser::ParseRsqrt(const tensorflow::NodeDef &nodeDef,
     const tensorflow::GraphDef &graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
 
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
 
@@ -2783,7 +2783,7 @@
 ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
 
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
 
@@ -2800,7 +2800,7 @@
 ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
 
     std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
     unsigned int numInputs = static_cast<unsigned int>(nodes.size());
@@ -2895,7 +2895,7 @@
 ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
 
     ActivationDescriptor activationDesc;
     activationDesc.m_Function = ActivationFunction::SoftReLu;
@@ -2906,7 +2906,7 @@
 ParsedTfOperationPtr TfParser::ParseStridedSlice(const tensorflow::NodeDef& nodeDef,
                                                  const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
 
     std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
     unsigned int numInputs = static_cast<unsigned int>(nodes.size());
@@ -2953,7 +2953,7 @@
 
 ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
 
     ActivationDescriptor activationDesc;
     activationDesc.m_Function = ActivationFunction::TanH;
@@ -2991,7 +2991,7 @@
 ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
 {
-    boost::ignore_unused(graphDef);
+    IgnoreUnused(graphDef);
 
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
     IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
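
Every TfParser hunk above is the same mechanical substitution, so it is worth stating once what the new header provides. A minimal sketch of what armnn/utility/IgnoreUnused.hpp amounts to (illustrative, not a verbatim copy of the header):

    namespace armnn
    {
    // An empty variadic function template: the call marks each argument as
    // used, silencing -Wunused-parameter/-Wunused-variable under -Werror,
    // while the inline empty body compiles away entirely.
    template <typename... Ts>
    inline void IgnoreUnused(Ts&&...) {}
    }

Unlike boost::ignore_unused, this carries no Boost.Core dependency, which is the point of the change.
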
diff --git a/src/armnnTfParser/test/Split.cpp b/src/armnnTfParser/test/Split.cpp
index d53ae67..eeef90a 100644
--- a/src/armnnTfParser/test/Split.cpp
+++ b/src/armnnTfParser/test/Split.cpp
@@ -3,10 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "armnnTfParser/ITfParser.hpp"
 #include "ParserPrototxtFixture.hpp"
 
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <boost/test/unit_test.hpp>
+
 BOOST_AUTO_TEST_SUITE(TensorflowParser)
 
 struct SplitFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
@@ -176,7 +179,7 @@
 struct SplitLastDimFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
 {
     SplitLastDimFixture(bool withDimZero=false) {
-        boost::ignore_unused(withDimZero);
+        armnn::IgnoreUnused(withDimZero);
         m_Prototext = R"(
         node {
           name: "Placeholder"
diff --git a/src/armnnUtils/QuantizeHelper.hpp b/src/armnnUtils/QuantizeHelper.hpp
index 061c459..6fd13fd 100644
--- a/src/armnnUtils/QuantizeHelper.hpp
+++ b/src/armnnUtils/QuantizeHelper.hpp
@@ -5,6 +5,7 @@
 
 #pragma once
 
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/TypesUtils.hpp>
 
 #include <Half.hpp>
@@ -13,7 +14,6 @@
 #include <iterator>
 #include <vector>
 
-#include <boost/core/ignore_unused.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 namespace armnnUtils
@@ -38,13 +38,13 @@
 {
     static T Quantize(float value, float scale, int32_t offset)
     {
-        boost::ignore_unused(scale, offset);
+        armnn::IgnoreUnused(scale, offset);
         return value;
     }
 
     static float Dequantize(T value, float scale, int32_t offset)
     {
-        boost::ignore_unused(scale, offset);
+        armnn::IgnoreUnused(scale, offset);
         return value;
     }
 };
@@ -54,13 +54,13 @@
 {
     static armnn::Half Quantize(float value, float scale, int32_t offset)
     {
-        boost::ignore_unused(scale, offset);
+        armnn::IgnoreUnused(scale, offset);
         return armnn::Half(value);
     }
 
     static float Dequantize(armnn::Half value, float scale, int32_t offset)
     {
-        boost::ignore_unused(scale, offset);
+        armnn::IgnoreUnused(scale, offset);
         return value;
     }
 };
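
The two specializations above are deliberate no-ops: float and armnn::Half values pass through unquantized, but the (value, scale, offset) signature is kept so generic test code can call Quantize/Dequantize uniformly. A condensed sketch of the pattern (the struct name here is hypothetical, not the header's):

    #include <armnn/utility/IgnoreUnused.hpp>
    #include <cstdint>

    template <typename T>
    struct PassThroughQuantizer
    {
        static T Quantize(float value, float scale, int32_t offset)
        {
            // scale/offset exist only for signature parity with the
            // genuinely quantizing specializations.
            armnn::IgnoreUnused(scale, offset);
            return static_cast<T>(value);
        }
    };
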
diff --git a/src/armnnUtils/test/QuantizeHelperTest.cpp b/src/armnnUtils/test/QuantizeHelperTest.cpp
index 7e781d0..410fdfa 100644
--- a/src/armnnUtils/test/QuantizeHelperTest.cpp
+++ b/src/armnnUtils/test/QuantizeHelperTest.cpp
@@ -4,8 +4,8 @@
 //
 
 #include <QuantizeHelper.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/core/ignore_unused.hpp>
 #include <boost/test/unit_test.hpp>
 
 #include <vector>
@@ -18,7 +18,7 @@
 template<typename T>
 bool IsFloatIterFunc(T iter)
 {
-    boost::ignore_unused(iter);
+    armnn::IgnoreUnused(iter);
     return armnnUtils::IsFloatingPointIterator<T>::value;
 }
 
diff --git a/src/backends/backendsCommon/CpuTensorHandle.cpp b/src/backends/backendsCommon/CpuTensorHandle.cpp
index de83048..65e6c47 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.cpp
+++ b/src/backends/backendsCommon/CpuTensorHandle.cpp
@@ -3,6 +3,7 @@
 // SPDX-License-Identifier: MIT
 //
 #include <armnn/Exceptions.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 1279134..e8ef46e 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -10,7 +10,7 @@
 
 #include <backendsCommon/LayerSupportBase.hpp>
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 namespace
 {
diff --git a/src/backends/backendsCommon/MakeWorkloadHelper.hpp b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
index 250a10a..7ef140e 100644
--- a/src/backends/backendsCommon/MakeWorkloadHelper.hpp
+++ b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
@@ -31,9 +31,9 @@
                                               const WorkloadInfo& info,
                                               Args&&... args)
     {
-        boost::ignore_unused(descriptor);
-        boost::ignore_unused(info);
-        boost::ignore_unused(args...);
+        IgnoreUnused(descriptor);
+        IgnoreUnused(info);
+        IgnoreUnused(args...);
         return nullptr;
     }
 };
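
This specialization is the "unsupported type" branch of the workload factory helper: it must keep the same signature as the real creator functions while producing nothing, so every parameter is deliberately unused. A simplified, self-contained sketch of the shape (names hypothetical; the real helper dispatches on tensor data type):

    #include <memory>
    #include <armnn/utility/IgnoreUnused.hpp>

    // The null branch swallows everything it is handed and returns no workload.
    template <typename Workload, typename Descriptor, typename Info, typename... Args>
    std::unique_ptr<Workload> MakeNullWorkload(const Descriptor& descriptor,
                                               const Info& info,
                                               Args&&... args)
    {
        armnn::IgnoreUnused(descriptor, info, args...);
        return nullptr;
    }
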
diff --git a/src/backends/backendsCommon/WorkloadUtils.hpp b/src/backends/backendsCommon/WorkloadUtils.hpp
index 92ef2d2..66056db 100644
--- a/src/backends/backendsCommon/WorkloadUtils.hpp
+++ b/src/backends/backendsCommon/WorkloadUtils.hpp
@@ -55,11 +55,11 @@
     TensorShape srcStrides      = srcTensor->GetStrides();
     const TensorShape& srcShape = srcTensor->GetShape();
     const auto srcSize          = srcTensor->GetStrides()[0] * srcShape[0];
-    boost::ignore_unused(srcSize);  // Only used for asserts
+    IgnoreUnused(srcSize);  // Only used for asserts
     TensorShape dstStrides      = dstTensor->GetStrides();
     const TensorShape& dstShape = dstTensor->GetShape();
     const auto dstSize          = dstTensor->GetStrides()[0] * dstShape[0];
-    boost::ignore_unused(dstSize);  // Only used for asserts
+    IgnoreUnused(dstSize);  // Only used for asserts
 
     size_t srcDepth    = 1;
     size_t srcBatches  = 1;
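
The srcSize/dstSize pair above shows another common motivation for the utility: values that are read only inside asserts. In a release build the assert expands to nothing, the variable becomes dead, and -Werror turns the resulting warning into a build break. A self-contained sketch of the pattern (function name hypothetical):

    #include <cstddef>
    #include <armnn/utility/IgnoreUnused.hpp>
    #include <boost/assert.hpp>

    void CheckedCopy(std::size_t copySize, std::size_t srcSize)
    {
        // BOOST_ASSERT compiles to nothing in release builds, so srcSize
        // would otherwise be flagged as an unused parameter.
        armnn::IgnoreUnused(srcSize);
        BOOST_ASSERT(copySize <= srcSize);
        // ... copy copySize bytes ...
    }
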
diff --git a/src/backends/backendsCommon/test/BackendProfilingTests.cpp b/src/backends/backendsCommon/test/BackendProfilingTests.cpp
index 4555336..b9e0e45 100644
--- a/src/backends/backendsCommon/test/BackendProfilingTests.cpp
+++ b/src/backends/backendsCommon/test/BackendProfilingTests.cpp
@@ -14,6 +14,7 @@
 #include "ProfilingUtils.hpp"
 #include "RequestCounterDirectoryCommandHandler.hpp"
 
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/BackendId.hpp>
 #include <armnn/Logging.hpp>
 #include <armnn/profiling/ISendTimelinePacket.hpp>
@@ -56,7 +57,7 @@
     /// Create and write a CounterDirectoryPacket from the parameters to the buffer.
     virtual void SendCounterDirectoryPacket(const ICounterDirectory& counterDirectory)
     {
-        boost::ignore_unused(counterDirectory);
+        armnn::IgnoreUnused(counterDirectory);
     }
 
     /// Create and write a PeriodicCounterCapturePacket from the parameters to the buffer.
@@ -69,8 +70,8 @@
     virtual void SendPeriodicCounterSelectionPacket(uint32_t capturePeriod,
                                                     const std::vector<uint16_t>& selectedCounterIds)
     {
-        boost::ignore_unused(capturePeriod);
-        boost::ignore_unused(selectedCounterIds);
+        armnn::IgnoreUnused(capturePeriod);
+        armnn::IgnoreUnused(selectedCounterIds);
     }
 
     std::vector<Timestamp> GetTimestamps()
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 395a63d..15608cc 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -8,7 +8,7 @@
 
 #include <backendsCommon/WorkloadFactory.hpp>
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 namespace
 {
@@ -414,7 +414,7 @@
     static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
         unsigned int nIn, unsigned int nOut) \
     { \
-        boost::ignore_unused(factory, nIn, nOut); \
+        IgnoreUnused(factory, nIn, nOut); \
         return std::unique_ptr<armnn::IWorkload>(); \
     } \
 };
@@ -559,7 +559,7 @@
 template<>
 unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
 {
-    boost::ignore_unused(layer);
+    IgnoreUnused(layer);
     return 2;
 }
 
@@ -613,7 +613,7 @@
         }
         catch(const armnn::InvalidArgumentException& e)
         {
-            boost::ignore_unused(e);
+            IgnoreUnused(e);
             // This is ok since we throw InvalidArgumentException when creating the dummy workload.
             return true;
         }
@@ -644,12 +644,12 @@
         // InvalidArgumentException or UnimplementedException.
         catch(const armnn::InvalidArgumentException& e)
         {
-            boost::ignore_unused(e);
+            IgnoreUnused(e);
             return true;
         }
         catch(const armnn::UnimplementedException& e)
         {
-            boost::ignore_unused(e);
+            IgnoreUnused(e);
             return true;
         }
         catch(const std::exception& e)
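
The catch blocks above bind the exception to a name and then discard it. An equivalent spelling that needs no suppression at all is to drop the binding; the patch keeps the named form, which leaves the exception object inspectable in a debugger. Sketch of the unnamed alternative (function name hypothetical):

    #include <armnn/Exceptions.hpp>

    bool IsUnsupportedCombination()
    {
        try
        {
            // ... attempt to create the dummy workload ...
        }
        catch (const armnn::InvalidArgumentException&) // unnamed: nothing to silence
        {
            return true;
        }
        return false;
    }
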
diff --git a/src/backends/backendsCommon/test/MockBackend.cpp b/src/backends/backendsCommon/test/MockBackend.cpp
index b2388cf..8d40117 100644
--- a/src/backends/backendsCommon/test/MockBackend.cpp
+++ b/src/backends/backendsCommon/test/MockBackend.cpp
@@ -108,7 +108,7 @@
 IBackendInternal::IBackendProfilingContextPtr MockBackend::CreateBackendProfilingContext(
     const IRuntime::CreationOptions& options, IBackendProfilingPtr& backendProfiling)
 {
-    boost::ignore_unused(options);
+    IgnoreUnused(options);
     std::shared_ptr<armnn::MockBackendProfilingContext> context =
         std::make_shared<MockBackendProfilingContext>(backendProfiling);
     MockBackendProfilingService::Instance().SetProfilingContextPtr(context);
diff --git a/src/backends/backendsCommon/test/TestDynamicBackend.cpp b/src/backends/backendsCommon/test/TestDynamicBackend.cpp
index cbfe093..5018b44 100644
--- a/src/backends/backendsCommon/test/TestDynamicBackend.cpp
+++ b/src/backends/backendsCommon/test/TestDynamicBackend.cpp
@@ -7,7 +7,7 @@
 
 #include <armnn/backends/IBackendInternal.hpp>
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 constexpr const char* TestDynamicBackendId()
 {
@@ -65,7 +65,7 @@
     }
     IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager) const override
     {
-        boost::ignore_unused(memoryManager);
+        IgnoreUnused(memoryManager);
         return IWorkloadFactoryPtr{};
     }
     ILayerSupportSharedPtr GetLayerSupport() const override
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index 6993b9e..319434e 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -36,7 +36,7 @@
     unsigned int inputChannels,
     unsigned int inputBatchSize)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     unsigned int outputWidth = inputWidth;
     unsigned int outputHeight = inputHeight;
     unsigned int outputChannels = inputChannels;
@@ -245,7 +245,7 @@
     float upperBound,
     const armnn::ActivationDescriptor& activationDescriptor)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
     const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();
 
@@ -310,7 +310,7 @@
     float qScale = 0.0f,
     int32_t qOffset = 0)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     unsigned int inputHeight    = 20;
     unsigned int inputWidth     = 17;
     unsigned int inputChannels  = 3;
@@ -402,7 +402,7 @@
     int32_t outOffset,
     const std::vector<float>& outputExpectedData)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     constexpr static unsigned int inputWidth = 16u;
     constexpr static unsigned int inputHeight = 1u;
     constexpr static unsigned int inputChannels = 1u;
@@ -793,7 +793,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     const int inputDataSize = 120;
     std::vector<float> inputData(inputDataSize);
 
@@ -1148,7 +1148,7 @@
     float qScale = 0.0f,
     int32_t qOffset = 0)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     unsigned int width     = 17;
     unsigned int height    = 29;
     unsigned int channels  = 2;
diff --git a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
index 82dc59b..bfe0282 100644
--- a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
@@ -165,7 +165,7 @@
     float qScale,
     int32_t qOffset)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
     armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
     armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
@@ -248,7 +248,7 @@
     float qScale,
     int32_t qOffset)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
     armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
     armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
@@ -455,7 +455,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
 
     // Create Initial Tensor
     // 1, 2, 3
@@ -563,7 +563,7 @@
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::IWorkloadFactory& refWorkloadFactory)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     unsigned int batchSize = 4;
     unsigned int channels  = 1;
     unsigned int height    = 2;
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
index 7bfccd6..20dcef5 100644
--- a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
@@ -26,7 +26,7 @@
         const std::vector<int32_t>& outputData,
         int axis = 3)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
 
     LayerTestResult<int32_t, 3> result(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
index f64b06d..48f7257 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
@@ -8,7 +8,7 @@
 #include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
-
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnnUtils/DataLayoutIndexed.hpp>
 
 #include <backendsCommon/CpuTensorHandle.hpp>
@@ -36,7 +36,7 @@
     int32_t qOffset,
     armnn::DataLayout dataLayout)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
     armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);
 
@@ -115,7 +115,7 @@
     float qScale,
     int32_t qOffset)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
 
     const unsigned int width    = 2;
     const unsigned int height   = 3;
@@ -589,7 +589,7 @@
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::IWorkloadFactory& refWorkloadFactory)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     const unsigned int width     = 2;
     const unsigned int height    = 3;
     const unsigned int channels  = 5;
diff --git a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
index 1241366..2ba3a0c 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
@@ -39,7 +39,7 @@
         float scale = 1.0f,
         int32_t offset = 0)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
 
     armnn::TensorInfo inputTensorInfo(InputDim, inputShape, ArmnnType);
     armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, ArmnnType);
diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
index 9f9944d..2156b0e 100644
--- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
@@ -43,7 +43,7 @@
     float outQuantScale,
     int outQuantOffset)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     BOOST_ASSERT(shape0.GetNumDimensions() == NumDims);
     armnn::TensorInfo inputTensorInfo0(shape0, ArmnnInType, quantScale0, quantOffset0);
 
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index 9d590e3..f6f4b09 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
@@ -126,7 +126,7 @@
     const T * inputData,
     std::vector<T>& outputData)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
     if (inputData == nullptr)
     {
@@ -178,7 +178,7 @@
     unsigned int & concatDim,
     TensorInfo & outputTensorInfo)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
         "Expecting more than one tensor to be concatenated here");
 
@@ -1918,7 +1918,7 @@
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
 
     // Defines the tensor descriptors.
     TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
@@ -2073,7 +2073,7 @@
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
 
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
@@ -2346,7 +2346,7 @@
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
 
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
@@ -2491,7 +2491,7 @@
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
 
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
@@ -2629,7 +2629,7 @@
         IWorkloadFactory& workloadFactory,
         const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
 
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
diff --git a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
index d11004c..7a8aac4 100644
--- a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
@@ -28,7 +28,7 @@
     float qScale,
     int32_t qOffset)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     constexpr unsigned int inputWidth = 3;
     constexpr unsigned int inputHeight = 4;
     constexpr unsigned int inputChannels = 3;
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 669398f..89cdd96 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -8,7 +8,7 @@
 #include <QuantizeHelper.hpp>
 #include <armnnUtils/TensorUtils.hpp>
 
-
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnnUtils/DataLayoutIndexed.hpp>
 #include <armnnUtils/Permute.hpp>
 
@@ -217,7 +217,7 @@
     uint32_t dilationX = 1,
     uint32_t dilationY = 1)
 {
-    boost::ignore_unused(memoryManager);
+    armnn::IgnoreUnused(memoryManager);
     unsigned int inputHeight   = boost::numeric_cast<unsigned int>(originalInput.shape()[2]);
     unsigned int inputWidth    = boost::numeric_cast<unsigned int>(originalInput.shape()[3]);
     unsigned int inputChannels = boost::numeric_cast<unsigned int>(originalInput.shape()[1]);
@@ -381,7 +381,7 @@
     uint32_t strideX  = 1,
     uint32_t strideY  = 1)
 {
-    boost::ignore_unused(qScale, qOffset);
+    armnn::IgnoreUnused(qScale, qOffset);
     unsigned int inputNum       = boost::numeric_cast<unsigned int>(input.shape()[0]);
     unsigned int inputChannels  = boost::numeric_cast<unsigned int>(input.shape()[3]);
     unsigned int inputHeight    = boost::numeric_cast<unsigned int>(input.shape()[1]);
@@ -587,7 +587,7 @@
     bool biasEnabled,
     armnn::DataLayout dataLayout)
 {
-    boost::ignore_unused(biasEnabled);
+    armnn::IgnoreUnused(biasEnabled);
     // Use common single-batch 5x5 image.
 
     armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
@@ -640,7 +640,7 @@
         bool biasEnabled,
         const armnn::DataLayout& dataLayout)
 {
-    boost::ignore_unused(biasEnabled);
+    armnn::IgnoreUnused(biasEnabled);
 
     // Input is a single-batch, 1 channel, 5x5 image.
     armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
index cea6efb..8b3bbd8 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
@@ -17,7 +17,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     using namespace half_float::literal;
 
     const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
index 9f4eeca..1e60471 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
@@ -15,7 +15,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     using namespace half_float::literal;
 
     const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
diff --git a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
index 92c5d92..149779b 100644
--- a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
@@ -30,7 +30,7 @@
     const float qScale = 1.0f,
     const int32_t qOffset = 0)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     if(armnn::IsQuantizedType<T>())
     {
         inputTensorInfo.SetQuantizationScale(qScale);
diff --git a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
index 4ddfb30..4d4a6bc 100644
--- a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
@@ -29,7 +29,7 @@
     const float qScale = 1.0f,
     const int32_t qOffset = 0)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NCHW)
     {
         PermuteTensorNhwcToNchw<float>(inputInfo, inputData);
diff --git a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
index 91d56bb..0a4bdb8 100644
--- a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
@@ -26,7 +26,7 @@
         const std::vector<T1>& expectedOutputData,
         armnn::DequantizeQueueDescriptor descriptor)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     boost::multi_array<T, Dim> input = MakeTensor<T, Dim>(inputTensorInfo, inputData);
 
     LayerTestResult<T1, Dim> ret(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
index 223beb4..2359f77 100644
--- a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
@@ -20,7 +20,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     const unsigned int width        = 2u;
     const unsigned int height       = 2u;
     const unsigned int channelCount = 2u;
diff --git a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
index d905bde..905f97b 100644
--- a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
@@ -17,7 +17,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     constexpr unsigned int width = 2;
     constexpr unsigned int height = 3;
 
diff --git a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
index ebad7fc..444809f 100644
--- a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
@@ -16,7 +16,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, ArmnnType);
     inputTensorInfo.SetQuantizationScale(0.1f);
 
diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
index 7c6122e..43bcfb1 100644
--- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
@@ -34,7 +34,7 @@
         bool biasEnabled,
         bool transposeWeights)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
diff --git a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
index 6841055..47adb22 100644
--- a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
@@ -31,7 +31,7 @@
     const std::vector<int32_t>& indicesData,
     const std::vector<T>& outputData)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     auto params  = MakeTensor<T, ParamsDim>(paramsInfo, paramsData);
     auto indices = MakeTensor<int32_t, IndicesDim>(indicesInfo, indicesData);
 
diff --git a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
index ae28bc0..a13198b 100644
--- a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
@@ -34,7 +34,7 @@
     float qScale = 0.0f,
     int32_t qOffset = 0)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
                                         armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
 
diff --git a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
index e500a12..4d98e23 100644
--- a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
@@ -33,7 +33,7 @@
     const armnn::DataLayout layout,
     float epsilon = 1e-12f)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
     const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
 
diff --git a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
index 392983c..208bed2 100644
--- a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
@@ -36,7 +36,7 @@
     float qScale = 1.0f,
     int32_t qOffset = 0)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     LayerTestResult<T, NumDims> result(outputInfo);
     result.outputExpected =
         MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index b12df8a..50ef5c9 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -142,7 +142,7 @@
         int32_t qOffset = 0,
         armnn::DataType constantDataType = armnn::DataType::Float32)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     unsigned int batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
     unsigned int inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
     unsigned int outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
@@ -345,7 +345,7 @@
                                                   int32_t qOffset = 0,
                                                   armnn::DataType constantDataType = armnn::DataType::Float32)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     unsigned int batchSize = 2;
     unsigned int outputSize = 16;
     unsigned int inputSize = 5;
@@ -1060,7 +1060,7 @@
         int32_t qOffset = 0,
         armnn::DataType constantDataType = armnn::DataType::Float32)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     bool cifgEnabled = true;
     bool peepholeEnabled = true;
     bool projectionEnabled = false;
@@ -1285,7 +1285,7 @@
                                                   int32_t qOffset = 0,
                                                   armnn::DataType constantDataType = armnn::DataType::Float32)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     unsigned int batchSize = 2;
     unsigned int outputSize = 3;
     unsigned int inputSize = 5;
@@ -1552,7 +1552,7 @@
     const boost::multi_array<uint8_t, 2>& input,
     const boost::multi_array<uint8_t, 2>& outputExpected)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     auto numBatches = boost::numeric_cast<unsigned int>(input.shape()[0]);
     auto inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
     auto outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
diff --git a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
index 5147cff..0e66d9f 100644
--- a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
@@ -19,7 +19,7 @@
 LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
                                            const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     const unsigned int width        = 2u;
     const unsigned int height       = 2u;
     const unsigned int channelCount = 2u;
diff --git a/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
index b8eae1c..cd7b22e 100644
--- a/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
@@ -28,7 +28,7 @@
         float scale = 1.0f,
         int32_t offset = 0)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
 
     armnn::TensorInfo inputTensorInfo(InputDim, inputShape, ArmnnType);
     armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, ArmnnType);
diff --git a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
index a0a4029..ae54746 100644
--- a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
@@ -20,7 +20,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     unsigned int shape0[] = { 1, 2, 2, 2 };
     unsigned int shape1[] = { 1, 1, 1, 1 };
 
diff --git a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
index d32e0cf..1a9cf5b 100644
--- a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
@@ -401,7 +401,7 @@
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::IWorkloadFactory& refWorkloadFactory)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     const unsigned int width = 16;
     const unsigned int height = 32;
     const unsigned int channelCount = 2;
diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
index ef82855..ef3a45b 100644
--- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
@@ -24,7 +24,7 @@
     armnn::NormalizationAlgorithmChannel normChannel,
     armnn::NormalizationAlgorithmMethod normMethod)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     const unsigned int inputHeight = 2;
     const unsigned int inputWidth = 2;
     const unsigned int inputChannels = 1;
diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
index 9239c66..69c651b 100644
--- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
@@ -24,7 +24,7 @@
     int32_t qOffset,
     const float customPaddingValue)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     const armnn::TensorShape inputShape{ 3, 3 };
     const armnn::TensorShape outputShape{ 7, 7 };
 
@@ -96,7 +96,7 @@
     float qScale,
     int32_t qOffset)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     const armnn::TensorShape inputShape{ 2, 2, 2 };
     const armnn::TensorShape outputShape{ 3, 5, 6 };
 
@@ -180,7 +180,7 @@
     float qScale,
     int32_t qOffset)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
     const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
 
diff --git a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
index 9460592..71e1533 100644
--- a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
@@ -25,7 +25,7 @@
         const std::vector<T>& inputData,
         const std::vector<T>& outputExpectedData)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
 
     LayerTestResult<T, 4> ret(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
index b58e982..89e46fb 100644
--- a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
@@ -14,6 +14,8 @@
 #include <armnnUtils/DataLayoutIndexed.hpp>
 #include <armnnUtils/Permute.hpp>
 
+#include <armnn/utility/IgnoreUnused.hpp>
+
 #include <backendsCommon/WorkloadInfo.hpp>
 
 #include <backendsCommon/test/TensorCopyUtils.hpp>
@@ -38,7 +40,7 @@
     const boost::multi_array<T, 4>& input,
     const boost::multi_array<T, 4>& outputExpected)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
     const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
     auto heightIndex = dimensionIndices.GetHeightIndex();
@@ -740,7 +742,7 @@
     float qScale = 1.0f,
     int32_t qOffset = 0)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     const unsigned int inputWidth = 16;
     const unsigned int inputHeight = 32;
     const unsigned int channelCount = 2;
diff --git a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
index 7138b46..3b6c2d8 100644
--- a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
@@ -24,7 +24,7 @@
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
 
     armnn::TensorInfo inputTensorInfo ({ 1, 2, 2, 3 }, ArmnnType);
     armnn::TensorInfo alphaTensorInfo ({ 1, 1, 1, 3 }, ArmnnType);
diff --git a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
index e8996d4..673bfef 100644
--- a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
@@ -29,7 +29,7 @@
     const std::vector<T>& expectedOutputData,
     armnn::QuantizeQueueDescriptor descriptor)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     boost::multi_array<float, Dim> input = MakeTensor<float, Dim>(inputTensorInfo, inputData);
 
     LayerTestResult<T, Dim> ret(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
index 894ece6..5ed947d 100644
--- a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
@@ -23,7 +23,7 @@
     const std::vector<T>& inputData,
     const std::vector<T>& outputExpectedData)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     auto input = MakeTensor<T, NumDims>(inputTensorInfo, inputData);
 
     LayerTestResult<T, NumDims> ret(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
index a6e0ca1..e95f18b 100644
--- a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
@@ -73,7 +73,7 @@
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const ResizeTestParams& params)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     armnn::TensorInfo inputInfo(params.m_InputShape, ArmnnType);
     armnn::TensorInfo outputInfo(params.m_OutputShape, ArmnnType);
 
diff --git a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
index 09f15c0..df3b623 100644
--- a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
@@ -29,7 +29,7 @@
     const float qScale = 1.0f,
     const int qOffset = 0)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     if(armnn::IsQuantizedType<T>())
     {
         inputInfo.SetQuantizationScale(qScale);
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index 044589b..772ae2c 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
@@ -65,7 +65,7 @@
     const std::vector<float>& inputData,
     int axis = 1)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     using std::exp;
 
     const float qScale = 1.f / 256.f;
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
index ed35413..d1bc2a9 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
@@ -31,7 +31,7 @@
     const float qScale = 1.0f,
     const int32_t qOffset = 0)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     const armnn::PermutationVector NCHWToNHWC = {0, 3, 1, 2};
     if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NHWC)
     {
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
index bf2f48c..c6a5bbe 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
@@ -31,7 +31,7 @@
     const float qScale = 1.0f,
     const int32_t qOffset = 0)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     const armnn::PermutationVector NHWCToNCHW = {0, 2, 3, 1};
 
     if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NCHW)
diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
index f55aca1..88b18b9 100644
--- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
@@ -24,7 +24,7 @@
     float qScale = 0.0f,
     int32_t qOffset = 0)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     unsigned int inputWidth = 5;
     unsigned int inputHeight = 6;
     unsigned int inputChannels = 3;
@@ -257,7 +257,7 @@
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale, int32_t qOffset)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType, qScale, qOffset);
     auto input = MakeTensor<T, 3>(
         tensorInfo,
diff --git a/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
index 45dff96..eeaa846 100644
--- a/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
@@ -30,7 +30,7 @@
         const std::vector<std::vector<T>>& inputData,
         const std::vector<T>& outputExpectedData)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     unsigned int numInputs = static_cast<unsigned int>(inputData.size());
     std::vector<boost::multi_array<T, outputDimLength-1>> inputs;
     for (unsigned int i = 0; i < numInputs; ++i)
diff --git a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
index 63a95b1..b857a1b 100644
--- a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
@@ -29,7 +29,7 @@
     const float qScale = 1.0f,
     const int32_t qOffset = 0)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     if(armnn::IsQuantizedType<T>())
     {
         inputTensorInfo.SetQuantizationScale(qScale);
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
index 378ec46..07f5258 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
@@ -51,7 +51,7 @@
                                     const TensorData<T>& weights,
                                     const armnn::Optional<TensorData<BT>>& biases)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     using namespace armnn;
 
     VerifyInputTensorData(input, "input");
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
index 3949dcc..0e0f317 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
@@ -25,7 +25,7 @@
         const std::vector<T>& inputData,
         const std::vector<T>& outputExpectedData)
 {
-    boost::ignore_unused(memoryManager);
+    IgnoreUnused(memoryManager);
     auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
 
     LayerTestResult<T, 4> ret(outputTensorInfo);
diff --git a/src/backends/cl/ClContextControl.cpp b/src/backends/cl/ClContextControl.cpp
index 72c8e9f..f307133 100644
--- a/src/backends/cl/ClContextControl.cpp
+++ b/src/backends/cl/ClContextControl.cpp
@@ -9,13 +9,14 @@
 
 #include <LeakChecking.hpp>
 
+#include <armnn/utility/IgnoreUnused.hpp>
+
 #include <arm_compute/core/CL/CLKernelLibrary.h>
 #include <arm_compute/runtime/CL/CLScheduler.h>
 
 #include <boost/assert.hpp>
 #include <boost/format.hpp>
 #include <boost/polymorphic_cast.hpp>
-#include <boost/core/ignore_unused.hpp>
 
 namespace cl
 {
@@ -33,7 +34,7 @@
     , m_ProfilingEnabled(profilingEnabled)
 {
     // Ignore m_ProfilingEnabled if unused, to avoid compilation problems when ArmCompute is disabled.
-    boost::ignore_unused(m_ProfilingEnabled);
+    IgnoreUnused(m_ProfilingEnabled);
 
     try
     {
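
The constructor above touches m_ProfilingEnabled unconditionally because, in builds where ArmCompute is disabled, no other code path reads the member. A reduced sketch of the idea (class name hypothetical, guard usage simplified):

    #include <armnn/utility/IgnoreUnused.hpp>

    class ProfilingToggle
    {
    public:
        explicit ProfilingToggle(bool profilingEnabled)
            : m_ProfilingEnabled(profilingEnabled)
        {
            // Marks the member as used even when the guarded branch is compiled
            // out, avoiding clang's -Wunused-private-field.
            armnn::IgnoreUnused(m_ProfilingEnabled);
    #if defined(ARMCOMPUTECL_ENABLED)
            // ... configure CL queue profiling from m_ProfilingEnabled ...
    #endif
        }

    private:
        bool m_ProfilingEnabled;
    };
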
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index d3ac986..cdb93d7 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -6,14 +6,13 @@
 #include "ClLayerSupport.hpp"
 #include "ClBackendId.hpp"
 
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/Descriptors.hpp>
 #include <armnn/BackendRegistry.hpp>
 
 #include <InternalTypes.hpp>
 #include <LayerSupportCommon.hpp>
 
-#include <boost/core/ignore_unused.hpp>
-
 #if defined(ARMCOMPUTECL_ENABLED)
 #include <aclCommon/ArmComputeUtils.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
@@ -63,7 +62,6 @@
 #include "workloads/ClTransposeWorkload.hpp"
 #endif
 
-using namespace boost;
 
 namespace armnn
 {
@@ -93,7 +91,7 @@
 template<typename ... Args>
 bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
 {
-    boost::ignore_unused(reasonIfUnsupported, (args)...);
+    IgnoreUnused(reasonIfUnsupported, (args)...);
 #if defined(ARMCOMPUTECL_ENABLED)
     return true;
 #else
@@ -649,7 +647,7 @@
                                         const ReshapeDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
 }
 
@@ -728,7 +726,7 @@
                                          const ViewsDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                     input.GetDataType(),
                                     &TrueFunc<>,
@@ -756,7 +754,7 @@
                                        *splitAxis.begin());
     }
 #endif
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     for (auto output : outputs)
     {
         if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
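
Note that the bare ignore_unused(descriptor) calls replaced in this file only compiled because of the using namespace boost; directive deleted earlier in the same file, so the directive removal and the call-site migration have to land together:

    // Before: unqualified call resolved via `using namespace boost;`
    ignore_unused(descriptor);

    // After: armnn::IgnoreUnused is found unqualified from inside namespace armnn
    IgnoreUnused(descriptor);
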
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 21c2629..e7e4fa7 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -9,6 +9,7 @@
 
 #include <armnn/Exceptions.hpp>
 #include <armnn/Utils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <backendsCommon/MakeWorkloadHelper.hpp>
@@ -23,7 +24,6 @@
 #include <arm_compute/runtime/CL/CLBufferAllocator.h>
 #include <arm_compute/runtime/CL/CLScheduler.h>
 
-#include <boost/core/ignore_unused.hpp>
 #include <boost/polymorphic_cast.hpp>
 #include <boost/format.hpp>
 
@@ -85,7 +85,7 @@
 std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                      const bool IsMemoryManaged) const
 {
-    boost::ignore_unused(IsMemoryManaged);
+    IgnoreUnused(IsMemoryManaged);
     std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo);
     tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
 
@@ -96,7 +96,7 @@
                                                                      DataLayout dataLayout,
                                                                      const bool IsMemoryManaged) const
 {
-    boost::ignore_unused(IsMemoryManaged);
+    IgnoreUnused(IsMemoryManaged);
     std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo, dataLayout);
     tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
 
@@ -131,7 +131,7 @@
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
     elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs);
@@ -279,7 +279,7 @@
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     ComparisonQueueDescriptor comparisonDescriptor;
     comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Equal);
@@ -308,7 +308,7 @@
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     ComparisonQueueDescriptor comparisonDescriptor;
     comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Greater);
@@ -477,7 +477,7 @@
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
     elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt);
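Each deprecated creator in this factory follows the same shape: the legacy descriptor carries no parameters of its own, so it is ignored and a fresh descriptor naming the concrete operation is forwarded to the unified creator. A toy, self-contained model of the pattern (all types and the forwarding target are stand-ins, since the diff context cuts off before the return statement):

    #include <cstdio>
    #include <memory>

    template <typename... Ts>
    inline void IgnoreUnused(Ts&&...) {} // stand-in for armnn::IgnoreUnused

    enum class UnaryOperation { Abs, Rsqrt };
    struct AbsQueueDescriptor {};                                    // legacy, parameterless
    struct ElementwiseUnaryQueueDescriptor { UnaryOperation m_Op; }; // unified
    struct Workload { UnaryOperation op; };

    std::unique_ptr<Workload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& desc)
    {
        return std::make_unique<Workload>(Workload{desc.m_Op});
    }

    std::unique_ptr<Workload> CreateAbs(const AbsQueueDescriptor& descriptor)
    {
        IgnoreUnused(descriptor); // keep the legacy signature, silence the warning
        ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
        elementwiseUnaryDescriptor.m_Op = UnaryOperation::Abs;
        return CreateElementwiseUnary(elementwiseUnaryDescriptor);
    }

    int main()
    {
        std::printf("%d\n", CreateAbs(AbsQueueDescriptor{})->op == UnaryOperation::Abs); // 1
        return 0;
    }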
diff --git a/src/backends/cl/OpenClTimer.cpp b/src/backends/cl/OpenClTimer.cpp
index ee3c114..5f10699 100644
--- a/src/backends/cl/OpenClTimer.cpp
+++ b/src/backends/cl/OpenClTimer.cpp
@@ -5,10 +5,11 @@
 
 #include "OpenClTimer.hpp"
 
+#include <armnn/utility/IgnoreUnused.hpp>
+
 #include <string>
 #include <sstream>
 
-#include <boost/core/ignore_unused.hpp>
 
 namespace armnn
 {
@@ -31,7 +32,7 @@
                                 const cl_event * event_wait_list,
                                 cl_event *       event)
         {
-            boost::ignore_unused(event);
+            IgnoreUnused(event);
             cl_int retVal = 0;
 
             // Get the name of the kernel
diff --git a/src/backends/cl/test/ClRuntimeTests.cpp b/src/backends/cl/test/ClRuntimeTests.cpp
index 9aa3617..a0d7963 100644
--- a/src/backends/cl/test/ClRuntimeTests.cpp
+++ b/src/backends/cl/test/ClRuntimeTests.cpp
@@ -9,8 +9,8 @@
 
 #include <backendsCommon/test/RuntimeTestImpl.hpp>
 #include <test/ProfilingTestUtils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/core/ignore_unused.hpp>
 #include <boost/test/unit_test.hpp>
 
 #ifdef WITH_VALGRIND
@@ -144,8 +144,8 @@
 
     // These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to its parameters,
     // so they are written to but never read afterwards, which still triggers an unused warning.
-    boost::ignore_unused(dubious);
-    boost::ignore_unused(suppressed);
+    IgnoreUnused(dubious);
+    IgnoreUnused(suppressed);
 }
 #endif
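A sketch of the Valgrind pattern the comment above describes, assuming <valgrind/memcheck.h> is available as the WITH_VALGRIND guard implies; only the counters that later assertions read escape the IgnoreUnused call:

    #include <valgrind/memcheck.h>
    #include <armnn/utility/IgnoreUnused.hpp>

    bool CheckForLeaks() // hypothetical helper, not part of the patch
    {
        unsigned long leaked = 0, dubious = 0, reachable = 0, suppressed = 0;
        VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed); // writes all four

        armnn::IgnoreUnused(dubious, suppressed); // written by the macro, read by nothing
        return leaked == 0 && reachable == 0;     // only these two are actually read
    }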
 
diff --git a/src/backends/cl/test/Fp16SupportTest.cpp b/src/backends/cl/test/Fp16SupportTest.cpp
index ee5163f6..b7d274f 100644
--- a/src/backends/cl/test/Fp16SupportTest.cpp
+++ b/src/backends/cl/test/Fp16SupportTest.cpp
@@ -11,8 +11,8 @@
 #include <Graph.hpp>
 #include <Optimizer.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/core/ignore_unused.hpp>
 #include <boost/test/unit_test.hpp>
 
 #include <set>
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 7e58dab..7877612 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -13,8 +13,7 @@
 
 #include <InternalTypes.hpp>
 #include <LayerSupportCommon.hpp>
-
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #if defined(ARMCOMPUTENEON_ENABLED)
 #include <aclCommon/ArmComputeUtils.hpp>
@@ -62,8 +61,6 @@
 #include "workloads/NeonTransposeWorkload.hpp"
 #endif
 
-using namespace boost;
-
 namespace armnn
 {
 
@@ -73,7 +70,7 @@
 template< typename ... Args>
 bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
 {
-    boost::ignore_unused(reasonIfUnsupported, (args)...);
+    IgnoreUnused(reasonIfUnsupported, (args)...);
 #if defined(ARMCOMPUTENEON_ENABLED)
     return true;
 #else
@@ -134,7 +131,7 @@
                                              const ActivationDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
                                    reasonIfUnsupported,
                                    input,
@@ -268,9 +265,9 @@
                                                     const TensorInfo& output,
                                                     Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(input);
-    ignore_unused(output);
-    ignore_unused(reasonIfUnsupported);
+    IgnoreUnused(input);
+    IgnoreUnused(output);
+    IgnoreUnused(reasonIfUnsupported);
     return true;
 }
 
@@ -278,9 +275,9 @@
                                                     const TensorInfo& output,
                                                     Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(input);
-    ignore_unused(output);
-    ignore_unused(reasonIfUnsupported);
+    IgnoreUnused(input);
+    IgnoreUnused(output);
+    IgnoreUnused(reasonIfUnsupported);
     return true;
 }
 
@@ -381,7 +378,7 @@
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(output);
+    IgnoreUnused(output);
     return IsNeonBackendSupported(reasonIfUnsupported) &&
            IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                          input.GetDataType(),
@@ -622,7 +619,7 @@
                                           const ReshapeDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonReshapeWorkloadValidate,
                                    reasonIfUnsupported,
                                    input,
@@ -712,7 +709,7 @@
                                            const ViewsDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                       input.GetDataType(),
                                       &TrueFunc<>,
@@ -740,7 +737,7 @@
                                        *splitAxis.begin());
     }
 #endif
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     for (auto output : outputs)
     {
         if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not in the same space
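The variadic guard at the top of this file relies on pack expansion: IgnoreUnused(reasonIfUnsupported, (args)...) flattens every forwarded argument into a single no-op call. A self-contained demonstration of the mechanics (all names are stand-ins):

    #include <cstdio>

    template <typename... Ts>
    inline void IgnoreUnused(Ts&&...) {} // stand-in for armnn::IgnoreUnused

    template <typename... Args>
    bool IsBackendSupported(const char* reason, Args... args)
    {
        IgnoreUnused(reason, args...); // expands to IgnoreUnused(reason, a0, a1, ...)
        return false;                  // models the ARMCOMPUTENEON-disabled branch
    }

    int main()
    {
        std::printf("%d\n", IsBackendSupported("unused", 1, 2.0, "three")); // prints 0
        return 0;
    }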
diff --git a/src/backends/neon/NeonTensorHandleFactory.cpp b/src/backends/neon/NeonTensorHandleFactory.cpp
index d5fef4e..26b14af 100644
--- a/src/backends/neon/NeonTensorHandleFactory.cpp
+++ b/src/backends/neon/NeonTensorHandleFactory.cpp
@@ -6,7 +6,7 @@
 #include "NeonTensorHandleFactory.hpp"
 #include "NeonTensorHandle.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 namespace armnn
 {
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 939590c..cf9999f 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -10,6 +10,7 @@
 #include <Layer.hpp>
 
 #include <armnn/Utils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <backendsCommon/MakeWorkloadHelper.hpp>
@@ -19,7 +20,6 @@
 #include <neon/workloads/NeonWorkloadUtils.hpp>
 #include <neon/workloads/NeonWorkloads.hpp>
 
-#include <boost/core/ignore_unused.hpp>
 #include <boost/polymorphic_cast.hpp>
 
 namespace armnn
@@ -98,7 +98,7 @@
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
     elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs);
@@ -245,7 +245,7 @@
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     ComparisonQueueDescriptor comparisonDescriptor;
     comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Equal);
@@ -275,7 +275,7 @@
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     ComparisonQueueDescriptor comparisonDescriptor;
     comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Greater);
@@ -446,7 +446,7 @@
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor &descriptor,
                                                             const WorkloadInfo &info) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
     elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt);
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index bc4107d..f122792 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -8,8 +8,7 @@
 
 #include <backendsCommon/WorkloadFactoryBase.hpp>
 #include <aclCommon/BaseMemoryManager.hpp>
-
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 namespace armnn
 {
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 4dc9641..d3ca675 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -219,7 +219,7 @@
     uint32_t depthMultiplier = 1, uint32_t padLeft = 0, uint32_t padRight = 0,
     uint32_t padTop = 0, uint32_t padBottom = 0)
 {
-    boost::ignore_unused(depthMultiplier);
+    IgnoreUnused(depthMultiplier);
 
     DepthwiseConvolution2dDescriptor desc;
 
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 7d5c3b5..bd2e728 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -8,13 +8,12 @@
 #include <armnn/TypesUtils.hpp>
 #include <armnn/Types.hpp>
 #include <armnn/Descriptors.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <LayerSupportCommon.hpp>
-
 #include <backendsCommon/LayerSupportRules.hpp>
 
 #include <boost/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
 
 #include <vector>
 #include <array>
@@ -178,7 +177,7 @@
                                            const armnn::ArgMinMaxDescriptor &descriptor,
                                            armnn::Optional<std::string &> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     std::array<DataType, 4> supportedTypes =
     {
@@ -207,7 +206,7 @@
                                                     const BatchNormalizationDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     std::array<DataType, 4> supportedTypes =
     {
@@ -248,7 +247,7 @@
                                                 const BatchToSpaceNdDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     bool supported = true;
 
@@ -297,7 +296,7 @@
                                             const ComparisonDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     std::array<DataType, 4> supportedInputTypes =
     {
@@ -325,7 +324,7 @@
                                         const ConcatDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     bool supported = true;
     std::array<DataType,5> supportedTypes =
@@ -475,7 +474,7 @@
         supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
                                       "Reference Convolution2d: biases is not a supported type.");
     }
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     return supported;
 }
@@ -514,7 +513,7 @@
                                               const DepthToSpaceDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     bool supported = true;
 
     std::array<DataType,4> supportedTypes =
@@ -602,7 +601,7 @@
         supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
                                       "Reference DepthwiseConvolution2d: biases is not a supported type.");
     }
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     return supported;
 
@@ -655,7 +654,7 @@
                                                       const DetectionPostProcessDescriptor& descriptor,
                                                       Optional<std::string&> reasonIfUnsupported) const
 {
-    boost::ignore_unused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
+    IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
 
     bool supported = true;
 
@@ -725,7 +724,7 @@
                                                   const ElementwiseUnaryDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     std::array<DataType, 4> supportedTypes =
     {
@@ -769,7 +768,7 @@
                                                   const FakeQuantizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     bool supported = true;
 
     std::array<DataType,1> supportedTypes =
@@ -787,7 +786,7 @@
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(output);
+    IgnoreUnused(output);
     bool supported = true;
 
     std::array<DataType,3> supportedTypes =
@@ -916,7 +915,7 @@
                                                        const InstanceNormalizationDescriptor& descriptor,
                                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     // Define supported types
     std::array<DataType, 4> supportedTypes =
         {
@@ -947,7 +946,7 @@
                                                  const L2NormalizationDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     // Define supported types
     std::array<DataType, 4> supportedTypes =
     {
@@ -980,7 +979,7 @@
                                             const LogSoftmaxDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     std::array<DataType, 2> supportedTypes =
     {
@@ -1012,8 +1011,8 @@
                                       const LstmInputParamsInfo& paramsInfo,
                                       Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
-    ignore_unused(paramsInfo);
+    IgnoreUnused(descriptor);
+    IgnoreUnused(paramsInfo);
 
     bool supported = true;
 
@@ -1319,7 +1318,7 @@
                                                const NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     // Define supported types
     std::array<DataType, 4> supportedTypes =
@@ -1356,7 +1355,7 @@
                                      const PadDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     bool supported = true;
 
     // Define supported output and input types.
@@ -1385,7 +1384,7 @@
                                          const PermuteDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     bool supported = true;
 
     // Define supported output and input types.
@@ -1414,7 +1413,7 @@
                                            const Pooling2dDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     bool supported = true;
 
     // Define supported output and input types.
@@ -1479,8 +1478,8 @@
                                          const ReshapeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(output);
-    ignore_unused(descriptor);
+    IgnoreUnused(output);
+    IgnoreUnused(descriptor);
     // Define supported output types.
     std::array<DataType,7> supportedOutputTypes =
     {
@@ -1526,7 +1525,7 @@
                                         const ResizeDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     bool supported = true;
     std::array<DataType,5> supportedTypes =
     {
@@ -1564,7 +1563,7 @@
                                        const SliceDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     bool supported = true;
 
     std::array<DataType, 3> supportedTypes =
@@ -1591,7 +1590,7 @@
                                          const SoftmaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     bool supported = true;
     std::array<DataType,6> supportedTypes =
     {
@@ -1620,7 +1619,7 @@
                                                 const SpaceToBatchNdDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     bool supported = true;
     std::array<DataType,4> supportedTypes =
     {
@@ -1648,7 +1647,7 @@
                                               Optional<std::string&> reasonIfUnsupported) const
 {
 
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     bool supported = true;
 
     std::array<DataType,4> supportedTypes =
@@ -1675,7 +1674,7 @@
                                           const ViewsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     bool supported = true;
     std::array<DataType,4> supportedTypes =
     {
@@ -1696,7 +1695,7 @@
                                           const ViewsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     bool supported = true;
     std::array<DataType,4> supportedTypes =
     {
@@ -1725,7 +1724,7 @@
                                        const StackDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
 
     bool supported = true;
     std::array<DataType,4> supportedTypes =
@@ -1756,7 +1755,7 @@
                                               const StridedSliceDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     bool supported = true;
 
     std::array<DataType,3> supportedTypes =
@@ -1853,7 +1852,7 @@
                                                         const Optional<TensorInfo>& biases,
                                                         Optional<std::string&> reasonIfUnsupported) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     bool supported = true;
 
     std::array<DataType,4> supportedTypes =
@@ -1919,7 +1918,7 @@
                                            const TransposeDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     bool supported = true;
 
     // Define supported output and input types.
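The CheckSupportRule/TypeAnyOf idiom that recurs through this file accumulates predicate results into 'supported' and appends a reason on each failure. A toy, self-contained model of that idiom (every name below is a simplified stand-in; the real rules live in backendsCommon/LayerSupportRules.hpp):

    #include <array>
    #include <iostream>
    #include <string>

    enum class DataType { Float16, Float32, QAsymmU8 };

    struct Rule { bool m_Res = false; };

    struct TypeAnyOf : Rule // passes if the actual type is in the allowed set
    {
        template <std::size_t N>
        TypeAnyOf(DataType actual, const std::array<DataType, N>& allowed)
        {
            for (DataType t : allowed) { m_Res = m_Res || (t == actual); }
        }
    };

    bool CheckSupportRule(const Rule& rule, std::string& reason, const char* message)
    {
        if (!rule.m_Res) { reason.append(message).append("\n"); }
        return rule.m_Res;
    }

    int main()
    {
        const std::array<DataType, 2> supportedTypes = { DataType::Float32, DataType::QAsymmU8 };
        std::string reason;
        bool supported = true;
        supported &= CheckSupportRule(TypeAnyOf(DataType::Float16, supportedTypes), reason,
                                      "Reference layer: input is not a supported type.");
        std::cout << (supported ? "supported" : reason);
        return 0;
    }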
diff --git a/src/backends/reference/RefTensorHandleFactory.cpp b/src/backends/reference/RefTensorHandleFactory.cpp
index c97a779..d687c78 100644
--- a/src/backends/reference/RefTensorHandleFactory.cpp
+++ b/src/backends/reference/RefTensorHandleFactory.cpp
@@ -6,7 +6,7 @@
 #include "RefTensorHandleFactory.hpp"
 #include "RefTensorHandle.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 namespace armnn
 {
@@ -23,7 +23,7 @@
                                                                              TensorShape const& subTensorShape,
                                                                              unsigned int const* subTensorOrigin) const
 {
-    boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+    IgnoreUnused(parent, subTensorShape, subTensorOrigin);
     return nullptr;
 }
 
@@ -35,7 +35,7 @@
 std::unique_ptr<ITensorHandle> RefTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                           DataLayout dataLayout) const
 {
-    boost::ignore_unused(dataLayout);
+    IgnoreUnused(dataLayout);
     return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager, m_ImportFlags);
 }
 
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 2a415bf..52d71df 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -102,7 +102,7 @@
 {
     // For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
     // to unmanaged memory. This also ensures memory alignment.
-    boost::ignore_unused(isMemoryManaged);
+    IgnoreUnused(isMemoryManaged);
     return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
 }
 
@@ -112,14 +112,14 @@
 {
     // For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
     // to unmanaged memory. This also ensures memory alignment.
-    boost::ignore_unused(isMemoryManaged, dataLayout);
+    IgnoreUnused(isMemoryManaged, dataLayout);
     return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
 }
 
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
     elementwiseUnaryDescriptor.m_Parameters.m_Operation = UnaryOperation::Abs;
 
@@ -267,7 +267,7 @@
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     ComparisonQueueDescriptor comparisonDescriptor;
     comparisonDescriptor.m_Parameters.m_Operation = ComparisonOperation::Equal;
 
@@ -303,7 +303,7 @@
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     ComparisonQueueDescriptor comparisonDescriptor;
     comparisonDescriptor.m_Parameters.m_Operation = ComparisonOperation::Greater;
 
@@ -506,7 +506,7 @@
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
-    boost::ignore_unused(descriptor);
+    IgnoreUnused(descriptor);
     ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
     elementwiseUnaryDescriptor.m_Parameters.m_Operation = UnaryOperation::Rsqrt;
 
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 030ce6f..b64479e 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -4,12 +4,11 @@
 //
 #pragma once
 
-#include <armnn/Optional.hpp>
-#include <backendsCommon/WorkloadFactory.hpp>
-
 #include "RefMemoryManager.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/Optional.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
 
 
 namespace armnn
@@ -48,7 +47,7 @@
                                                          TensorShape const& subTensorShape,
                                                          unsigned int const* subTensorOrigin) const override
     {
-        boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+        IgnoreUnused(parent, subTensorShape, subTensorOrigin);
         return nullptr;
     }
 
diff --git a/src/backends/reference/test/RefWorkloadFactoryHelper.hpp b/src/backends/reference/test/RefWorkloadFactoryHelper.hpp
index 10e5b9f..30d2037 100644
--- a/src/backends/reference/test/RefWorkloadFactoryHelper.hpp
+++ b/src/backends/reference/test/RefWorkloadFactoryHelper.hpp
@@ -25,7 +25,7 @@
     static armnn::RefWorkloadFactory GetFactory(
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr)
     {
-        boost::ignore_unused(memoryManager);
+        IgnoreUnused(memoryManager);
         return armnn::RefWorkloadFactory();
     }
 };
diff --git a/src/backends/reference/workloads/ArgMinMax.cpp b/src/backends/reference/workloads/ArgMinMax.cpp
index db85b95..637aa17 100644
--- a/src/backends/reference/workloads/ArgMinMax.cpp
+++ b/src/backends/reference/workloads/ArgMinMax.cpp
@@ -15,7 +15,7 @@
 void ArgMinMax(Decoder<float>& in, int32_t* out, const TensorInfo& inputTensorInfo,
                const TensorInfo& outputTensorInfo, ArgMinMaxFunction function, int axis)
 {
-    boost::ignore_unused(outputTensorInfo);
+    IgnoreUnused(outputTensorInfo);
 
     unsigned int uAxis = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);
 
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index 3f01446..5cae5bd 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -5,14 +5,13 @@
 
 #pragma once
 
-
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/TypesUtils.hpp>
 #include <armnnUtils/FloatingPointConverter.hpp>
 
 #include <ResolveType.hpp>
 
 #include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
 
 namespace armnn
 {
@@ -107,7 +106,7 @@
 
     TypedIterator& SetIndex(unsigned int index, unsigned int axisIndex = 0) override
     {
-        boost::ignore_unused(axisIndex);
+        IgnoreUnused(axisIndex);
         BOOST_ASSERT(m_Iterator);
         m_Iterator = m_Start + index;
         return *this;
diff --git a/src/backends/reference/workloads/Dequantize.cpp b/src/backends/reference/workloads/Dequantize.cpp
index 4025e8d..63c0405 100644
--- a/src/backends/reference/workloads/Dequantize.cpp
+++ b/src/backends/reference/workloads/Dequantize.cpp
@@ -5,7 +5,8 @@
 
 #include "Dequantize.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
 namespace armnn
 {
 
@@ -14,7 +15,7 @@
                 const TensorInfo& inputInfo,
                 const TensorInfo& outputInfo)
 {
-    boost::ignore_unused(outputInfo);
+    IgnoreUnused(outputInfo);
     BOOST_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
     for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
     {
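Dequantize is one of several spots in these workloads where a value is read only inside an assertion: BOOST_ASSERT compiles away in release builds, leaving the parameter unused, so IgnoreUnused keeps the warning down in both build flavours. A self-contained illustration using the standard assert macro:

    #include <cassert>

    template <typename... Ts>
    inline void IgnoreUnused(Ts&&...) {} // stand-in for armnn::IgnoreUnused

    // 'expected' is only read inside the assert, which vanishes under NDEBUG;
    // without IgnoreUnused a release build warns about an unused parameter.
    int Halve(int value, int expected)
    {
        IgnoreUnused(expected);
        assert(value / 2 == expected); // compiled out under NDEBUG
        return value / 2;
    }

    int main()
    {
        return Halve(8, 4) == 4 ? 0 : 1;
    }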
diff --git a/src/backends/reference/workloads/DetectionPostProcess.cpp b/src/backends/reference/workloads/DetectionPostProcess.cpp
index 96e5780..57cf01e 100644
--- a/src/backends/reference/workloads/DetectionPostProcess.cpp
+++ b/src/backends/reference/workloads/DetectionPostProcess.cpp
@@ -154,7 +154,7 @@
                           float* detectionScores,
                           float* numDetections)
 {
-    boost::ignore_unused(anchorsInfo, detectionClassesInfo, detectionScoresInfo, numDetectionsInfo);
+    IgnoreUnused(anchorsInfo, detectionClassesInfo, detectionScoresInfo, numDetectionsInfo);
 
     // Transform center-size format which is (ycenter, xcenter, height, width) to box-corner format,
     // which represents the lower left corner and the upper right corner (ymin, xmin, ymax, xmax)
diff --git a/src/backends/reference/workloads/Gather.cpp b/src/backends/reference/workloads/Gather.cpp
index 5416855..4cf3a14 100644
--- a/src/backends/reference/workloads/Gather.cpp
+++ b/src/backends/reference/workloads/Gather.cpp
@@ -8,8 +8,8 @@
 #include "RefWorkloadUtils.hpp"
 
 #include <backendsCommon/WorkloadData.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/core/ignore_unused.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 namespace armnn
@@ -22,7 +22,7 @@
             const int32_t* indices,
             Encoder<float>& output)
 {
-    boost::ignore_unused(outputInfo);
+    IgnoreUnused(outputInfo);
     const TensorShape& paramsShape = paramsInfo.GetShape();
 
     unsigned int paramsProduct = 1;
diff --git a/src/backends/reference/workloads/LogSoftmax.cpp b/src/backends/reference/workloads/LogSoftmax.cpp
index ddf5674..103d62a 100644
--- a/src/backends/reference/workloads/LogSoftmax.cpp
+++ b/src/backends/reference/workloads/LogSoftmax.cpp
@@ -6,11 +6,11 @@
 #include "LogSoftmax.hpp"
 
 #include <armnnUtils/TensorUtils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <cmath>
 
 #include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 namespace
@@ -37,7 +37,7 @@
     bool axisIsValid = ValidateAxis(descriptor.m_Axis, numDimensions);
     BOOST_ASSERT_MSG(axisIsValid,
         "Axis index is not in range [-numDimensions, numDimensions).");
-    boost::ignore_unused(axisIsValid);
+    IgnoreUnused(axisIsValid);
 
     unsigned int uAxis = descriptor.m_Axis < 0  ?
         numDimensions - boost::numeric_cast<unsigned int>(std::abs(descriptor.m_Axis)) :
diff --git a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
index 8bb1670..bfd3c28 100644
--- a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
+++ b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
@@ -28,7 +28,7 @@
     DataType outputDataType = outputInfo.GetDataType();
 
     BOOST_ASSERT(inputDataType == outputDataType);
-    boost::ignore_unused(outputDataType);
+    IgnoreUnused(outputDataType);
 
     StridedSlice(inputInfo,
                  m_Data.m_Parameters,
diff --git a/src/backends/reference/workloads/Slice.cpp b/src/backends/reference/workloads/Slice.cpp
index c7ca3b1..0223cdc 100644
--- a/src/backends/reference/workloads/Slice.cpp
+++ b/src/backends/reference/workloads/Slice.cpp
@@ -5,8 +5,9 @@
 
 #include "Slice.hpp"
 
+#include <armnn/utility/IgnoreUnused.hpp>
+
 #include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 namespace armnn
@@ -72,7 +73,7 @@
     const unsigned char* input = reinterpret_cast<const unsigned char*>(inputData);
     unsigned char* output      = reinterpret_cast<unsigned char*>(outputData);
 
-    boost::ignore_unused(dim0);
+    IgnoreUnused(dim0);
     for (unsigned int idx0 = begin0; idx0 < begin0 + size0; ++idx0)
     {
         for (unsigned int idx1 = begin1; idx1 < begin1 + size1; ++idx1)
diff --git a/src/dynamic/sample/SampleDynamicWorkloadFactory.hpp b/src/dynamic/sample/SampleDynamicWorkloadFactory.hpp
index 88b6798..86e6555 100644
--- a/src/dynamic/sample/SampleDynamicWorkloadFactory.hpp
+++ b/src/dynamic/sample/SampleDynamicWorkloadFactory.hpp
@@ -33,7 +33,7 @@
                                                          TensorShape const& subTensorShape,
                                                          unsigned int const* subTensorOrigin) const override
     {
-        boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+        IgnoreUnused(parent, subTensorShape, subTensorOrigin);
         return nullptr;
     }
 
diff --git a/src/profiling/CommandHandlerFunctor.hpp b/src/profiling/CommandHandlerFunctor.hpp
index ea76d10..743bb93 100644
--- a/src/profiling/CommandHandlerFunctor.hpp
+++ b/src/profiling/CommandHandlerFunctor.hpp
@@ -3,11 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
+#pragma once
+
 #include "Packet.hpp"
 
-#include <cstdint>
+#include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/core/ignore_unused.hpp>
+#include <cstdint>
 
 namespace armnn
 {
@@ -15,8 +17,6 @@
 namespace profiling
 {
 
-#pragma once
-
 class CommandHandlerFunctor
 {
 public:
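Besides the include swap, this hunk hoists #pragma once out of the profiling namespace body, where it had drifted, up to the conventional top-of-file position. The resulting header layout, abbreviated (class body elided):

    #pragma once                          // now the first directive in the header

    #include "Packet.hpp"

    #include <armnn/utility/IgnoreUnused.hpp>

    #include <cstdint>

    namespace armnn
    {
    namespace profiling
    {

    class CommandHandlerFunctor { /* ... */ };

    } // namespace profiling
    } // namespace armnn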
diff --git a/src/profiling/CounterDirectory.cpp b/src/profiling/CounterDirectory.cpp
index 052e452..c84da10 100644
--- a/src/profiling/CounterDirectory.cpp
+++ b/src/profiling/CounterDirectory.cpp
@@ -8,8 +8,8 @@
 
 #include <armnn/Exceptions.hpp>
 #include <armnn/Conversion.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/core/ignore_unused.hpp>
 #include <boost/format.hpp>
 
 namespace armnn
@@ -191,7 +191,7 @@
                                                  const Optional<uint16_t>& deviceUid,
                                                  const Optional<uint16_t>& counterSetUid)
 {
-    boost::ignore_unused(backendId);
+    IgnoreUnused(backendId);
 
     // Check that the given parent category name is valid
     if (parentCategoryName.empty() ||
diff --git a/src/profiling/FileOnlyProfilingConnection.cpp b/src/profiling/FileOnlyProfilingConnection.cpp
index 1db8030..83229ca 100644
--- a/src/profiling/FileOnlyProfilingConnection.cpp
+++ b/src/profiling/FileOnlyProfilingConnection.cpp
@@ -40,7 +40,7 @@
 
 bool FileOnlyProfilingConnection::WaitForStreamMeta(const unsigned char* buffer, uint32_t length)
 {
-    boost::ignore_unused(length);
+    IgnoreUnused(length);
 
     // The first word, stream_metadata_identifier, should always be 0.
     if (ToUint32(buffer, TargetEndianness::BeWire) != 0)
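The check above reads the first 32-bit word of the buffer in big-endian wire order; ToUint32 and TargetEndianness::BeWire are the real names, and the helper below is a hypothetical equivalent showing the decode:

    #include <cstdint>
    #include <cstdio>

    uint32_t ToUint32BigEndian(const unsigned char* buffer) // hypothetical helper
    {
        return (static_cast<uint32_t>(buffer[0]) << 24) |
               (static_cast<uint32_t>(buffer[1]) << 16) |
               (static_cast<uint32_t>(buffer[2]) << 8)  |
                static_cast<uint32_t>(buffer[3]);
    }

    int main()
    {
        const unsigned char word[4] = { 0, 0, 0, 0 }; // stream_metadata_identifier word
        std::printf("%s\n", ToUint32BigEndian(word) == 0 ? "stream meta" : "other");
        return 0;
    }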
diff --git a/src/profiling/PacketVersionResolver.cpp b/src/profiling/PacketVersionResolver.cpp
index 869f09e..2c75067 100644
--- a/src/profiling/PacketVersionResolver.cpp
+++ b/src/profiling/PacketVersionResolver.cpp
@@ -5,7 +5,7 @@
 
 #include "PacketVersionResolver.hpp"
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 namespace armnn
 {
@@ -54,7 +54,7 @@
 
 Version PacketVersionResolver::ResolvePacketVersion(uint32_t familyId, uint32_t packetId) const
 {
-    boost::ignore_unused(familyId, packetId);
+    IgnoreUnused(familyId, packetId);
     // NOTE: For now every packet specification is at version 1.0.0
     return Version(1, 0, 0);
 }
diff --git a/src/profiling/ProfilingStateMachine.hpp b/src/profiling/ProfilingStateMachine.hpp
index 160de71..cbc65ec 100644
--- a/src/profiling/ProfilingStateMachine.hpp
+++ b/src/profiling/ProfilingStateMachine.hpp
@@ -7,7 +7,7 @@
 
 #include <atomic>
 
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 namespace armnn
 {
@@ -35,7 +35,7 @@
 
     bool IsOneOfStates(ProfilingState state1)
     {
-        boost::ignore_unused(state1);
+        IgnoreUnused(state1);
         return false;
     }
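The single-argument IsOneOfStates above is the terminator of a variadic recursion: it is reached once the candidate state has matched none of the listed states, so the argument is deliberately ignored and false returned. A self-contained reconstruction of the pattern (the enum values are assumptions based on profiling states named elsewhere):

    #include <iostream>

    enum class ProfilingState { Uninitialised, NotConnected, WaitingForAck, Active };

    // Base case: the list of candidate states is exhausted (the real code
    // names the parameter and passes it through IgnoreUnused instead).
    bool IsOneOfStates(ProfilingState) { return false; }

    template <typename... Args>
    bool IsOneOfStates(ProfilingState current, ProfilingState first, Args... rest)
    {
        return current == first || IsOneOfStates(current, rest...);
    }

    int main()
    {
        const auto s = ProfilingState::Active;
        std::cout << IsOneOfStates(s, ProfilingState::NotConnected, ProfilingState::Active)
                  << std::endl; // prints 1
        return 0;
    }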
 
diff --git a/src/profiling/SendCounterPacket.cpp b/src/profiling/SendCounterPacket.cpp
index b8ef189..942ccc7 100644
--- a/src/profiling/SendCounterPacket.cpp
+++ b/src/profiling/SendCounterPacket.cpp
@@ -9,10 +9,10 @@
 #include <armnn/Exceptions.hpp>
 #include <armnn/Conversion.hpp>
 #include <Processes.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <boost/format.hpp>
 #include <boost/numeric/conversion/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
 
 #include <cstring>
 
diff --git a/src/profiling/SendThread.cpp b/src/profiling/SendThread.cpp
index 0318a74..5962f2f 100644
--- a/src/profiling/SendThread.cpp
+++ b/src/profiling/SendThread.cpp
@@ -13,7 +13,6 @@
 
 #include <boost/format.hpp>
 #include <boost/numeric/conversion/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
 
 #include <cstring>
 
diff --git a/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp b/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp
index 77d4d68..7db42de 100644
--- a/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp
+++ b/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp
@@ -8,8 +8,8 @@
 #include <ProfilingService.hpp>
 #include <Runtime.hpp>
 #include <Filesystem.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/core/ignore_unused.hpp>
 #include <boost/filesystem.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 #include <boost/test/unit_test.hpp>
diff --git a/src/profiling/test/ProfilingConnectionDumpToFileDecoratorTests.cpp b/src/profiling/test/ProfilingConnectionDumpToFileDecoratorTests.cpp
index 6a09281..c368678 100644
--- a/src/profiling/test/ProfilingConnectionDumpToFileDecoratorTests.cpp
+++ b/src/profiling/test/ProfilingConnectionDumpToFileDecoratorTests.cpp
@@ -5,11 +5,11 @@
 
 #include "../ProfilingConnectionDumpToFileDecorator.hpp"
 #include <Runtime.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <fstream>
 #include <sstream>
 
-#include <boost/core/ignore_unused.hpp>
 #include <boost/filesystem.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 #include <boost/test/unit_test.hpp>
@@ -49,14 +49,14 @@
 
     bool WritePacket(const unsigned char* buffer, uint32_t length) override
     {
-        boost::ignore_unused(buffer);
-        boost::ignore_unused(length);
+        armnn::IgnoreUnused(buffer);
+        armnn::IgnoreUnused(length);
         return true;
     }
 
     Packet ReadPacket(uint32_t timeout) override
     {
-        boost::ignore_unused(timeout);
+        armnn::IgnoreUnused(timeout);
         return std::move(*m_Packet);
     }
 
diff --git a/src/profiling/test/ProfilingMocks.hpp b/src/profiling/test/ProfilingMocks.hpp
index 19aad49..944aea6 100644
--- a/src/profiling/test/ProfilingMocks.hpp
+++ b/src/profiling/test/ProfilingMocks.hpp
@@ -16,9 +16,9 @@
 #include <armnn/Exceptions.hpp>
 #include <armnn/Optional.hpp>
 #include <armnn/Conversion.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 #include <atomic>
@@ -128,7 +128,7 @@
 
     Packet ReadPacket(uint32_t timeout) override
     {
-        boost::ignore_unused(timeout);
+        IgnoreUnused(timeout);
 
         // Simulate a delay in the reading process. The default timeout is way too long.
         std::this_thread::sleep_for(std::chrono::milliseconds(5));
@@ -162,7 +162,7 @@
 public:
     IProfilingConnectionPtr GetProfilingConnection(const ExternalProfilingOptions& options) const override
     {
-        boost::ignore_unused(options);
+        IgnoreUnused(options);
         return std::make_unique<MockProfilingConnection>();
     }
 };
@@ -399,7 +399,7 @@
 
     void SendCounterDirectoryPacket(const ICounterDirectory& counterDirectory) override
     {
-        boost::ignore_unused(counterDirectory);
+        IgnoreUnused(counterDirectory);
 
         std::string message("SendCounterDirectoryPacket");
         unsigned int reserved = 0;
@@ -411,7 +411,7 @@
     void SendPeriodicCounterCapturePacket(uint64_t timestamp,
                                           const std::vector<CounterValue>& values) override
     {
-        boost::ignore_unused(timestamp, values);
+        IgnoreUnused(timestamp, values);
 
         std::string message("SendPeriodicCounterCapturePacket");
         unsigned int reserved = 0;
@@ -423,7 +423,7 @@
     void SendPeriodicCounterSelectionPacket(uint32_t capturePeriod,
                                             const std::vector<uint16_t>& selectedCounterIds) override
     {
-        boost::ignore_unused(capturePeriod, selectedCounterIds);
+        IgnoreUnused(capturePeriod, selectedCounterIds);
 
         std::string message("SendPeriodicCounterSelectionPacket");
         unsigned int reserved = 0;
@@ -513,7 +513,7 @@
                                    const armnn::Optional<uint16_t>& deviceUid = armnn::EmptyOptional(),
                                    const armnn::Optional<uint16_t>& counterSetUid = armnn::EmptyOptional())
     {
-        boost::ignore_unused(backendId);
+        IgnoreUnused(backendId);
 
         // Get the number of cores from the argument only
         uint16_t deviceCores = numberOfCores.has_value() ? numberOfCores.value() : 0;
@@ -597,19 +597,19 @@
 
     const Device* GetDevice(uint16_t uid) const override
     {
-        boost::ignore_unused(uid);
+        IgnoreUnused(uid);
         return nullptr; // Not used by the unit tests
     }
 
     const CounterSet* GetCounterSet(uint16_t uid) const override
     {
-        boost::ignore_unused(uid);
+        IgnoreUnused(uid);
         return nullptr; // Not used by the unit tests
     }
 
     const Counter* GetCounter(uint16_t uid) const override
     {
-        boost::ignore_unused(uid);
+        IgnoreUnused(uid);
         return nullptr; // Not used by the unit tests
     }
 
diff --git a/src/profiling/test/ProfilingTests.cpp b/src/profiling/test/ProfilingTests.cpp
index 3dab93d..0e91696 100644
--- a/src/profiling/test/ProfilingTests.cpp
+++ b/src/profiling/test/ProfilingTests.cpp
@@ -34,6 +34,7 @@
 #include <armnn/Types.hpp>
 
 #include <armnn/Utils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <boost/algorithm/string.hpp>
 #include <boost/numeric/conversion/cast.hpp>
@@ -1697,7 +1698,7 @@
     {
         bool IsCounterRegistered(uint16_t counterUid) const override
         {
-            boost::ignore_unused(counterUid);
+            armnn::IgnoreUnused(counterUid);
             return true;
         }
         uint16_t GetCounterCount() const override
@@ -1706,7 +1707,7 @@
         }
         uint32_t GetCounterValue(uint16_t counterUid) const override
         {
-            boost::ignore_unused(counterUid);
+            armnn::IgnoreUnused(counterUid);
             return 0;
         }
     };
@@ -2092,7 +2093,7 @@
         //not used
         bool IsCounterRegistered(uint16_t counterUid) const override
         {
-            boost::ignore_unused(counterUid);
+            armnn::IgnoreUnused(counterUid);
             return false;
         }
 
diff --git a/src/profiling/test/ProfilingTests.hpp b/src/profiling/test/ProfilingTests.hpp
index 8b4bc84..0081103 100644
--- a/src/profiling/test/ProfilingTests.hpp
+++ b/src/profiling/test/ProfilingTests.hpp
@@ -77,7 +77,7 @@
 
     bool WritePacket(const unsigned char* buffer, uint32_t length) override
     {
-        boost::ignore_unused(buffer, length);
+        IgnoreUnused(buffer, length);
 
         return false;
     }
@@ -139,7 +139,7 @@
 
     Packet ReadPacket(uint32_t timeout) override
     {
-        boost::ignore_unused(timeout);
+        IgnoreUnused(timeout);
         ++m_ReadRequests;
         throw armnn::Exception("Simulate a non-timeout error");
     }
@@ -158,7 +158,7 @@
 public:
     Packet ReadPacket(uint32_t timeout) override
     {
-        boost::ignore_unused(timeout);
+        IgnoreUnused(timeout);
         // Connection Acknowledged Packet header (word 0, word 1 is always zero):
         // 26:31 [6]  packet_family: Control Packet Family, value 0b000000
         // 16:25 [10] packet_id: Packet identifier, value 0b0000000001
@@ -181,7 +181,7 @@
 
     void operator()(const Packet& packet) override
     {
-        boost::ignore_unused(packet);
+        IgnoreUnused(packet);
         m_Count++;
     }
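The bit layout spelled out in the ReadPacket comment above packs mechanically into the header word: bits 26:31 carry packet_family and bits 16:25 carry packet_id, so word 0 of the Connection Acknowledged packet is 0x00010000. A self-contained sketch (the helper name is hypothetical):

    #include <cstdint>
    #include <cstdio>

    uint32_t MakeControlHeaderWord0(uint32_t family, uint32_t id)
    {
        return ((family & 0x3F) << 26) | ((id & 0x3FF) << 16);
    }

    int main()
    {
        std::printf("0x%08X\n", MakeControlHeaderWord0(0, 1)); // prints 0x00010000
        return 0;
    }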
 
diff --git a/src/profiling/test/SendCounterPacketTests.hpp b/src/profiling/test/SendCounterPacketTests.hpp
index 8b46ed1..7a5f796 100644
--- a/src/profiling/test/SendCounterPacketTests.hpp
+++ b/src/profiling/test/SendCounterPacketTests.hpp
@@ -13,9 +13,9 @@
 #include <armnn/Exceptions.hpp>
 #include <armnn/Optional.hpp>
 #include <armnn/Conversion.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 #include <atomic>