IVGCVSW-4246 Clean build of Visitors with -Wextra
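
Building the visitor implementations with -Wextra enables
-Wunused-parameter, which warns on every visitor override that takes
arguments (layer name, descriptor, binding id, ...) only to satisfy the
visitor interface. The warnings are silenced in two ways: production
visitors and most test visitors pass the unused arguments to
boost::ignore_unused, while a handful of test-only overrides simply drop
the parameter names.

A minimal sketch of both techniques (not code from this patch; the
Visitor struct is hypothetical and int* stands in for
armnn::IConnectableLayer* so the snippet is self-contained):

    #include <boost/core/ignore_unused.hpp>

    struct Visitor
    {
        const int* m_LastLayer = nullptr;

        // 1) Keep the parameter names for readability and hand the unused
        //    ones to boost::ignore_unused, which does nothing with its
        //    arguments but makes the compiler treat them as used.
        void VisitAdditionLayer(const int* layer, const char* name)
        {
            boost::ignore_unused(name); // part of the interface, unused here
            m_LastLayer = layer;        // 'layer' is still genuinely used
        }

        // 2) Omit the parameter names entirely, as done in some test-only
        //    overrides (e.g. in SerializerTests.cpp).
        void VisitOutputLayer(const int*, int, const char*)
        {
        }
    };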

Change-Id: Icb1b35ff55fa22103777853e6f49fc282d61750d
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
diff --git a/src/armnn/DynamicQuantizationVisitor.cpp b/src/armnn/DynamicQuantizationVisitor.cpp
index d4e0c90..ba87c6d 100644
--- a/src/armnn/DynamicQuantizationVisitor.cpp
+++ b/src/armnn/DynamicQuantizationVisitor.cpp
@@ -84,6 +84,7 @@
 
 void DynamicQuantizationVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
 {
+    boost::ignore_unused(name);
     SetRange(layer, 0, -20.f, 20.f);
     AddToCalibratedLayers(layer);
 }
@@ -138,6 +139,7 @@
                                                       const ActivationDescriptor& activationDescriptor,
                                                       const char* name)
 {
+    boost::ignore_unused(name);
     switch (activationDescriptor.m_Function)
     {
         // Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
@@ -246,6 +248,8 @@
                                                   const ConcatDescriptor& originsDescriptor,
                                                   const char* name)
 {
+    boost::ignore_unused(name);
+    boost::ignore_unused(originsDescriptor);
     float min = std::numeric_limits<float>::max();
     float max = std::numeric_limits<float>::lowest();
     for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
diff --git a/src/armnn/OverrideInputRangeVisitor.cpp b/src/armnn/OverrideInputRangeVisitor.cpp
index 058e630..d047c5b 100644
--- a/src/armnn/OverrideInputRangeVisitor.cpp
+++ b/src/armnn/OverrideInputRangeVisitor.cpp
@@ -8,6 +8,7 @@
 #include "Layer.hpp"
 
 #include <boost/assert.hpp>
+#include <boost/core/ignore_unused.hpp>
 
 namespace armnn
 {
@@ -22,6 +23,7 @@
 
 void OverrideInputRangeVisitor::VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
 {
+    boost::ignore_unused(name);
     if (m_LayerId == id)
     {
         m_Ranges.SetRange(layer, 0, m_MinMaxRange.first, m_MinMaxRange.second);
diff --git a/src/armnn/StaticRangeVisitor.cpp b/src/armnn/StaticRangeVisitor.cpp
index 94f0a01..81428c1 100644
--- a/src/armnn/StaticRangeVisitor.cpp
+++ b/src/armnn/StaticRangeVisitor.cpp
@@ -31,6 +31,7 @@
 
 void StaticRangeVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
 {
+    boost::ignore_unused(name);
     SetRange(layer, 0, -20.f, 20.f);
 }
 
@@ -81,6 +82,7 @@
                                               const ActivationDescriptor& activationDescriptor,
                                               const char* name)
 {
+    boost::ignore_unused(name);
     switch (activationDescriptor.m_Function)
     {
         // Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
@@ -141,6 +143,7 @@
                                              const char* name)
 {
     boost::ignore_unused(pooling2dDescriptor);
+    boost::ignore_unused(name);
     ForwardParentParameters(layer);
 }
 
@@ -149,6 +152,7 @@
                                            const char* name)
 {
     boost::ignore_unused(softmaxDescriptor);
+    boost::ignore_unused(name);
     SetRange(layer, 0, 0.f, 1.f);
 }
 
@@ -156,6 +160,8 @@
                                           const OriginsDescriptor& originsDescriptor,
                                           const char* name)
 {
+    boost::ignore_unused(originsDescriptor);
+    boost::ignore_unused(name);
     float min = std::numeric_limits<float>::max();
     float max = std::numeric_limits<float>::lowest();
     for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 37b3bfa..e147a84 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -19,6 +19,7 @@
 #include "../RangeTracker.hpp"
 #include "../../armnnQuantizer/CommandLineProcessor.hpp"
 
+#include <boost/core/ignore_unused.hpp>
 #include <boost/test/unit_test.hpp>
 
 #include <unordered_map>
@@ -55,6 +56,7 @@
                          LayerBindingId id,
                          const char* name = nullptr) override
     {
+        boost::ignore_unused(id, name);
         const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
         BOOST_TEST(m_InputShape == info.GetShape());
         // Based off current default [-15.0f, 15.0f]
@@ -67,6 +69,7 @@
                           LayerBindingId id,
                           const char* name = nullptr) override
     {
+        boost::ignore_unused(id, name);
         const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
         BOOST_TEST(m_OutputShape == info.GetShape());
     }
@@ -105,6 +108,7 @@
                                         const OffsetScalePair& params,
                                         DataType dataType = DataType::QuantisedAsymm8)
     {
+        boost::ignore_unused(dataType);
         TestQuantizationParamsImpl(info, DataType::QuantisedAsymm8, params.first, params.second);
     }
 
@@ -191,6 +195,7 @@
     void VisitAdditionLayer(const IConnectableLayer* layer,
                             const char* name = nullptr) override
     {
+        boost::ignore_unused(name);
         TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
         // Based off default static range [-20.0f, 20.0f]
@@ -253,6 +258,8 @@
                               const ActivationDescriptor& descriptor,
                               const char* name = nullptr) override
     {
+        boost::ignore_unused(descriptor, name);
+
         TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
         // Based off default static range [0.0f, 15.0f]
@@ -353,6 +360,7 @@
                                       LayerBindingId id,
                                       const char* name = nullptr) override
         {
+            boost::ignore_unused(id, name);
             const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
             BOOST_CHECK_MESSAGE(info.GetDataType() == m_DataType,
                                 std::string(armnn::GetDataTypeName(info.GetDataType()))
@@ -489,6 +497,7 @@
                                   const ActivationDescriptor& descriptor,
                                   const char* name = nullptr) override
         {
+            boost::ignore_unused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off default static range [0.0f, 3.5f]
@@ -538,6 +547,7 @@
                                   const ActivationDescriptor& descriptor,
                                   const char* name = nullptr) override
         {
+            boost::ignore_unused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off default static range [-1.0f, 1.0f]
@@ -586,6 +596,7 @@
                               const ActivationDescriptor& descriptor,
                               const char* name = nullptr) override
     {
+        boost::ignore_unused(descriptor, name);
         TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
         // Based off default static range [-5.0f, 15.0f]
@@ -652,6 +663,7 @@
                                           const ConstTensor& gamma,
                                           const char* name = nullptr) override
         {
+            boost::ignore_unused(desc, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off default static range [-15.0f, 15.0f]
@@ -730,6 +742,7 @@
                                             const DepthToSpaceDescriptor& desc,
                                             const char* name = nullptr)
         {
+            boost::ignore_unused(desc, name);
             const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
 
             const OffsetScalePair qAsymm8Params{ 30.0f / g_Asymm8QuantizationBase, 128 };
@@ -914,6 +927,7 @@
                                       const Optional<ConstTensor>& biases,
                                       const char* name = nullptr) override
         {
+            boost::ignore_unused(desc, name);
             TestQuantizationOnLayersWithBiases(layer, weights, biases);
         }
     };
@@ -965,6 +979,7 @@
                                      const Optional<ConstTensor>& biases,
                                      const char *name = nullptr) override
         {
+            boost::ignore_unused(convolution2dDescriptor, name);
             TestQuantizationOnLayersWithBiases(layer, weights, biases);
         }
     };
@@ -1045,6 +1060,7 @@
                                               const Optional<ConstTensor>& biases,
                                               const char *name = nullptr) override
         {
+            boost::ignore_unused(convolution2dDescriptor, name);
             TestQuantizationOnLayersWithBiases(layer, weights, biases);
         }
     };
@@ -1123,6 +1139,7 @@
                                                      const InstanceNormalizationDescriptor& descriptor,
                                                      const char* name = nullptr)
         {
+            boost::ignore_unused(descriptor, name);
             const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
 
             const OffsetScalePair qAsymm8Params{ 30.0f / g_Asymm8QuantizationBase, 128 };
@@ -1183,6 +1200,7 @@
                                   const SoftmaxDescriptor& descriptor,
                                   const char* name = nullptr) override
         {
+            boost::ignore_unused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             const OffsetScalePair qAsymm8Params{ 30.0f / g_Asymm8QuantizationBase, 128 };
@@ -1267,6 +1285,7 @@
                                const SoftmaxDescriptor& descriptor,
                                const char* name = nullptr) override
         {
+            boost::ignore_unused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off default static range [0.0f, 1.0f]
@@ -1388,6 +1407,7 @@
                                const PermuteDescriptor& desc,
                                const char* name = nullptr) override
         {
+            boost::ignore_unused(desc, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -1437,6 +1457,7 @@
                                       const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                       const char* name = nullptr) override
         {
+            boost::ignore_unused(spaceToBatchNdDescriptor, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -1538,6 +1559,7 @@
                                  const Pooling2dDescriptor& desc,
                                  const char* name = nullptr) override
         {
+            boost::ignore_unused(desc, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -1601,6 +1623,7 @@
                                 const ConstTensor& input,
                                 const char* name = nullptr) override
         {
+            boost::ignore_unused(input, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             // Based off the range of values in the const tensor used for the test: [-2.0f, 6.0f]
@@ -1667,6 +1690,7 @@
         void VisitAbsLayer(const IConnectableLayer *layer,
                            const char *name = nullptr) override
         {
+            boost::ignore_unused(name);
             TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
 
             TestQuantizationParams(outputInfo,
@@ -1720,16 +1744,21 @@
         void VisitInputLayer(const IConnectableLayer* layer,
                              LayerBindingId id,
                              const char* name = nullptr) override
-        {}
+        {
+            boost::ignore_unused(layer, id, name);
+        }
 
         void VisitOutputLayer(const IConnectableLayer* layer,
                               LayerBindingId id,
                               const char* name = nullptr) override
-        {}
+        {
+            boost::ignore_unused(layer, id, name);
+        }
         void VisitArgMinMaxLayer(const IConnectableLayer* layer,
                                  const ArgMinMaxDescriptor& argMinMaxDescriptor,
                                  const char* name = nullptr) override
         {
+                boost::ignore_unused(argMinMaxDescriptor, name);
                 TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
 
                 TestQuantizationParams(outputInfo,
@@ -1798,6 +1827,7 @@
                                   const ComparisonDescriptor& descriptor,
                                   const char* name = nullptr) override
         {
+            boost::ignore_unused(descriptor, name);
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             const OffsetScalePair qAsymm8Params{ 30.0f / g_Asymm8QuantizationBase, 128 };
@@ -1860,15 +1890,20 @@
         void VisitInputLayer(const IConnectableLayer* layer,
                              LayerBindingId id,
                              const char* name = nullptr) override
-        {}
+        {
+            boost::ignore_unused(layer, id, name);
+        }
         void VisitOutputLayer(const IConnectableLayer* layer,
                               LayerBindingId id,
                               const char* name = nullptr) override
-        {}
+        {
+            boost::ignore_unused(layer, id, name);
+        }
         void VisitConcatLayer(const IConnectableLayer* layer,
                               const OriginsDescriptor& originsDescriptor,
                               const char* name = nullptr) override
         {
+            boost::ignore_unused(originsDescriptor, name);
             TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
 
             TestQuantizationParams(
@@ -1964,6 +1999,7 @@
                                        const ReshapeDescriptor& reshapeDescriptor,
                                        const char* name = nullptr) override
         {
+            boost::ignore_unused(reshapeDescriptor, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -2013,6 +2049,7 @@
                                         const SplitterDescriptor& desc,
                                         const char* name = nullptr)
         {
+            boost::ignore_unused(desc, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -2063,6 +2100,7 @@
                                       const ResizeDescriptor& resizeDescriptor,
                                       const char* name = nullptr) override
         {
+            boost::ignore_unused(resizeDescriptor, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -2114,6 +2152,7 @@
                                             const StridedSliceDescriptor& desc,
                                             const char* name = nullptr)
         {
+            boost::ignore_unused(desc, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -2163,6 +2202,7 @@
                                       const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                       const char* name = nullptr) override
         {
+            boost::ignore_unused(batchToSpaceNdDescriptor, name);
             CheckForwardedQuantizationSettings(layer);
         }
     };
@@ -2219,6 +2259,7 @@
                              LayerBindingId id,
                              const char* name = nullptr) override
         {
+            boost::ignore_unused(id, name);
             const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
 
             switch (id)
@@ -2244,6 +2285,7 @@
                               LayerBindingId id,
                               const char* name = nullptr) override
         {
+            boost::ignore_unused(id, name);
             const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
             BOOST_TEST(m_OutputShape == info.GetShape());
         }
@@ -2251,6 +2293,7 @@
         void VisitPreluLayer(const IConnectableLayer* layer,
                              const char* name = nullptr) override
         {
+            boost::ignore_unused(name);
             const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
             TestQuantizationParams(info,
                                    { 30.0f / g_Asymm8QuantizationBase, 128 }, // QASymm8
@@ -2327,6 +2370,7 @@
                                               const Optional<ConstTensor>& biases,
                                               const char *name = nullptr) override
         {
+            boost::ignore_unused(descriptor, name);
             TestQuantizationOnLayersWithBiases(layer, weights, biases);
         }
     };
@@ -2406,16 +2450,21 @@
         void VisitInputLayer(const IConnectableLayer* layer,
                              LayerBindingId id,
                              const char* name = nullptr) override
-        {}
+        {
+            boost::ignore_unused(layer, id, name);
+        }
         void VisitOutputLayer(const IConnectableLayer* layer,
                               LayerBindingId id,
                               const char* name = nullptr) override
-        {}
+        {
+            boost::ignore_unused(layer, id, name);
+        }
 
         void VisitStackLayer(const IConnectableLayer* layer,
                              const StackDescriptor& descriptor,
                              const char* name = nullptr) override
         {
+            boost::ignore_unused(descriptor, name);
             TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
 
             TestQuantizationParams(outputInfo,
@@ -2476,6 +2525,7 @@
                                      const SliceDescriptor& desc,
                                      const char* name = nullptr)
         {
+            boost::ignore_unused(desc, name);
             const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
 
             const OffsetScalePair qAsymm8Params{ 30.0f / g_Asymm8QuantizationBase, 128 };
@@ -2559,6 +2609,7 @@
                          LayerBindingId id,
                          const char* name = nullptr) override
     {
+        boost::ignore_unused(id, name);
         const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
         BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType));
         BOOST_TEST(m_InputShape == info.GetShape());
@@ -2568,6 +2619,7 @@
                           LayerBindingId id,
                           const char* name = nullptr) override
     {
+        boost::ignore_unused(id, name);
         const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
         BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType));
         BOOST_TEST(m_OutputShape == info.GetShape());
@@ -2576,12 +2628,14 @@
     void VisitQuantizeLayer(const IConnectableLayer* layer,
                             const char* name = nullptr) override
     {
+        boost::ignore_unused(layer, name);
         m_VisitedQuantizeLayer = true;
     }
 
     void VisitDequantizeLayer(const IConnectableLayer* layer,
                               const char* name = nullptr) override
     {
+        boost::ignore_unused(layer, name);
         m_VisitedDequantizeLayer = true;
     }
 
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index e01ed47..6a65c6d 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -710,6 +710,7 @@
 BindingPointInfo Deserializer::GetNetworkInputBindingInfo(unsigned int layerIndex,
                                                           const std::string& name) const
 {
+    boost::ignore_unused(layerIndex);
     for (auto inputBinding : m_InputBindings)
     {
         if (inputBinding.first == name)
@@ -727,6 +728,7 @@
 BindingPointInfo Deserializer::GetNetworkOutputBindingInfo(unsigned int layerIndex,
                                                                 const std::string& name) const
 {
+    boost::ignore_unused(layerIndex);
     for (auto outputBinding : m_OutputBindings)
     {
         if (outputBinding.first == name)
@@ -1676,6 +1678,7 @@
 armnn::Pooling2dDescriptor Deserializer::GetPoolingDescriptor(Deserializer::PoolingDescriptor pooling2dDesc,
                                                               unsigned int layerIndex)
 {
+    boost::ignore_unused(layerIndex);
     armnn::Pooling2dDescriptor desc;
 
     switch (pooling2dDesc->poolType())
@@ -2027,6 +2030,7 @@
     Deserializer::NormalizationDescriptorPtr normalizationDescriptor,
     unsigned int layerIndex)
 {
+    boost::ignore_unused(layerIndex);
     armnn::NormalizationDescriptor desc;
 
     switch (normalizationDescriptor->normChannelType())
diff --git a/src/armnnDeserializer/test/DeserializeAdd.cpp b/src/armnnDeserializer/test/DeserializeAdd.cpp
index b053b10..be292bc 100644
--- a/src/armnnDeserializer/test/DeserializeAdd.cpp
+++ b/src/armnnDeserializer/test/DeserializeAdd.cpp
@@ -7,6 +7,8 @@
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include "../Deserializer.hpp"
 
+#include <boost/core/ignore_unused.hpp>
+
 #include <string>
 #include <iostream>
 
@@ -20,6 +22,7 @@
                         const std::string & dataType,
                         const std::string & activation="NONE")
     {
+        boost::ignore_unused(activation);
         m_JsonString = R"(
         {
                 inputIds: [0, 1],
diff --git a/src/armnnDeserializer/test/DeserializeMultiplication.cpp b/src/armnnDeserializer/test/DeserializeMultiplication.cpp
index a9dbfbf..f784ba6 100644
--- a/src/armnnDeserializer/test/DeserializeMultiplication.cpp
+++ b/src/armnnDeserializer/test/DeserializeMultiplication.cpp
@@ -7,6 +7,8 @@
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include "../Deserializer.hpp"
 
+#include <boost/core/ignore_unused.hpp>
+
 #include <string>
 #include <iostream>
 
@@ -20,6 +22,7 @@
                                    const std::string & dataType,
                                    const std::string & activation="NONE")
     {
+        boost::ignore_unused(activation);
         m_JsonString = R"(
         {
                 inputIds: [0, 1],
diff --git a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
index fef2409..de7fe5c 100644
--- a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
+++ b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
@@ -10,6 +10,7 @@
 #include <armnn/IRuntime.hpp>
 #include <armnnDeserializer/IDeserializer.hpp>
 
+#include <boost/core/ignore_unused.hpp>
 #include <boost/assert.hpp>
 #include <boost/format.hpp>
 
@@ -154,6 +155,7 @@
                       armnnSerializer::TensorInfo tensorType, const std::string& name,
                       const float scale, const int64_t zeroPoint)
     {
+        boost::ignore_unused(name);
         BOOST_CHECK_EQUAL(shapeSize, tensors->dimensions()->size());
         BOOST_CHECK_EQUAL_COLLECTIONS(shape.begin(), shape.end(),
                                       tensors->dimensions()->begin(), tensors->dimensions()->end());
diff --git a/src/armnnQuantizer/QuantizationDataSet.cpp b/src/armnnQuantizer/QuantizationDataSet.cpp
index 007cf0a..9694342 100644
--- a/src/armnnQuantizer/QuantizationDataSet.cpp
+++ b/src/armnnQuantizer/QuantizationDataSet.cpp
@@ -8,6 +8,7 @@
 
 #define BOOST_FILESYSTEM_NO_DEPRECATED
 
+#include <boost/core/ignore_unused.hpp>
 #include <boost/filesystem/operations.hpp>
 #include <boost/filesystem/path.hpp>
 
@@ -51,6 +52,7 @@
                                         armnn::LayerBindingId id,
                                         const char* name)
 {
+    boost::ignore_unused(name);
     m_TensorInfos.emplace(id, layer->GetOutputSlot(0).GetTensorInfo());
 }
 
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 5d06958..608a9c3 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -11,6 +11,7 @@
 
 #include <iostream>
 
+#include <boost/core/ignore_unused.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 #include <flatbuffers/util.h>
@@ -80,6 +81,8 @@
 // Build FlatBuffer for Input Layer
 void SerializerVisitor::VisitInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
 {
+    boost::ignore_unused(name);
+
     // Create FlatBuffer BaseLayer
     auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);
 
@@ -100,6 +103,8 @@
 // Build FlatBuffer for Output Layer
 void SerializerVisitor::VisitOutputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
 {
+    boost::ignore_unused(name);
+
     // Create FlatBuffer BaseLayer
     auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);
 
@@ -118,6 +123,7 @@
 
 void SerializerVisitor::VisitAbsLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
+    boost::ignore_unused(name);
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Abs);
     auto flatBufferAbsLayer  = serializer::CreateAbsLayer(m_flatBufferBuilder, flatBufferBaseLayer);
 
@@ -129,6 +135,8 @@
                                              const armnn::ActivationDescriptor& descriptor,
                                              const char* name)
 {
+    boost::ignore_unused(name);
+
     // Create FlatBuffer BaseLayer
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Activation);
 
@@ -150,6 +158,8 @@
 // Build FlatBuffer for Addition Layer
 void SerializerVisitor::VisitAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
+    boost::ignore_unused(name);
+
     // Create FlatBuffer BaseLayer
     auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);
 
@@ -165,6 +175,8 @@
                                             const armnn::ArgMinMaxDescriptor& descriptor,
                                             const char *name)
 {
+    boost::ignore_unused(name);
+
     // Create FlatBuffer BaseLayer
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ArgMinMax);
 
@@ -186,6 +198,8 @@
                                                  const armnn::BatchToSpaceNdDescriptor& descriptor,
                                                  const char* name)
 {
+    boost::ignore_unused(name);
+
     // Create FlatBuffer BaseLayer
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchToSpaceNd);
 
@@ -218,6 +232,8 @@
                                                      const armnn::ConstTensor& gamma,
                                                      const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbBatchNormalizationBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
     auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
                                                   m_flatBufferBuilder,
@@ -243,6 +259,8 @@
                                              const armnn::ComparisonDescriptor& descriptor,
                                              const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Comparison);
     auto fbDescriptor = serializer::CreateComparisonDescriptor(
         m_flatBufferBuilder,
@@ -257,6 +275,8 @@
                                            const armnn::ConstTensor& input,
                                            const char* name)
 {
+    boost::ignore_unused(name);
+
     // Create FlatBuffer BaseLayer
     auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);
 
@@ -278,6 +298,8 @@
                                                 const armnn::Optional<armnn::ConstTensor>& biases,
                                                 const char* name)
 {
+    boost::ignore_unused(name);
+
     // Create FlatBuffer BaseLayer
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
 
@@ -315,6 +337,8 @@
                                                const armnn::DepthToSpaceDescriptor& descriptor,
                                                const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthToSpace);
     auto fbDescriptor = CreateDepthToSpaceDescriptor(m_flatBufferBuilder,
                                                      descriptor.m_BlockSize,
@@ -331,6 +355,8 @@
                                                          const armnn::Optional<armnn::ConstTensor>& biases,
                                                          const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthwiseConvolution2d);
     auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
                                                                descriptor.m_PadLeft,
@@ -363,6 +389,8 @@
 void SerializerVisitor::VisitDequantizeLayer(const armnn::IConnectableLayer* layer,
                                              const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
     auto fbDequantizeLayer     = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);
 
@@ -374,6 +402,8 @@
                                                        const armnn::ConstTensor& anchors,
                                                        const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_DetectionPostProcess);
     auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
                                                              descriptor.m_MaxDetections,
@@ -400,6 +430,8 @@
 
 void SerializerVisitor::VisitDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbDivisionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Division);
     auto fbDivisionLayer     = serializer::CreateDivisionLayer(m_flatBufferBuilder, fbDivisionBaseLayer);
 
@@ -408,6 +440,8 @@
 
 void SerializerVisitor::VisitEqualLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Equal);
     auto fbEqualLayer = serializer::CreateEqualLayer(m_flatBufferBuilder, fbBaseLayer);
 
@@ -416,6 +450,8 @@
 
 void SerializerVisitor::VisitFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
 {
+    boost::ignore_unused(name);
+
     auto flatBufferFloorBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Floor);
     auto flatBufferFloorLayer = serializer::CreateFloorLayer(m_flatBufferBuilder, flatBufferFloorBaseLayer);
 
@@ -424,6 +460,8 @@
 
 void SerializerVisitor::VisitGatherLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbGatherBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Gather);
     auto flatBufferLayer   = serializer::CreateGatherLayer(m_flatBufferBuilder, fbGatherBaseLayer);
 
@@ -432,6 +470,8 @@
 
 void SerializerVisitor::VisitGreaterLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbGreaterBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Greater);
     auto fbGreaterLayer = serializer::CreateGreaterLayer(m_flatBufferBuilder, fbGreaterBaseLayer);
 
@@ -443,6 +483,8 @@
     const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
     const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbDescriptor = serializer::CreateInstanceNormalizationDescriptor(
             m_flatBufferBuilder,
             instanceNormalizationDescriptor.m_Gamma,
@@ -460,6 +502,8 @@
                                                   const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
                                                   const char* name)
 {
+    boost::ignore_unused(name);
+
     // Create FlatBuffer BaseLayer
     auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_L2Normalization);
 
@@ -479,6 +523,8 @@
                                              const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
                                              const char* name)
 {
+    boost::ignore_unused(name);
+
     // Create FlatBuffer BaseLayer
     auto flatBufferLogSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogSoftmax);
 
@@ -502,6 +548,8 @@
                                        const armnn::LstmInputParams& params,
                                        const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Lstm);
 
     auto fbLstmDescriptor = serializer::CreateLstmDescriptor(
@@ -605,6 +653,8 @@
 
 void SerializerVisitor::VisitMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbMaximumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Maximum);
     auto fbMaximumLayer     = serializer::CreateMaximumLayer(m_flatBufferBuilder, fbMaximumBaseLayer);
 
@@ -615,6 +665,8 @@
                                        const armnn::MeanDescriptor& descriptor,
                                        const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbMeanBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Mean);
     auto fbMeanDescriptor = serializer::CreateMeanDescriptor(m_flatBufferBuilder,
                                                              m_flatBufferBuilder.CreateVector(descriptor.m_Axis),
@@ -629,6 +681,8 @@
 
 void SerializerVisitor::VisitMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbMinimumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Minimum);
     auto fbMinimumLayer     = serializer::CreateMinimumLayer(m_flatBufferBuilder, fbMinimumBaseLayer);
 
@@ -637,6 +691,8 @@
 
 void SerializerVisitor::VisitMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbMergeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merge);
     auto fbMergeLayer     = serializer::CreateMergeLayer(m_flatBufferBuilder, fbMergeBaseLayer);
 
@@ -654,6 +710,8 @@
                                          const armnn::ConcatDescriptor& concatDescriptor,
                                          const char* name)
 {
+    boost::ignore_unused(name);
+
     auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat);
 
     std::vector<flatbuffers::Offset<UintVector>> views;
@@ -685,6 +743,8 @@
 
 void SerializerVisitor::VisitMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
     auto fbMultiplicationLayer     = serializer::CreateMultiplicationLayer(m_flatBufferBuilder,
                                                                            fbMultiplicationBaseLayer);
@@ -696,6 +756,8 @@
                                       const armnn::PadDescriptor& padDescriptor,
                                       const char* name)
 {
+    boost::ignore_unused(name);
+
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pad);
 
     std::vector<unsigned int> padList;
@@ -720,6 +782,8 @@
                                           const armnn::PermuteDescriptor& permuteDescriptor,
                                           const char* name)
 {
+    boost::ignore_unused(name);
+
     // Create FlatBuffer BaseLayer
     auto flatBufferPermuteBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Permute);
 
@@ -746,6 +810,8 @@
                                           const armnn::ReshapeDescriptor& reshapeDescriptor,
                                           const char* name)
 {
+    boost::ignore_unused(name);
+
     // Create FlatBuffer BaseLayer
     auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);
 
@@ -770,6 +836,8 @@
                                                  const armnn::ResizeBilinearDescriptor& resizeDescriptor,
                                                  const char* name)
 {
+    boost::ignore_unused(name);
+
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ResizeBilinear);
 
     auto flatBufferDescriptor =
@@ -789,6 +857,8 @@
                                          const armnn::ResizeDescriptor& resizeDescriptor,
                                          const char* name)
 {
+    boost::ignore_unused(name);
+
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Resize);
 
     auto flatBufferDescriptor =
@@ -807,6 +877,8 @@
 
 void SerializerVisitor::VisitRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbRsqrtBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rsqrt);
     auto fbRsqrtLayer     = serializer::CreateRsqrtLayer(m_flatBufferBuilder, fbRsqrtBaseLayer);
 
@@ -817,6 +889,8 @@
                                         const armnn::SliceDescriptor& sliceDescriptor,
                                         const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbSliceBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Slice);
     auto fbSliceDescriptor = CreateSliceDescriptor(m_flatBufferBuilder,
                                                    m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Begin),
@@ -832,6 +906,8 @@
                                           const armnn::SoftmaxDescriptor& softmaxDescriptor,
                                           const char* name)
 {
+    boost::ignore_unused(name);
+
     // Create FlatBuffer BaseLayer
     auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax);
 
@@ -852,6 +928,8 @@
                                             const armnn::Pooling2dDescriptor& pooling2dDescriptor,
                                             const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbPooling2dBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
     auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
         m_flatBufferBuilder,
@@ -878,6 +956,8 @@
 void SerializerVisitor::VisitPreluLayer(const armnn::IConnectableLayer* layer,
                                         const char* name)
 {
+    boost::ignore_unused(name);
+
     // Create FlatBuffer BaseLayer
     auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);
 
@@ -890,6 +970,8 @@
 
 void SerializerVisitor::VisitQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
 {
+    boost::ignore_unused(name);
+
     auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
     auto fbQuantizeLayer = serializer::CreateQuantizeLayer(m_flatBufferBuilder,
                                                            fbQuantizeBaseLayer);
@@ -903,6 +985,8 @@
                                                  const armnn::Optional<armnn::ConstTensor>& biases,
                                                  const char* name)
 {
+    boost::ignore_unused(name);
+
     // Create FlatBuffer BaseLayer
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);
 
@@ -938,6 +1022,8 @@
                                                  const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                  const char* name)
 {
+    boost::ignore_unused(name);
+
     // Create FlatBuffer BaseLayer
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToBatchNd);
 
@@ -967,6 +1053,8 @@
                                                const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
                                                const char* name)
 {
+    boost::ignore_unused(name);
+
     auto flatBufferBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToDepth);
     auto flatBufferDescriptor =
         CreateSpaceToDepthDescriptor(m_flatBufferBuilder,
@@ -985,6 +1073,8 @@
                                            const armnn::ViewsDescriptor& viewsDescriptor,
                                            const char* name)
 {
+    boost::ignore_unused(name);
+
     // Create FlatBuffer ViewOrigins
     std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
     flatBufferViewOrigins.reserve(viewsDescriptor.GetNumViews());
@@ -1049,6 +1139,8 @@
                                                 const armnn::NormalizationDescriptor& descriptor,
                                                 const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbNormalizationBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Normalization);
 
     auto fbNormalizationDescriptor = serializer::CreateNormalizationDescriptor(
@@ -1072,6 +1164,8 @@
                                         const armnn::StackDescriptor& stackDescriptor,
                                         const char* name)
 {
+    boost::ignore_unused(name);
+
     auto stackBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Stack);
 
     std::vector<unsigned int> inputShape;
@@ -1093,6 +1187,8 @@
                                           const armnn::StandInDescriptor& standInDescriptor,
                                           const char *name)
 {
+    boost::ignore_unused(name);
+
     auto fbDescriptor = serializer::CreateStandInDescriptor(m_flatBufferBuilder,
                                                             standInDescriptor.m_NumInputs,
                                                             standInDescriptor.m_NumOutputs);
@@ -1107,6 +1203,8 @@
                                                const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
                                                const char* name)
 {
+    boost::ignore_unused(name);
+
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StridedSlice);
 
     auto flatBufferDescriptor =
@@ -1130,6 +1228,8 @@
 
 void SerializerVisitor::VisitSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
     auto fbSubtractionLayer = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);
 
@@ -1138,6 +1238,8 @@
 
 void SerializerVisitor::VisitSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbSwitchBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Switch);
     auto fbSwitchLayer = serializer::CreateSwitchLayer(m_flatBufferBuilder, fbSwitchBaseLayer);
 
@@ -1151,6 +1253,8 @@
     const armnn::Optional<armnn::ConstTensor>& biases,
     const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
     auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
                                                                descriptor.m_PadLeft,
@@ -1183,6 +1287,8 @@
                                                 const armnn::QuantizedLstmInputParams& params,
                                                 const char* name)
 {
+    boost::ignore_unused(name);
+
     auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);
 
     // Get input parameters
diff --git a/src/armnnSerializer/test/ActivationSerializationTests.cpp b/src/armnnSerializer/test/ActivationSerializationTests.cpp
index 2a46045..34e99f6 100644
--- a/src/armnnSerializer/test/ActivationSerializationTests.cpp
+++ b/src/armnnSerializer/test/ActivationSerializationTests.cpp
@@ -8,6 +8,8 @@
 #include <armnn/INetwork.hpp>
 #include "../Serializer.hpp"
 #include <sstream>
+
+#include <boost/core/ignore_unused.hpp>
 #include <boost/test/unit_test.hpp>
 
 BOOST_AUTO_TEST_SUITE(SerializerTests)
@@ -19,6 +21,7 @@
                               const armnn::ActivationDescriptor& activationDescriptor,
                               const char* name) override
     {
+        boost::ignore_unused(layer, activationDescriptor);
         BOOST_TEST(name == "activation");
     }
 };
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 4260669..8dfca3c 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -56,7 +56,7 @@
 
 struct DefaultLayerVerifierPolicy
 {
-    static void Apply(const std::string s = "")
+    static void Apply(const std::string)
     {
         BOOST_TEST_MESSAGE("Unexpected layer found in network");
         BOOST_TEST(false);
@@ -75,7 +75,7 @@
 
     void VisitInputLayer(const armnn::IConnectableLayer*, armnn::LayerBindingId, const char*) override {}
 
-    void VisitOutputLayer(const armnn::IConnectableLayer*, armnn::LayerBindingId id, const char*) override {}
+    void VisitOutputLayer(const armnn::IConnectableLayer*, armnn::LayerBindingId, const char*) override {}
 
 protected:
     void VerifyNameAndConnections(const armnn::IConnectableLayer* layer, const char* name)
@@ -521,7 +521,7 @@
             CompareConstTensor(input, m_LayerInput);
         }
 
-        void VisitAdditionLayer(const armnn::IConnectableLayer* layer, const char* name = nullptr) override {}
+        void VisitAdditionLayer(const armnn::IConnectableLayer*, const char*) override {}
 
     private:
         armnn::ConstTensor m_LayerInput;
@@ -927,7 +927,7 @@
         BOOST_CHECK(descriptor.m_Operation == armnn::ComparisonOperation::Equal);
     }
 
-    void VisitEqualLayer(const armnn::IConnectableLayer* layer, const char* name) override
+    void VisitEqualLayer(const armnn::IConnectableLayer*, const char*) override
     {
         throw armnn::Exception("EqualLayer should have translated to ComparisonLayer");
     }
@@ -1146,9 +1146,9 @@
             VerifyNameAndConnections(layer, name);
         }
 
-        void VisitConstantLayer(const armnn::IConnectableLayer* layer,
-                                const armnn::ConstTensor& input,
-                                const char *name) override {}
+        void VisitConstantLayer(const armnn::IConnectableLayer*,
+                                const armnn::ConstTensor&,
+                                const char*) override {}
     };
 
     const std::string layerName("gather");
@@ -1201,7 +1201,7 @@
         BOOST_CHECK(descriptor.m_Operation == armnn::ComparisonOperation::Greater);
     }
 
-    void VisitGreaterLayer(const armnn::IConnectableLayer* layer, const char* name) override
+    void VisitGreaterLayer(const armnn::IConnectableLayer*, const char*) override
     {
         throw armnn::Exception("GreaterLayer should have translated to ComparisonLayer");
     }
@@ -1543,9 +1543,9 @@
                         const armnn::OriginsDescriptor& descriptor)
         : LayerVerifierBaseWithDescriptor<armnn::OriginsDescriptor>(layerName, inputInfos, outputInfos, descriptor) {}
 
-    void VisitMergerLayer(const armnn::IConnectableLayer* layer,
-                          const armnn::OriginsDescriptor& descriptor,
-                          const char* name) override
+    void VisitMergerLayer(const armnn::IConnectableLayer*,
+                          const armnn::OriginsDescriptor&,
+                          const char*) override
     {
         throw armnn::Exception("MergerLayer should have translated to ConcatLayer");
     }
@@ -2514,9 +2514,9 @@
             VerifyNameAndConnections(layer, name);
         }
 
-        void VisitConstantLayer(const armnn::IConnectableLayer* layer,
-                                const armnn::ConstTensor& input,
-                                const char *name) override {}
+        void VisitConstantLayer(const armnn::IConnectableLayer*,
+                                const armnn::ConstTensor&,
+                                const char*) override {}
     };
 
     const std::string layerName("switch");
@@ -2658,7 +2658,7 @@
             CompareConstTensor(input, m_LayerInput);
         }
 
-        void VisitAdditionLayer(const armnn::IConnectableLayer* layer, const char* name = nullptr) override {}
+        void VisitAdditionLayer(const armnn::IConnectableLayer*, const char*) override {}
 
     private:
         armnn::ConstTensor m_LayerInput;