IVGCVSW-4246 Clean build of parsers with -Wextra

Change-Id: Ib00f185b431ab74fd9425d8f478bd2ddb182f74b
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 6853512..9c7dda8 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -385,6 +385,7 @@
                       armnn::TensorInfo& tensorInfo,
                       armnn::Optional<armnn::PermutationVector&> permutationVector)
 {
+    boost::ignore_unused(tensorPtr);
     BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
     BOOST_ASSERT_MSG(bufferPtr != nullptr,
         boost::str(
diff --git a/src/armnnTfLiteParser/test/Unsupported.cpp b/src/armnnTfLiteParser/test/Unsupported.cpp
index 25abde8..9a9cdc5 100644
--- a/src/armnnTfLiteParser/test/Unsupported.cpp
+++ b/src/armnnTfLiteParser/test/Unsupported.cpp
@@ -33,7 +33,7 @@
 
     void VisitInputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
 
-    void VisitOutputLayer(const IConnectableLayer*, LayerBindingId id, const char*) override {}
+    void VisitOutputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
 
     void VisitStandInLayer(const IConnectableLayer* layer,
                            const StandInDescriptor& descriptor,
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 8c68659..ca98f46 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -17,7 +17,7 @@
 #include <google/protobuf/io/zero_copy_stream_impl.h>
 #include <google/protobuf/text_format.h>
 
-#include "tensorflow/core/framework/graph.pb.h"
+#include <tensorflow/core/framework/graph.pb.h>
 
 #include <boost/format.hpp>
 #include <boost/core/ignore_unused.hpp>
@@ -727,6 +727,7 @@
 
 ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
     if (numberOfInputs < 2)
     {
@@ -806,6 +807,7 @@
 
 ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
 
     // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
@@ -835,6 +837,7 @@
 
 ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     return AddAdditionLayer(nodeDef, true);
 }
 
@@ -865,6 +868,7 @@
 
 ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
     // Any requests for the output slots of this node should be forwarded to the node connected as input.
     return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
@@ -1058,6 +1062,7 @@
 
 ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     BOOST_ASSERT(nodeDef.op() == "Const");
 
     if (nodeDef.attr().count("value") == 0)
@@ -1194,6 +1199,7 @@
 ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
     TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
@@ -1335,6 +1341,7 @@
 ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
                                                     const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
     TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
@@ -1530,6 +1537,7 @@
 
 ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
 
     IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
@@ -1550,6 +1558,7 @@
 ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
                                                    const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
 
     if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
@@ -1698,6 +1707,7 @@
 ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
                                             const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     if (inputs.size() != 2)
     {
@@ -1835,6 +1845,7 @@
 ParsedTfOperationPtr TfParser::ParseGather(const tensorflow::NodeDef& nodeDef,
                                            const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
     IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
@@ -1871,6 +1882,7 @@
 ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
                                             const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
     IOutputSlot* input0Slot = inputLayers.first;
     IOutputSlot* input1Slot = inputLayers.second;
@@ -1884,6 +1896,7 @@
 ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
     IOutputSlot* input0Slot = inputLayers.first;
     IOutputSlot* input1Slot = inputLayers.second;
@@ -1897,6 +1910,7 @@
 ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
                                             const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
     IOutputSlot* input0Slot = inputLayers.first;
     IOutputSlot* input1Slot = inputLayers.second;
@@ -1908,6 +1922,7 @@
 
 ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
 
     IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
@@ -1999,6 +2014,7 @@
 ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
                                         const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     // input consists of:
     // input[0] the tensor which will be padded
     // input[1] the tensor holding the padding values
@@ -2073,6 +2089,7 @@
 ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
                                            const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
 
     // In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
@@ -2158,6 +2175,7 @@
 ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     // Note: the Shape layer is handled in a special way, because:
     //        1. ARMNN doesn't support int32 tensors which it outputs.
     //        2. ARMNN works with statically shaped tensors which are known at parse time.
@@ -2200,6 +2218,7 @@
 ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
 
@@ -2238,6 +2257,7 @@
 ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
 
     if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
@@ -2376,6 +2396,7 @@
 
 ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
 
     IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
@@ -2395,6 +2416,7 @@
 
 ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
 
     NormalizationDescriptor normalizationDescriptor;
@@ -2440,12 +2462,15 @@
 
 ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
+
     // Defers the creation of the layer (see ParsedMatMulTfOperation).
     return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
 }
 
 ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
     TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
@@ -2484,7 +2509,7 @@
                    std::inserter(positiveAxisSet, positiveAxisSet.begin()),
                    [rank](int i) -> unsigned int { return static_cast<unsigned int>((i + rank) % rank); });
 
-    CalculateReducedOutputTensoInfo(inputTensorInfo, axisTensorInfo, positiveAxisSet, keepDims, outputTensorInfo);
+    CalculateReducedOutputTensoInfo(inputTensorInfo, positiveAxisSet, keepDims, outputTensorInfo);
 
     if (inputTensorInfo.GetNumDimensions() > positiveAxisSet.size())
     {
@@ -2774,6 +2799,8 @@
 ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
 {
+    boost::ignore_unused(graphDef);
+
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
     IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
     TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
diff --git a/src/armnnUtils/ParserHelper.cpp b/src/armnnUtils/ParserHelper.cpp
index 990a9b2..88e5756 100644
--- a/src/armnnUtils/ParserHelper.cpp
+++ b/src/armnnUtils/ParserHelper.cpp
@@ -51,8 +51,9 @@
     }
 }
 
-void CalculateReducedOutputTensoInfo(const armnn::TensorInfo& inputTensorInfo, const armnn::TensorInfo& axisTensorInfo,
-                                     const std::set<unsigned int>& axisSet, bool keepDims,
+void CalculateReducedOutputTensoInfo(const armnn::TensorInfo& inputTensorInfo,
+                                     const std::set<unsigned int>& axisSet,
+                                     bool keepDims,
                                      armnn::TensorInfo& outputTensorInfo)
 {
     std::vector<unsigned int> outputShapeVector;
diff --git a/src/armnnUtils/ParserHelper.hpp b/src/armnnUtils/ParserHelper.hpp
index bcc1e5b..d85ce26 100644
--- a/src/armnnUtils/ParserHelper.hpp
+++ b/src/armnnUtils/ParserHelper.hpp
@@ -17,8 +17,9 @@
                                   unsigned int& mergeDimOrigin);
 
 /// Creates a tensor info after reducing the dimensions mentioned in axisData.
-void CalculateReducedOutputTensoInfo(const armnn::TensorInfo& inputTensorInfo, const armnn::TensorInfo& axisTensorInfo,
-                                     const std::set<unsigned int>& axisSet, bool keepDims,
+void CalculateReducedOutputTensoInfo(const armnn::TensorInfo& inputTensorInfo,
+                                     const std::set<unsigned int>& axisSet,
+                                     bool keepDims,
                                      armnn::TensorInfo& outputTensorInfo);
 
 } // namespace armnnUtils
diff --git a/src/armnnUtils/test/ParserHelperTest.cpp b/src/armnnUtils/test/ParserHelperTest.cpp
index 122ad76..636e2bd 100644
--- a/src/armnnUtils/test/ParserHelperTest.cpp
+++ b/src/armnnUtils/test/ParserHelperTest.cpp
@@ -22,54 +22,39 @@
     TensorInfo inputTensorInfo(3, &inputShape[0], DataType::Float32);
 
     // Reducing all dimensions results in one single output value (one dimension)
-    unsigned int axisShape1[] = { 3 };
     std::set<unsigned int> axisData1 = { 0, 1, 2 };
-    TensorInfo axisTensorInfo1(1, &axisShape1[0], DataType::Signed32);
-
     TensorInfo outputTensorInfo1;
 
-    CalculateReducedOutputTensoInfo(inputTensorInfo, axisTensorInfo1, axisData1,
-    keepDims, outputTensorInfo1);
+    CalculateReducedOutputTensoInfo(inputTensorInfo, axisData1, keepDims, outputTensorInfo1);
 
     BOOST_ASSERT(outputTensorInfo1.GetNumDimensions() == 1);
     BOOST_ASSERT(outputTensorInfo1.GetShape()[0] == 1);
 
     // Reducing dimension 0 results in a 3x4 size tensor (one dimension)
-    unsigned int axisShape2[] = { 1 };
     std::set<unsigned int> axisData2 = { 0 };
-    TensorInfo axisTensorInfo2(1, &axisShape2[0], DataType::Signed32);
-
     TensorInfo outputTensorInfo2;
 
-    CalculateReducedOutputTensoInfo(inputTensorInfo, axisTensorInfo2, axisData2,
-    keepDims, outputTensorInfo2);
+    CalculateReducedOutputTensoInfo(inputTensorInfo, axisData2, keepDims, outputTensorInfo2);
 
     BOOST_ASSERT(outputTensorInfo2.GetNumDimensions() == 1);
     BOOST_ASSERT(outputTensorInfo2.GetShape()[0] == 12);
 
     // Reducing dimensions 0,1 results in a 4 size tensor (one dimension)
-    unsigned int axisShape3[] = { 2 };
     std::set<unsigned int> axisData3 = { 0, 1 };
-    TensorInfo axisTensorInfo3(1, &axisShape3[0], DataType::Signed32);
-
     TensorInfo outputTensorInfo3;
 
-    CalculateReducedOutputTensoInfo(inputTensorInfo, axisTensorInfo3, axisData3,
-    keepDims, outputTensorInfo3);
+    CalculateReducedOutputTensoInfo(inputTensorInfo, axisData3, keepDims, outputTensorInfo3);
 
     BOOST_ASSERT(outputTensorInfo3.GetNumDimensions() == 1);
     BOOST_ASSERT(outputTensorInfo3.GetShape()[0] == 4);
 
     // Reducing dimension 0 results in a { 1, 3, 4 } dimension tensor
     keepDims = true;
-    unsigned int axisShape4[] = { 1 };
     std::set<unsigned int> axisData4 = { 0 };
-    TensorInfo axisTensorInfo4(1, &axisShape4[0], DataType::Signed32);
 
     TensorInfo outputTensorInfo4;
 
-    CalculateReducedOutputTensoInfo(inputTensorInfo, axisTensorInfo4, axisData4,
-    keepDims, outputTensorInfo4);
+    CalculateReducedOutputTensoInfo(inputTensorInfo, axisData4, keepDims, outputTensorInfo4);
 
     BOOST_ASSERT(outputTensorInfo4.GetNumDimensions() == 3);
     BOOST_ASSERT(outputTensorInfo4.GetShape()[0] == 1);
@@ -78,14 +63,11 @@
 
     // Reducing dimension 1, 2 results in a { 2, 1, 1 } dimension tensor
     keepDims = true;
-    unsigned int axisShape5[] = { 2 };
     std::set<unsigned int> axisData5 = { 1, 2 };
-    TensorInfo axisTensorInfo5(1, &axisShape5[0], DataType::Signed32);
 
     TensorInfo outputTensorInfo5;
 
-    CalculateReducedOutputTensoInfo(inputTensorInfo, axisTensorInfo5, axisData5,
-    keepDims, outputTensorInfo5);
+    CalculateReducedOutputTensoInfo(inputTensorInfo, axisData5, keepDims, outputTensorInfo5);
 
     BOOST_ASSERT(outputTensorInfo5.GetNumDimensions() == 3);
     BOOST_ASSERT(outputTensorInfo5.GetShape()[0] == 2);
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index c79aa78..a73837b 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -69,7 +69,7 @@
 template< typename ... Args>
 bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
 {
-    boost::ignore_unused((args)...);
+    boost::ignore_unused(reasonIfUnsupported, (args)...);
 #if defined(ARMCOMPUTENEON_ENABLED)
     return true;
 #else