IVGCVSW-4246 Clean build end-to-end tests with -Wextra
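
This change silences the unused-parameter warnings that -Wextra raises in
the end-to-end tests. Two patterns are used: parameters whose names are
worth keeping for documentation (e.g. in overridable virtual methods) are
passed to boost::ignore_unused, while lambda parameters that are never read
are simply left unnamed. A minimal sketch of both patterns (the type and
parameter names below are illustrative only, not taken from the codebase):

    #include <boost/core/ignore_unused.hpp>

    struct ITestCaseProvider
    {
        virtual ~ITestCaseProvider() = default;

        // The name documents the parameter's purpose; ignore_unused
        // suppresses -Wunused-parameter in this default implementation
        // without changing the signature seen by overriders.
        virtual void AddOptions(int verbosity) { boost::ignore_unused(verbosity); }
    };

    // An unnamed parameter produces no warning at all.
    auto makeDatabase = [](const char* dataDir, const ITestCaseProvider&) {
        return dataDir;
    };

Both approaches keep the function signatures intact, so existing callers
and overriders are unaffected.
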
Change-Id: Ia25f919e45a210e1e2d5d50b0c9098bf01d88013
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp
index 7b7dcec..6423d1c 100644
--- a/tests/InferenceTest.hpp
+++ b/tests/InferenceTest.hpp
@@ -9,7 +9,7 @@
#include <armnn/TypesUtils.hpp>
#include "InferenceModel.hpp"
-
+#include <boost/core/ignore_unused.hpp>
#include <boost/program_options.hpp>
@@ -91,8 +91,15 @@
public:
virtual ~IInferenceTestCaseProvider() {}
- virtual void AddCommandLineOptions(boost::program_options::options_description& options) {};
- virtual bool ProcessCommandLineOptions(const InferenceTestOptions &commonOptions) { return true; };
+ virtual void AddCommandLineOptions(boost::program_options::options_description& options)
+ {
+ boost::ignore_unused(options);
+ };
+ virtual bool ProcessCommandLineOptions(const InferenceTestOptions &commonOptions)
+ {
+ boost::ignore_unused(commonOptions);
+ return true;
+ };
virtual std::unique_ptr<IInferenceTestCase> GetTestCase(unsigned int testCaseId) = 0;
virtual bool OnInferenceTestFinished() { return true; };
};
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index fd888e2..c05e70d 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -79,6 +79,7 @@
void operator()(const std::vector<int>& values)
{
+ boost::ignore_unused(values);
BOOST_ASSERT_MSG(false, "Non-float predictions output not supported.");
}
diff --git a/tests/MobileNetSsdInferenceTest.hpp b/tests/MobileNetSsdInferenceTest.hpp
index a950b93..c99844b 100644
--- a/tests/MobileNetSsdInferenceTest.hpp
+++ b/tests/MobileNetSsdInferenceTest.hpp
@@ -33,6 +33,8 @@
TestCaseResult ProcessResult(const InferenceTestOptions& options) override
{
+ boost::ignore_unused(options);
+
const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // bounding boxes
BOOST_ASSERT(output1.size() == k_OutputSize1);
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index 279bf30..f9e9b14 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -732,6 +732,7 @@
const bool enableProfiling, const bool enableFp16TurboMode, const double& thresholdTime,
const bool printIntermediate, bool enableLayerDetails = false, bool parseUnuspported = false)
{
+ boost::ignore_unused(runtime);
std::string modelFormat;
std::string modelPath;
std::string inputNames;
diff --git a/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp b/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
index bf5a865..f4b3955 100644
--- a/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
+++ b/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
@@ -37,7 +37,7 @@
"input", // input tensor name
"output", // output tensor name
{ 0, 1, 2 }, // test images to test with as above
- [&imageSet](const char* dataDir, const ModelType & model) {
+ [&imageSet](const char* dataDir, const ModelType &) {
// we need to get the input quantization parameters from
// the parsed model
return DatabaseType(
diff --git a/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp b/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp
index b0af830..169ecb0 100644
--- a/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp
+++ b/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp
@@ -37,7 +37,7 @@
"input", // input tensor name
"InceptionV4/Logits/Predictions", // output tensor name
{ 0, 1, 2 }, // test images to test with as above
- [&imageSet](const char* dataDir, const ModelType & model) {
+ [&imageSet](const char* dataDir, const ModelType &) {
// we need to get the input quantization parameters from
// the parsed model
return DatabaseType(
diff --git a/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp b/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp
index 4cf16d7..4194d4b 100644
--- a/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp
+++ b/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp
@@ -37,7 +37,7 @@
"input", // input tensor name
"output", // output tensor name
{ 0, 1, 2 }, // test images to test with as above
- [&imageSet](const char* dataDir, const ModelType & model) {
+ [&imageSet](const char* dataDir, const ModelType &) {
return DatabaseType(
dataDir,
224,
diff --git a/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp b/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp
index f193a98..f497de5 100644
--- a/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp
+++ b/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp
@@ -39,7 +39,7 @@
"input", // input tensor name
"MobilenetV1/Predictions/Reshape_1", // output tensor name
{ 0, 1, 2 }, // test images to test with as above
- [&imageSet](const char* dataDir, const ModelType & model) {
+ [&imageSet](const char* dataDir, const ModelType &) {
// we need to get the input quantization parameters from
// the parsed model
return DatabaseType(
diff --git a/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp b/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp
index 1b411f9..b2d3f0f 100644
--- a/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp
+++ b/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp
@@ -105,7 +105,7 @@
"input", // input tensor name
"MobilenetV1/Predictions/Reshape_1", // output tensor name
indices, // vector of indices to select which images to validate
- [&imageSet](const char* dataDir, const ModelType & model) {
+ [&imageSet](const char* dataDir, const ModelType &) {
// we need to get the input quantization parameters from
// the parsed model
return DatabaseType(
diff --git a/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp b/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp
index 9bc1034..b8def4f 100644
--- a/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp
+++ b/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp
@@ -37,7 +37,7 @@
"input", // input tensor name
"output", // output tensor name
{ 0, 1, 2 }, // test images to test with as above
- [&imageSet](const char* dataDir, const ModelType & model) {
+ [&imageSet](const char* dataDir, const ModelType &) {
// we need to get the input quantization parameters from
// the parsed model
return DatabaseType(
diff --git a/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp b/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp
index 98235e3..7446809 100644
--- a/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp
+++ b/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp
@@ -37,7 +37,7 @@
"input", // input tensor name
"resnet_v2_50/predictions/Reshape_1", // output tensor name
{ 0, 1, 2 }, // test images to test with as above
- [&imageSet](const char* dataDir, const ModelType & model) {
+ [&imageSet](const char* dataDir, const ModelType &) {
// we need to get the input quantization parameters from
// the parsed model
return DatabaseType(
diff --git a/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp b/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp
index 1e2ffbf..107660e 100644
--- a/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp
+++ b/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp
@@ -37,7 +37,7 @@
"input", // input tensor name
"output", // output tensor name
{ 0, 1, 2 }, // test images to test with as above
- [&imageSet](const char* dataDir, const ModelType & model) {
+ [&imageSet](const char* dataDir, const ModelType &) {
return DatabaseType(
dataDir,
299,
diff --git a/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp b/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
index 030f01c..8da553f 100644
--- a/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
+++ b/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
@@ -39,7 +39,7 @@
"input", // input tensor name
"vgg_16/fc8/squeezed", // output tensor name
{ 0, 1, 2 }, // test images to test with as above
- [&imageSet](const char* dataDir, const ModelType & model) {
+ [&imageSet](const char* dataDir, const ModelType &) {
// we need to get the input quantization parameters from
// the parsed model
return DatabaseType(
diff --git a/tests/YoloInferenceTest.hpp b/tests/YoloInferenceTest.hpp
index 91ea977..16d0355 100644
--- a/tests/YoloInferenceTest.hpp
+++ b/tests/YoloInferenceTest.hpp
@@ -32,6 +32,8 @@
virtual TestCaseResult ProcessResult(const InferenceTestOptions& options) override
{
+ boost::ignore_unused(options);
+
using Boost3dArray = boost::multi_array<float, 3>;
const std::vector<float>& output = boost::get<std::vector<float>>(this->GetOutputs()[0]);