COMPMID-450 Add SqueezeNetV1.1 benchmark tests

Change-Id: I489cd7cbc77ac389679ad41876acfb8b09584c0b
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/81360
Reviewed-by: Moritz Pflanzer <moritz.pflanzer@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
diff --git a/tests/benchmark_new/CL/ActivationLayer.cpp b/tests/benchmark_new/CL/ActivationLayer.cpp
index 6f8a007..bf7b36a 100644
--- a/tests/benchmark_new/CL/ActivationLayer.cpp
+++ b/tests/benchmark_new/CL/ActivationLayer.cpp
@@ -56,6 +56,11 @@
                                                                                         framework::dataset::make("DataType", DataType::F32)),
                                                             framework::dataset::make("Batches", { 1, 4, 8 })));
 
+REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetActivationLayer, CLActivationLayerFixture, framework::DatasetMode::ALL,
+                                framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetActivationLayerDataset(),
+                                                                                        framework::dataset::make("DataType", DataType::F32)),
+                                                            framework::dataset::make("Batches", { 1, 4, 8 })));
+
 TEST_SUITE_END()
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/benchmark_new/CL/ConvolutionLayer.cpp b/tests/benchmark_new/CL/ConvolutionLayer.cpp
index 6e0d6ff..cae0640 100644
--- a/tests/benchmark_new/CL/ConvolutionLayer.cpp
+++ b/tests/benchmark_new/CL/ConvolutionLayer.cpp
@@ -33,6 +33,7 @@
 #include "tests/datasets_new/AlexNetConvolutionLayerDataset.h"
 #include "tests/datasets_new/GoogLeNetConvolutionLayerDataset.h"
 #include "tests/datasets_new/LeNet5ConvolutionLayerDataset.h"
+#include "tests/datasets_new/SqueezeNetConvolutionLayerDataset.h"
 #include "tests/fixtures_new/ConvolutionLayerFixture.h"
 
 namespace arm_compute
@@ -58,6 +59,11 @@
                                                                                         framework::dataset::make("DataType", DataType::F32)),
                                                             framework::dataset::make("Batches", { 1, 4, 8 })));
 
+REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetConvolutionLayer, CLConvolutionLayerFixture, framework::DatasetMode::ALL,
+                                framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(),
+                                                                                        framework::dataset::make("DataType", DataType::F32)),
+                                                            framework::dataset::make("Batches", { 1, 4, 8 })));
+
 TEST_SUITE_END()
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/benchmark_new/CL/PoolingLayer.cpp b/tests/benchmark_new/CL/PoolingLayer.cpp
index 125907b..de22992 100644
--- a/tests/benchmark_new/CL/PoolingLayer.cpp
+++ b/tests/benchmark_new/CL/PoolingLayer.cpp
@@ -33,6 +33,7 @@
 #include "tests/datasets_new/AlexNetPoolingLayerDataset.h"
 #include "tests/datasets_new/GoogLeNetPoolingLayerDataset.h"
 #include "tests/datasets_new/LeNet5PoolingLayerDataset.h"
+#include "tests/datasets_new/SqueezeNetPoolingLayerDataset.h"
 #include "tests/fixtures_new/PoolingLayerFixture.h"
 
 namespace arm_compute
@@ -58,6 +59,11 @@
                                                                                         framework::dataset::make("DataType", DataType::F32)),
                                                             framework::dataset::make("Batches", { 1, 4, 8 })));
 
+REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetPoolingLayer, CLPoolingLayerFixture, framework::DatasetMode::ALL,
+                                framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetPoolingLayerDataset(),
+                                                                                        framework::dataset::make("DataType", DataType::F32)),
+                                                            framework::dataset::make("Batches", { 1, 4, 8 })));
+
 TEST_SUITE_END()
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/benchmark_new/NEON/ActivationLayer.cpp b/tests/benchmark_new/NEON/ActivationLayer.cpp
index 53b401a..beb98b4 100644
--- a/tests/benchmark_new/NEON/ActivationLayer.cpp
+++ b/tests/benchmark_new/NEON/ActivationLayer.cpp
@@ -40,11 +40,13 @@
 namespace
 {
 #ifdef ARM_COMPUTE_ENABLE_FP16
-const auto alexnet_data_types = framework::dataset::make("DataType", { DataType::QS8, DataType::F16, DataType::F32 });
-const auto lenet_data_types   = framework::dataset::make("DataType", { DataType::F16, DataType::F32 });
+const auto alexnet_data_types    = framework::dataset::make("DataType", { DataType::QS8, DataType::F16, DataType::F32 });
+const auto lenet_data_types      = framework::dataset::make("DataType", { DataType::F16, DataType::F32 });
+const auto squeezenet_data_types = framework::dataset::make("DataType", { DataType::F16, DataType::F32 });
 #else  /* ARM_COMPUTE_ENABLE_FP16 */
-const auto alexnet_data_types = framework::dataset::make("DataType", { DataType::QS8, DataType::F32 });
-const auto lenet_data_types   = framework::dataset::make("DataType", { DataType::F32 });
+const auto alexnet_data_types    = framework::dataset::make("DataType", { DataType::QS8, DataType::F32 });
+const auto lenet_data_types      = framework::dataset::make("DataType", { DataType::F32 });
+const auto squeezenet_data_types = framework::dataset::make("DataType", { DataType::F32 });
 #endif /* ARM_COMPUTE_ENABLE_FP16 */
 } // namespace
 
@@ -64,6 +66,10 @@
                                 framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetActivationLayerDataset(), lenet_data_types),
                                                             framework::dataset::make("Batches", { 1, 4, 8 })));
 
+REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetActivationLayer, NEActivationLayerFixture, framework::DatasetMode::ALL,
+                                framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetActivationLayerDataset(), squeezenet_data_types),
+                                                            framework::dataset::make("Batches", { 1, 4, 8 })));
+
 TEST_SUITE_END()
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/benchmark_new/NEON/ConvolutionLayer.cpp b/tests/benchmark_new/NEON/ConvolutionLayer.cpp
index 6dd4df8..7406180 100644
--- a/tests/benchmark_new/NEON/ConvolutionLayer.cpp
+++ b/tests/benchmark_new/NEON/ConvolutionLayer.cpp
@@ -33,6 +33,7 @@
 #include "tests/datasets_new/AlexNetConvolutionLayerDataset.h"
 #include "tests/datasets_new/GoogLeNetConvolutionLayerDataset.h"
 #include "tests/datasets_new/LeNet5ConvolutionLayerDataset.h"
+#include "tests/datasets_new/SqueezeNetConvolutionLayerDataset.h"
 #include "tests/fixtures_new/ConvolutionLayerFixture.h"
 
 namespace arm_compute
@@ -42,11 +43,13 @@
 namespace
 {
 #ifdef ARM_COMPUTE_ENABLE_FP16
-const auto alexnet_data_types = framework::dataset::make("DataType", { DataType::QS8, DataType::F16, DataType::F32 });
-const auto lenet_data_types   = framework::dataset::make("DataType", { DataType::F16, DataType::F32 });
+const auto alexnet_data_types    = framework::dataset::make("DataType", { DataType::QS8, DataType::F16, DataType::F32 });
+const auto lenet_data_types      = framework::dataset::make("DataType", { DataType::F16, DataType::F32 });
+const auto squeezenet_data_types = framework::dataset::make("DataType", { DataType::F16, DataType::F32 });
 #else  /* ARM_COMPUTE_ENABLE_FP16 */
-const auto alexnet_data_types = framework::dataset::make("DataType", { DataType::QS8, DataType::F32 });
-const auto lenet_data_types   = framework::dataset::make("DataType", { DataType::F32 });
+const auto alexnet_data_types    = framework::dataset::make("DataType", { DataType::QS8, DataType::F32 });
+const auto lenet_data_types      = framework::dataset::make("DataType", { DataType::F32 });
+const auto squeezenet_data_types = framework::dataset::make("DataType", { DataType::F32 });
 #endif /* ARM_COMPUTE_ENABLE_FP16 */
 } // namespace
 
@@ -66,6 +69,10 @@
                                 framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetConvolutionLayerDataset(), lenet_data_types),
                                                             framework::dataset::make("Batches", { 1, 4, 8 })));
 
+REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetConvolutionLayer, NEConvolutionLayerFixture, framework::DatasetMode::ALL,
+                                framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(), squeezenet_data_types),
+                                                            framework::dataset::make("Batches", { 1, 4, 8 })));
+
 TEST_SUITE_END()
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/benchmark_new/NEON/PoolingLayer.cpp b/tests/benchmark_new/NEON/PoolingLayer.cpp
index c9d598d..0991024 100644
--- a/tests/benchmark_new/NEON/PoolingLayer.cpp
+++ b/tests/benchmark_new/NEON/PoolingLayer.cpp
@@ -33,6 +33,7 @@
 #include "tests/datasets_new/AlexNetPoolingLayerDataset.h"
 #include "tests/datasets_new/GoogLeNetPoolingLayerDataset.h"
 #include "tests/datasets_new/LeNet5PoolingLayerDataset.h"
+#include "tests/datasets_new/SqueezeNetPoolingLayerDataset.h"
 #include "tests/fixtures_new/PoolingLayerFixture.h"
 
 namespace arm_compute
@@ -42,11 +43,13 @@
 namespace
 {
 #ifdef ARM_COMPUTE_ENABLE_FP16
-const auto alexnet_data_types = framework::dataset::make("DataType", { DataType::QS8, DataType::F16, DataType::F32 });
-const auto lenet_data_types   = framework::dataset::make("DataType", { DataType::F16, DataType::F32 });
+const auto alexnet_data_types    = framework::dataset::make("DataType", { DataType::QS8, DataType::F16, DataType::F32 });
+const auto lenet_data_types      = framework::dataset::make("DataType", { DataType::F16, DataType::F32 });
+const auto squeezenet_data_types = framework::dataset::make("DataType", { DataType::F16, DataType::F32 });
 #else  /* ARM_COMPUTE_ENABLE_FP16 */
-const auto alexnet_data_types = framework::dataset::make("DataType", { DataType::QS8, DataType::F32 });
-const auto lenet_data_types   = framework::dataset::make("DataType", { DataType::F32 });
+const auto alexnet_data_types    = framework::dataset::make("DataType", { DataType::QS8, DataType::F32 });
+const auto lenet_data_types      = framework::dataset::make("DataType", { DataType::F32 });
+const auto squeezenet_data_types = framework::dataset::make("DataType", { DataType::F32 });
 #endif /* ARM_COMPUTE_ENABLE_FP16 */
 } // namespace
 
@@ -63,6 +66,9 @@
 REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetPoolingLayer, NEPoolingLayerFixture, framework::DatasetMode::ALL,
                                 framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetPoolingLayerDataset(), lenet_data_types), framework::dataset::make("Batches", { 1, 4, 8 })));
 
+REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetPoolingLayer, NEPoolingLayerFixture, framework::DatasetMode::ALL,
+                                framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetPoolingLayerDataset(), squeezenet_data_types), framework::dataset::make("Batches", { 1, 4, 8 })));
+
 TEST_SUITE_END()
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/datasets_new/ActivationLayerDataset.h b/tests/datasets_new/ActivationLayerDataset.h
index 02f5803..a6b882f 100644
--- a/tests/datasets_new/ActivationLayerDataset.h
+++ b/tests/datasets_new/ActivationLayerDataset.h
@@ -152,6 +152,41 @@
     GoogLeNetActivationLayerDataset(GoogLeNetActivationLayerDataset &&) = default;
     ~GoogLeNetActivationLayerDataset()                                  = default;
 };
+
+class SqueezeNetActivationLayerDataset final : public
+    framework::dataset::CartesianProductDataset<framework::dataset::InitializerListDataset<TensorShape>, framework::dataset::SingletonDataset<ActivationLayerInfo>>
+{
+public:
+    SqueezeNetActivationLayerDataset()
+        : CartesianProductDataset
+    {
+        framework::dataset::make("Shape", { // relu_conv1
+            TensorShape(111U, 111U, 64U),
+            // fire2/relu_squeeze1x1, fire3/relu_squeeze1x1
+            TensorShape(55U, 55U, 16U),
+            // fire2/relu_expand1x1, fire2/relu_expand3x3, fire3/relu_expand1x1, fire3/relu_expand3x3
+            TensorShape(55U, 55U, 64U),
+            // fire4/relu_squeeze1x1, fire5/relu_squeeze1x1
+            TensorShape(27U, 27U, 32U),
+            // fire4/relu_expand1x1, fire4/relu_expand3x3, fire5/relu_expand1x1, fire5/relu_expand3x3
+            TensorShape(27U, 27U, 128U),
+            // fire6/relu_squeeze1x1, fire7/relu_squeeze1x1
+            TensorShape(13U, 13U, 48U),
+            // fire6/relu_expand1x1, fire6/relu_expand3x3, fire7/relu_expand1x1, fire7/relu_expand3x3
+            TensorShape(13U, 13U, 192U),
+            // fire8/relu_squeeze1x1, fire9/relu_squeeze1x1
+            TensorShape(13U, 13U, 64U),
+            // fire8/relu_expand1x1, fire8/relu_expand3x3, fire9/relu_expand1x1, fire9/relu_expand3x3
+            TensorShape(13U, 13U, 256U),
+            // relu_conv10
+            TensorShape(13U, 13U, 1000U) }),
+        framework::dataset::make("Info", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+    }
+    {
+    }
+    SqueezeNetActivationLayerDataset(SqueezeNetActivationLayerDataset &&) = default;
+    ~SqueezeNetActivationLayerDataset()                                   = default;
+};
 } // namespace datasets
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/datasets_new/SqueezeNetConvolutionLayerDataset.h b/tests/datasets_new/SqueezeNetConvolutionLayerDataset.h
new file mode 100644
index 0000000..07ec6c9
--- /dev/null
+++ b/tests/datasets_new/SqueezeNetConvolutionLayerDataset.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_SQUEEZENET_CONVOLUTION_LAYER_DATASET
+#define ARM_COMPUTE_TEST_SQUEEZENET_CONVOLUTION_LAYER_DATASET
+
+#include "tests/datasets_new/ConvolutionLayerDataset.h"
+
+#include "tests/TypePrinter.h"
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
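+/** Convolution layer shapes and parameters from SqueezeNet V1.1.
+ *
+ * Each config lists, in order: input shape, weights shape, biases shape,
+ * output shape and pad/stride information.
+ */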
+class SqueezeNetConvolutionLayerDataset final : public ConvolutionLayerDataset
+{
+public:
+    SqueezeNetConvolutionLayerDataset()
+    {
+        // conv1
+        add_config(TensorShape(224U, 224U, 3U), TensorShape(3U, 3U, 3U, 64U), TensorShape(64U), TensorShape(111U, 111U, 64U), PadStrideInfo(2, 2, 0, 0));
+        // fire2/squeeze1x1
+        add_config(TensorShape(55U, 55U, 64U), TensorShape(1U, 1U, 64U, 16U), TensorShape(16U), TensorShape(55U, 55U, 16U), PadStrideInfo(1, 1, 0, 0));
+        // fire2/expand1x1, fire3/expand1x1
+        add_config(TensorShape(55U, 55U, 16U), TensorShape(1U, 1U, 16U, 64U), TensorShape(64U), TensorShape(55U, 55U, 64U), PadStrideInfo(1, 1, 0, 0));
+        // fire2/expand3x3, fire3/expand3x3
+        add_config(TensorShape(55U, 55U, 16U), TensorShape(3U, 3U, 16U, 64U), TensorShape(64U), TensorShape(55U, 55U, 64U), PadStrideInfo(1, 1, 1, 1));
+        // fire3/squeeze1x1
+        add_config(TensorShape(55U, 55U, 128U), TensorShape(1U, 1U, 128U, 16U), TensorShape(16U), TensorShape(55U, 55U, 16U), PadStrideInfo(1, 1, 0, 0));
+        // fire4/squeeze1x1
+        add_config(TensorShape(27U, 27U, 128U), TensorShape(1U, 1U, 128U, 32U), TensorShape(32U), TensorShape(27U, 27U, 32U), PadStrideInfo(1, 1, 0, 0));
+        // fire4/expand1x1, fire5/expand1x1
+        add_config(TensorShape(27U, 27U, 32U), TensorShape(1U, 1U, 32U, 128U), TensorShape(128U), TensorShape(27U, 27U, 128U), PadStrideInfo(1, 1, 0, 0));
+        // fire4/expand3x3, fire5/expand3x3
+        add_config(TensorShape(27U, 27U, 32U), TensorShape(3U, 3U, 32U, 128U), TensorShape(128U), TensorShape(27U, 27U, 128U), PadStrideInfo(1, 1, 1, 1));
+        // fire5/squeeze1x1
+        add_config(TensorShape(27U, 27U, 256U), TensorShape(1U, 1U, 256U, 32U), TensorShape(32U), TensorShape(27U, 27U, 32U), PadStrideInfo(1, 1, 0, 0));
+        // fire6/squeeze1x1
+        add_config(TensorShape(13U, 13U, 256U), TensorShape(1U, 1U, 256U, 48U), TensorShape(48U), TensorShape(13U, 13U, 48U), PadStrideInfo(1, 1, 0, 0));
+        // fire6/expand1x1, fire7/expand1x1
+        add_config(TensorShape(13U, 13U, 48U), TensorShape(1U, 1U, 48U, 192U), TensorShape(192U), TensorShape(13U, 13U, 192U), PadStrideInfo(1, 1, 0, 0));
+        // fire6/expand3x3, fire7/expand3x3
+        add_config(TensorShape(13U, 13U, 48U), TensorShape(3U, 3U, 48U, 192U), TensorShape(192U), TensorShape(13U, 13U, 192U), PadStrideInfo(1, 1, 1, 1));
+        // fire7/squeeze1x1
+        add_config(TensorShape(13U, 13U, 384U), TensorShape(1U, 1U, 384U, 48U), TensorShape(48U), TensorShape(13U, 13U, 48U), PadStrideInfo(1, 1, 0, 0));
+        // fire8/squeeze1x1
+        add_config(TensorShape(13U, 13U, 384U), TensorShape(1U, 1U, 384U, 64U), TensorShape(64U), TensorShape(13U, 13U, 64U), PadStrideInfo(1, 1, 0, 0));
+        // fire8/expand1x1, fire9/expand1x1
+        add_config(TensorShape(13U, 13U, 64U), TensorShape(1U, 1U, 64U, 256U), TensorShape(256U), TensorShape(13U, 13U, 256U), PadStrideInfo(1, 1, 0, 0));
+        // fire8/expand3x3, fire9/expand3x3
+        add_config(TensorShape(13U, 13U, 64U), TensorShape(3U, 3U, 64U, 256U), TensorShape(256U), TensorShape(13U, 13U, 256U), PadStrideInfo(1, 1, 1, 1));
+        // fire9/squeeze1x1
+        add_config(TensorShape(13U, 13U, 512U), TensorShape(1U, 1U, 512U, 64U), TensorShape(64U), TensorShape(13U, 13U, 64U), PadStrideInfo(1, 1, 0, 0));
+        // conv10
+        add_config(TensorShape(13U, 13U, 512U), TensorShape(1U, 1U, 512U, 1000U), TensorShape(1000U), TensorShape(13U, 13U, 1000U), PadStrideInfo(1, 1, 0, 0));
+    }
+};
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_SQUEEZENET_CONVOLUTION_LAYER_DATASET */
diff --git a/tests/datasets_new/SqueezeNetPoolingLayerDataset.h b/tests/datasets_new/SqueezeNetPoolingLayerDataset.h
new file mode 100644
index 0000000..dc443c8
--- /dev/null
+++ b/tests/datasets_new/SqueezeNetPoolingLayerDataset.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_SQUEEZENET_POOLING_LAYER_DATASET
+#define ARM_COMPUTE_TEST_SQUEEZENET_POOLING_LAYER_DATASET
+
+#include "tests/datasets_new/PoolingLayerDataset.h"
+
+#include "tests/TypePrinter.h"
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
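+/** Pooling layer shapes and parameters from SqueezeNet V1.1.
+ *
+ * Each config lists, in order: input shape, output shape and pooling
+ * information (3x3 max pooling with stride 2 and ceil rounding).
+ */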
+class SqueezeNetPoolingLayerDataset final : public PoolingLayerDataset
+{
+public:
+    SqueezeNetPoolingLayerDataset()
+    {
+        // pool1
+        add_config(TensorShape(111U, 111U, 64U), TensorShape(55U, 55U, 64U), PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)));
+        // pool3
+        add_config(TensorShape(55U, 55U, 128U), TensorShape(27U, 27U, 128U), PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)));
+        // pool5
+        add_config(TensorShape(27U, 27U, 256U), TensorShape(13U, 13U, 256U), PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)));
+        // FIXME: Add support for global pooling.
+    }
+};
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_SQUEEZENET_POOLING_LAYER_DATASET */