IVGCVSW-1813 - Add MeanLayer
 * Add MeanLayer functionality
 * Modify MeanQueueDescriptor to use a MeanDescriptor parameter
 * Add IsMeanSupported placeholder for all backends

Change-Id: Ic69a34a61df667849977aad9b38f9a01eef565b5
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index ee93d48..fce1e95 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -29,6 +29,7 @@
         case LayerType::Input: return "Input";
         case LayerType::L2Normalization: return "L2Normalization";
         case LayerType::Lstm: return "Lstm";
+        case LayerType::Mean: return "Mean";
         case LayerType::MemCopy: return "MemCopy";
         case LayerType::Merger: return "Merger";
         case LayerType::Multiplication: return "Multiplication";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index d2c83cd..13ab2bc 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -29,6 +29,7 @@
     Input,
     L2Normalization,
     Lstm,
+    Mean,
     MemCopy,
     Merger,
     Multiplication,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 59c1c8d..7ed56c5 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -345,4 +345,14 @@
     FORWARD_LAYER_SUPPORT_FUNC(compute, IsFloorSupported, input, output);
 }
 
+bool IsMeanSupported(Compute compute,
+                     const TensorInfo& input,
+                     const TensorInfo& output,
+                     const MeanDescriptor& descriptor,
+                     char* reasonIfUnsupported,
+                     size_t reasonIfUnsupportedMaxLength)
+{
+    FORWARD_LAYER_SUPPORT_FUNC(compute, IsMeanSupported, input, output, descriptor);
+}
+
 }
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index a1dc355..c9ee9db 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -21,6 +21,7 @@
 #include "layers/InputLayer.hpp"
 #include "layers/L2NormalizationLayer.hpp"
 #include "layers/LstmLayer.hpp"
+#include "layers/MeanLayer.hpp"
 #include "layers/MemCopyLayer.hpp"
 #include "layers/MergerLayer.hpp"
 #include "layers/MultiplicationLayer.hpp"
@@ -76,6 +77,7 @@
 DECLARE_LAYER(Input)
 DECLARE_LAYER(L2Normalization)
 DECLARE_LAYER(Lstm)
+DECLARE_LAYER(Mean)
 DECLARE_LAYER(MemCopy)
 DECLARE_LAYER(Merger)
 DECLARE_LAYER(Multiplication)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index dc531d1..22d80d3 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -594,6 +594,11 @@
     return m_Graph->AddLayer<SubtractionLayer>(name);
 }
 
+IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
+{
+    return m_Graph->AddLayer<MeanLayer>(meanDescriptor, name);
+}
+
 OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
     : m_Graph(std::move(graph))
 {
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index b6b8548..1411242 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -117,6 +117,8 @@
 
     IConnectableLayer* AddSubtractionLayer(const char* name = nullptr) override;
 
+    IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr) override;
+
 private:
     IConnectableLayer* AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
         const ConstTensor& weights,
diff --git a/src/armnn/backends/ClLayerSupport.cpp b/src/armnn/backends/ClLayerSupport.cpp
index aeb2759..4664c2e 100644
--- a/src/armnn/backends/ClLayerSupport.cpp
+++ b/src/armnn/backends/ClLayerSupport.cpp
@@ -462,4 +462,12 @@
                                    reasonIfUnsupported);
 }
 
+bool IsMeanSupportedCl(const TensorInfo& /*input*/,
+                       const TensorInfo& /*output*/,
+                       const MeanDescriptor& /*descriptor*/,
+                       std::string* /*reasonIfUnsupported*/)
+{
+    return false; // Placeholder: no CL Mean implementation yet.
+}
+
 }
diff --git a/src/armnn/backends/ClLayerSupport.hpp b/src/armnn/backends/ClLayerSupport.hpp
index dbe546c..f5c1226e 100644
--- a/src/armnn/backends/ClLayerSupport.hpp
+++ b/src/armnn/backends/ClLayerSupport.hpp
@@ -142,6 +142,11 @@
                         const TensorInfo& output,
                         std::string* reasonIfUnsupported = nullptr);
 
+bool IsMeanSupportedCl(const TensorInfo& input,
+                       const TensorInfo& output,
+                       const MeanDescriptor& descriptor,
+                       std::string* reasonIfUnsupported = nullptr);
+
 bool IsConvertFp16ToFp32SupportedCl(const TensorInfo& input,
                                     const TensorInfo& output,
                                     std::string* reasonIfUnsupported = nullptr);
diff --git a/src/armnn/backends/NeonLayerSupport.cpp b/src/armnn/backends/NeonLayerSupport.cpp
index 73d2518..7f33c48 100644
--- a/src/armnn/backends/NeonLayerSupport.cpp
+++ b/src/armnn/backends/NeonLayerSupport.cpp
@@ -453,4 +453,12 @@
     return true;
 }
 
+bool IsMeanSupportedNeon(const TensorInfo& /*input*/,
+                         const TensorInfo& /*output*/,
+                         const MeanDescriptor& /*descriptor*/,
+                         std::string* /*reasonIfUnsupported*/)
+{
+    return false; // Placeholder: no NEON Mean implementation yet.
+}
+
 }
diff --git a/src/armnn/backends/NeonLayerSupport.hpp b/src/armnn/backends/NeonLayerSupport.hpp
index f7b6253..95b14b3 100644
--- a/src/armnn/backends/NeonLayerSupport.hpp
+++ b/src/armnn/backends/NeonLayerSupport.hpp
@@ -155,4 +155,9 @@
                                       const TensorInfo& output,
                                       std::string* reasonIfUnsupported = nullptr);
 
+bool IsMeanSupportedNeon(const TensorInfo& input,
+                         const TensorInfo& output,
+                         const MeanDescriptor& descriptor,
+                         std::string* reasonIfUnsupported = nullptr);
+
 }
diff --git a/src/armnn/backends/RefLayerSupport.cpp b/src/armnn/backends/RefLayerSupport.cpp
index 41f57f1..d56cdeb 100644
--- a/src/armnn/backends/RefLayerSupport.cpp
+++ b/src/armnn/backends/RefLayerSupport.cpp
@@ -387,4 +387,12 @@
                                           &FalseFuncU8<>));
 }
 
+bool IsMeanSupportedRef(const TensorInfo& /*input*/,
+                        const TensorInfo& /*output*/,
+                        const MeanDescriptor& /*descriptor*/,
+                        std::string* /*reasonIfUnsupported*/)
+{
+    return false; // Placeholder: no reference Mean implementation yet.
+}
+
 }
diff --git a/src/armnn/backends/RefLayerSupport.hpp b/src/armnn/backends/RefLayerSupport.hpp
index 464eb1c..ff2e7e3 100644
--- a/src/armnn/backends/RefLayerSupport.hpp
+++ b/src/armnn/backends/RefLayerSupport.hpp
@@ -147,4 +147,9 @@
                                      const TensorInfo& output,
                                      std::string* reasonIfUnsupported = nullptr);
 
+bool IsMeanSupportedRef(const TensorInfo& input,
+                        const TensorInfo& output,
+                        const MeanDescriptor& descriptor,
+                        std::string* reasonIfUnsupported = nullptr);
+
 }
diff --git a/src/armnn/backends/WorkloadData.cpp b/src/armnn/backends/WorkloadData.cpp
index 3ed77da..25144a4 100644
--- a/src/armnn/backends/WorkloadData.cpp
+++ b/src/armnn/backends/WorkloadData.cpp
@@ -129,18 +129,6 @@
     }
 }
 
-void ValidateTensorMaxNumElements(const TensorInfo& tensor,
-                                  std::string const& descName,
-                                  unsigned int maxNumElements,
-                                  std::string const& tensorName)
-{
-    if (tensor.GetNumElements() > maxNumElements)
-    {
-        throw InvalidArgumentException(descName + ": Expected maximum of " + to_string(maxNumElements) + " but got " +
-            to_string(tensor.GetNumElements()) + " elements for " + tensorName + " tensor.");
-    }
-}
-
 //---------------------------------------------------------------
 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
     const std::string& descName, std::string const& tensorName)
@@ -844,20 +832,17 @@
     const TensorInfo& input  = workloadInfo.m_InputTensorInfos[0];
     const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
 
-    if (m_Keepdims)
+    if (m_Parameters.m_KeepDims)
     {
         ValidateTensorNumDimensions(output, "MeanQueueDescriptor", input.GetNumDimensions(), "output");
     }
-    else if (m_Axis == nullptr)
+    else if (m_Parameters.m_Axis.empty())
     {
         ValidateTensorNumDimensions(output, "MeanQueueDescriptor", 1, "output");
     }
     else
     {
-        const TensorInfo& axis = m_Axis->GetTensorInfo();
-        ValidateTensorNumDimensions(axis, "MeanQueueDescriptor", 1, "axis");
-        ValidateTensorMaxNumElements(axis, "MeanQueueDescriptor", input.GetNumDimensions(), "axis");
-        unsigned int outputDim = input.GetNumDimensions() - axis.GetNumElements();
+        auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size()); // NOTE(review): assumes m_Axis.size() <= input rank (unsigned subtraction wraps otherwise) — confirm upstream validation
         ValidateTensorNumDimensions(output,
                                     "MeanQueueDescriptor",
                                     outputDim > 0 ? outputDim : 1,
diff --git a/src/armnn/backends/WorkloadData.hpp b/src/armnn/backends/WorkloadData.hpp
index face761..a36f0ad 100644
--- a/src/armnn/backends/WorkloadData.hpp
+++ b/src/armnn/backends/WorkloadData.hpp
@@ -197,17 +197,8 @@
 };
 
 // Mean layer workload data.
-struct MeanQueueDescriptor : QueueDescriptor
+struct MeanQueueDescriptor : QueueDescriptorWithParameters<MeanDescriptor>
 {
-    MeanQueueDescriptor()
-        : m_Axis(nullptr)
-        , m_Keepdims(false)
-    {
-    }
-
-    const ConstCpuTensorHandle* m_Axis;
-    bool m_Keepdims;
-
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
diff --git a/src/armnn/backends/WorkloadFactory.cpp b/src/armnn/backends/WorkloadFactory.cpp
index d188725..773a8c1 100644
--- a/src/armnn/backends/WorkloadFactory.cpp
+++ b/src/armnn/backends/WorkloadFactory.cpp
@@ -537,6 +537,19 @@
                                             reasonCapacity);
             break;
         }
+        case LayerType::Mean:
+        {
+            auto cLayer = boost::polymorphic_downcast<const MeanLayer*>(&layer);
+            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+            result = IsMeanSupported(compute,
+                                     OverrideDataType(input, dataType),
+                                     OverrideDataType(output, dataType),
+                                     cLayer->GetParameters(),
+                                     reason,
+                                     reasonCapacity);
+            break;
+        }
         default:
         {
             BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
diff --git a/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp b/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
index 7745972..c5389df 100644
--- a/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
+++ b/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
@@ -328,6 +328,8 @@
 
 DECLARE_LAYER_POLICY_2_PARAM(Lstm)
 
+DECLARE_LAYER_POLICY_2_PARAM(Mean)
+
 DECLARE_LAYER_POLICY_2_PARAM(Merger)
 
 DECLARE_LAYER_POLICY_1_PARAM(Multiplication)
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
new file mode 100644
index 0000000..6bbb094
--- /dev/null
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -0,0 +1,106 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "MeanLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+#include "backends/CpuTensorHandle.hpp"
+#include "backends/WorkloadData.hpp"
+#include "backends/WorkloadFactory.hpp"
+
+#include <algorithm>
+#include <vector>
+
+namespace armnn
+{
+
+// Layer that reduces its input tensor by taking the mean over the axes held
+// in MeanDescriptor::m_Axis (all axes when empty), optionally keeping the
+// reduced dimensions as size 1 (MeanDescriptor::m_KeepDims).
+MeanLayer::MeanLayer(const armnn::MeanDescriptor& param, const char* name)
+    : LayerWithParameters(1, 1, LayerType::Mean, param, name)
+{}
+
+std::unique_ptr<IWorkload> MeanLayer::CreateWorkload(const armnn::Graph& graph,
+                                                     const armnn::IWorkloadFactory& factory) const
+{
+    MeanQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Axis = m_Param.m_Axis;
+    descriptor.m_Parameters.m_KeepDims = m_Param.m_KeepDims;
+
+    return factory.CreateMean(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+MeanLayer* MeanLayer::Clone(Graph& graph) const
+{
+    // CloneBase copies m_Param (m_Axis and m_KeepDims) via the MeanLayer
+    // constructor, so no extra member copying is needed here.
+    return CloneBase<MeanLayer>(graph, m_Param, GetName());
+}
+
+void MeanLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo();
+
+    BOOST_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= MaxNumOfTensorDimensions,
+                     "MeanLayer: Mean supports up to 4D input.");
+
+    unsigned int rank = input.GetNumDimensions();
+    unsigned int outputRank = 0;
+
+    // Calculate output dimension
+    if (m_Param.m_KeepDims)
+    {
+        outputRank = rank;
+    }
+    else if (m_Param.m_Axis.empty())
+    {
+        outputRank = 1;
+    }
+    else if (m_Param.m_Axis.size() > input.GetNumDimensions())
+    {
+        throw LayerValidationException("MeanLayer: Dimensions to reduce can not be bigger than input dimensions");
+    }
+    else
+    {
+        outputRank = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Param.m_Axis.size());
+        if (outputRank == 0)
+        {
+            outputRank = 1;
+        }
+    }
+
+    // Every output dimension defaults to 1; reduced axes keep that value.
+    std::vector<unsigned int> dimSizes(outputRank, 1);
+
+    if (!m_Param.m_Axis.empty())
+    {
+        // Skip the dimension that has been reduced unless keepDims is true.
+        unsigned int outputIndex = 0;
+        for (unsigned int i = 0; i < input.GetNumDimensions(); ++i)
+        {
+            if (std::find(m_Param.m_Axis.begin(), m_Param.m_Axis.end(), i) == m_Param.m_Axis.end())
+            {
+                dimSizes[outputIndex] = boost::numeric_cast<unsigned int>(input.GetShape()[i]);
+                ++outputIndex;
+            }
+            else if (m_Param.m_KeepDims)
+            {
+                dimSizes[outputIndex] = 1;
+                ++outputIndex;
+            }
+        }
+    }
+    const TensorShape inferredShape(outputRank, dimSizes.data());
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "MeanLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShape);
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/MeanLayer.hpp b/src/armnn/layers/MeanLayer.hpp
new file mode 100644
index 0000000..ecb9297
--- /dev/null
+++ b/src/armnn/layers/MeanLayer.hpp
@@ -0,0 +1,36 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+// Layer that computes the mean of its input over the axes listed in
+// MeanDescriptor::m_Axis (all axes when empty), optionally keeping the
+// reduced dimensions as size 1 (MeanDescriptor::m_KeepDims).
+class MeanLayer : public LayerWithParameters<MeanDescriptor>
+{
+public:
+    // Creates the Mean workload for this layer through the given factory.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    // Creates a copy of this layer (including its parameters) in the given graph.
+    MeanLayer* Clone(Graph& graph) const override;
+
+    // Infers the output shape from the input shape and descriptor and checks
+    // it against the shape set on OutputSlot[0].
+    void ValidateTensorShapesFromInputs() override;
+
+protected:
+    MeanLayer(const MeanDescriptor& param, const char* name);
+    ~MeanLayer() = default;
+
+};
+
+}