IVGCVSW-2556 Add Layer implementation for Detection PostProcess

 * Add DetectionPostProcessDescriptor.
 * Add implementation for DetectionPostProcessLayer.
 * Add unit test to validate the inferred output shapes (see the sketch below).
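
A minimal configuration sketch, using the values from the new unit test
(all other descriptor fields keep the defaults set by the constructor
added here):

    armnn::DetectionPostProcessDescriptor descriptor;
    descriptor.m_MaxDetections = 3;  // keep at most three detections
    // m_MaxClassesPerDetection defaults to 1 and m_UseRegularNms to false,
    // so the layer applies Fast NMS and reports
    // m_MaxDetections * m_MaxClassesPerDetection = 3 boxes per batch.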

Change-Id: If63e83eb2a2978c549071c7aeb272906e7c35fe9
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 44235c7..29d294e 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -329,6 +329,42 @@
 
 struct DetectionPostProcessDescriptor
 {
+    DetectionPostProcessDescriptor()
+    : m_MaxDetections(0)
+    , m_MaxClassesPerDetection(1)
+    , m_DetectionsPerClass(100)
+    , m_NmsScoreThreshold(0)
+    , m_NmsIouThreshold(0)
+    , m_NumClasses(0)
+    , m_UseRegularNms(false)
+    , m_ScaleX(0)
+    , m_ScaleY(0)
+    , m_ScaleW(0)
+    , m_ScaleH(0)
+    {}
+
+    /// Maximum number of detections.
+    uint32_t m_MaxDetections;
+    /// Maximum number of classes per detection, used in Fast NMS.
+    uint32_t m_MaxClassesPerDetection;
+    /// Number of detections per class, used in Regular NMS.
+    uint32_t m_DetectionsPerClass;
+    /// NMS score threshold.
+    float m_NmsScoreThreshold;
+    /// Intersection over union threshold.
+    float m_NmsIouThreshold;
+    /// Number of classes.
+    int32_t m_NumClasses;
+    /// Use Regular NMS.
+    bool m_UseRegularNms;
+    /// Center size encoding scale x.
+    float m_ScaleX;
+    /// Center size encoding scale y.
+    float m_ScaleY;
+    /// Center size encoding scale width.
+    float m_ScaleW;
+    /// Center size encoding scale height.
+    float m_ScaleH;
 };
 
 /// A NormalizationDescriptor for the NormalizationLayer.
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index 7858922..3eea198 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -8,6 +8,7 @@
 #include "LayerCloneBase.hpp"
 
 #include <armnn/TypesUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
@@ -34,6 +35,39 @@
 void DetectionPostProcessLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(2, CHECK_LOCATION());
+
+    // At this level, the constant anchor data should not have been released.
+    BOOST_ASSERT_MSG(m_Anchors != nullptr, "DetectionPostProcessLayer: Anchors data should not be null.");
+
+    BOOST_ASSERT_MSG(GetNumOutputSlots() == 4, "DetectionPostProcessLayer: The layer should return 4 outputs.");
+
+    unsigned int detectedBoxes = m_Param.m_MaxDetections * m_Param.m_MaxClassesPerDetection;
+
+    const TensorShape& inferredDetectionBoxes = TensorShape({ 1, detectedBoxes, 4 });
+    const TensorShape& inferredDetectionScores = TensorShape({ 1, detectedBoxes });
+    const TensorShape& inferredNumberDetections = TensorShape({ 1 });
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "DetectionPostProcessLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredDetectionBoxes);
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "DetectionPostProcessLayer: TensorShape set on OutputSlot[1] does not match the inferred shape.",
+        GetOutputSlot(1).GetTensorInfo().GetShape(),
+        inferredDetectionScores);
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "DetectionPostProcessLayer: TensorShape set on OutputSlot[2] does not match the inferred shape.",
+        GetOutputSlot(2).GetTensorInfo().GetShape(),
+        inferredDetectionScores);
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "DetectionPostProcessLayer: TensorShape set on OutputSlot[3] does not match the inferred shape.",
+        GetOutputSlot(3).GetTensorInfo().GetShape(),
+        inferredNumberDetections);
+}
+
+Layer::ConstantTensors DetectionPostProcessLayer::GetConstantTensorsByRef()
+{
+    return { m_Anchors };
 }
 
 void DetectionPostProcessLayer::Accept(ILayerVisitor& visitor) const
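
For reference, the shape contract that ValidateTensorShapesFromInputs enforces,
worked through with the values used by the unit test (m_MaxDetections = 3,
m_MaxClassesPerDetection left at its default of 1):

    // numDetectedBoxes = m_MaxDetections * m_MaxClassesPerDetection = 3 * 1 = 3
    const unsigned int numDetectedBoxes = 3 * 1;
    const armnn::TensorShape detectionBoxes  ({ 1, numDetectedBoxes, 4 }); // OutputSlot[0]
    const armnn::TensorShape detectionScores ({ 1, numDetectedBoxes });    // OutputSlot[1]
    const armnn::TensorShape detectionClasses({ 1, numDetectedBoxes });    // OutputSlot[2], same shape as the scores
    const armnn::TensorShape numDetections   ({ 1 });                      // OutputSlot[3]
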
diff --git a/src/armnn/layers/DetectionPostProcessLayer.hpp b/src/armnn/layers/DetectionPostProcessLayer.hpp
index 629e386..a1c499e 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.hpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.hpp
@@ -10,10 +10,15 @@
 namespace armnn
 {
 
+class ScopedCpuTensorHandle;
+
 /// This layer represents a detection postprocess operator.
 class DetectionPostProcessLayer : public LayerWithParameters<DetectionPostProcessDescriptor>
 {
 public:
+    /// A unique pointer to store the anchor values.
+    std::unique_ptr<ScopedCpuTensorHandle> m_Anchors;
+
     /// Makes a workload for the DetectionPostProcess type.
     /// @param [in] graph The graph where this layer can be found.
     /// @param [in] factory The workload factory which will create the workload.
@@ -39,6 +44,10 @@
 
     /// Default destructor
     ~DetectionPostProcessLayer() = default;
+
+    /// Retrieve the handles to the constant values stored by the layer.
+    /// @return A vector of the constant tensors stored by this layer.
+    ConstantTensors GetConstantTensorsByRef() override;
 };
 
 } // namespace armnn
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 3b07986..f40a78a 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -1050,4 +1050,39 @@
     BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
 }
 
+BOOST_AUTO_TEST_CASE(DetectionPostProcessValidateTensorShapes)
+{
+    Graph graph;
+    armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QuantisedAsymm8);
+    armnn::TensorInfo scoresInfo({1, 10, 4}, DataType::QuantisedAsymm8);
+    std::vector<uint8_t> anchorsVector(40);
+    armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, armnn::DataType::QuantisedAsymm8), anchorsVector);
+
+    armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QuantisedAsymm8);
+    armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QuantisedAsymm8);
+    armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QuantisedAsymm8);
+    armnn::TensorInfo numDetectionInfo({1}, DataType::QuantisedAsymm8);
+
+    Layer* input0 = graph.AddLayer<InputLayer>(0, "boxEncodings");
+    input0->GetOutputSlot().SetTensorInfo(boxEncodingsInfo);
+
+    Layer* input1 = graph.AddLayer<InputLayer>(1, "score");
+    input1->GetOutputSlot().SetTensorInfo(scoresInfo);
+
+    DetectionPostProcessDescriptor descriptor;
+    descriptor.m_MaxDetections = 3;
+
+    DetectionPostProcessLayer* layer = graph.AddLayer<DetectionPostProcessLayer>(descriptor, "detectionPostProcess");
+    layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(anchors);
+    layer->GetOutputSlot(0).SetTensorInfo(detectionBoxesInfo);
+    layer->GetOutputSlot(1).SetTensorInfo(detectionScoresInfo);
+    layer->GetOutputSlot(2).SetTensorInfo(detectionClassesInfo);
+    layer->GetOutputSlot(3).SetTensorInfo(numDetectionInfo);
+
+    input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
+    input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
+
+    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+}
+
 BOOST_AUTO_TEST_SUITE_END()