IVGCVSW-3363 Add frontend support for Resize Layer

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I63493ddb7598515773073deb6db2eb3a635c5dfe
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index dc3dc17..6c49eac 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -53,6 +53,7 @@
     Quantize,
     Reshape,
+    Resize,
     ResizeBilinear,
     Rsqrt,
     Softmax,
     SpaceToBatchNd,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 9837cd3..2e049ec 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -45,6 +45,7 @@
 #include "layers/QuantizeLayer.hpp"
 #include "layers/ReshapeLayer.hpp"
 #include "layers/ResizeBilinearLayer.hpp"
+#include "layers/ResizeLayer.hpp"
 #include "layers/RsqrtLayer.hpp"
 #include "layers/SoftmaxLayer.hpp"
 #include "layers/SpaceToBatchNdLayer.hpp"
@@ -120,6 +121,7 @@
 DECLARE_LAYER(Prelu)
 DECLARE_LAYER(Quantize)
 DECLARE_LAYER(Reshape)
+DECLARE_LAYER(Resize)
 DECLARE_LAYER(ResizeBilinear)
 DECLARE_LAYER(Rsqrt)
 DECLARE_LAYER(Softmax)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 58ccfb7..63432da 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1188,6 +1188,12 @@
     return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor,name);
 }
 
+IConnectableLayer* Network::AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
+                                           const char* name)
+{
+    return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor,name);
+}
+
 IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                     const char* name)
 {
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 8db968a..f0dfb1d 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -137,6 +137,9 @@
     IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
                                               const char* name = nullptr) override;
 
+    IConnectableLayer* AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
+                                      const char* name = nullptr) override;
+
     IConnectableLayer* AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                const char* name = nullptr) override;
 
diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp
new file mode 100644
index 0000000..44b4d9d
--- /dev/null
+++ b/src/armnn/layers/ResizeLayer.cpp
@@ -0,0 +1,76 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "ResizeLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <DataLayoutIndexed.hpp>
+
+using namespace armnnUtils;
+
+namespace armnn
+{
+
+ResizeLayer::ResizeLayer(const ResizeDescriptor& param, const char* name)
+    : LayerWithParameters(1, 1, LayerType::Resize, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> ResizeLayer::CreateWorkload(const Graph& graph,
+                                                       const IWorkloadFactory& factory) const
+{
+    ResizeQueueDescriptor descriptor;
+    return factory.CreateResize(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+ResizeLayer* ResizeLayer::Clone(Graph& graph) const
+{
+    return CloneBase<ResizeLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> ResizeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    BOOST_ASSERT(inputShapes.size() == 1);
+
+    const TensorShape& inputShape = inputShapes[0];
+    const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
+
+    unsigned int outWidth = m_Param.m_TargetWidth;
+    unsigned int outHeight = m_Param.m_TargetHeight;
+    unsigned int outChannels = inputShape[dimensionIndices.GetChannelsIndex()];
+    unsigned int outBatch = inputShape[0];
+
+    TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NHWC ?
+        TensorShape( { outBatch, outHeight, outWidth, outChannels } ) :
+        TensorShape( { outBatch, outChannels, outHeight, outWidth });
+
+    return std::vector<TensorShape>({ tensorShape });
+}
+
+void ResizeLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "ResizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShapes[0]);
+}
+
+void ResizeLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitResizeLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/ResizeLayer.hpp b/src/armnn/layers/ResizeLayer.hpp
new file mode 100644
index 0000000..0d309ff
--- /dev/null
+++ b/src/armnn/layers/ResizeLayer.hpp
@@ -0,0 +1,49 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a resize operation.
+class ResizeLayer : public LayerWithParameters<ResizeDescriptor>
+{
+public:
+    /// Makes a workload for the Resize type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    ResizeLayer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref ResizeLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+    /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+    /// otherwise infers the output shapes from given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes layer has.
+    /// @return A vector to the inferred output shape.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create a ResizeLayer.
+    /// @param [in] param ResizeDescriptor to configure the resize operation.
+    /// @param [in] name Optional name for the layer.
+    ResizeLayer(const ResizeDescriptor& param, const char* name);
+
+    /// Default destructor
+    ~ResizeLayer() = default;
+};
+
+} // namespace armnn
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 208262b..2d5877d 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -656,6 +656,13 @@
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeBilinearLayer);
 }
 
+void SerializerVisitor::VisitResizeLayer(const armnn::IConnectableLayer* layer,
+                                         const armnn::ResizeDescriptor& resizeDescriptor,
+                                         const char* name)
+{
+    throw armnn::Exception("SerializerVisitor::VisitResizeLayer is not yet implemented");
+}
+
 void SerializerVisitor::VisitRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
     auto fbRsqrtBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rsqrt);
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 31f7d05..2529796 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -174,6 +174,10 @@
                                   const armnn::ResizeBilinearDescriptor& resizeDescriptor,
                                   const char* name = nullptr) override;
 
+    void VisitResizeLayer(const armnn::IConnectableLayer* layer,
+                          const armnn::ResizeDescriptor& resizeDescriptor,
+                          const char* name = nullptr) override;
+
     void VisitRsqrtLayer(const armnn::IConnectableLayer* layer,
                          const char* name = nullptr) override;
 
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 2eb0e41..6c25f87 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -377,6 +377,14 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsResizeSupported(const TensorInfo& input,
+                                         const TensorInfo& output,
+                                         const ResizeDescriptor& descriptor,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool LayerSupportBase::IsRsqrtSupported(const TensorInfo &input,
                                         const TensorInfo &output,
                                         Optional<std::string &> reasonIfUnsupported) const
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 52ba5b2..7f63ccf 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -238,6 +238,11 @@
                                    const TensorInfo& output,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsResizeSupported(const TensorInfo& input,
+                           const TensorInfo& output,
+                           const ResizeDescriptor& descriptor,
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsRsqrtSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 7447583..fa9e1cd 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -268,6 +268,11 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct ResizeQueueDescriptor : QueueDescriptorWithParameters<ResizeDescriptor>
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 struct FakeQuantizationQueueDescriptor : QueueDescriptorWithParameters<FakeQuantizationDescriptor>
 {
     FakeQuantizationQueueDescriptor()
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 7cda3fe..b74b6af 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -640,6 +640,17 @@
                                                             reason);
             break;
         }
+        case LayerType::Resize:
+        {
+            auto cLayer = boost::polymorphic_downcast<const ResizeLayer*>(&layer);
+            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+            result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType),
+                                                           OverrideDataType(output, dataType),
+                                                           cLayer->GetParameters(),
+                                                           reason);
+            break;
+        }
         case LayerType::ResizeBilinear:
         {
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
@@ -1080,6 +1091,12 @@
     return std::unique_ptr<IWorkload>();
 }
 
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
+                                                          const WorkloadInfo& info) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
 {
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 978d3a3..02a8002 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -167,6 +167,9 @@
     virtual std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const;
 
+    virtual std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
+                                                    const WorkloadInfo& info) const;
+
     virtual std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const;
 
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 7c9d0f5..6f3a9d3 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -394,6 +394,8 @@
 
 DECLARE_LAYER_POLICY_1_PARAM(Division)
 
+DECLARE_LAYER_POLICY_2_PARAM(Resize)
+
 DECLARE_LAYER_POLICY_2_PARAM(ResizeBilinear)
 
 DECLARE_LAYER_POLICY_2_PARAM(Reshape)