IVGCVSW-3267 Add Arm NN front end support for the new Prelu Activation layer

 * Added new PreluLayer class
 * Made necessary changes to ILayerSupport, ILayerVisitor, etc.
 * Added unit tests

Change-Id: Ifcfb78e823bb5a245ed1dad15290d2f60115c882
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
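
For reference, PReLU computes f(x) = x for x >= 0 and f(x) = alpha * x otherwise, where
alpha is supplied as a second input tensor that is broadcast against the input rather than
as a fixed per-layer constant. A minimal element-wise sketch of the rule (broadcasting is
handled by the layer and its workloads, not shown here):

    // Element-wise PReLU: identity for non-negative values, scaled by alpha otherwise.
    // Illustrative only; the real computation lives in the backend workloads.
    float Prelu(float x, float alpha)
    {
        return x >= 0.0f ? x : alpha * x;
    }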
diff --git a/Android.mk b/Android.mk
index b5376ac..7f3080b 100644
--- a/Android.mk
+++ b/Android.mk
@@ -123,6 +123,7 @@
         src/armnn/layers/PermuteLayer.cpp \
         src/armnn/layers/Pooling2dLayer.cpp \
         src/armnn/layers/PreCompiledLayer.cpp \
+        src/armnn/layers/PreluLayer.cpp \
         src/armnn/layers/QuantizeLayer.cpp \
         src/armnn/layers/ReshapeLayer.cpp \
         src/armnn/layers/ResizeBilinearLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1b6abb6..2971b0d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -296,18 +296,20 @@
     src/armnn/layers/DivisionLayer.hpp
     src/armnn/layers/PreCompiledLayer.hpp
     src/armnn/layers/PreCompiledLayer.cpp
+    src/armnn/layers/PreluLayer.hpp
+    src/armnn/layers/PreluLayer.cpp
     src/armnn/layers/ReshapeLayer.hpp
     src/armnn/layers/ReshapeLayer.cpp
-    src/armnn/layers/SpaceToBatchNdLayer.hpp
-    src/armnn/layers/SpaceToBatchNdLayer.cpp
-    src/armnn/layers/SpaceToDepthLayer.hpp
-    src/armnn/layers/SpaceToDepthLayer.cpp
     src/armnn/layers/ResizeBilinearLayer.hpp
     src/armnn/layers/ResizeBilinearLayer.cpp
     src/armnn/layers/RsqrtLayer.cpp
     src/armnn/layers/RsqrtLayer.hpp
     src/armnn/layers/SoftmaxLayer.hpp
     src/armnn/layers/SoftmaxLayer.cpp
+    src/armnn/layers/SpaceToBatchNdLayer.hpp
+    src/armnn/layers/SpaceToBatchNdLayer.cpp
+    src/armnn/layers/SpaceToDepthLayer.hpp
+    src/armnn/layers/SpaceToDepthLayer.cpp
     src/armnn/layers/SplitterLayer.hpp
     src/armnn/layers/SplitterLayer.cpp
     src/armnn/layers/StridedSliceLayer.cpp
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 4c113d3..324a9f5 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -234,6 +234,11 @@
                                         const PreCompiledDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
+    virtual bool IsPreluSupported(const TensorInfo& input,
+                                  const TensorInfo& alpha,
+                                  const TensorInfo& output,
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
     virtual bool IsQuantizeSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index ab83dbf..9519c8b 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -290,6 +290,12 @@
                                      const Pooling2dDescriptor& pooling2dDescriptor,
                                      const char* name = nullptr) = 0;
 
+    /// Function that a PReLU activation layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+    /// @param layer - pointer to the layer which is calling back to this visit function.
+    /// @param name - Optional name for the layer.
+    virtual void VisitPreluLayer(const IConnectableLayer* layer,
+                                 const char* name = nullptr) = 0;
+
     /// Function a quantize layer should call back to when its Accept(ILayerVisitor&) function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
     /// @param name - Optional name for the layer.
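
With the new pure-virtual callback in place, client visitors can handle PReLU layers. A
minimal sketch, assuming the LayerVisitorBase<VisitorNoThrowPolicy> helper from
LayerVisitorBase.hpp so that only the new method needs overriding:

    #include <armnn/LayerVisitorBase.hpp>

    // Sketch: count the PReLU layers in a network via INetwork::Accept().
    class PreluCounter : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
    {
    public:
        void VisitPreluLayer(const armnn::IConnectableLayer*, const char* = nullptr) override
        {
            ++m_Count; // every PReLU layer in the graph calls back here
        }

        unsigned int m_Count = 0;
    };

    // Usage: PreluCounter counter; network->Accept(counter);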
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index e5ebbc4..cacca33 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -427,6 +427,11 @@
     /// @return - Interface for configuring the layer.
     virtual IConnectableLayer* AddSwitchLayer(const char* name = nullptr) = 0;
 
+    /// Adds a PReLU layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddPreluLayer(const char* name = nullptr) = 0;
+
     virtual void Accept(ILayerVisitor& visitor) const = 0;
 
 protected:
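
A usage sketch for the new entry point: the PReLU layer takes two inputs (the input tensor
on slot 0, alpha on slot 1) and produces one output. Shapes and data types below are
illustrative only:

    #include <armnn/ArmNN.hpp>

    using namespace armnn;

    INetworkPtr net = INetwork::Create();

    IConnectableLayer* input  = net->AddInputLayer(0, "input");
    IConnectableLayer* alpha  = net->AddInputLayer(1, "alpha");
    IConnectableLayer* prelu  = net->AddPreluLayer("prelu");
    IConnectableLayer* output = net->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(prelu->GetInputSlot(0));
    alpha->GetOutputSlot(0).Connect(prelu->GetInputSlot(1));
    prelu->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Alpha broadcasts against the input (see PreluLayer::InferOutputShapes).
    input->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 4, 1, 2 }),    DataType::Float32));
    alpha->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 5, 4, 3, 1 }), DataType::Float32));
    prelu->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 5, 4, 3, 2 }), DataType::Float32));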
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index d58aa87..673193f 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -282,6 +282,14 @@
                             size_t reasonIfUnsupportedMaxLength = 1024);
 
 /// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsPreluSupported(const BackendId& backend,
+                      const TensorInfo& input,
+                      const TensorInfo& alpha,
+                      const TensorInfo& output,
+                      char* reasonIfUnsupported = nullptr,
+                      size_t reasonIfUnsupportedMaxLength = 1024);
+
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
 bool IsPooling2dSupported(const BackendId& backend,
                           const TensorInfo& input,
                           const TensorInfo& output,
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
index b4a2ac7..48fc2bb 100644
--- a/include/armnn/LayerVisitorBase.hpp
+++ b/include/armnn/LayerVisitorBase.hpp
@@ -151,6 +151,9 @@
                              const Pooling2dDescriptor&,
                              const char*) override { DefaultPolicy::Apply(__func__); }
 
+    void VisitPreluLayer(const IConnectableLayer*,
+                         const char*) override { DefaultPolicy::Apply(__func__); }
+
     void VisitQuantizeLayer(const IConnectableLayer*,
                             const char*) override { DefaultPolicy::Apply(__func__); }
 
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 377fb92..a1434ea 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -49,6 +49,7 @@
     Permute,
     Pooling2d,
     PreCompiled,
+    Prelu,
     Quantize,
     Reshape,
     ResizeBilinear,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index de9717c..a801431 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -41,6 +41,7 @@
 #include "layers/PermuteLayer.hpp"
 #include "layers/Pooling2dLayer.hpp"
 #include "layers/PreCompiledLayer.hpp"
+#include "layers/PreluLayer.hpp"
 #include "layers/QuantizeLayer.hpp"
 #include "layers/ReshapeLayer.hpp"
 #include "layers/ResizeBilinearLayer.hpp"
@@ -115,6 +116,7 @@
 DECLARE_LAYER(Permute)
 DECLARE_LAYER(Pooling2d)
 DECLARE_LAYER(PreCompiled)
+DECLARE_LAYER(Prelu)
 DECLARE_LAYER(Quantize)
 DECLARE_LAYER(Reshape)
 DECLARE_LAYER(ResizeBilinear)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 3e7d4d5..75b63e4 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1003,6 +1003,11 @@
     return m_Graph->AddLayer<SwitchLayer>(name);
 }
 
+IConnectableLayer* Network::AddPreluLayer(const char* name)
+{
+    return m_Graph->AddLayer<PreluLayer>(name);
+}
+
 void Network::Accept(ILayerVisitor& visitor) const
 {
     for (auto layer : GetGraph())
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 2648c3f..e1379d0 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -185,6 +185,8 @@
 
     IConnectableLayer* AddSwitchLayer(const char* name = nullptr) override;
 
+    IConnectableLayer* AddPreluLayer(const char* name = nullptr) override;
+
     void Accept(ILayerVisitor& visitor) const override;
 
 private:
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
new file mode 100644
index 0000000..6040248
--- /dev/null
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -0,0 +1,121 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "PreluLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+namespace armnn
+{
+
+PreluLayer::PreluLayer(const char* name)
+    : Layer(2, 1, LayerType::Prelu, name)
+{}
+
+std::unique_ptr<IWorkload> PreluLayer::CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const
+{
+    PreluQueueDescriptor descriptor;
+
+    return factory.CreatePrelu(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+PreluLayer* PreluLayer::Clone(Graph& graph) const
+{
+    auto layer = CloneBase<PreluLayer>(graph, GetName());
+
+    return std::move(layer);
+}
+
+std::vector<TensorShape> PreluLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    BOOST_ASSERT(inputShapes.size() == 2);
+
+    const TensorShape& inputShape = inputShapes[0];
+    const TensorShape& alphaShape = inputShapes[1];
+
+    const unsigned int inputShapeDimensions = inputShape.GetNumDimensions();
+    const unsigned int alphaShapeDimensions = alphaShape.GetNumDimensions();
+
+    BOOST_ASSERT(inputShapeDimensions > 0);
+    BOOST_ASSERT(alphaShapeDimensions > 0);
+
+    // The size of the output is the maximum size along each dimension of the input operands;
+    // broadcasting starts from the trailing dimensions and works its way towards the leading ones
+
+    unsigned int outputDimensions = std::max(inputShapeDimensions, alphaShapeDimensions);
+
+    TensorShape outputShape(outputDimensions);
+
+    int inputShapeIndex = boost::numeric_cast<int>(inputShapeDimensions) - 1;
+    int alphaShapeIndex = boost::numeric_cast<int>(alphaShapeDimensions) - 1;
+    unsigned int outputShapeIndex = outputDimensions - 1;
+
+    // Loop backwards through the common part of the shapes
+    while (inputShapeIndex >= 0 && alphaShapeIndex >= 0)
+    {
+        unsigned int inputDimension = inputShape[boost::numeric_cast<unsigned int>(inputShapeIndex)];
+        unsigned int alphaDimension = alphaShape[boost::numeric_cast<unsigned int>(alphaShapeIndex)];
+
+        // Check that the inputs are broadcast compatible
+        BOOST_ASSERT_MSG(inputDimension == alphaDimension || inputDimension == 1 || alphaDimension == 1,
+                         "PreluLayer: Dimensions should either match or one should be of size 1");
+
+        outputShape[outputShapeIndex] = std::max(inputDimension, alphaDimension);
+
+        inputShapeIndex--;
+        alphaShapeIndex--;
+        outputShapeIndex--;
+    }
+
+    // Loop backwards through the remaining part of the input shape (if any)
+    while (inputShapeIndex >= 0)
+    {
+        outputShape[outputShapeIndex] = inputShape[boost::numeric_cast<unsigned int>(inputShapeIndex)];
+
+        inputShapeIndex--;
+        outputShapeIndex--;
+    }
+
+    // Loop backwards through the remaining part of the alpha shape (if any)
+    while (alphaShapeIndex >= 0)
+    {
+        outputShape[outputShapeIndex] = alphaShape[boost::numeric_cast<unsigned int>(alphaShapeIndex)];
+
+        alphaShapeIndex--;
+        outputShapeIndex--;
+    }
+
+    return { outputShape };
+}
+
+void PreluLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(2, CHECK_LOCATION());
+
+    std::vector<TensorShape> inferredShapes = InferOutputShapes(
+    {
+        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+        GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()
+    });
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "PreluLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShapes[0]);
+}
+
+void PreluLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitPreluLayer(this, GetName());
+}
+
+} // namespace armnn
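
The shape inference above follows the usual trailing-dimension broadcasting convention:
shapes are aligned from the right, each output dimension is the maximum of the pair, and a
mismatch is only allowed when one side is 1 (e.g. input { 4, 1, 2 } and alpha { 5, 4, 3, 1 }
give { 5, 4, 3, 2 }). A standalone sketch of the same rule, independent of the armnn types:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Broadcast two shapes by aligning their trailing dimensions.
    std::vector<unsigned int> BroadcastShape(const std::vector<unsigned int>& a,
                                             const std::vector<unsigned int>& b)
    {
        std::vector<unsigned int> out(std::max(a.size(), b.size()));
        auto ai = a.rbegin();
        auto bi = b.rbegin();
        for (auto oi = out.rbegin(); oi != out.rend(); ++oi)
        {
            unsigned int da = (ai != a.rend()) ? *ai++ : 1; // missing leading dims count as 1
            unsigned int db = (bi != b.rend()) ? *bi++ : 1;
            assert(da == db || da == 1 || db == 1);         // broadcast compatibility
            *oi = std::max(da, db);
        }
        return out;
    }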
diff --git a/src/armnn/layers/PreluLayer.hpp b/src/armnn/layers/PreluLayer.hpp
new file mode 100644
index 0000000..54e57b2
--- /dev/null
+++ b/src/armnn/layers/PreluLayer.hpp
@@ -0,0 +1,49 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+// This layer represents a PReLU activation operation.
+class PreluLayer : public Layer
+{
+public:
+    /// Makes a workload for the PReLU type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    PreluLayer* Clone(Graph& graph) const override;
+
+    /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+    /// otherwise infers the output shapes from the given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes the layer has.
+    /// @return A vector of the inferred output shapes.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref PreluLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create a PreluLayer.
+    /// @param [in] name Optional name for the layer.
+    PreluLayer(const char* name);
+
+    /// Default destructor
+    ~PreluLayer() = default;
+};
+
+} // namespace armnn
diff --git a/src/armnn/test/LayerValidateOutputTest.cpp b/src/armnn/test/LayerValidateOutputTest.cpp
index acefd51..d47959c 100644
--- a/src/armnn/test/LayerValidateOutputTest.cpp
+++ b/src/armnn/test/LayerValidateOutputTest.cpp
@@ -58,4 +58,27 @@
     BOOST_CHECK(expectedShape == spaceToDepthLayer->InferOutputShapes(shapes).at(0));
 }
 
+BOOST_AUTO_TEST_CASE(TestPreluInferOutputShape)
+{
+    armnn::Graph graph;
+
+    armnn::PreluLayer* const preluLayer = graph.AddLayer<armnn::PreluLayer>("prelu");
+
+    std::vector<armnn::TensorShape> inputShapes
+    {
+        { 4, 1, 2 },  // Input shape
+        { 5, 4, 3, 1 } // Alpha shape
+    };
+
+    const std::vector<armnn::TensorShape> expectedOutputShapes
+    {
+        { 5, 4, 3, 2 } // Output shape
+    };
+
+    const std::vector<armnn::TensorShape> outputShapes = preluLayer->InferOutputShapes(inputShapes);
+
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 131bb95..012ed66 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -709,6 +709,12 @@
     CreateAnyLayer(fbPooling2dLayer.o, serializer::Layer::Layer_Pooling2dLayer);
 }
 
+void SerializerVisitor::VisitPreluLayer(const armnn::IConnectableLayer* layer,
+                                        const char* name)
+{
+    throw UnimplementedException("SerializerVisitor::VisitPreluLayer not yet implemented");
+}
+
 void SerializerVisitor::VisitQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
 {
     auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index c9416bb..aae8799 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -160,6 +160,9 @@
                              const armnn::Pooling2dDescriptor& pooling2dDescriptor,
                              const char* name = nullptr) override;
 
+    void VisitPreluLayer(const armnn::IConnectableLayer* layer,
+                         const char* name = nullptr) override;
+
     void VisitQuantizeLayer(const armnn::IConnectableLayer* layer,
                             const char* name = nullptr) override;
 
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 48705c8..12e4ee8 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -348,6 +348,14 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsPreluSupported(const TensorInfo& input,
+                                        const TensorInfo& alpha,
+                                        const TensorInfo& output,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool LayerSupportBase::IsQuantizeSupported(const armnn::TensorInfo& input,
                                            const armnn::TensorInfo& output,
                                            armnn::Optional<std::string&> reasonIfUnsupported) const
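
The base implementation reports the layer as unsupported, so each backend opts in by
overriding the check. A hypothetical sketch (the class name MyBackendLayerSupport and the
exact support policy are illustrative, not part of this change):

    // Hypothetical backend policy: matching float types only.
    bool MyBackendLayerSupport::IsPreluSupported(const TensorInfo& input,
                                                 const TensorInfo& alpha,
                                                 const TensorInfo& output,
                                                 Optional<std::string&> reasonIfUnsupported) const
    {
        const bool supported =
            input.GetDataType() == alpha.GetDataType()  &&
            input.GetDataType() == output.GetDataType() &&
            (input.GetDataType() == DataType::Float32 || input.GetDataType() == DataType::Float16);

        if (!supported && reasonIfUnsupported.has_value())
        {
            reasonIfUnsupported.value() = "MyBackend: PReLU requires matching Float16/Float32 tensors";
        }
        return supported;
    }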
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 4921cf9..d035dfc 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -221,6 +221,11 @@
                                 const PreCompiledDescriptor& descriptor,
                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsPreluSupported(const TensorInfo& input,
+                          const TensorInfo& alpha,
+                          const TensorInfo& output,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsQuantizeSupported(const TensorInfo& input,
                              const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 7c9d4ac..d8c10bd 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1711,4 +1711,44 @@
     // This is internally generated so it should not need validation.
 }
 
+void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    ValidateNumInputs(workloadInfo, "PreluQueueDescriptor", 2);
+    ValidateNumOutputs(workloadInfo, "PreluQueueDescriptor", 1);
+
+    std::vector<DataType> supportedTypes
+    {
+        DataType::Float16,
+        DataType::Float32,
+        DataType::QuantisedAsymm8
+    };
+
+    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
+                      supportedTypes,
+                      "PreluQueueDescriptor");
+
+    ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
+                      supportedTypes,
+                      "PreluQueueDescriptor");
+
+    ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
+                      supportedTypes,
+                      "PreluQueueDescriptor");
+
+    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
+                      { workloadInfo.m_InputTensorInfos[1].GetDataType() },
+                      "PreluQueueDescriptor");
+
+    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
+                      { workloadInfo.m_OutputTensorInfos[0].GetDataType() },
+                      "PreluQueueDescriptor");
+
+    ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
+                                       workloadInfo.m_InputTensorInfos[1],
+                                       workloadInfo.m_OutputTensorInfos[0],
+                                       "PreluQueueDescriptor",
+                                       "input",
+                                       "alpha");
+}
+
 } //namespace armnn
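
A sketch of tensor infos that satisfy the new validation, reusing the broadcastable shapes
from the unit test (illustrative only):

    #include <backendsCommon/WorkloadData.hpp>

    using namespace armnn;

    WorkloadInfo info;
    info.m_InputTensorInfos  = { TensorInfo(TensorShape({ 4, 1, 2 }),    DataType::Float32),   // input
                                 TensorInfo(TensorShape({ 5, 4, 3, 1 }), DataType::Float32) }; // alpha
    info.m_OutputTensorInfos = { TensorInfo(TensorShape({ 5, 4, 3, 2 }), DataType::Float32) }; // output

    PreluQueueDescriptor descriptor;
    descriptor.Validate(info); // throws InvalidArgumentException if types/shapes do not line up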
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 501fdd8..6a51bc3 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -440,4 +440,9 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct PreluQueueDescriptor : QueueDescriptor
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 } //namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 678d330..cca3919 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -785,6 +785,17 @@
                                                             reason);
             break;
         }
+        case LayerType::Prelu:
+        {
+            const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& alpha  = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+            result = layerSupportObject->IsPreluSupported(OverrideDataType(input,  dataType),
+                                                          OverrideDataType(alpha,  dataType),
+                                                          OverrideDataType(output, dataType),
+                                                          reason);
+            break;
+        }
         default:
         {
             BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
@@ -1015,6 +1026,12 @@
     return std::unique_ptr<IWorkload>();
 }
 
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& descriptor,
+                                                         const WorkloadInfo& info) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
                                                             const WorkloadInfo& Info) const
 {
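
The base factory returns an empty pointer, so a backend supplies the actual implementation
by overriding CreatePrelu. A hypothetical sketch (MyWorkloadFactory and MyPreluWorkload are
illustrative names only):

    std::unique_ptr<IWorkload> MyWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
    {
        // The descriptor has already been validated by PreluQueueDescriptor::Validate.
        return std::make_unique<MyPreluWorkload>(descriptor, info);
    }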
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index cc99356..c9fbe71 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -155,6 +155,9 @@
     virtual std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const;
 
+    virtual std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& descriptor,
+                                                   const WorkloadInfo& info) const;
+
     virtual std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
                                                       const WorkloadInfo& Info) const;
 
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index ff632fc..111cf8f 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -384,6 +384,8 @@
 
 DECLARE_LAYER_POLICY_2_PARAM(PreCompiled)
 
+DECLARE_LAYER_POLICY_1_PARAM(Prelu)
+
 DECLARE_LAYER_POLICY_1_PARAM(Division)
 
 DECLARE_LAYER_POLICY_2_PARAM(ResizeBilinear)