IVGCVSW-2874 Add DequantizeLayer and no-op factory method

* Add Dequantize layer to the frontend
* Add Serializer and Deserializer for Dequantize

Change-Id: Ide2647b9e0348d599deb97e61ca4bf66e2f17fc0
Signed-off-by: Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>
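
Dequantize converts a quantized tensor (QuantisedAsymm8 or QuantisedSymm16) back
to Float32 using the tensor's quantization parameters: real = scale * (quantized - offset).
A minimal sketch of the new frontend API, modelled on the SerializeDequantize test
added below (shapes and quantization parameters are illustrative only):

    #include <armnn/ArmNN.hpp>

    int main()
    {
        // Input (QAsymm8, scale 0.5, offset 1) -> Dequantize -> Output (Float32).
        armnn::INetworkPtr network = armnn::INetwork::Create();

        armnn::IConnectableLayer* input      = network->AddInputLayer(0);
        armnn::IConnectableLayer* dequantize = network->AddDequantizeLayer("dequantize");
        armnn::IConnectableLayer* output     = network->AddOutputLayer(0);

        input->GetOutputSlot(0).Connect(dequantize->GetInputSlot(0));
        dequantize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        input->GetOutputSlot(0).SetTensorInfo(
            armnn::TensorInfo({ 1, 5, 2, 3 }, armnn::DataType::QuantisedAsymm8, 0.5f, 1));
        dequantize->GetOutputSlot(0).SetTensorInfo(
            armnn::TensorInfo({ 1, 5, 2, 3 }, armnn::DataType::Float32));
        return 0;
    }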
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index e4a6ac8..fe1542b 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -24,6 +24,7 @@
         case LayerType::Convolution2d: return "Convolution2d";
         case LayerType::Debug: return "Debug";
         case LayerType::DepthwiseConvolution2d: return "DepthwiseConvolution2d";
+        case LayerType::Dequantize: return "Dequantize";
         case LayerType::DetectionPostProcess: return "DetectionPostProcess";
         case LayerType::Division: return "Division";
         case LayerType::Equal: return "Equal";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index d6d6603..1972e9c 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -24,6 +24,7 @@
     Convolution2d,
     Debug,
     DepthwiseConvolution2d,
+    Dequantize,
     DetectionPostProcess,
     Division,
     Equal,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 0e3d252..0309733 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -189,6 +189,15 @@
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
 }
 
+bool IsDequantizeSupported(const BackendId& backend,
+                           const TensorInfo& input,
+                           const TensorInfo& output,
+                           char* reasonIfUnsupported,
+                           size_t reasonIfUnsupportedMaxLength)
+{
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsDequantizeSupported, input, output);
+}
+
 bool IsDetectionPostProcessSupported(const BackendId& backend,
                                      const TensorInfo& input0,
                                      const TensorInfo& input1,
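
FORWARD_LAYER_SUPPORT_FUNC routes the query to the backend's ILayerSupport
implementation and fills the caller's reason buffer, which is why the two reason
parameters look unused above. A sketch of querying support through this entry
point (inputInfo/outputInfo as in the example after the commit message; CpuRef
is chosen only for illustration):

    char reason[1024] = {};
    bool supported = armnn::IsDequantizeSupported(armnn::BackendId("CpuRef"),
                                                  inputInfo,   // quantized tensor
                                                  outputInfo,  // Float32 tensor
                                                  reason,
                                                  sizeof(reason));
    // Until a backend overrides IsDequantizeSupported, the LayerSupportBase
    // default below returns false and writes an explanation into 'reason'.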
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 9300a75..9d87aee 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -16,6 +16,7 @@
 #include "layers/Convolution2dLayer.hpp"
 #include "layers/DebugLayer.hpp"
 #include "layers/DepthwiseConvolution2dLayer.hpp"
+#include "layers/DequantizeLayer.hpp"
 #include "layers/DetectionPostProcessLayer.hpp"
 #include "layers/DivisionLayer.hpp"
 #include "layers/EqualLayer.hpp"
@@ -86,6 +87,7 @@
 DECLARE_LAYER(Convolution2d)
 DECLARE_LAYER(Debug)
 DECLARE_LAYER(DepthwiseConvolution2d)
+DECLARE_LAYER(Dequantize)
 DECLARE_LAYER(DetectionPostProcess)
 DECLARE_LAYER(Division)
 DECLARE_LAYER(Equal)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index c5dfbd7..6dbd461 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -935,6 +935,11 @@
     return m_Graph->AddLayer<QuantizeLayer>(name);
 }
 
+IConnectableLayer* Network::AddDequantizeLayer(const char* name)
+{
+    return m_Graph->AddLayer<DequantizeLayer>(name);
+}
+
 IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                                  const char* name)
 {
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 5ed8cca..782531a 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -72,6 +72,8 @@
         const ConstTensor& biases,
         const char* name = nullptr) override;
 
+    IConnectableLayer* AddDequantizeLayer(const char* name = nullptr) override;
+
     IConnectableLayer* AddDetectionPostProcessLayer(
         const DetectionPostProcessDescriptor& descriptor,
         const ConstTensor& anchors,
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp
new file mode 100644
index 0000000..4dd30de
--- /dev/null
+++ b/src/armnn/layers/DequantizeLayer.cpp
@@ -0,0 +1,52 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "DequantizeLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+DequantizeLayer::DequantizeLayer(const char* name)
+    : Layer(1, 1, LayerType::Dequantize, name)
+{}
+
+std::unique_ptr<IWorkload> DequantizeLayer::CreateWorkload(const Graph& graph,
+                                                           const IWorkloadFactory& factory) const
+{
+    DequantizeQueueDescriptor descriptor;
+
+    return factory.CreateDequantize(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+DequantizeLayer* DequantizeLayer::Clone(Graph& graph) const
+{
+    return CloneBase<DequantizeLayer>(graph, GetName());
+}
+
+void DequantizeLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    std::vector<TensorShape> inferredShapes = InferOutputShapes({
+        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "DequantizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShapes[0]);
+}
+
+void DequantizeLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitDequantizeLayer(this, GetName());
+}
+
+} // namespace armnn
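
DequantizeLayer does not override InferOutputShapes, so the base Layer
implementation returns the input shape unchanged: the layer is shape-preserving,
and only the data type and value scale change. A worked example of the
arithmetic a Dequantize workload is expected to perform (reference
implementations land separately; this follows the quantization scheme used
throughout Arm NN):

    // real = scale * (quantized - offset)
    const float   scale  = 0.5f;
    const int32_t offset = 1;
    auto dequantize = [=](uint8_t q) { return scale * (static_cast<int32_t>(q) - offset); };

    float r0 = dequantize(1);   //   0.0f
    float r1 = dequantize(3);   //   1.0f
    float r2 = dequantize(255); // 127.0f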
diff --git a/src/armnn/layers/DequantizeLayer.hpp b/src/armnn/layers/DequantizeLayer.hpp
new file mode 100644
index 0000000..1340f96
--- /dev/null
+++ b/src/armnn/layers/DequantizeLayer.hpp
@@ -0,0 +1,42 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "Layer.hpp"
+
+namespace armnn
+{
+
+/// This layer dequantizes the input tensor.
+class DequantizeLayer : public Layer
+{
+public:
+    /// Makes a workload for the Dequantize type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    DequantizeLayer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref DequantizeLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create a DequantizeLayer.
+    /// @param [in] name Optional name for the layer.
+    DequantizeLayer(const char* name);
+
+    /// Default destructor
+    ~DequantizeLayer() = default;
+};
+
+} // namespace armnn
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index ff5bf8b..943c6a7 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -193,6 +193,7 @@
     m_ParserFunctions[Layer_ConstantLayer]               = &Deserializer::ParseConstant;
     m_ParserFunctions[Layer_Convolution2dLayer]          = &Deserializer::ParseConvolution2d;
     m_ParserFunctions[Layer_DepthwiseConvolution2dLayer] = &Deserializer::ParseDepthwiseConvolution2d;
+    m_ParserFunctions[Layer_DequantizeLayer]             = &Deserializer::ParseDequantize;
     m_ParserFunctions[Layer_DetectionPostProcessLayer]   = &Deserializer::ParseDetectionPostProcess;
     m_ParserFunctions[Layer_DivisionLayer]               = &Deserializer::ParseDivision;
     m_ParserFunctions[Layer_EqualLayer]                  = &Deserializer::ParseEqual;
@@ -242,6 +243,8 @@
             return graphPtr->layers()->Get(layerIndex)->layer_as_Convolution2dLayer()->base();
         case Layer::Layer_DepthwiseConvolution2dLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_DepthwiseConvolution2dLayer()->base();
+        case Layer::Layer_DequantizeLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_DequantizeLayer()->base();
         case Layer::Layer_DetectionPostProcessLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_DetectionPostProcessLayer()->base();
         case Layer::Layer_DivisionLayer:
@@ -2062,4 +2065,24 @@
     RegisterOutputSlots(graph, layerIndex, layer);
 }
 
+void Deserializer::ParseDequantize(GraphPtr graph, unsigned int layerIndex)
+{
+    CHECK_LAYERS(graph, 0, layerIndex);
+
+    Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    const std::string layerName = GetLayerName(graph, layerIndex);
+    IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(graph, layerIndex, layer);
+    RegisterOutputSlots(graph, layerIndex, layer);
+}
+
 } // namespace armnnDeserializer
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
index 5d57dfc..f18c163 100644
--- a/src/armnnDeserializer/Deserializer.hpp
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -85,6 +85,7 @@
     void ParseConstant(GraphPtr graph, unsigned int layerIndex);
     void ParseConvolution2d(GraphPtr graph, unsigned int layerIndex);
     void ParseDepthwiseConvolution2d(GraphPtr graph, unsigned int layerIndex);
+    void ParseDequantize(GraphPtr graph, unsigned int layerIndex);
     void ParseDetectionPostProcess(GraphPtr graph, unsigned int layerIndex);
     void ParseDivision(GraphPtr graph, unsigned int layerIndex);
     void ParseEqual(GraphPtr graph, unsigned int layerIndex);
diff --git a/src/armnnDeserializer/DeserializerSupport.md b/src/armnnDeserializer/DeserializerSupport.md
index d53252e..77856cf 100644
--- a/src/armnnDeserializer/DeserializerSupport.md
+++ b/src/armnnDeserializer/DeserializerSupport.md
@@ -13,6 +13,7 @@
 * Constant
 * Convolution2d
 * DepthwiseConvolution2d
+* Dequantize
 * DetectionPostProcess
 * Division
 * Equal
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index 7ac8359..3aa644d 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -117,7 +117,8 @@
     Splitter = 32,
     DetectionPostProcess = 33,
     Lstm = 34,
-    Quantize = 35
+    Quantize = 35,
+    Dequantize = 36
 }
 
 // Base layer table to be used as part of other layers
@@ -519,6 +520,10 @@
     inputParams:LstmInputParams;
 }
 
+table DequantizeLayer {
+    base:LayerBase;
+}
+
 union Layer {
     ActivationLayer,
     AdditionLayer,
@@ -555,7 +560,8 @@
     SplitterLayer,
     DetectionPostProcessLayer,
     LstmLayer,
-    QuantizeLayer
+    QuantizeLayer,
+    DequantizeLayer
 }
 
 table AnyLayer {
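
The schema changes are append-only: Dequantize = 36 extends the LayerType enum
and DequantizeLayer is added at the end of the Layer union, so the numeric tags
in previously serialized networks remain valid. flatc generates the creation
helper the serializer calls below; roughly (builder and base-layer offset
obtained as in Serializer.cpp):

    flatbuffers::Offset<armnnSerializer::DequantizeLayer> fbLayer =
        armnnSerializer::CreateDequantizeLayer(builder, fbBaseLayer);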
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 83777c9..7181f01 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -289,6 +289,15 @@
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DepthwiseConvolution2dLayer);
 }
 
+void SerializerVisitor::VisitDequantizeLayer(const armnn::IConnectableLayer* layer,
+                                             const char* name)
+{
+    auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
+    auto fbDequantizeLayer     = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);
+
+    CreateAnyLayer(fbDequantizeLayer.o, serializer::Layer::Layer_DequantizeLayer);
+}
+
 void SerializerVisitor::VisitDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
                                                        const armnn::DetectionPostProcessDescriptor& descriptor,
                                                        const armnn::ConstTensor& anchors,
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 82e1931..5c3e48a 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -77,6 +77,9 @@
                                           const armnn::Optional<armnn::ConstTensor>& biases,
                                           const char* name = nullptr) override;
 
+    void VisitDequantizeLayer(const armnn::IConnectableLayer* layer,
+                              const char* name = nullptr) override;
+
     void VisitDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
                                         const armnn::DetectionPostProcessDescriptor& descriptor,
                                         const armnn::ConstTensor& anchors,
diff --git a/src/armnnSerializer/SerializerSupport.md b/src/armnnSerializer/SerializerSupport.md
index 7686d5c..a3c5852 100644
--- a/src/armnnSerializer/SerializerSupport.md
+++ b/src/armnnSerializer/SerializerSupport.md
@@ -13,6 +13,7 @@
 * Constant
 * Convolution2d
 * DepthwiseConvolution2d
+* Dequantize
 * DetectionPostProcess
 * Division
 * Equal
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 0345e53..0979076 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -59,6 +59,9 @@
             BOOST_TEST(connectedInfo.GetShape() == m_InputTensorInfos[i].GetShape());
             BOOST_TEST(
                 GetDataTypeName(connectedInfo.GetDataType()) == GetDataTypeName(m_InputTensorInfos[i].GetDataType()));
+
+            BOOST_TEST(connectedInfo.GetQuantizationScale() == m_InputTensorInfos[i].GetQuantizationScale());
+            BOOST_TEST(connectedInfo.GetQuantizationOffset() == m_InputTensorInfos[i].GetQuantizationOffset());
         }
 
         for (unsigned int i = 0; i < m_OutputTensorInfos.size(); i++)
@@ -67,6 +70,9 @@
             BOOST_TEST(outputInfo.GetShape() == m_OutputTensorInfos[i].GetShape());
             BOOST_TEST(
                 GetDataTypeName(outputInfo.GetDataType()) == GetDataTypeName(m_OutputTensorInfos[i].GetDataType()));
+
+            BOOST_TEST(outputInfo.GetQuantizationScale() == m_OutputTensorInfos[i].GetQuantizationScale());
+            BOOST_TEST(outputInfo.GetQuantizationOffset() == m_OutputTensorInfos[i].GetQuantizationOffset());
         }
     }
 
@@ -590,6 +596,44 @@
     deserializedNetwork->Accept(verifier);
 }
 
+BOOST_AUTO_TEST_CASE(SerializeDequantize)
+{
+    class DequantizeLayerVerifier : public LayerVerifierBase
+    {
+    public:
+        DequantizeLayerVerifier(const std::string& layerName,
+                                const std::vector<armnn::TensorInfo>& inputInfos,
+                                const std::vector<armnn::TensorInfo>& outputInfos)
+        : LayerVerifierBase(layerName, inputInfos, outputInfos) {}
+
+        void VisitDequantizeLayer(const armnn::IConnectableLayer* layer, const char* name) override
+        {
+            VerifyNameAndConnections(layer, name);
+        }
+    };
+
+    const std::string layerName("dequantize");
+    const armnn::TensorInfo inputInfo({ 1, 5, 2, 3 }, armnn::DataType::QuantisedAsymm8, 0.5f, 1);
+    const armnn::TensorInfo outputInfo({ 1, 5, 2, 3 }, armnn::DataType::Float32);
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const dequantizeLayer = network->AddDequantizeLayer(layerName.c_str());
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+    inputLayer->GetOutputSlot(0).Connect(dequantizeLayer->GetInputSlot(0));
+    dequantizeLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+    dequantizeLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    DequantizeLayerVerifier verifier(layerName, {inputInfo}, {outputInfo});
+    deserializedNetwork->Accept(verifier);
+}
+
 BOOST_AUTO_TEST_CASE(SerializeDeserializeDetectionPostProcess)
 {
     class DetectionPostProcessLayerVerifier : public LayerVerifierBase
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 137e77e..04f822c 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -115,6 +115,13 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsDequantizeSupported(const TensorInfo& input,
+                                             const TensorInfo& output,
+                                             Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool LayerSupportBase::IsDetectionPostProcessSupported(const armnn::TensorInfo& input0,
                                                        const armnn::TensorInfo& input1,
                                                        const armnn::DetectionPostProcessDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index ceb3b27..7d64095 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -67,6 +67,10 @@
                                          const Optional<TensorInfo>& biases,
                                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsDequantizeSupported(const TensorInfo& input,
+                               const TensorInfo& output,
+                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsDetectionPostProcessSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const DetectionPostProcessDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index e30a3f3..91b1c57 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1153,6 +1153,23 @@
     }
 }
 
+void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    ValidateSingleInput(workloadInfo, "DequantizeQueueDescriptor");
+    ValidateSingleOutput(workloadInfo, "DequantizeQueueDescriptor");
+
+    if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
+        workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
+    {
+        throw InvalidArgumentException("Input to dequantize layer must be quantized type.");
+    }
+
+    if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
+    {
+        throw InvalidArgumentException("Output of dequantize layer must be Float32 type.");
+    }
+}
+
 void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
 {
     // This is internally generated so it should not need validation.
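
The new Validate enforces exactly one input and one output, a quantized input
type, and a Float32 output. A small sketch of driving it directly, using
WorkloadInfo's public tensor-info vectors:

    armnn::DequantizeQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    info.m_InputTensorInfos.push_back(
        armnn::TensorInfo({ 1, 5, 2, 3 }, armnn::DataType::QuantisedAsymm8, 0.5f, 1));
    info.m_OutputTensorInfos.push_back(
        armnn::TensorInfo({ 1, 5, 2, 3 }, armnn::DataType::Float32));

    descriptor.Validate(info); // passes: quantized in, Float32 out

    info.m_OutputTensorInfos[0] = info.m_InputTensorInfos[0];
    // descriptor.Validate(info); // would now throw InvalidArgumentException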
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 9250cea..5640701 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -416,4 +416,9 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct DequantizeQueueDescriptor : QueueDescriptor
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 } //namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 833f3b8..6534a00 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -229,6 +229,16 @@
                                                      reason);
             break;
         }
+        case LayerType::Dequantize:
+        {
+            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+            result = layerSupportObject->IsDequantizeSupported(OverrideDataType(input, dataType),
+                                                               OverrideDataType(output, DataType::Float32),
+                                                               reason);
+            break;
+        }
         case LayerType::DetectionPostProcess:
         {
             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
@@ -821,6 +831,12 @@
     return std::unique_ptr<IWorkload>();
 }
 
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
+    const DequantizeQueueDescriptor& descriptor, const WorkloadInfo& info) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
     const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
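
This is the no-op factory method from the commit title: the base
IWorkloadFactory returns an empty pointer, so a backend only gains Dequantize
once it overrides both IsDequantizeSupported and CreateDequantize. The resulting
behaviour, as a sketch (factory, descriptor and info assumed from surrounding
context):

    std::unique_ptr<armnn::IWorkload> workload =
        factory.CreateDequantize(descriptor, info);
    if (workload == nullptr)
    {
        // Base-class no-op: this backend does not implement Dequantize yet.
    }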
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 2aa3854..ed7303c 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -79,6 +79,9 @@
     virtual std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(
         const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const;
 
+    virtual std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
+                                                        const WorkloadInfo& info) const;
+
     virtual std::unique_ptr<IWorkload> CreateDetectionPostProcess(
         const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info) const;
 
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 8f86132..26fb03f 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -336,6 +336,8 @@
 
 DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)
 
+DECLARE_LAYER_POLICY_1_PARAM(Dequantize)
+
 DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)
 
 DECLARE_LAYER_POLICY_1_PARAM(Equal)