MLCE-101: Adding dilation support in conv and dconv

Added support for dilation in DepthwiseConvolution2d in the
Neon and CL backends, and parsing of TfLite dilation parameters
for both Convolution2d and DepthwiseConvolution2d.

Change-Id: Ie1522b498c07f80d6efcf9dc79e926c8cfa06ca5
Signed-off-by: Pablo Tello <pablo.tello@arm.com>
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 42b017f..cf31599 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -274,6 +274,8 @@
     , m_PadBottom(0)
     , m_StrideX(0)
     , m_StrideY(0)
+    , m_DilationX(1)
+    , m_DilationY(1)
     , m_BiasEnabled(false)
     , m_DataLayout(DataLayout::NCHW)
     {}
@@ -290,6 +292,10 @@
     uint32_t             m_StrideX;
     /// Stride value when proceeding through input for the height dimension.
     uint32_t             m_StrideY;
+    /// Dilation along x axis
+    uint32_t             m_DilationX;
+    /// Dilation along y axis
+    uint32_t             m_DilationY;
     /// Enable/disable bias.
     bool                 m_BiasEnabled;
     /// The data layout to be used (NCHW, NHWC).
@@ -306,6 +312,8 @@
     ,   m_PadBottom(0)
     ,   m_StrideX(0)
     ,   m_StrideY(0)
+    ,   m_DilationX(1)
+    ,   m_DilationY(1)
     ,   m_BiasEnabled(false)
     ,   m_DataLayout(DataLayout::NCHW)
     {}
@@ -322,6 +330,10 @@
     uint32_t   m_StrideX;
     /// Stride value when proceeding through input for the height dimension.
     uint32_t   m_StrideY;
+    /// Dilation along x axis
+    uint32_t   m_DilationX;
+    /// Dilation along y axis
+    uint32_t   m_DilationY;
     /// Enable/disable bias.
     bool       m_BiasEnabled;
     /// The data layout to be used (NCHW, NHWC).
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index dc84302..b8e48c8 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -87,6 +87,14 @@
                      const DetectionPostProcessDescriptor& descriptor,
                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
+    virtual bool IsDilatedDepthwiseConvolutionSupported(
+                    const TensorInfo& input,
+                    const TensorInfo& output,
+                    const DepthwiseConvolution2dDescriptor& descriptor,
+                    const TensorInfo& weights,
+                    const Optional<TensorInfo>& biases,
+                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
     virtual bool IsDivisionSupported(const TensorInfo& input0,
                                      const TensorInfo& input1,
                                      const TensorInfo& output,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 320d9ce..831a846 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -186,7 +186,28 @@
                                      char* reasonIfUnsupported,
                                      size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(backend, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
+    if (descriptor.m_DilationX == 1 && descriptor.m_DilationY == 1)
+    {
+        // Pre 19.05 ArmNN did not have the dilation parameters.
+        // This version of IsDepthwiseConvolutionSupported is called for backwards-compatibility
+        FORWARD_LAYER_SUPPORT_FUNC(backend,
+                                   IsDepthwiseConvolutionSupported,
+                                   input,
+                                   output,
+                                   descriptor,
+                                   weights,
+                                   biases);
+    }
+    else
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(backend,
+                                   IsDilatedDepthwiseConvolutionSupported,
+                                   input,
+                                   output,
+                                   descriptor,
+                                   weights,
+                                   biases);
+    }
 }
 
 bool IsDequantizeSupported(const BackendId& backend,
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index a1ffe91..e49c179 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -74,15 +74,16 @@
     // Expected filter shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
     // Namely: [ depth multiplier, input channels, filter height, filter width ]
     // Output channels = input channels * depthMultiplier
-
     unsigned int depthMultiplier = filterShape[0];
 
     unsigned int filterHeight = filterShape[2];
-    unsigned int readHeight   = (inputHeight + m_Param.m_PadTop + m_Param.m_PadBottom) - filterHeight;
+    unsigned int dilatedFilterHeight = filterHeight + (m_Param.m_DilationY - 1) * (filterHeight - 1);
+    unsigned int readHeight   = (inputHeight + m_Param.m_PadTop + m_Param.m_PadBottom) - dilatedFilterHeight;
     unsigned int outputHeight = 1 + (readHeight / m_Param.m_StrideY);
 
     unsigned int filterWidth = filterShape[3];
-    unsigned int readWidth   = (inputWidth + m_Param.m_PadLeft + m_Param.m_PadRight) - filterWidth;
+    unsigned int dilatedFilterWidth = filterWidth + (m_Param.m_DilationX - 1) * (filterWidth - 1);
+    unsigned int readWidth   = (inputWidth + m_Param.m_PadLeft + m_Param.m_PadRight) - dilatedFilterWidth;
     unsigned int outputWidth = 1 + (readWidth / m_Param.m_StrideX);
 
     unsigned int outputChannels  = inputChannels * depthMultiplier;
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 0cc6f93..fdb3812 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -228,24 +228,6 @@
 #define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
     CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
 
-uint32_t CheckDilation(const int32_t dilationFactor,
-                       size_t operatorIndex,
-                       const CheckLocation& location)
-{
-    if (dilationFactor != 1)
-    {
-        std::stringstream ss;
-        ss << "ArmNN only supports convolution layers with dilations [1,1,1,1] for operator with index "
-           << operatorIndex  << location.AsString();
-        throw ParseException(ss.str());
-    }
-
-    return static_cast<uint32_t>(dilationFactor);
-}
-
-#define CHECK_DILATION(DILATION_FACTOR, OPERATOR_INDEX) \
-    CheckDilation(DILATION_FACTOR, OPERATOR_INDEX, CHECK_LOCATION())
-
 bool IsActivationSupported(tflite::ActivationFunctionType activationType)
 {
     switch(activationType)
@@ -297,6 +279,7 @@
 void CalcPadding(uint32_t inputSize,
                  uint32_t filterSize,
                  uint32_t stride,
+                 uint32_t dilation,
                  uint32_t& paddingFront,
                  uint32_t& paddingBack,
                  tflite::Padding padding)
@@ -306,7 +289,8 @@
     if (padding == tflite::Padding_SAME)
     {
         uint32_t outputSize = (inputSize + stride - 1) / stride;
-        uint32_t temp = (outputSize - 1) * stride + filterSize;
+        uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
+        uint32_t temp = (outputSize - 1) * stride + dilatedSize;
         if (temp > inputSize)
         {
             paddingFront = (temp - inputSize) / 2;
@@ -722,9 +706,8 @@
     desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
     desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
     desc.m_DataLayout = armnn::DataLayout::NHWC;
-
-    CHECK_DILATION(options->dilation_h_factor, operatorIndex);
-    CHECK_DILATION(options->dilation_w_factor, operatorIndex);
+    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
+    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
 
     auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(inputs.size(), 2, 3);
@@ -744,8 +727,10 @@
     unsigned int filterHeight = filterTensorInfo.GetShape()[1];
     unsigned int filterWidth  = filterTensorInfo.GetShape()[2];
 
-    CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
-    CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);
+    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
+                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
+    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
+                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
 
     auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                  filterTensorInfo,
@@ -810,9 +795,8 @@
     CHECK_VALID_SIZE(inputs.size(), 2, 3);
     auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(outputs.size(), 1);
-
-    CHECK_DILATION(options->dilation_h_factor, operatorIndex);
-    CHECK_DILATION(options->dilation_w_factor, operatorIndex);
+    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
+    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
 
     armnn::TensorInfo inputTensorInfo  = ToTensorInfo(inputs[0]);
     armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
@@ -834,8 +818,10 @@
     // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
     PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]
 
-    CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
-    CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);
+    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
+                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
+    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
+                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
 
     auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
     armnn::IConnectableLayer* layer;
@@ -1045,8 +1031,10 @@
     unsigned int inputHeight = inputTensorInfo.GetShape()[1];
     unsigned int inputWidth  = inputTensorInfo.GetShape()[2];
 
-    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
-    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);
+    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
+                desc.m_PadTop, desc.m_PadBottom, options->padding);
+    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
+                desc.m_PadLeft, desc.m_PadRight, options->padding);
 
     auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(outputs.size(), 1);
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index a5c5f2b..73c9e49 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -246,6 +246,23 @@
                                    biases);
 }
 
+bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
+                                                            const TensorInfo& output,
+                                                            const DepthwiseConvolution2dDescriptor& descriptor,
+                                                            const TensorInfo& weights,
+                                                            const Optional<TensorInfo>& biases,
+                                                            Optional<std::string&> reasonIfUnsupported) const
+{
+    // CLDepthwiseConvolutionLayer supports dilation natively, so defer to the standard workload validation.
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor,
+                                   weights,
+                                   biases);
+}
+
 bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 6393a11..e9a9e68 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -61,6 +61,13 @@
                                          const Optional<TensorInfo>& biases,
                                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
+                                                const TensorInfo& output,
+                                                const DepthwiseConvolution2dDescriptor& descriptor,
+                                                const TensorInfo& weights,
+                                                const Optional<TensorInfo>& biases,
+                                                Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsDivisionSupported(const TensorInfo& input0,
                              const TensorInfo& input1,
                              const TensorInfo& output,
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 1ff0978..e681e95 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -52,13 +52,19 @@
     }
 
     const arm_compute::PadStrideInfo aclPadStrideInfo = BuildArmComputePadStrideInfo(descriptor);
+    const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(
+            descriptor.m_DilationX,
+            descriptor.m_DilationY);
 
     return arm_compute::CLDepthwiseConvolutionLayer::validate(&aclInputInfo,
                                                               &aclWeightsInfo,
                                                               optionalAclBiasesInfo,
                                                               &aclOutputInfo,
                                                               aclPadStrideInfo,
-                                                              aclDepthMultiplier);
+                                                              aclDepthMultiplier,
+                                                              arm_compute::ActivationLayerInfo(),
+                                                              aclDilationInfo);
+
 }
 
 ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
@@ -85,7 +91,7 @@
         BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
     }
 
-    arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
+    const arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
                                              m_Data.m_Parameters.m_StrideY,
                                              m_Data.m_Parameters.m_PadLeft,
                                              m_Data.m_Parameters.m_PadRight,
@@ -93,6 +99,11 @@
                                              m_Data.m_Parameters.m_PadBottom,
                                              arm_compute::DimensionRoundingType::FLOOR);
 
+    const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(
+                m_Data.m_Parameters.m_DilationX,
+                m_Data.m_Parameters.m_DilationY);
+
+
     std::string name = std::string("ClDepthwiseConvolutionWorkload");
     m_Data.ValidateInputsOutputs(name, 1, 1);
 
@@ -109,6 +120,7 @@
     // Get the depth multiplier
     const unsigned int depthMultiplier = weightInfo.GetShape()[0];
 
+
     // Check for optimisation opportunities.
     bool use3x3Optimisation = (weightInfo.GetShape()[2] == 3) && (weightInfo.GetShape()[3] == 3);
     if (use3x3Optimisation)
@@ -120,7 +132,9 @@
             m_BiasTensor.get(),
             &output,
             padStrideInfo,
-            depthMultiplier);
+            depthMultiplier,
+            arm_compute::ActivationLayerInfo(),
+            aclDilationInfo);
     }
     else
     {
@@ -131,7 +145,10 @@
             m_BiasTensor.get(),
             &output,
             padStrideInfo,
-            depthMultiplier);
+            depthMultiplier,
+            arm_compute::ActivationLayerInfo(),
+            aclDilationInfo);
+
     }
 
     BOOST_ASSERT(m_DepthwiseConvolutionLayer);
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 898660c..c257dd3 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -203,6 +203,22 @@
                                    biases);
 }
 
+bool NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
+                                                              const TensorInfo& output,
+                                                              const DepthwiseConvolution2dDescriptor& descriptor,
+                                                              const TensorInfo& weights,
+                                                              const Optional<TensorInfo>& biases,
+                                                              Optional<std::string&> reasonIfUnsupported) const
+{
+    // NEDepthwiseConvolutionLayer handles dilation directly, so the dilated
+    // case validates through the same workload check as the undilated one.
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor, weights, biases);
+}
+
 bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 27e825a..a5aae0b 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -56,6 +56,13 @@
                                          const Optional<TensorInfo>& biases,
                                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
+                                                const TensorInfo& output,
+                                                const DepthwiseConvolution2dDescriptor& descriptor,
+                                                const TensorInfo& weights,
+                                                const Optional<TensorInfo>& biases,
+                                                Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsFloorSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index c915555..0b917fc 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -52,14 +52,18 @@
         optionalAclBiasesInfo = &aclBiasesInfo;
     }
 
-    const arm_compute::PadStrideInfo aclPadStrideInfo = BuildArmComputePadStrideInfo(descriptor);
+    arm_compute::PadStrideInfo aclPadStrideInfo = BuildArmComputePadStrideInfo(descriptor);
+    const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(
+            descriptor.m_DilationX,descriptor.m_DilationY);
 
     return arm_compute::NEDepthwiseConvolutionLayer::validate(&aclInputInfo,
                                                               &aclWeightsInfo,
                                                               optionalAclBiasesInfo,
                                                               &aclOutputInfo,
                                                               aclPadStrideInfo,
-                                                              aclDepthMultiplier);
+                                                              aclDepthMultiplier,
+                                                              arm_compute::ActivationLayerInfo(),
+                                                              aclDilationInfo);
 }
 
 NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
@@ -97,6 +101,10 @@
                                              m_Data.m_Parameters.m_PadBottom,
                                              arm_compute::DimensionRoundingType::FLOOR);
 
+
+    const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(
+                m_Data.m_Parameters.m_DilationX, m_Data.m_Parameters.m_DilationY);
+
     m_Data.ValidateInputsOutputs("NeonDepthwiseConvolutionWorkload", 1, 1);
 
     INeonTensorHandle* inputTensorHandle  = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0]);
@@ -109,7 +117,7 @@
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
-    // Get the depth multiplier
+    // Get the depth multiplier
     const unsigned int depthMultiplier = weightInfo.GetShape()[0];
 
     // Check for optimisation opportunities.
@@ -123,7 +131,9 @@
                                                            m_BiasTensor.get(),
                                                            &output,
                                                            padStrideInfo,
-                                                           depthMultiplier);
+                                                           depthMultiplier,
+                                                           arm_compute::ActivationLayerInfo(),
+                                                           aclDilationInfo);
     }
     else
     {
@@ -134,7 +144,9 @@
                                                            m_BiasTensor.get(),
                                                            &output,
                                                            padStrideInfo,
-                                                           depthMultiplier);
+                                                           depthMultiplier,
+                                                           arm_compute::ActivationLayerInfo(),
+                                                           aclDilationInfo);
     }
 
     BOOST_ASSERT(m_pDepthwiseConvolutionLayer);
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 8eded84..a1d8e7d 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -429,7 +429,29 @@
                                      &TrueFunc<>);
 }
 
-bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
+bool RefLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
+                                                             const TensorInfo& output,
+                                                             const DepthwiseConvolution2dDescriptor& descriptor,
+                                                             const TensorInfo& weights,
+                                                             const Optional<TensorInfo>& biases,
+                                                             Optional<std::string&> reasonIfUnsupported) const
+{
+    // The reference backend has no dilated depthwise kernel: only dilation 1x1 is supported.
+    if (descriptor.m_DilationX == 1 && descriptor.m_DilationY == 1)
+    {
+        return IsDepthwiseConvolutionSupported(input, output, descriptor, weights, biases, reasonIfUnsupported);
+    }
+    else
+    {
+        if (reasonIfUnsupported)
+        {
+            reasonIfUnsupported.value() = "Reference Depthwise Convolution: Dilation parameters must be 1";
+        }
+        return false;
+    }
+}
+
+bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 53a1abf..9b1a95c 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -74,6 +74,14 @@
                                          const DetectionPostProcessDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
+                                                const TensorInfo& output,
+                                                const DepthwiseConvolution2dDescriptor& descriptor,
+                                                const TensorInfo& weights,
+                                                const Optional<TensorInfo>& biases,
+                                                Optional<std::string&> reasonIfUnsupported =
+                                                    EmptyOptional()) const override;
+
     bool IsDivisionSupported(const TensorInfo& input0,
                              const TensorInfo& input1,
                              const TensorInfo& output,