IVGCVSW-1642: Add IBackendInternal interface

Adds the public ILayerSupport interface with default "not supported"
implementations, exposes it via IBackend::GetLayerSupport(), and
introduces the internal IBackendInternal interface through which a
backend provides its workload factory.

Change-Id: Icd55fed8381af319f11b4cd977cf03103cdf1bd9
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 39d83c2..56b0935 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -150,6 +150,7 @@
     include/armnn/Descriptors.hpp
     include/armnn/DescriptorsFwd.hpp
     include/armnn/IRuntime.hpp
+    include/armnn/ILayerSupport.hpp
     include/armnn/INetwork.hpp
     include/armnn/Tensor.hpp
     include/armnn/TensorFwd.hpp
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
new file mode 100644
index 0000000..7962393
--- /dev/null
+++ b/include/armnn/ILayerSupport.hpp
@@ -0,0 +1,210 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/DescriptorsFwd.hpp>
+
+#include <boost/optional.hpp>
+#include <vector>
+
+namespace armnn
+{
+
+class TensorInfo;
+
+class ILayerSupport
+{
+protected:
+    ILayerSupport() {}
+    virtual ~ILayerSupport() {}
+
+public:
+    virtual bool IsActivationSupported(const TensorInfo& input,
+                                       const TensorInfo& output,
+                                       const ActivationDescriptor& descriptor,
+                                       char* reasonIfUnsupported = nullptr,
+                                       size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsAdditionSupported(const TensorInfo& input0,
+                                     const TensorInfo& input1,
+                                     const TensorInfo& output,
+                                     char* reasonIfUnsupported = nullptr,
+                                     size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsBatchNormalizationSupported(const TensorInfo& input,
+                                               const TensorInfo& output,
+                                               const TensorInfo& mean,
+                                               const TensorInfo& var,
+                                               const TensorInfo& beta,
+                                               const TensorInfo& gamma,
+                                               const BatchNormalizationDescriptor& descriptor,
+                                               char* reasonIfUnsupported = nullptr,
+                                               size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsConstantSupported(const TensorInfo& output,
+                                     char* reasonIfUnsupported = nullptr,
+                                     size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
+                                              const TensorInfo& output,
+                                              char* reasonIfUnsupported = nullptr,
+                                              size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsConvertFp32ToFp16Supported(const TensorInfo& input,
+                                              const TensorInfo& output,
+                                              char* reasonIfUnsupported = nullptr,
+                                              size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsConvolution2dSupported(const TensorInfo& input,
+                                          const TensorInfo& output,
+                                          const Convolution2dDescriptor& descriptor,
+                                          const TensorInfo& weights,
+                                          const boost::optional<TensorInfo>& biases,
+                                          char* reasonIfUnsupported = nullptr,
+                                          size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsDepthwiseConvolutionSupported(const TensorInfo& input,
+                                                 const TensorInfo& output,
+                                                 const DepthwiseConvolution2dDescriptor& descriptor,
+                                                 const TensorInfo& weights,
+                                                 const boost::optional<TensorInfo>& biases,
+                                                 char* reasonIfUnsupported = nullptr,
+                                                 size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsDivisionSupported(const TensorInfo& input0,
+                                     const TensorInfo& input1,
+                                     const TensorInfo& output,
+                                     char* reasonIfUnsupported = nullptr,
+                                     size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsSubtractionSupported(const TensorInfo& input0,
+                                        const TensorInfo& input1,
+                                        const TensorInfo& output,
+                                        char* reasonIfUnsupported = nullptr,
+                                        size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsInputSupported(const TensorInfo& input,
+                                  char* reasonIfUnsupported = nullptr,
+                                  size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsFullyConnectedSupported(const TensorInfo& input,
+                                           const TensorInfo& output,
+                                           const TensorInfo& weights,
+                                           const TensorInfo& biases,
+                                           const FullyConnectedDescriptor& descriptor,
+                                           char* reasonIfUnsupported = nullptr,
+                                           size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsL2NormalizationSupported(const TensorInfo& input,
+                                            const TensorInfo& output,
+                                            const L2NormalizationDescriptor& descriptor,
+                                            char* reasonIfUnsupported = nullptr,
+                                            size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsLstmSupported(const TensorInfo& input,
+                                 const TensorInfo& outputStateIn,
+                                 const TensorInfo& cellStateIn,
+                                 const TensorInfo& scratchBuffer,
+                                 const TensorInfo& outputStateOut,
+                                 const TensorInfo& cellStateOut,
+                                 const TensorInfo& output,
+                                 const LstmDescriptor& descriptor,
+                                 const TensorInfo& inputToForgetWeights,
+                                 const TensorInfo& inputToCellWeights,
+                                 const TensorInfo& inputToOutputWeights,
+                                 const TensorInfo& recurrentToForgetWeights,
+                                 const TensorInfo& recurrentToCellWeights,
+                                 const TensorInfo& recurrentToOutputWeights,
+                                 const TensorInfo& forgetGateBias,
+                                 const TensorInfo& cellBias,
+                                 const TensorInfo& outputGateBias,
+                                 const TensorInfo* inputToInputWeights,
+                                 const TensorInfo* recurrentToInputWeights,
+                                 const TensorInfo* cellToInputWeights,
+                                 const TensorInfo* inputGateBias,
+                                 const TensorInfo* projectionWeights,
+                                 const TensorInfo* projectionBias,
+                                 const TensorInfo* cellToForgetWeights,
+                                 const TensorInfo* cellToOutputWeights,
+                                 char* reasonIfUnsupported = nullptr,
+                                 size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
+                                   const OriginsDescriptor& descriptor,
+                                   char* reasonIfUnsupported = nullptr,
+                                   size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsMultiplicationSupported(const TensorInfo& input0,
+                                           const TensorInfo& input1,
+                                           const TensorInfo& output,
+                                           char* reasonIfUnsupported = nullptr,
+                                           size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsNormalizationSupported(const TensorInfo& input,
+                                          const TensorInfo& output,
+                                          const NormalizationDescriptor& descriptor,
+                                          char* reasonIfUnsupported = nullptr,
+                                          size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsOutputSupported(const TensorInfo& output,
+                                   char* reasonIfUnsupported = nullptr,
+                                   size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsPermuteSupported(const TensorInfo& input,
+                                    const TensorInfo& output,
+                                    const PermuteDescriptor& descriptor,
+                                    char* reasonIfUnsupported = nullptr,
+                                    size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsPooling2dSupported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      const Pooling2dDescriptor& descriptor,
+                                      char* reasonIfUnsupported = nullptr,
+                                      size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsResizeBilinearSupported(const TensorInfo& input,
+                                           char* reasonIfUnsupported = nullptr,
+                                           size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsSoftmaxSupported(const TensorInfo& input,
+                                    const TensorInfo& output,
+                                    const SoftmaxDescriptor& descriptor,
+                                    char* reasonIfUnsupported = nullptr,
+                                    size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsSplitterSupported(const TensorInfo& input,
+                                     const ViewsDescriptor& descriptor,
+                                     char* reasonIfUnsupported = nullptr,
+                                     size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsFakeQuantizationSupported(const TensorInfo& input,
+                                             const FakeQuantizationDescriptor& descriptor,
+                                             char* reasonIfUnsupported = nullptr,
+                                             size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsReshapeSupported(const TensorInfo& input,
+                                    char* reasonIfUnsupported = nullptr,
+                                    size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsFloorSupported(const TensorInfo& input,
+                                  const TensorInfo& output,
+                                  char* reasonIfUnsupported = nullptr,
+                                  size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsMeanSupported(const TensorInfo& input,
+                                 const TensorInfo& output,
+                                 const MeanDescriptor& descriptor,
+                                 char* reasonIfUnsupported = nullptr,
+                                 size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsPadSupported(const TensorInfo& input,
+                                const TensorInfo& output,
+                                const PadDescriptor& descriptor,
+                                char* reasonIfUnsupported = nullptr,
+                                size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+}; // class ILayerSupport
+
+} // namespace armnn
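
For illustration only (not part of this patch): a minimal sketch of how a backend-specific class might derive from ILayerSupport, and how a caller could use the reasonIfUnsupported buffer. The class and helper names below are hypothetical.

    // Hypothetical sketch: a backend overriding a single query while
    // inheriting the default "not supported" behaviour for the rest.
    #include <armnn/ILayerSupport.hpp>
    #include <iostream>

    class ExampleLayerSupport : public armnn::ILayerSupport
    {
    public:
        bool IsInputSupported(const armnn::TensorInfo& input,
                              char* reasonIfUnsupported,
                              size_t reasonIfUnsupportedMaxLength) const override
        {
            return true; // this example backend accepts any input tensor
        }
    };

    // Callers pass a fixed-size buffer; on failure it holds a diagnostic.
    void ReportConstantSupport(const armnn::ILayerSupport& support,
                               const armnn::TensorInfo& info)
    {
        char reason[1024] = {0};
        if (!support.IsConstantSupported(info, reason, sizeof(reason)))
        {
            std::cout << "Constant layer not supported: " << reason << std::endl;
        }
    }
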
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index d0a0174..12ecda0 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -6,6 +6,7 @@
 
 #include <array>
 #include <memory>
+#include "ILayerSupport.hpp"
 
 namespace armnn
 {
@@ -113,6 +114,7 @@
 
 public:
     virtual const std::string& GetId() const = 0;
+    virtual const ILayerSupport& GetLayerSupport() const = 0;
 };
 
 using IBackendPtr = std::shared_ptr<IBackend>;
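
A hypothetical usage sketch (not part of this change) of the new GetLayerSupport() accessor from code holding an IBackendPtr; the helper function name is illustrative only.

    #include <armnn/Types.hpp>
    #include <armnn/Tensor.hpp>

    // Hypothetical helper: ask a backend whether it can run an addition
    // with the given input/output tensors, via the new accessor.
    bool BackendSupportsAddition(const armnn::IBackendPtr& backend,
                                 const armnn::TensorInfo& in0,
                                 const armnn::TensorInfo& in1,
                                 const armnn::TensorInfo& out)
    {
        char reason[1024] = {0};
        const armnn::ILayerSupport& support = backend->GetLayerSupport();
        return support.IsAdditionSupported(in0, in1, out, reason, sizeof(reason));
    }
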
diff --git a/src/backends/CMakeLists.txt b/src/backends/CMakeLists.txt
index c9c5cc1..ea5ad78 100644
--- a/src/backends/CMakeLists.txt
+++ b/src/backends/CMakeLists.txt
@@ -7,6 +7,8 @@
     CpuTensorHandle.cpp
     CpuTensorHandleFwd.hpp
     CpuTensorHandle.hpp
+    IBackendInternal.hpp
+    ILayerSupport.cpp
     ITensorHandle.hpp
     MakeWorkloadHelper.hpp
     MemCopyWorkload.cpp
diff --git a/src/backends/IBackendInternal.hpp b/src/backends/IBackendInternal.hpp
new file mode 100644
index 0000000..1ccf88e
--- /dev/null
+++ b/src/backends/IBackendInternal.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/Types.hpp>
+#include <backends/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+class IBackendInternal : public IBackend
+{
+protected:
+    IBackendInternal() {}
+    virtual ~IBackendInternal() {}
+
+public:
+    virtual std::unique_ptr<IWorkloadFactory> CreateWorkloadFactory() const = 0;
+};
+
+} // namespace armnn
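
For context, a minimal sketch (assumed, not in this patch) of a concrete backend implementing the new internal interface; ExampleBackend and its trivial layer-support class are hypothetical, and the sketch assumes GetId() and GetLayerSupport() are the only pure virtuals inherited from IBackend.

    #include <backends/IBackendInternal.hpp>
    #include <string>

    namespace armnn
    {

    // Overrides nothing, so every query falls back to the default
    // "not supported" implementations in ILayerSupport.
    class ExampleBackendLayerSupport : public ILayerSupport {};

    class ExampleBackend : public IBackendInternal
    {
    public:
        const std::string& GetId() const override
        {
            static const std::string id = "Example";
            return id;
        }

        const ILayerSupport& GetLayerSupport() const override
        {
            static ExampleBackendLayerSupport layerSupport;
            return layerSupport;
        }

        std::unique_ptr<IWorkloadFactory> CreateWorkloadFactory() const override
        {
            return nullptr; // a real backend returns its IWorkloadFactory here
        }
    };

    } // namespace armnn
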
diff --git a/src/backends/ILayerSupport.cpp b/src/backends/ILayerSupport.cpp
new file mode 100644
index 0000000..c0446e9
--- /dev/null
+++ b/src/backends/ILayerSupport.cpp
@@ -0,0 +1,304 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/ILayerSupport.hpp>
+
+#include <cstdio>
+
+namespace armnn
+{
+
+namespace
+{
+
+bool DefaultLayerSupport(const char* func,
+                         const char* file,
+                         unsigned int line,
+                         char* reasonIfUnsupported,
+                         size_t reasonIfUnsupportedMaxLength)
+{
+    if (reasonIfUnsupported != nullptr && reasonIfUnsupportedMaxLength > 0)
+    {
+        snprintf(reasonIfUnsupported,
+                 reasonIfUnsupportedMaxLength,
+                 "%s is not supported [%s:%u]",
+                 func,
+                 file,
+                 line);
+    }
+    return false;
+}
+
+} // anonymous namespace
+
+bool ILayerSupport::IsActivationSupported(const TensorInfo& input,
+                                          const TensorInfo& output,
+                                          const ActivationDescriptor& descriptor,
+                                          char* reasonIfUnsupported,
+                                          size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsAdditionSupported(const TensorInfo& input0,
+                                        const TensorInfo& input1,
+                                        const TensorInfo& output,
+                                        char* reasonIfUnsupported,
+                                        size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
+                                                  const TensorInfo& output,
+                                                  const TensorInfo& mean,
+                                                  const TensorInfo& var,
+                                                  const TensorInfo& beta,
+                                                  const TensorInfo& gamma,
+                                                  const BatchNormalizationDescriptor& descriptor,
+                                                  char* reasonIfUnsupported,
+                                                  size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsConstantSupported(const TensorInfo& output,
+                                        char* reasonIfUnsupported,
+                                        size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
+                                                 const TensorInfo& output,
+                                                 char* reasonIfUnsupported,
+                                                 size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
+                                                 const TensorInfo& output,
+                                                 char* reasonIfUnsupported,
+                                                 size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsConvolution2dSupported(const TensorInfo& input,
+                                             const TensorInfo& output,
+                                             const Convolution2dDescriptor& descriptor,
+                                             const TensorInfo& weights,
+                                             const boost::optional<TensorInfo>& biases,
+                                             char* reasonIfUnsupported,
+                                             size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
+                                                    const TensorInfo& output,
+                                                    const DepthwiseConvolution2dDescriptor& descriptor,
+                                                    const TensorInfo& weights,
+                                                    const boost::optional<TensorInfo>& biases,
+                                                    char* reasonIfUnsupported,
+                                                    size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsDivisionSupported(const TensorInfo& input0,
+                                        const TensorInfo& input1,
+                                        const TensorInfo& output,
+                                        char* reasonIfUnsupported,
+                                        size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsSubtractionSupported(const TensorInfo& input0,
+                                           const TensorInfo& input1,
+                                           const TensorInfo& output,
+                                           char* reasonIfUnsupported,
+                                           size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsInputSupported(const TensorInfo& input,
+                                     char* reasonIfUnsupported,
+                                     size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
+                                              const TensorInfo& output,
+                                              const TensorInfo& weights,
+                                              const TensorInfo& biases,
+                                              const FullyConnectedDescriptor& descriptor,
+                                              char* reasonIfUnsupported,
+                                              size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
+                                               const TensorInfo& output,
+                                               const L2NormalizationDescriptor& descriptor,
+                                               char* reasonIfUnsupported,
+                                               size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsLstmSupported(const TensorInfo& input,
+                                    const TensorInfo& outputStateIn,
+                                    const TensorInfo& cellStateIn,
+                                    const TensorInfo& scratchBuffer,
+                                    const TensorInfo& outputStateOut,
+                                    const TensorInfo& cellStateOut,
+                                    const TensorInfo& output,
+                                    const LstmDescriptor& descriptor,
+                                    const TensorInfo& inputToForgetWeights,
+                                    const TensorInfo& inputToCellWeights,
+                                    const TensorInfo& inputToOutputWeights,
+                                    const TensorInfo& recurrentToForgetWeights,
+                                    const TensorInfo& recurrentToCellWeights,
+                                    const TensorInfo& recurrentToOutputWeights,
+                                    const TensorInfo& forgetGateBias,
+                                    const TensorInfo& cellBias,
+                                    const TensorInfo& outputGateBias,
+                                    const TensorInfo* inputToInputWeights,
+                                    const TensorInfo* recurrentToInputWeights,
+                                    const TensorInfo* cellToInputWeights,
+                                    const TensorInfo* inputGateBias,
+                                    const TensorInfo* projectionWeights,
+                                    const TensorInfo* projectionBias,
+                                    const TensorInfo* cellToForgetWeights,
+                                    const TensorInfo* cellToOutputWeights,
+                                    char* reasonIfUnsupported,
+                                    size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
+                                      const OriginsDescriptor& descriptor,
+                                      char* reasonIfUnsupported,
+                                      size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
+                                              const TensorInfo& input1,
+                                              const TensorInfo& output,
+                                              char* reasonIfUnsupported,
+                                              size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsNormalizationSupported(const TensorInfo& input,
+                                             const TensorInfo& output,
+                                             const NormalizationDescriptor& descriptor,
+                                             char* reasonIfUnsupported,
+                                             size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsOutputSupported(const TensorInfo& output,
+                                      char* reasonIfUnsupported,
+                                      size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsPermuteSupported(const TensorInfo& input,
+                                       const TensorInfo& output,
+                                       const PermuteDescriptor& descriptor,
+                                       char* reasonIfUnsupported,
+                                       size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsPooling2dSupported(const TensorInfo& input,
+                                         const TensorInfo& output,
+                                         const Pooling2dDescriptor& descriptor,
+                                         char* reasonIfUnsupported,
+                                         size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
+                                              char* reasonIfUnsupported,
+                                              size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsSoftmaxSupported(const TensorInfo& input,
+                                       const TensorInfo& output,
+                                       const SoftmaxDescriptor& descriptor,
+                                       char* reasonIfUnsupported,
+                                       size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsSplitterSupported(const TensorInfo& input,
+                                        const ViewsDescriptor& descriptor,
+                                        char* reasonIfUnsupported,
+                                        size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
+                                                const FakeQuantizationDescriptor& descriptor,
+                                                char* reasonIfUnsupported,
+                                                size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsReshapeSupported(const TensorInfo& input,
+                                       char* reasonIfUnsupported,
+                                       size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsFloorSupported(const TensorInfo& input,
+                                     const TensorInfo& output,
+                                     char* reasonIfUnsupported,
+                                     size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsMeanSupported(const TensorInfo& input,
+                                    const TensorInfo& output,
+                                    const MeanDescriptor& descriptor,
+                                    char* reasonIfUnsupported,
+                                    size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsPadSupported(const TensorInfo& input,
+                                   const TensorInfo& output,
+                                   const PadDescriptor& descriptor,
+                                   char* reasonIfUnsupported,
+                                   size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+} // namespace armnn
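
A hypothetical sketch (not part of this patch) showing what the defaults above do at a call site: any query a backend does not override returns false and, via DefaultLayerSupport, fills the caller's buffer with a "<function> is not supported [<file>:<line>]" message. The derived class and main function are illustrative only.

    #include <armnn/ILayerSupport.hpp>
    #include <armnn/Tensor.hpp>
    #include <iostream>

    namespace
    {
    // ILayerSupport's constructor is protected, so the defaults are
    // reached through a derived class that overrides nothing.
    class DefaultsOnlyLayerSupport : public armnn::ILayerSupport {};
    } // anonymous namespace

    int main()
    {
        DefaultsOnlyLayerSupport support;
        armnn::TensorInfo info;     // default-constructed tensor info
        char reason[1024] = {0};

        bool supported = support.IsFloorSupported(info, info, reason, sizeof(reason));

        // Expected output along the lines of:
        // false: IsFloorSupported is not supported [.../ILayerSupport.cpp:<line>]
        std::cout << std::boolalpha << supported << ": " << reason << std::endl;
        return 0;
    }
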