IVGCVSW-1966: Ref implementation for the ILayerSupport interface

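Adds RefLayerSupport overrides of the ILayerSupport queries for the
reference backend. Each override forwards to the corresponding existing
armnn::Is*SupportedRef free function, converting the Optional<std::string&>
reason argument into the raw std::string* those functions still expect.
The free functions are kept for backward compatibility until the
transition to pluggable backends is complete.
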
Change-Id: Idd572cae3a131acb11e884e33c0035ca74c95055
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index a42efb7..e6b1442 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -17,6 +17,317 @@
 namespace armnn
 {
 
+namespace
+{
+
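+// Converts the Optional<std::string&> reason parameter taken by the ILayerSupport
+// methods below into the raw std::string* still expected by the legacy
+// armnn::Is*SupportedRef free functions they forward to.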
+std::string* GetReasonIfUnsupportedPtr(const Optional<std::string&>& reasonIfUnsupported)
+{
+    return reasonIfUnsupported ? &reasonIfUnsupported.value() : nullptr;
+}
+
+} // anonymous namespace
+
+bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
+                                            const TensorInfo& output,
+                                            const ActivationDescriptor& descriptor,
+                                            Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsActivationSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0,
+                                          const TensorInfo& input1,
+                                          const TensorInfo& output,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsAdditionSupportedRef(input0,
+                                         input1,
+                                         output,
+                                         GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
+                                                    const TensorInfo& output,
+                                                    const TensorInfo& mean,
+                                                    const TensorInfo& var,
+                                                    const TensorInfo& beta,
+                                                    const TensorInfo& gamma,
+                                                    const BatchNormalizationDescriptor& descriptor,
+                                                    Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsBatchNormalizationSupportedRef(input,
+                                                   output,
+                                                   mean,
+                                                   var,
+                                                   beta,
+                                                   gamma,
+                                                   descriptor,
+                                                   GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsConstantSupportedRef(output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
+                                                   const TensorInfo& output,
+                                                   Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsConvertFp16ToFp32SupportedRef(input, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
+                                                   const TensorInfo& output,
+                                                   Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsConvertFp32ToFp16SupportedRef(input, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
+                                               const TensorInfo& output,
+                                               const Convolution2dDescriptor& descriptor,
+                                               const TensorInfo& weights,
+                                               const Optional<TensorInfo>& biases,
+                                               Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsConvolution2dSupportedRef(input,
+                                              output,
+                                              descriptor,
+                                              weights,
+                                              biases,
+                                              GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
+                                                      const TensorInfo& output,
+                                                      const DepthwiseConvolution2dDescriptor& descriptor,
+                                                      const TensorInfo& weights,
+                                                      const Optional<TensorInfo>& biases,
+                                                      Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsDepthwiseConvolutionSupportedRef(input,
+                                                     output,
+                                                     descriptor,
+                                                     weights,
+                                                     biases,
+                                                     GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
+                                          const TensorInfo& input1,
+                                          const TensorInfo& output,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsDivisionSupportedRef(input0, input1, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
+                                                  const FakeQuantizationDescriptor& descriptor,
+                                                  Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsFakeQuantizationSupportedRef(input, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsFloorSupported(const TensorInfo& input,
+                                       const TensorInfo& output,
+                                       Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsFloorSupportedRef(input, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
+                                                const TensorInfo& output,
+                                                const TensorInfo& weights,
+                                                const TensorInfo& biases,
+                                                const FullyConnectedDescriptor& descriptor,
+                                                Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsFullyConnectedSupportedRef(input,
+                                               output,
+                                               weights,
+                                               biases,
+                                               descriptor,
+                                               GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsInputSupported(const TensorInfo& input,
+                                       Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsInputSupportedRef(input, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
+                                                 const TensorInfo& output,
+                                                 const L2NormalizationDescriptor& descriptor,
+                                                 Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsL2NormalizationSupportedRef(input,
+                                                output,
+                                                descriptor,
+                                                GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsLstmSupported(const TensorInfo& input,
+                                      const TensorInfo& outputStateIn,
+                                      const TensorInfo& cellStateIn,
+                                      const TensorInfo& scratchBuffer,
+                                      const TensorInfo& outputStateOut,
+                                      const TensorInfo& cellStateOut,
+                                      const TensorInfo& output,
+                                      const LstmDescriptor& descriptor,
+                                      const TensorInfo& inputToForgetWeights,
+                                      const TensorInfo& inputToCellWeights,
+                                      const TensorInfo& inputToOutputWeights,
+                                      const TensorInfo& recurrentToForgetWeights,
+                                      const TensorInfo& recurrentToCellWeights,
+                                      const TensorInfo& recurrentToOutputWeights,
+                                      const TensorInfo& forgetGateBias,
+                                      const TensorInfo& cellBias,
+                                      const TensorInfo& outputGateBias,
+                                      const TensorInfo* inputToInputWeights,
+                                      const TensorInfo* recurrentToInputWeights,
+                                      const TensorInfo* cellToInputWeights,
+                                      const TensorInfo* inputGateBias,
+                                      const TensorInfo* projectionWeights,
+                                      const TensorInfo* projectionBias,
+                                      const TensorInfo* cellToForgetWeights,
+                                      const TensorInfo* cellToOutputWeights,
+                                      Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsLstmSupportedRef(input,
+                                     outputStateIn,
+                                     cellStateIn,
+                                     scratchBuffer,
+                                     outputStateOut,
+                                     cellStateOut,
+                                     output,
+                                     descriptor,
+                                     inputToForgetWeights,
+                                     inputToCellWeights,
+                                     inputToOutputWeights,
+                                     recurrentToForgetWeights,
+                                     recurrentToCellWeights,
+                                     recurrentToOutputWeights,
+                                     forgetGateBias,
+                                     cellBias,
+                                     outputGateBias,
+                                     inputToInputWeights,
+                                     recurrentToInputWeights,
+                                     cellToInputWeights,
+                                     inputGateBias,
+                                     projectionWeights,
+                                     projectionBias,
+                                     cellToForgetWeights,
+                                     cellToOutputWeights,
+                                     GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      const MeanDescriptor& descriptor,
+                                      Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsMeanSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
+                                        const OriginsDescriptor& descriptor,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsMergerSupportedRef(inputs, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
+                                                const TensorInfo& input1,
+                                                const TensorInfo& output,
+                                                Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsMultiplicationSupportedRef(input0, input1, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
+                                               const TensorInfo& output,
+                                               const NormalizationDescriptor& descriptor,
+                                               Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsNormalizationSupportedRef(input,
+                                              output,
+                                              descriptor,
+                                              GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsOutputSupported(const TensorInfo& output,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsOutputSupportedRef(output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsPadSupported(const TensorInfo& input,
+                                     const TensorInfo& output,
+                                     const PadDescriptor& descriptor,
+                                     Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsPadSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input,
+                                         const TensorInfo& output,
+                                         const PermuteDescriptor& descriptor,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsPermuteSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsPooling2dSupported(const TensorInfo& input,
+                                           const TensorInfo& output,
+                                           const Pooling2dDescriptor& descriptor,
+                                           Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsPooling2dSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsReshapeSupportedRef(input, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
+                                                Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsResizeBilinearSupportedRef(input, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
+                                         const TensorInfo& output,
+                                         const SoftmaxDescriptor& descriptor,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsSoftmaxSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
+                                          const ViewsDescriptor& descriptor,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsSplitterSupportedRef(input, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
+                                             const TensorInfo& input1,
+                                             const TensorInfo& output,
+                                             Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsSubtractionSupportedRef(input0, input1, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+//
+// Implementation functions
+//
+// TODO: Functions kept for backward compatibility. Remove once the transition to pluggable backends is complete!
+
 template<typename Float32Func, typename Uint8Func, typename ... Params>
 bool IsSupportedForDataTypeRef(std::string* reasonIfUnsupported,
                                DataType dataType,
@@ -412,4 +723,4 @@
     return false;
 }
 
-}
+} // namespace armnn