IVGCVSW-1804 : Add Subtraction layer types and placeholders

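For illustration only, a minimal caller-side sketch (hypothetical helper name
and arbitrary shapes) of the placeholder support query this change introduces;
every backend currently reports subtraction as unsupported, so the call returns
false until real workloads land:

    // Illustrative only: query the new placeholder API on the reference backend.
    #include <armnn/LayerSupport.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    bool IsSubSupportedOnRef()
    {
        // Arbitrary matching shapes, just to exercise the new free function
        // declared in include/armnn/LayerSupport.hpp by this patch.
        const armnn::TensorInfo info(armnn::TensorShape({1, 2, 2, 2}),
                                     armnn::DataType::Float32);
        char reason[1024];
        return armnn::IsSubtractionSupported(armnn::Compute::CpuRef,
                                             info, info, info,
                                             reason, sizeof(reason));
    }
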
Change-Id: Ib9a477e5ce590df74ba05fece77258b9204f6523
diff --git a/Android.mk b/Android.mk
index 89a7124..a164535 100644
--- a/Android.mk
+++ b/Android.mk
@@ -193,6 +193,7 @@
         src/armnn/layers/PermuteLayer.cpp \
         src/armnn/layers/Pooling2dLayer.cpp \
         src/armnn/layers/DivisionLayer.cpp \
+        src/armnn/layers/SubtractionLayer.cpp \
         src/armnn/layers/ReshapeLayer.cpp \
         src/armnn/layers/ResizeBilinearLayer.cpp \
         src/armnn/layers/SoftmaxLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 802ca50..7890cdf 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -341,6 +341,8 @@
     src/armnn/layers/SoftmaxLayer.cpp
     src/armnn/layers/SplitterLayer.hpp
     src/armnn/layers/SplitterLayer.cpp
+    src/armnn/layers/SubtractionLayer.hpp
+    src/armnn/layers/SubtractionLayer.cpp
     src/armnn/Half.hpp
     src/armnn/InternalTypes.hpp
     src/armnn/InternalTypes.cpp
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index f129bba..ac7d08f 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -80,6 +80,13 @@
                          char* reasonIfUnsupported = nullptr,
                          size_t reasonIfUnsupportedMaxLength = 1024);
 
+bool IsSubtractionSupported(Compute compute,
+                            const TensorInfo& input0,
+                            const TensorInfo& input1,
+                            const TensorInfo& output,
+                            char* reasonIfUnsupported = nullptr,
+                            size_t reasonIfUnsupportedMaxLength = 1024);
+
 bool IsInputSupported(Compute compute,
                       const TensorInfo& input,
                       char* reasonIfUnsupported = nullptr,
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index 5d7984a..ee93d48 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -40,6 +40,7 @@
         case LayerType::ResizeBilinear: return "ResizeBilinear";
         case LayerType::Softmax: return "Softmax";
         case LayerType::Splitter: return "Splitter";
+        case LayerType::Subtraction: return "Subtraction";
         default:
             BOOST_ASSERT_MSG(false, "Unknown layer type");
             return "Unknown";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 13b93ce..d2c83cd 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -39,9 +39,10 @@
     Reshape,
     ResizeBilinear,
     Softmax,
+    Splitter,
     // Last layer goes here.
     LastLayer,
-    Splitter = LastLayer,
+    Subtraction = LastLayer,
 };
 
 const char* GetLayerTypeAsCString(LayerType type);
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 7ac054c..59c1c8d 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -151,6 +151,16 @@
     FORWARD_LAYER_SUPPORT_FUNC(compute, IsDivisionSupported, input0, input1, output);
 }
 
+bool IsSubtractionSupported(Compute compute,
+                            const TensorInfo& input0,
+                            const TensorInfo& input1,
+                            const TensorInfo& output,
+                            char* reasonIfUnsupported,
+                            size_t reasonIfUnsupportedMaxLength)
+{
+    FORWARD_LAYER_SUPPORT_FUNC(compute, IsSubtractionSupported, input0, input1, output);
+}
+
 bool IsDepthwiseConvolutionSupported(Compute compute,
                                      const TensorInfo& input,
                                      const TensorInfo& output,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index f7ebd37..a1dc355 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -32,6 +32,7 @@
 #include "layers/ResizeBilinearLayer.hpp"
 #include "layers/SoftmaxLayer.hpp"
 #include "layers/SplitterLayer.hpp"
+#include "layers/SubtractionLayer.hpp"
 
 namespace armnn
 {
@@ -86,5 +87,6 @@
 DECLARE_LAYER(ResizeBilinear)
 DECLARE_LAYER(Softmax)
 DECLARE_LAYER(Splitter)
+DECLARE_LAYER(Subtraction)
 
 }
diff --git a/src/armnn/backends/ClLayerSupport.cpp b/src/armnn/backends/ClLayerSupport.cpp
index 3a9a22a..7b5fee2 100644
--- a/src/armnn/backends/ClLayerSupport.cpp
+++ b/src/armnn/backends/ClLayerSupport.cpp
@@ -250,6 +250,15 @@
                                    output);
 }
 
+bool IsSubtractionSupportedCl(const TensorInfo& input0,
+                              const TensorInfo& input1,
+                              const TensorInfo& output,
+                              std::string* reasonIfUnsupported)
+{
+    // At the moment subtraction is not supported
+    return false;
+}
+
 bool IsFullyConnectedSupportedCl(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const TensorInfo& weights,
diff --git a/src/armnn/backends/ClLayerSupport.hpp b/src/armnn/backends/ClLayerSupport.hpp
index be56d5d..dbe546c 100644
--- a/src/armnn/backends/ClLayerSupport.hpp
+++ b/src/armnn/backends/ClLayerSupport.hpp
@@ -59,6 +59,11 @@
                            const TensorInfo& output,
                            std::string* reasonIfUnsupported = nullptr);
 
+bool IsSubtractionSupportedCl(const TensorInfo& input0,
+                              const TensorInfo& input1,
+                              const TensorInfo& output,
+                              std::string* reasonIfUnsupported = nullptr);
+
 bool IsFullyConnectedSupportedCl(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const TensorInfo& weights,
diff --git a/src/armnn/backends/ClWorkloadFactory.cpp b/src/armnn/backends/ClWorkloadFactory.cpp
index d2f3b11..8c9ca20 100644
--- a/src/armnn/backends/ClWorkloadFactory.cpp
+++ b/src/armnn/backends/ClWorkloadFactory.cpp
@@ -169,6 +169,12 @@
     return MakeWorkload<ClDivisionFloatWorkload, NullWorkload>(descriptor, info);
 }
 
+std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+                                                                       const WorkloadInfo& info) const
+{
+    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateBatchNormalization(
     const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
@@ -435,6 +441,12 @@
     return nullptr;
 }
 
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+                                                                const WorkloadInfo& info) const
+{
+    return nullptr;
+}
+
 void ClWorkloadFactory::Finalize()
 {
 }
diff --git a/src/armnn/backends/ClWorkloadFactory.hpp b/src/armnn/backends/ClWorkloadFactory.hpp
index 901bf40..dedbb50 100644
--- a/src/armnn/backends/ClWorkloadFactory.hpp
+++ b/src/armnn/backends/ClWorkloadFactory.hpp
@@ -111,6 +111,9 @@
     virtual std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info) const override;
 
+    virtual std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+                                                         const WorkloadInfo& info) const override;
+
     virtual void Finalize() override;
 
     virtual void Release() override;
diff --git a/src/armnn/backends/NeonLayerSupport.cpp b/src/armnn/backends/NeonLayerSupport.cpp
index f39871b..73d2518 100644
--- a/src/armnn/backends/NeonLayerSupport.cpp
+++ b/src/armnn/backends/NeonLayerSupport.cpp
@@ -234,6 +234,15 @@
     return false;
 }
 
+bool IsSubtractionSupportedNeon(const TensorInfo& input0,
+                                const TensorInfo& input1,
+                                const TensorInfo& output,
+                                std::string* reasonIfUnsupported)
+{
+    // At the moment subtraction is not supported
+    return false;
+}
+
 bool IsFullyConnectedSupportedNeon(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const TensorInfo& weights,
diff --git a/src/armnn/backends/NeonLayerSupport.hpp b/src/armnn/backends/NeonLayerSupport.hpp
index 1715f83..f7b6253 100644
--- a/src/armnn/backends/NeonLayerSupport.hpp
+++ b/src/armnn/backends/NeonLayerSupport.hpp
@@ -64,6 +64,11 @@
                              const TensorInfo& output,
                              std::string* reasonIfUnsupported = nullptr);
 
+bool IsSubtractionSupportedNeon(const TensorInfo& input0,
+                                const TensorInfo& input1,
+                                const TensorInfo& output,
+                                std::string* reasonIfUnsupported = nullptr);
+
 bool IsFullyConnectedSupportedNeon(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const TensorInfo& weights,
diff --git a/src/armnn/backends/NeonWorkloadFactory.cpp b/src/armnn/backends/NeonWorkloadFactory.cpp
index c90362c..fe9fd55 100644
--- a/src/armnn/backends/NeonWorkloadFactory.cpp
+++ b/src/armnn/backends/NeonWorkloadFactory.cpp
@@ -162,6 +162,12 @@
     return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
 }
 
+std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateSubtraction(
+    const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const
+{
+    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateBatchNormalization(
     const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
@@ -429,6 +435,12 @@
     return nullptr;
 }
 
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+                                                                  const WorkloadInfo& info) const
+{
+    return nullptr;
+}
+
 void NeonWorkloadFactory::Finalize()
 {}
 
diff --git a/src/armnn/backends/NeonWorkloadFactory.hpp b/src/armnn/backends/NeonWorkloadFactory.hpp
index 32e745f..34d0e95 100644
--- a/src/armnn/backends/NeonWorkloadFactory.hpp
+++ b/src/armnn/backends/NeonWorkloadFactory.hpp
@@ -111,6 +111,9 @@
     virtual std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info) const override;
 
+    virtual std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+                                                         const WorkloadInfo& info) const override;
+
     virtual void Finalize() override;
 
     virtual void Release() override;
diff --git a/src/armnn/backends/RefLayerSupport.cpp b/src/armnn/backends/RefLayerSupport.cpp
index ee91e73..5437574 100644
--- a/src/armnn/backends/RefLayerSupport.cpp
+++ b/src/armnn/backends/RefLayerSupport.cpp
@@ -130,6 +130,15 @@
                                      &TrueFunc<>);
 }
 
+bool IsSubtractionSupportedRef(const TensorInfo& input0,
+                               const TensorInfo& input1,
+                               const TensorInfo& output,
+                               std::string* reasonIfUnsupported)
+{
+    // At the moment subtraction is not supported
+    return false;
+}
+
 bool IsFullyConnectedSupportedRef(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const TensorInfo& weights,
diff --git a/src/armnn/backends/RefLayerSupport.hpp b/src/armnn/backends/RefLayerSupport.hpp
index d396867..464eb1c 100644
--- a/src/armnn/backends/RefLayerSupport.hpp
+++ b/src/armnn/backends/RefLayerSupport.hpp
@@ -56,6 +56,11 @@
                             const TensorInfo& output,
                             std::string* reasonIfUnsupported = nullptr);
 
+bool IsSubtractionSupportedRef(const TensorInfo& input0,
+                               const TensorInfo& input1,
+                               const TensorInfo& output,
+                               std::string* reasonIfUnsupported = nullptr);
+
 bool IsFullyConnectedSupportedRef(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const TensorInfo& weights,
diff --git a/src/armnn/backends/RefWorkloadFactory.cpp b/src/armnn/backends/RefWorkloadFactory.cpp
index d4891b3..4de9274 100644
--- a/src/armnn/backends/RefWorkloadFactory.cpp
+++ b/src/armnn/backends/RefWorkloadFactory.cpp
@@ -227,4 +227,10 @@
     return MakeWorkload<RefDivisionFloat32Workload, RefDivisionUint8Workload>(descriptor, info);
 }
 
+std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateSubtraction(
+    const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const
+{
+    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 } // namespace armnn
diff --git a/src/armnn/backends/RefWorkloadFactory.hpp b/src/armnn/backends/RefWorkloadFactory.hpp
index 8586ca6..5fbc6e4 100644
--- a/src/armnn/backends/RefWorkloadFactory.hpp
+++ b/src/armnn/backends/RefWorkloadFactory.hpp
@@ -127,6 +127,8 @@
     virtual std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info) const override;
 
+    virtual std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+                                                         const WorkloadInfo& info) const override;
 private:
 
     template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
diff --git a/src/armnn/backends/WorkloadData.cpp b/src/armnn/backends/WorkloadData.cpp
index 660637e..e3cf83f 100644
--- a/src/armnn/backends/WorkloadData.cpp
+++ b/src/armnn/backends/WorkloadData.cpp
@@ -811,4 +811,17 @@
                                        "second input");
 }
 
+void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    ValidateTwoInputs(workloadInfo, "SubtractionQueueDescriptor");
+    ValidateSingleOutput(workloadInfo, "SubtractionQueueDescriptor");
+
+    ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
+                                       workloadInfo.m_InputTensorInfos[1],
+                                       workloadInfo.m_OutputTensorInfos[0],
+                                       "SubtractionQueueDescriptor",
+                                       "first input",
+                                       "second input");
+}
+
 } //namespace armnn
diff --git a/src/armnn/backends/WorkloadData.hpp b/src/armnn/backends/WorkloadData.hpp
index d0b8163..d50a237 100644
--- a/src/armnn/backends/WorkloadData.hpp
+++ b/src/armnn/backends/WorkloadData.hpp
@@ -190,6 +190,12 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+// Subtraction layer workload data.
+struct SubtractionQueueDescriptor : QueueDescriptor
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 // Batch norm layer workload data.
 struct BatchNormalizationQueueDescriptor : QueueDescriptorWithParameters<BatchNormalizationDescriptor>
 {
diff --git a/src/armnn/backends/WorkloadFactory.cpp b/src/armnn/backends/WorkloadFactory.cpp
index ba926e8..d188725 100644
--- a/src/armnn/backends/WorkloadFactory.cpp
+++ b/src/armnn/backends/WorkloadFactory.cpp
@@ -524,6 +524,19 @@
                                          reasonCapacity);
             break;
         }
+        case LayerType::Subtraction:
+        {
+            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+            result = IsSubtractionSupported(compute,
+                                            OverrideDataType(input0, dataType),
+                                            OverrideDataType(input1, dataType),
+                                            OverrideDataType(output, dataType),
+                                            reason,
+                                            reasonCapacity);
+            break;
+        }
         default:
         {
             BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
diff --git a/src/armnn/backends/WorkloadFactory.hpp b/src/armnn/backends/WorkloadFactory.hpp
index 771aecf..0ae5a3e 100644
--- a/src/armnn/backends/WorkloadFactory.hpp
+++ b/src/armnn/backends/WorkloadFactory.hpp
@@ -123,6 +123,9 @@
 
     virtual std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info) const = 0;
+
+    virtual std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+                                                         const WorkloadInfo& info) const = 0;
 };
 
 } //namespace armnn
diff --git a/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp b/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
index a580be3..7745972 100644
--- a/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
+++ b/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
@@ -350,7 +350,7 @@
 
 DECLARE_LAYER_POLICY_2_PARAM(Splitter)
 
-
+DECLARE_LAYER_POLICY_1_PARAM(Subtraction)
 
 
 // Generic implementation to get the number of input slots for a given layer type;
diff --git a/src/armnn/layers/SubtractionLayer.cpp b/src/armnn/layers/SubtractionLayer.cpp
new file mode 100644
index 0000000..6239868
--- /dev/null
+++ b/src/armnn/layers/SubtractionLayer.cpp
@@ -0,0 +1,33 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "SubtractionLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backends/WorkloadData.hpp>
+#include <backends/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+SubtractionLayer::SubtractionLayer(const char* name)
+    : ArithmeticBaseLayer(2, 1, LayerType::Subtraction, name)
+{
+}
+
+std::unique_ptr<IWorkload> SubtractionLayer::CreateWorkload(const Graph& graph,
+                                                            const IWorkloadFactory& factory) const
+{
+    SubtractionQueueDescriptor descriptor;
+    return factory.CreateSubtraction(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+SubtractionLayer* SubtractionLayer::Clone(Graph& graph) const
+{
+    return CloneBase<SubtractionLayer>(graph, GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/SubtractionLayer.hpp b/src/armnn/layers/SubtractionLayer.hpp
new file mode 100644
index 0000000..ac02580
--- /dev/null
+++ b/src/armnn/layers/SubtractionLayer.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "ArithmeticBaseLayer.hpp"
+
+namespace armnn
+{
+
+class SubtractionLayer : public ArithmeticBaseLayer
+{
+public:
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    SubtractionLayer* Clone(Graph& graph) const override;
+
+protected:
+    SubtractionLayer(const char* name);
+    ~SubtractionLayer() = default;
+};
+
+} // namespace armnn