IVGCVSW-2692 Add Serializer and Deserializer for Greater

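 * Add Greater layer to the FlatBuffers schema (ArmnnSchema.fbs)
 * Add SerializerVisitor::VisitGreaterLayer and Deserializer::ParseGreater
 * Add DeserializeGreater and SerializeDeserializeGreater unit tests
 * Update SerializerSupport.md and DeserializerSupport.md
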
Change-Id: I344a1f36a8a4ab601dd4d62a0014c554ceb6a1c6
Signed-off-by: Conor Kennedy <conor.kennedy@arm.com>
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0c8a7b9..f3ad333 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -605,6 +605,7 @@
                 src/armnnDeserializer/test/DeserializeEqual.cpp
                 src/armnnDeserializer/test/DeserializeFloor.cpp
                 src/armnnDeserializer/test/DeserializeFullyConnected.cpp
+                src/armnnDeserializer/test/DeserializeGreater.cpp
                 src/armnnDeserializer/test/DeserializeMultiplication.cpp
                 src/armnnDeserializer/test/DeserializeNormalization.cpp
                 src/armnnDeserializer/test/DeserializePad.cpp
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index e8cda2e..aebdf0e 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -196,6 +196,7 @@
     m_ParserFunctions[Layer_EqualLayer]                  = &Deserializer::ParseEqual;
     m_ParserFunctions[Layer_FullyConnectedLayer]         = &Deserializer::ParseFullyConnected;
     m_ParserFunctions[Layer_FloorLayer]                  = &Deserializer::ParseFloor;
+    m_ParserFunctions[Layer_GreaterLayer]                = &Deserializer::ParseGreater;
     m_ParserFunctions[Layer_MinimumLayer]                = &Deserializer::ParseMinimum;
     m_ParserFunctions[Layer_MaximumLayer]                = &Deserializer::ParseMaximum;
     m_ParserFunctions[Layer_MultiplicationLayer]         = &Deserializer::ParseMultiplication;
@@ -237,6 +238,8 @@
             return graphPtr->layers()->Get(layerIndex)->layer_as_FullyConnectedLayer()->base();
         case Layer::Layer_FloorLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_FloorLayer()->base();
+        case Layer::Layer_GreaterLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_GreaterLayer()->base();
         case Layer::Layer_InputLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_InputLayer()->base()->base();
         case Layer::Layer_MinimumLayer:
@@ -1036,6 +1039,26 @@
     RegisterOutputSlots(graph, layerIndex, layer);
 }
 
+void Deserializer::ParseGreater(GraphPtr graph, unsigned int layerIndex)
+{
+    CHECK_LAYERS(graph, 0, layerIndex);
+    auto inputs = GetInputs(graph, layerIndex);
+    CHECK_LOCATION();
+    CHECK_VALID_SIZE(inputs.size(), 2);
+
+    auto outputs = GetOutputs(graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto layerName = GetLayerName(graph, layerIndex);
+    IConnectableLayer* layer = m_Network->AddGreaterLayer(layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(graph, layerIndex, layer);
+    RegisterOutputSlots(graph, layerIndex, layer);
+}
+
 void Deserializer::ParseMinimum(GraphPtr graph, unsigned int layerIndex)
 {
     CHECK_LAYERS(graph, 0, layerIndex);
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
index 237cb9f..7e25534 100644
--- a/src/armnnDeserializer/Deserializer.hpp
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -82,6 +82,7 @@
     void ParseEqual(GraphPtr graph, unsigned int layerIndex);
     void ParseFloor(GraphPtr graph, unsigned int layerIndex);
     void ParseFullyConnected(GraphPtr graph, unsigned int layerIndex);
+    void ParseGreater(GraphPtr graph, unsigned int layerIndex);
     void ParseMinimum(GraphPtr graph, unsigned int layerIndex);
     void ParseMaximum(GraphPtr graph, unsigned int layerIndex);
     void ParseMultiplication(GraphPtr graph, unsigned int layerIndex);
diff --git a/src/armnnDeserializer/DeserializerSupport.md b/src/armnnDeserializer/DeserializerSupport.md
index b5e9b6b..ba85a04 100644
--- a/src/armnnDeserializer/DeserializerSupport.md
+++ b/src/armnnDeserializer/DeserializerSupport.md
@@ -17,6 +17,7 @@
 * Equal
 * Floor
 * FullyConnected
+* Greater
 * Maximum
 * Minimum
 * Multiplication
diff --git a/src/armnnDeserializer/test/DeserializeGreater.cpp b/src/armnnDeserializer/test/DeserializeGreater.cpp
new file mode 100644
index 0000000..d1ff250
--- /dev/null
+++ b/src/armnnDeserializer/test/DeserializeGreater.cpp
@@ -0,0 +1,182 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../Deserializer.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(Deserializer)
+
+struct GreaterFixture : public ParserFlatbuffersSerializeFixture
+{
+    explicit GreaterFixture(const std::string & inputShape1,
+                            const std::string & inputShape2,
+                            const std::string & outputShape,
+                            const std::string & inputDataType,
+                            const std::string & outputDataType)
+    {
+        m_JsonString = R"(
+        {
+                inputIds: [0, 1],
+                outputIds: [3],
+                layers: [
+                {
+                    layer_type: "InputLayer",
+                    layer: {
+                          base: {
+                                layerBindingId: 0,
+                                base: {
+                                    index: 0,
+                                    layerName: "InputLayer1",
+                                    layerType: "Input",
+                                    inputSlots: [{
+                                        index: 0,
+                                        connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                                    }],
+                                    outputSlots: [ {
+                                        index: 0,
+                                        tensorInfo: {
+                                            dimensions: )" + inputShape1 + R"(,
+                                            dataType: )" + inputDataType + R"(
+                                        },
+                                    }],
+                                 },}},
+                },
+                {
+                layer_type: "InputLayer",
+                layer: {
+                       base: {
+                            layerBindingId: 1,
+                            base: {
+                                  index:1,
+                                  layerName: "InputLayer2",
+                                  layerType: "Input",
+                                  inputSlots: [{
+                                      index: 0,
+                                      connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                                  }],
+                                  outputSlots: [ {
+                                      index: 0,
+                                      tensorInfo: {
+                                          dimensions: )" + inputShape2 + R"(,
+                                          dataType: )" + inputDataType + R"(
+                                      },
+                                  }],
+                                },}},
+                },
+                {
+                layer_type: "GreaterLayer",
+                layer : {
+                        base: {
+                             index:2,
+                             layerName: "GreaterLayer",
+                             layerType: "Greater",
+                             inputSlots: [
+                                            {
+                                             index: 0,
+                                             connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                                            },
+                                            {
+                                             index: 1,
+                                             connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+                                            }
+                             ],
+                             outputSlots: [ {
+                                 index: 0,
+                                 tensorInfo: {
+                                     dimensions: )" + outputShape + R"(,
+                                     dataType: Boolean
+                                 },
+                             }],
+                            }},
+                },
+                {
+                layer_type: "OutputLayer",
+                layer: {
+                        base:{
+                              layerBindingId: 0,
+                              base: {
+                                    index: 3,
+                                    layerName: "OutputLayer",
+                                    layerType: "Output",
+                                    inputSlots: [{
+                                        index: 0,
+                                        connection: {sourceLayerIndex:2, outputSlotIndex:0 },
+                                    }],
+                                    outputSlots: [ {
+                                        index: 0,
+                                        tensorInfo: {
+                                            dimensions: )" + outputShape + R"(,
+                                            dataType: )" + outputDataType + R"(
+                                        },
+                                }],
+                            }}},
+                }]
+         }
+        )";
+        Setup();
+    }
+};
+
+
+struct SimpleGreaterFixtureQuantisedAsymm8 : GreaterFixture
+{
+    SimpleGreaterFixtureQuantisedAsymm8() : GreaterFixture("[ 2, 2 ]",        // input1Shape
+                                                           "[ 2, 2 ]",        // input2Shape
+                                                           "[ 2, 2 ]",        // outputShape
+                                                           "QuantisedAsymm8", // inputDataType
+                                                           "Float32") {}      // outputDataType
+};
+
+struct SimpleGreaterFixtureFloat32 : GreaterFixture
+{
+    SimpleGreaterFixtureFloat32() : GreaterFixture("[ 2, 2, 1, 1 ]", // input1Shape
+                                                   "[ 2, 2, 1, 1 ]", // input2Shape
+                                                   "[ 2, 2, 1, 1 ]", // outputShape
+                                                   "Float32",        // inputDataType
+                                                   "Float32") {}     // outputDataType
+};
+
+struct SimpleGreaterFixtureBroadcast : GreaterFixture
+{
+    SimpleGreaterFixtureBroadcast() : GreaterFixture("[ 1, 2, 2, 2 ]", // input1Shape
+                                                     "[ 1, 1, 1, 1 ]", // input2Shape
+                                                     "[ 1, 2, 2, 2 ]", // outputShape
+                                                     "Float32",        // inputDataType
+                                                     "Float32") {}     // outputDataType
+};
+
+
+BOOST_FIXTURE_TEST_CASE(GreaterQuantisedAsymm8, SimpleGreaterFixtureQuantisedAsymm8)
+{
+    RunTest<2, armnn::DataType::QuantisedAsymm8, armnn::DataType::Boolean>(
+        0,
+        {{"InputLayer1", { 1, 5, 8, 7 }},
+        { "InputLayer2", { 4, 0, 6, 7 }}},
+        {{"OutputLayer", { 0, 1, 1, 0 }}});
+}
+
+BOOST_FIXTURE_TEST_CASE(GreaterFloat32, SimpleGreaterFixtureFloat32)
+{
+    RunTest<4, armnn::DataType::Float32, armnn::DataType::Boolean>(
+        0,
+        {{"InputLayer1", { 1.0f, 2.0f, 3.0f, 4.0f }},
+        { "InputLayer2", { 1.0f, 5.0f, 2.0f, 2.0f }}},
+        {{"OutputLayer", { 0, 0, 1, 1 }}});
+}
+
+BOOST_FIXTURE_TEST_CASE(GreaterBroadcast, SimpleGreaterFixtureBroadcast)
+{
+    RunTest<4, armnn::DataType::Float32, armnn::DataType::Boolean>(
+        0,
+        {{"InputLayer1", { 1, 2, 3, 4, 5, 6, 7, 8 }},
+         {"InputLayer2", { 1 }}},
+        {{"OutputLayer", { 0, 1, 1, 1, 1, 1, 1, 1 }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index f416912..410849e 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -104,7 +104,8 @@
     Pad = 20,
     Rsqrt = 21,
     Floor = 22,
-    BatchNormalization = 23
+    BatchNormalization = 23,
+    Greater = 24
 }
 
 // Base layer table to be used as part of other layers
@@ -184,6 +185,10 @@
     transposeWeightsMatrix:bool = false;
 }
 
+table GreaterLayer {
+    base:LayerBase;
+}
+
 table InputLayer {
     base:BindableLayerBase;
 }
@@ -341,7 +346,6 @@
     padList:[uint];
 }
 
-
 table RsqrtLayer {
     base:LayerBase;
 }
@@ -384,7 +388,8 @@
     NormalizationLayer,
     PadLayer,
     RsqrtLayer,
-    FloorLayer
+    FloorLayer,
+    GreaterLayer
 }
 
 table AnyLayer {
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 423706c..b55adb2 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -329,6 +329,15 @@
     CreateAnyLayer(fbMaximumLayer.o, serializer::Layer::Layer_MaximumLayer);
 }
 
+// Build FlatBuffer for Greater Layer
+void SerializerVisitor::VisitGreaterLayer(const armnn::IConnectableLayer* layer, const char* name)
+{
+    auto fbGreaterBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Greater);
+    auto fbGreaterLayer = serializer::CreateGreaterLayer(m_flatBufferBuilder, fbGreaterBaseLayer);
+
+    CreateAnyLayer(fbGreaterLayer.o, serializer::Layer::Layer_GreaterLayer);
+}
+
 // Build FlatBuffer for Multiplication Layer
 void SerializerVisitor::VisitMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index a60d19b..164db19 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -92,6 +92,9 @@
                                   const armnn::Optional<armnn::ConstTensor>& biases,
                                   const char* name = nullptr) override;
 
+    void VisitGreaterLayer(const armnn::IConnectableLayer* layer,
+                           const char* name = nullptr) override;
+
     void VisitInputLayer(const armnn::IConnectableLayer* layer,
                          armnn::LayerBindingId id,
                          const char* name = nullptr) override;
diff --git a/src/armnnSerializer/SerializerSupport.md b/src/armnnSerializer/SerializerSupport.md
index 98023a6..f18ef3a 100644
--- a/src/armnnSerializer/SerializerSupport.md
+++ b/src/armnnSerializer/SerializerSupport.md
@@ -17,6 +17,7 @@
 * Equal
 * Floor
 * FullyConnected
+* Greater
 * Maximum
 * Minimum
 * Multiplication
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 3ef1583..7206d6d 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -508,6 +508,47 @@
                                             {outputInfo.GetShape()});
 }
 
+BOOST_AUTO_TEST_CASE(SerializeDeserializeGreater)
+{
+    class VerifyGreaterName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
+    {
+    public:
+        void VisitGreaterLayer(const armnn::IConnectableLayer*, const char* name) override
+        {
+            BOOST_TEST(name == "greater");
+        }
+    };
+
+    const armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, armnn::DataType::Float32);
+    const armnn::TensorInfo inputTensorInfo2({ 1, 2, 2, 2 }, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo({ 1, 2, 2, 2 }, armnn::DataType::Boolean);
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const inputLayer2 = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const greaterLayer = network->AddGreaterLayer("greater");
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+    inputLayer1->GetOutputSlot(0).Connect(greaterLayer->GetInputSlot(0));
+    inputLayer1->GetOutputSlot(0).SetTensorInfo(inputTensorInfo1);
+    inputLayer2->GetOutputSlot(0).Connect(greaterLayer->GetInputSlot(1));
+    inputLayer2->GetOutputSlot(0).SetTensorInfo(inputTensorInfo2);
+    greaterLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+    greaterLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    VerifyGreaterName nameChecker;
+    deserializedNetwork->Accept(nameChecker);
+
+    CheckDeserializedNetworkAgainstOriginal(*network,
+                                            *deserializedNetwork,
+                                            {inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()},
+                                            {outputTensorInfo.GetShape()},
+                                            {0, 1});
+}
+
 BOOST_AUTO_TEST_CASE(SerializeDeserializeReshape)
 {
     class VerifyReshapeName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>