IVGCVSW-2608: support static quantization of Activation

Change-Id: Ia9afd15d002d4454ec72f219c5cf214704f6ae31
Signed-off-by: Nina Drozd <nina.drozd@arm.com>
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index afe3713..4e07514 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -147,6 +147,15 @@
     SetQuantizedInputConnections(layer, newLayer);
 }
 
+void QuantizerVisitor::VisitActivationLayer(const IConnectableLayer *layer,
+                                            const ActivationDescriptor& activationDescriptor,
+                                            const char *name)
+{
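+    // Add a matching activation layer to the quantized network, record the
+    // mapping from the original layer, and rewire its quantized inputs.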
+    IConnectableLayer* newLayer = m_QuantizedNetwork->AddActivationLayer(activationDescriptor, name);
+    RecordLayer(layer, newLayer);
+    SetQuantizedInputConnections(layer, newLayer);
+}
+
 void QuantizerVisitor::VisitInputLayer(const IConnectableLayer *layer, LayerBindingId id, const char *name)
 {
     IConnectableLayer* newLayer = m_QuantizedNetwork->AddInputLayer(id, name);
diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp
index d6aee6b..0dc4582 100644
--- a/src/armnn/QuantizerVisitor.hpp
+++ b/src/armnn/QuantizerVisitor.hpp
@@ -27,6 +27,9 @@
     // Functions to quantize the individual layers, overridden from ILayerVisitor
     void VisitInputLayer(const IConnectableLayer *layer, LayerBindingId id, const char *name = nullptr) override;
     void VisitAdditionLayer(const IConnectableLayer *layer, const char *name = nullptr) override;
+    void VisitActivationLayer(const IConnectableLayer *layer,
+                              const ActivationDescriptor& activationDescriptor,
+                              const char *name = nullptr) override;
     void VisitOutputLayer(const IConnectableLayer *layer, LayerBindingId id, const char *name = nullptr)  override;
     void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
                                       const BatchNormalizationDescriptor& desc,
diff --git a/src/armnn/StaticRangeVisitor.cpp b/src/armnn/StaticRangeVisitor.cpp
index cc8c26e..1986e42 100644
--- a/src/armnn/StaticRangeVisitor.cpp
+++ b/src/armnn/StaticRangeVisitor.cpp
@@ -6,6 +6,8 @@
 #include "StaticRangeVisitor.hpp"
 
 #include <boost/core/ignore_unused.hpp>
+#include <armnn/Descriptors.hpp>
+#include <armnn/Types.hpp>
 
 namespace armnn
 {
@@ -53,4 +55,32 @@
     SetRange(layer, 0, -15.0f, 15.0f);
 }
 
+void StaticRangeVisitor::VisitActivationLayer(const IConnectableLayer *layer,
+                                              const ActivationDescriptor& activationDescriptor,
+                                              const char *name)
+{
+    boost::ignore_unused(name); // 'name' is not needed for range tracking
+
+    switch (activationDescriptor.m_Function)
+    {
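+        // Static, hard-coded estimates of each activation's output range,
+        // standing in for ranges measured on representative input data.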
+        // Range is [0, 15] for Abs, Linear, ReLu and SoftReLu
+        case ActivationFunction::Abs:
+        case ActivationFunction::Linear:
+        case ActivationFunction::ReLu:
+        case ActivationFunction::SoftReLu:
+            SetRange(layer, 0, 0.f, 15.f);
+            break;
+        case ActivationFunction::BoundedReLu:
+            SetRange(layer, 0, 0.f, activationDescriptor.m_A);
+            break;
+        case ActivationFunction::TanH:
+            SetRange(layer, 0, -1.f, 1.f);
+            break;
+        case ActivationFunction::LeakyReLu:
+            SetRange(layer, 0, -5.f, 15.f);
+            break;
+        default:
+            SetRange(layer, 0, -15.f, 15.f);
+            break;
+    }
+}
+
 } //namespace armnn
\ No newline at end of file
diff --git a/src/armnn/StaticRangeVisitor.hpp b/src/armnn/StaticRangeVisitor.hpp
index 4276a17..ed02fb5 100644
--- a/src/armnn/StaticRangeVisitor.hpp
+++ b/src/armnn/StaticRangeVisitor.hpp
@@ -34,6 +34,9 @@
                                       const ConstTensor& beta,
                                       const ConstTensor& gamma,
                                       const char* name = nullptr) override;
+    void VisitActivationLayer(const IConnectableLayer *layer,
+                              const ActivationDescriptor& activationDescriptor,
+                              const char *name = nullptr) override;
 
     /// Retrieve the default range
     MinMaxRange DefaultRange() const { return std::make_pair(-15.0f, 15.0f); }
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index fbafbd8..6f9ad31 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -18,6 +18,29 @@
 {
 BOOST_AUTO_TEST_SUITE(Quantizer)
 
+class TestQuantization : public LayerVisitorBase<VisitorThrowingPolicy>
+{
+public:
+    virtual void VisitInputLayer(const IConnectableLayer* layer,
+                                 LayerBindingId id,
+                                 const char* name = nullptr)
+    {
+        TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+
+        BOOST_TEST((info.GetDataType() ==  DataType::QuantisedAsymm8));
+
+        BOOST_TEST((info.GetQuantizationOffset() == 128));
+
+        // Based off current default [-15.0f, 15.0f]
+        BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f/255.0f, 0.000001f );
+    }
+
+    virtual void VisitOutputLayer(const IConnectableLayer* layer,
+                                  LayerBindingId id,
+                                  const char* name = nullptr)
+    {}
+};
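+
+// Note on the expected values: these tests assume a range [min, max] is mapped
+// to QAsymm8 parameters as scale = (max - min) / 255 and
+// offset = round(-min / scale), so the default range [-15.0f, 15.0f] gives
+// scale = 30/255 and offset = 128.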
+
 void VisitLayersTopologically(const INetwork* inputNetwork, ILayerVisitor& visitor)
 {
     auto network = boost::polymorphic_downcast<const Network*>(inputNetwork);
@@ -31,7 +54,7 @@
 
 BOOST_AUTO_TEST_CASE(QuantizeAddition)
 {
-    class TestQuantization : public LayerVisitorBase<VisitorThrowingPolicy>
+    class TestAdditionQuantization : public TestQuantization
     {
     public:
         virtual void VisitAdditionLayer(const IConnectableLayer* layer,
@@ -46,25 +69,6 @@
             // Based off current static value [-20.0f, 20.0f]
             BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 40.0f/255.0f, 0.000001f );
         }
-
-        virtual void VisitInputLayer(const IConnectableLayer* layer,
-                                     LayerBindingId id,
-                                     const char* name = nullptr)
-        {
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            BOOST_TEST((info.GetDataType() ==  DataType::QuantisedAsymm8));
-
-            BOOST_TEST((info.GetQuantizationOffset() == 128));
-
-            // Based off current default [-15.0f, 15.0f]
-            BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f/255.0f, 0.000001f );
-        }
-
-        virtual void VisitOutputLayer(const IConnectableLayer* layer,
-                                      LayerBindingId id,
-                                      const char* name = nullptr)
-        {}
     };
 
     auto network = INetwork::Create();
@@ -88,7 +92,198 @@
     addition->GetOutputSlot(0).SetTensorInfo(info);
 
     auto quantizedNetwork = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestQuantization validator;
+    TestAdditionQuantization validator;
+    VisitLayersTopologically(quantizedNetwork.get(), validator);
+}
+
+class TestActivationQuantization : public TestQuantization
+{
+public:
+    virtual void VisitActivationLayer(const IConnectableLayer* layer,
+                                      const ActivationDescriptor& descriptor,
+                                      const char* name = nullptr)
+    {
+        TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+
+        BOOST_TEST((info.GetDataType() ==  DataType::QuantisedAsymm8));
+
+        BOOST_TEST((info.GetQuantizationOffset() == 0));
+
+        // Based off current static value [0.0f, 15.0f]
+        BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 15.0f/255.0f, 0.000001f );
+    }
+};
+
+INetworkPtr CreateNetworkWithActivationLayer(const ActivationDescriptor& descriptor)
+{
+    auto network = INetwork::Create();
+    // Add the layers
+    IConnectableLayer* input0 = network->AddInputLayer(0);
+    IConnectableLayer* activation = network->AddActivationLayer(descriptor);
+    IConnectableLayer* output = network->AddOutputLayer(2);
+
+    // Establish connections
+    input0->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
+    activation->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    // Set the TensorInfo
+    TensorShape shape{1U};
+    TensorInfo info(shape, DataType::Float32);
+    input0->GetOutputSlot(0).SetTensorInfo(info);
+    activation->GetOutputSlot(0).SetTensorInfo(info);
+
+    return network;
+}
+
+BOOST_AUTO_TEST_CASE(QuantizeAbsActivation)
+{
+    ActivationDescriptor descriptor;
+    descriptor.m_Function = ActivationFunction::Abs;
+    descriptor.m_A        = 3.5f;
+    descriptor.m_B        = -10.0f;
+
+    auto network = CreateNetworkWithActivationLayer(descriptor);
+
+    auto quantizedNetwork = INetworkQuantizer::Create(network.get())->ExportNetwork();
+    TestActivationQuantization validator;
+    VisitLayersTopologically(quantizedNetwork.get(), validator);
+}
+
+BOOST_AUTO_TEST_CASE(QuantizeLinearActivation)
+{
+    ActivationDescriptor descriptor;
+    descriptor.m_Function = ActivationFunction::Linear;
+    descriptor.m_A        = 3.5f;
+    descriptor.m_B        = -10.0f;
+
+    auto network = CreateNetworkWithActivationLayer(descriptor);
+
+    auto quantizedNetwork = INetworkQuantizer::Create(network.get())->ExportNetwork();
+    TestActivationQuantization validator;
+    VisitLayersTopologically(quantizedNetwork.get(), validator);
+}
+
+BOOST_AUTO_TEST_CASE(QuantizeReLuActivation)
+{
+    ActivationDescriptor descriptor;
+    descriptor.m_Function = ActivationFunction::ReLu;
+    descriptor.m_A        = 3.5f;
+    descriptor.m_B        = -10.0f;
+
+    auto network = CreateNetworkWithActivationLayer(descriptor);
+
+    auto quantizedNetwork = INetworkQuantizer::Create(network.get())->ExportNetwork();
+    TestActivationQuantization validator;
+    VisitLayersTopologically(quantizedNetwork.get(), validator);
+}
+
+BOOST_AUTO_TEST_CASE(QuantizeSoftReLuActivation)
+{
+    ActivationDescriptor descriptor;
+    descriptor.m_Function = ActivationFunction::SoftReLu;
+    descriptor.m_A        = 3.5f;
+    descriptor.m_B        = -10.0f;
+
+    auto network = CreateNetworkWithActivationLayer(descriptor);
+
+    auto quantizedNetwork = INetworkQuantizer::Create(network.get())->ExportNetwork();
+    TestActivationQuantization validator;
+    VisitLayersTopologically(quantizedNetwork.get(), validator);
+}
+
+BOOST_AUTO_TEST_CASE(QuantizeBoundedReLuActivation)
+{
+    class TestBoundedReLuActivationQuantization : public TestQuantization
+    {
+    public:
+        virtual void VisitActivationLayer(const IConnectableLayer* layer,
+                                          const ActivationDescriptor& descriptor,
+                                          const char* name = nullptr)
+        {
+            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+
+            BOOST_TEST((info.GetDataType() ==  DataType::QuantisedAsymm8));
+
+            BOOST_TEST((info.GetQuantizationOffset() == 0));
+
+            // Based off current static value [0.0f, 3.5f] (m_A is the layer's upper bound)
+            BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 3.5f/255.0f, 0.000001f );
+        }
+    };
+
+    ActivationDescriptor descriptor;
+    descriptor.m_Function = ActivationFunction::BoundedReLu;
+    descriptor.m_A        = 3.5f;
+    descriptor.m_B        = -10.0f;
+
+    auto network = CreateNetworkWithActivationLayer(descriptor);
+
+    auto quantizedNetwork = INetworkQuantizer::Create(network.get())->ExportNetwork();
+    TestBoundedReLuActivationQuantization validator;
+    VisitLayersTopologically(quantizedNetwork.get(), validator);
+}
+
+BOOST_AUTO_TEST_CASE(QuantizeTanHActivation)
+{
+    class TestTanHActivationQuantization : public TestQuantization
+    {
+    public:
+        virtual void VisitActivationLayer(const IConnectableLayer* layer,
+                                          const ActivationDescriptor& descriptor,
+                                          const char* name = nullptr)
+        {
+            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+
+            BOOST_TEST((info.GetDataType() ==  DataType::QuantisedAsymm8));
+
+            BOOST_TEST((info.GetQuantizationOffset() == 128));
+
+            // Based off current static value [-1.0f, 1.0f]
+            BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 2.0f/255.0f, 0.000001f );
+        }
+    };
+
+    ActivationDescriptor descriptor;
+    descriptor.m_Function = ActivationFunction::TanH;
+    descriptor.m_A        = 3.5f;
+    descriptor.m_B        = -10.0f;
+
+    auto network = CreateNetworkWithActivationLayer(descriptor);
+
+    auto quantizedNetwork = INetworkQuantizer::Create(network.get())->ExportNetwork();
+    TestTanHActivationQuantization validator;
+    VisitLayersTopologically(quantizedNetwork.get(), validator);
+}
+
+BOOST_AUTO_TEST_CASE(QuantizeLeakyReLuActivation)
+{
+    class TestLeakyReLuActivationQuantization : public TestQuantization
+    {
+    public:
+        virtual void VisitActivationLayer(const IConnectableLayer* layer,
+                                          const ActivationDescriptor& descriptor,
+                                          const char* name = nullptr)
+        {
+            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+
+            BOOST_TEST((info.GetDataType() ==  DataType::QuantisedAsymm8));
+
+            BOOST_TEST((info.GetQuantizationOffset() == 64));
+
+            // Based off current static value [-5.0f, 15.0f]
+            BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 20.0f/255.0f, 0.000001f );
+        }
+    };
+
+    ActivationDescriptor descriptor;
+    descriptor.m_Function = ActivationFunction::LeakyReLu;
+    descriptor.m_A        = 3.5f;
+    descriptor.m_B        = -10.0f;
+
+    auto network = CreateNetworkWithActivationLayer(descriptor);
+
+    auto quantizedNetwork = INetworkQuantizer::Create(network.get())->ExportNetwork();
+    TestLeakyReLuActivationQuantization validator;
     VisitLayersTopologically(quantizedNetwork.get(), validator);
 }