IVGCVSW-2371 Add Rsqrt Ref implementation

* Added unit tests

Change-Id: I6cceb8e6dcda35ce08415f8e5ca86019a64d26e3
diff --git a/Android.mk b/Android.mk
index 4c8c492..c02dc39 100644
--- a/Android.mk
+++ b/Android.mk
@@ -113,13 +113,14 @@
         src/armnn/layers/PermuteLayer.cpp \
         src/armnn/layers/Pooling2dLayer.cpp \
         src/armnn/layers/DivisionLayer.cpp \
-        src/armnn/layers/SubtractionLayer.cpp \
         src/armnn/layers/ReshapeLayer.cpp \
-        src/armnn/layers/SpaceToBatchNdLayer.cpp \
         src/armnn/layers/ResizeBilinearLayer.cpp \
+        src/armnn/layers/RsqrtLayer.cpp \
+        src/armnn/layers/SpaceToBatchNdLayer.cpp \
         src/armnn/layers/SoftmaxLayer.cpp \
         src/armnn/layers/SplitterLayer.cpp \
         src/armnn/layers/StridedSliceLayer.cpp \
+        src/armnn/layers/SubtractionLayer.cpp \
         src/armnn/Descriptors.cpp \
         src/armnn/Exceptions.cpp \
         src/armnn/Graph.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a00a44a..9fedc25 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -240,6 +240,8 @@
     src/armnn/layers/SpaceToBatchNdLayer.cpp
     src/armnn/layers/ResizeBilinearLayer.hpp
     src/armnn/layers/ResizeBilinearLayer.cpp
+    src/armnn/layers/RsqrtLayer.cpp
+    src/armnn/layers/RsqrtLayer.hpp
     src/armnn/layers/SoftmaxLayer.hpp
     src/armnn/layers/SoftmaxLayer.cpp
     src/armnn/layers/SplitterLayer.hpp
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index e8840cb..af38b07 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -195,6 +195,10 @@
     virtual bool IsResizeBilinearSupported(const TensorInfo& input,
                                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
+    virtual bool IsRsqrtSupported(const TensorInfo& input,
+                                  const TensorInfo& output,
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+
     virtual bool IsSoftmaxSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const SoftmaxDescriptor& descriptor,
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 63fd78b..5f341ad 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -338,6 +338,11 @@
     /// @ return - Interface for configuring the layer.
     virtual IConnectableLayer* AddEqualLayer(const char* name = nullptr) = 0;
 
+    /// Adds a reciprocal-of-square-root (Rsqrt) layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddRsqrtLayer(const char* name = nullptr) = 0;
+
 protected:
     ~INetwork() {}
 };
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index 5a5ba98..446dd70 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -249,6 +249,13 @@
                         size_t reasonIfUnsupportedMaxLength = 1024);
 
 /// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsRsqrtSupported(const BackendId& backend,
+                      const TensorInfo& input,
+                      const TensorInfo& output,
+                      char* reasonIfUnsupported = nullptr,
+                      size_t reasonIfUnsupportedMaxLength = 1024);
+
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
 bool IsFloorSupported(const BackendId& backend,
                       const TensorInfo& input,
                       const TensorInfo& output,
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index fe806f1..9ffd73a 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -45,6 +45,7 @@
         case LayerType::Permute: return "Permute";
         case LayerType::Pooling2d: return "Pooling2d";
         case LayerType::Reshape: return "Reshape";
+        case LayerType::Rsqrt: return "Rsqrt";
         case LayerType::ResizeBilinear: return "ResizeBilinear";
         case LayerType::Softmax: return "Softmax";
         case LayerType::SpaceToBatchNd: return "SpaceToBatchNd";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index c6b5c04..f4996db 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -46,6 +46,7 @@
     Pooling2d,
     Reshape,
     ResizeBilinear,
+    Rsqrt,
     Softmax,
     SpaceToBatchNd,
     Splitter,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 3c864c5..cf6ce27 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -389,6 +389,15 @@
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input);
 }
 
+bool IsRsqrtSupported(const BackendId& backend,
+                      const TensorInfo& input,
+                      const TensorInfo& output,
+                      char* reasonIfUnsupported,
+                      size_t reasonIfUnsupportedMaxLength)
+{
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsRsqrtSupported, input, output);
+}
+
 bool IsFloorSupported(const BackendId& backend,
                       const TensorInfo& input,
                       const TensorInfo& output,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index c9a12a6..9f55233 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -38,6 +38,7 @@
 #include "layers/Pooling2dLayer.hpp"
 #include "layers/ReshapeLayer.hpp"
 #include "layers/ResizeBilinearLayer.hpp"
+#include "layers/RsqrtLayer.hpp"
 #include "layers/SoftmaxLayer.hpp"
 #include "layers/SpaceToBatchNdLayer.hpp"
 #include "layers/SplitterLayer.hpp"
@@ -103,6 +104,7 @@
 DECLARE_LAYER(Pooling2d)
 DECLARE_LAYER(Reshape)
 DECLARE_LAYER(ResizeBilinear)
+DECLARE_LAYER(Rsqrt)
 DECLARE_LAYER(Softmax)
 DECLARE_LAYER(SpaceToBatchNd)
 DECLARE_LAYER(Splitter)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 0d95954..187d04e 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -727,6 +727,11 @@
     return m_Graph->AddLayer<EqualLayer>(name);
 }
 
+IConnectableLayer* Network::AddRsqrtLayer(const char * name)
+{
+    return m_Graph->AddLayer<RsqrtLayer>(name);
+}
+
 OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
     : m_Graph(std::move(graph))
 {
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index f36ca4f..ba741e9 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -139,6 +139,8 @@
 
     IConnectableLayer* AddEqualLayer(const char* name = nullptr) override;
 
+    IConnectableLayer* AddRsqrtLayer(const char* name = nullptr) override;
+
 private:
     IConnectableLayer* AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
         const ConstTensor& weights,
diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp
new file mode 100644
index 0000000..d516810
--- /dev/null
+++ b/src/armnn/layers/RsqrtLayer.cpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RsqrtLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+RsqrtLayer::RsqrtLayer(const char* name)
+    : Layer(1, 1, LayerType::Rsqrt, name)
+{
+}
+
+std::unique_ptr<IWorkload> RsqrtLayer::CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const
+{
+    RsqrtQueueDescriptor descriptor;
+    return factory.CreateRsqrt(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+RsqrtLayer* RsqrtLayer::Clone(Graph& graph) const
+{
+    return CloneBase<RsqrtLayer>(graph, GetName());
+}
+
+void RsqrtLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+            "RsqrtLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+            GetOutputSlot(0).GetTensorInfo().GetShape(),
+            inferredShapes[0]);
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/RsqrtLayer.hpp b/src/armnn/layers/RsqrtLayer.hpp
new file mode 100644
index 0000000..47a4cce
--- /dev/null
+++ b/src/armnn/layers/RsqrtLayer.hpp
@@ -0,0 +1,40 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <Layer.hpp>
+
+namespace armnn
+{
+
+class RsqrtLayer : public Layer
+{
+public:
+    /// Makes a workload for the Rsqrt type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    RsqrtLayer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref RsqrtLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+protected:
+    /// Constructor to create an RsqrtLayer.
+    /// @param [in] name Optional name for the layer.
+    RsqrtLayer(const char* name);
+
+    /// Default destructor
+    ~RsqrtLayer() = default;
+};
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/backendsCommon/ILayerSupport.cpp b/src/backends/backendsCommon/ILayerSupport.cpp
index 0b78913..aa1bb50 100644
--- a/src/backends/backendsCommon/ILayerSupport.cpp
+++ b/src/backends/backendsCommon/ILayerSupport.cpp
@@ -336,4 +336,11 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool ILayerSupport::IsRsqrtSupported(const TensorInfo &input,
+                                     const TensorInfo &output,
+                                     Optional<std::string &> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 } // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 1dac498..a5db088 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1040,4 +1040,15 @@
                                        "second input");
 }
 
+void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    ValidateSingleInput(workloadInfo, "RsqrtQueueDescriptor");
+    ValidateSingleOutput(workloadInfo, "RsqrtQueueDescriptor");
+    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
+                              workloadInfo.m_OutputTensorInfos[0],
+                              "RsqrtQueueDescriptor",
+                              "input",
+                              "output");
+}
+
 } //namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 9142d87..59e3dfb 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -373,4 +373,9 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct RsqrtQueueDescriptor : QueueDescriptor
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 } //namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 3b8a7d8..1dc96a5 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -600,6 +600,15 @@
             result = layerSupportObject->IsResizeBilinearSupported(OverrideDataType(input, dataType), reason);
             break;
         }
+        case LayerType::Rsqrt:
+        {
+            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+            result = layerSupportObject->IsRsqrtSupported(OverrideDataType(input, dataType),
+                                                          OverrideDataType(output, dataType),
+                                                          reason);
+            break;
+        }
         case LayerType::Softmax:
         {
             auto cLayer = boost::polymorphic_downcast<const SoftmaxLayer*>(&layer);
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index e72987f..aee9f91 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -156,6 +156,9 @@
 
     virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const = 0;
+
+    virtual std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
+                                                   const WorkloadInfo& info) const = 0;
 };
 
 } //namespace armnn
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index d4c5fe4..d6528bb 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -378,6 +378,8 @@
 
 DECLARE_LAYER_POLICY_2_PARAM(Reshape)
 
+DECLARE_LAYER_POLICY_1_PARAM(Rsqrt)
+
 DECLARE_LAYER_POLICY_2_PARAM(Softmax)
 
 DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index 5215007..8e4596b 100755
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -6734,6 +6734,170 @@
     return result;
 }
 
+LayerTestResult<float, 2> Rsqrt2dTestCommon(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::TensorInfo inputTensorInfo,
+    const armnn::TensorInfo outputTensorInfo,
+    std::vector<float> inputValues,
+    std::vector<float> expectedOutputValues)
+{
+    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, std::vector<float>(inputValues));
+
+    LayerTestResult<float, 2> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(expectedOutputValues));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::RsqrtQueueDescriptor descriptor;
+
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+
+    return result;
+}
+LayerTestResult<float, 2> Rsqrt2dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputShape{ 2, 2 };
+    const armnn::TensorShape outputShape{ 2, 2 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+    std::vector<float> inputValues
+            {
+                    1.f, 4.f,
+                    16.f, 25.f
+            };
+
+    std::vector<float> expectedOutputValues
+            {
+                    1.f, 0.5f,
+                    0.25f, 0.2f
+            };
+
+    return Rsqrt2dTestCommon(workloadFactory, memoryManager,
+                             inputTensorInfo, outputTensorInfo,
+                             inputValues, expectedOutputValues);
+}
+
+LayerTestResult<float, 3> Rsqrt3dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputShape{ 3, 1, 2 };
+    const armnn::TensorShape outputShape{ 3, 1, 2 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+    std::vector<float> inputValues
+            {
+                    1.f, 4.f, 16.f,
+                    25.f, 64.f, 100.f
+            };
+
+    std::vector<float> expectedOutputValues
+            {
+                    1.f, 0.5f, 0.25f,
+                    0.2f, 0.125f, 0.1f
+            };
+
+    auto inputTensor = MakeTensor<float, 3>(inputTensorInfo, std::vector<float>(inputValues));
+
+    LayerTestResult<float, 3> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float >(expectedOutputValues));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::RsqrtQueueDescriptor descriptor;
+
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
+
+    return result;
+}
+
+LayerTestResult<float, 2> RsqrtZeroTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputShape{ 1, 2 };
+    const armnn::TensorShape outputShape{ 1, 2 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+    std::vector<float> inputValues
+            {
+                    0.f, -0.f
+            };
+
+    std::vector<float> expectedOutputValues
+            {
+                    INFINITY, -INFINITY
+            };
+
+    return Rsqrt2dTestCommon(workloadFactory, memoryManager,
+                             inputTensorInfo, outputTensorInfo,
+                             inputValues, expectedOutputValues);
+}
+
+LayerTestResult<float, 2> RsqrtNegativeTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputShape{ 1, 2 };
+    const armnn::TensorShape outputShape{ 1, 2 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+    std::vector<float> inputValues
+            {
+                    -25.f, -16.f
+            };
+
+    std::vector<float> expectedOutputValues
+            {
+                    -NAN, -NAN
+            };
+
+    return Rsqrt2dTestCommon(workloadFactory, memoryManager,
+                             inputTensorInfo, outputTensorInfo,
+                             inputValues, expectedOutputValues);
+}
+
 LayerTestResult<float, 4> BatchNormTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index a871594..98c0806 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -639,6 +639,30 @@
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout  dataLayout);
 
+LayerTestResult<float, 2> Rsqrt2dTestCommon(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::TensorInfo inputTensorInfo,
+        const armnn::TensorInfo outputTensorInfo,
+        std::vector<float> inputValues,
+        std::vector<float> expectedOutputValues);
+
+LayerTestResult<float, 2> Rsqrt2dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 3> Rsqrt3dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 2> RsqrtZeroTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 2> RsqrtNegativeTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
 LayerTestResult<float, 4> BatchNormTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 0653b41..6b03585 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -350,4 +350,10 @@
     return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
+                                                          const WorkloadInfo& info) const
+{
+    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 } // namespace armnn
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 2844721..85cbd91 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -147,6 +147,9 @@
     virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
+    virtual std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
+                                                   const WorkloadInfo& info) const override;
+
 private:
     template<typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
     static std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index e635f0c..ff60efb 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -318,4 +318,10 @@
     return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor &descriptor,
+                                                            const WorkloadInfo &info) const
+{
+    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 } // namespace armnn
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 2f7a2db..9a63088 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -148,6 +148,9 @@
     virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
+    virtual std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
+                                                   const WorkloadInfo& info) const override;
+
 private:
     mutable std::shared_ptr<NeonMemoryManager> m_MemoryManager;
 };
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index a64339e..56d2e4c 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -495,6 +495,17 @@
                                      &TrueFunc<>);
 }
 
+bool RefLayerSupport::IsRsqrtSupported(const TensorInfo& input,
+                                       const TensorInfo& output,
+                                       Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(output);
+    return IsSupportedForDataTypeRef(reasonIfUnsupported,
+                                     input.GetDataType(),
+                                     &TrueFunc<>,
+                                     &FalseFuncU8<>);
+}
+
 bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SoftmaxDescriptor& descriptor,
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 3941f4b..188faa8 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -185,6 +185,10 @@
     bool IsResizeBilinearSupported(const TensorInfo& input,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsRsqrtSupported(const TensorInfo& input,
+                          const TensorInfo& output,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsSoftmaxSupported(const TensorInfo& input,
                             const TensorInfo& output,
                             const SoftmaxDescriptor& descriptor,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index eb8807e..7929363 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -312,4 +312,10 @@
     return MakeWorkload<RefDebugFloat32Workload, RefDebugUint8Workload>(descriptor, info);
 }
 
-} // namespace armnn
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
+                                                           const WorkloadInfo& info) const
+{
+    return MakeWorkload<RefRsqrtFloat32Workload, NullWorkload>(descriptor, info);
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index f4401cc..f6707f5 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -164,6 +164,9 @@
 
     virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
+
+    virtual std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
+                                                   const WorkloadInfo& info) const override;
 private:
 
     template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index b23c752..84f15c9 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -57,6 +57,7 @@
         workloads/RefReshapeUint8Workload.cpp \
         workloads/RefResizeBilinearFloat32Workload.cpp \
         workloads/RefResizeBilinearUint8Workload.cpp \
+        workloads/RefRsqrtFloat32Workload.cpp \
         workloads/RefSoftmaxFloat32Workload.cpp \
         workloads/RefSoftmaxUint8Workload.cpp \
         workloads/RefSpaceToBatchNdWorkload.cpp \
@@ -64,6 +65,7 @@
         workloads/RefSplitterFloat32Workload.cpp \
         workloads/RefSplitterUint8Workload.cpp \
         workloads/ResizeBilinear.cpp \
+        workloads/Rsqrt.cpp \
         workloads/SpaceToBatchNd.cpp \
         workloads/StridedSlice.cpp \
         workloads/StringMapping.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 7223f04..50c47ae 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -380,6 +380,12 @@
 ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test)
 ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test)
 
+// Rsqrt
+ARMNN_AUTO_TEST_CASE(Rsqrt2d, Rsqrt2dTest)
+ARMNN_AUTO_TEST_CASE(Rsqrt3d, Rsqrt3dTest)
+ARMNN_AUTO_TEST_CASE(RsqrtZero, RsqrtZeroTest)
+ARMNN_AUTO_TEST_CASE(RsqrtNegative, RsqrtNegativeTest)
+
 // Permute
 ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteFloat32Test)
 ARMNN_AUTO_TEST_CASE(SimplePermuteUint8, SimplePermuteUint8Test)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index d71e6ea..d15f77d 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -94,6 +94,8 @@
     RefResizeBilinearFloat32Workload.hpp
     RefResizeBilinearUint8Workload.cpp
     RefResizeBilinearUint8Workload.hpp
+    RefRsqrtFloat32Workload.cpp
+    RefRsqrtFloat32Workload.hpp
     RefSoftmaxFloat32Workload.cpp
     RefSoftmaxFloat32Workload.hpp
     RefSoftmaxUint8Workload.cpp
@@ -110,6 +112,8 @@
     RefWorkloadUtils.hpp
     ResizeBilinear.cpp
     ResizeBilinear.hpp
+    Rsqrt.cpp
+    Rsqrt.hpp
     Softmax.cpp
     Softmax.hpp
     SpaceToBatchNd.hpp
diff --git a/src/backends/reference/workloads/RefRsqrtFloat32Workload.cpp b/src/backends/reference/workloads/RefRsqrtFloat32Workload.cpp
new file mode 100644
index 0000000..c08dbf0
--- /dev/null
+++ b/src/backends/reference/workloads/RefRsqrtFloat32Workload.cpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefRsqrtFloat32Workload.hpp"
+
+#include "RefWorkloadUtils.hpp"
+#include "Rsqrt.hpp"
+
+#include <Profiling.hpp>
+
+namespace armnn
+{
+
+void RefRsqrtFloat32Workload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefRsqrtFloat32Workload_Execute");
+
+    Rsqrt(GetInputTensorDataFloat(0, m_Data),
+          GetOutputTensorDataFloat(0, m_Data),
+          GetTensorInfo(m_Data.m_Inputs[0]));
+}
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefRsqrtFloat32Workload.hpp b/src/backends/reference/workloads/RefRsqrtFloat32Workload.hpp
new file mode 100644
index 0000000..9d1b450
--- /dev/null
+++ b/src/backends/reference/workloads/RefRsqrtFloat32Workload.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefRsqrtFloat32Workload : public Float32Workload<RsqrtQueueDescriptor>
+{
+public:
+    using Float32Workload<RsqrtQueueDescriptor>::Float32Workload;
+    virtual void Execute() const override;
+};
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index ddce68e..8beb03f 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -59,3 +59,4 @@
 #include "RefBatchToSpaceNdUint8Workload.hpp"
 #include "RefBatchToSpaceNdFloat32Workload.hpp"
 #include "RefDebugWorkload.hpp"
+#include "RefRsqrtFloat32Workload.hpp"
diff --git a/src/backends/reference/workloads/Rsqrt.cpp b/src/backends/reference/workloads/Rsqrt.cpp
new file mode 100644
index 0000000..cee38fc
--- /dev/null
+++ b/src/backends/reference/workloads/Rsqrt.cpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Rsqrt.hpp"
+
+#include <cmath>
+
+namespace armnn
+{
+
+void Rsqrt(const float* in,
+           float* out,
+           const TensorInfo& tensorInfo)
+{
+    for (size_t i = 0; i < tensorInfo.GetNumElements(); i++)
+    {
+        out[i] = 1.f / sqrtf(in[i]);
+    }
+}
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/Rsqrt.hpp b/src/backends/reference/workloads/Rsqrt.hpp
new file mode 100644
index 0000000..35caced
--- /dev/null
+++ b/src/backends/reference/workloads/Rsqrt.hpp
@@ -0,0 +1,18 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/Tensor.hpp>
+#include <armnn/Types.hpp>
+
+namespace armnn
+{
+
+/// Performs the reciprocal square root function element-wise
+/// on the inputs to give the outputs.
+void Rsqrt(const float* in,
+           float* out,
+           const TensorInfo& tensorInfo);
+
+} //namespace armnn