IVGCVSW-7925 Add REVERSE V2 to Support Library (SL)

 * Fix typos in README

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I6e30536d353fae7a7828d9e02e7301ab8dd8c115
diff --git a/shim/sl/README.md b/shim/sl/README.md
index 4650965..32f117a 100644
--- a/shim/sl/README.md
+++ b/shim/sl/README.md
@@ -4,13 +4,13 @@
 
 # Passing parameters to the support library runtime.
 
-The support library inherits it's parameters from the Arm NN Android Neural Networks driver. Parameters are passed to it through an environment variable, ARMNN_SL_OPTIONS. A full list of parameters are available ./canonical/DriverOptions.cpp.
+The support library inherits its parameters from the Arm NN Android Neural Networks driver. Parameters are passed to it through an environment variable, ARMNN_SL_OPTIONS. A full list of parameters is available in ./canonical/DriverOptions.cpp.
 
 # Sample usage
 
 ## Running NeuralNetworksSupportLibraryTest
 
-This test suite takes as it's first argument the path to a shared object implementation of the support library. Any library dependencies should be resolvable through the LD_LIBRARY_PATH mechanism. Setting ARMNN_SL_OPTIONS will pass parameters to the Arm NN Support Library Neural Networks driver.
+This test suite takes as its first argument the path to a shared object implementation of the support library. Any library dependencies should be resolvable through the LD_LIBRARY_PATH mechanism. Setting ARMNN_SL_OPTIONS will pass parameters to the Arm NN Support Library Neural Networks driver.
 
 Here we assume that Bash is the current shell and specify "-v" to enable verbose logging and "-c CpuAcc" to direct that the Neon(TM) accelerator be used.
 ~~~
diff --git a/shim/sl/canonical/Converter.cpp b/shim/sl/canonical/Converter.cpp
index 790fad6..5b8c450 100644
--- a/shim/sl/canonical/Converter.cpp
+++ b/shim/sl/canonical/Converter.cpp
@@ -150,6 +150,8 @@
             return ConvertResize(operation, model, data, ResizeMethod::Bilinear);
         case OperationType::RESIZE_NEAREST_NEIGHBOR:
             return ConvertResize(operation, model, data, ResizeMethod::NearestNeighbor);
+        case OperationType::REVERSE:
+            return ConvertReverseV2(operation, model, data);
         case OperationType::RSQRT:
             return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Rsqrt);
         case OperationType::SIN:
@@ -4789,6 +4791,63 @@
     return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
 }
 
+bool Converter::ConvertReverseV2(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertReverseV2()";
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
+    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
+
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsReverseV2Supported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   setBackend,
+                                   inputInfo0,
+                                   inputInfo1,
+                                   outputInfo);
+    };
+
+    if (!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddReverseV2Layer();
+    assert(layer != nullptr);
+    layer->SetBackendId(setBackend);
+    input0.Connect(layer->GetInputSlot(0));
+    input1.Connect(layer->GetInputSlot(1));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
 bool Converter::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
 {
     VLOG(DRIVER) << "Converter::ConvertSpaceToBatchNd()";
diff --git a/shim/sl/canonical/Converter.hpp b/shim/sl/canonical/Converter.hpp
index bf660b9..d19498d 100644
--- a/shim/sl/canonical/Converter.hpp
+++ b/shim/sl/canonical/Converter.hpp
@@ -137,6 +137,8 @@
                               ConversionData& data,
                               armnn::ResizeMethod resizeMethod);
 
+    static bool ConvertReverseV2(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data);