Revert "Revert "IVGCVSW-6268 Add support of Unidirectional Sequence Lstm fp32/fp16 to Neon""

This reverts commit f87b90e4dbb906436cf205a2a19e199bfe9224ed.

Reason for revert: 22.02 release.

Change-Id: I1ca5a79a8957908f655a6c4e79eefa24c5aec645
diff --git a/docs/02_operator_list.dox b/docs/02_operator_list.dox
index a2b31fc..51c01b9 100644
--- a/docs/02_operator_list.dox
+++ b/docs/02_operator_list.dox
@@ -3323,6 +3323,20 @@
          <tr><td>FLOAT32
          <tr><td>QASYMMS8
       </table>
+   <td>CpuAcc
+     <td>
+         <ul>
+          <li>All
+         </ul>
+     <td>
+      <table>
+         <tr><th>Input Types
+         <tr><td>FLOAT32
+      </table>
+      <table>
+         <tr><th>Weight Types
+         <tr><td>FLOAT32
+      </table>
 <tr>
   <td rowspan="3">UnmapLayer
   <td rowspan="3" style="width:200px;"> Layer to perform unmap operation on tensor.
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index 9ed7b7b..2dc6d2a 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -45,6 +45,38 @@
     }
 }
 
+armnn::DataType GetArmNNDataType(arm_compute::DataType dataType)
+{
+    switch(dataType)
+    {
+        case arm_compute::DataType::BFLOAT16:
+            return armnn::DataType::BFloat16;
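+        // Note: U8 maps back to Boolean, mirroring GetArmComputeDataType(),
+        // which encodes armnn Boolean tensors as arm_compute U8.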
+        case arm_compute::DataType::U8:
+            return armnn::DataType::Boolean;
+        case arm_compute::DataType::F16:
+            return armnn::DataType::Float16;
+        case arm_compute::DataType::F32:
+            return armnn::DataType::Float32;
+        case arm_compute::DataType::QASYMM8_SIGNED:
+            return armnn::DataType::QAsymmS8;
+        case arm_compute::DataType::QASYMM8:
+            return armnn::DataType::QAsymmU8;
+        case arm_compute::DataType::QSYMM16:
+            return armnn::DataType::QSymmS16;
+        case arm_compute::DataType::S64:
+            return armnn::DataType::Signed64;
+        case arm_compute::DataType::QSYMM8_PER_CHANNEL:
+            return armnn::DataType::QSymmS8;
+        case arm_compute::DataType::QSYMM8:
+            return armnn::DataType::QSymmS8;
+        case arm_compute::DataType::S32:
+            return armnn::DataType::Signed32;
+        default:
+            ARMNN_ASSERT_MSG(false, "Unknown data type");
+            return armnn::DataType::Float32;
+    }
+}
+
 arm_compute::Coordinates BuildArmComputeReductionCoordinates(size_t inputDimensions,
                                                              unsigned int originalInputRank,
                                                              const std::vector<unsigned int>& armnnAxes)
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.hpp b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
index 30df31b..ba6ef6a 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
@@ -25,6 +25,9 @@
 /// Utility function to map an armnn::DataType to corresponding arm_compute::DataType.
 arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multiScales);
 
+/// Utility function to map an arm_compute::DataType to corresponding armnn::DataType.
+armnn::DataType GetArmNNDataType(arm_compute::DataType dataType);
+
 /// Utility function used to set up an arm_compute::Coordinates from a vector of ArmNN Axes for reduction functions
 arm_compute::Coordinates BuildArmComputeReductionCoordinates(size_t inputDimensions,
                                                              unsigned int originalInputRank,
diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp
index e76af02..fab52ff 100644
--- a/src/backends/aclCommon/ArmComputeUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeUtils.hpp
@@ -112,6 +112,30 @@
     return arm_compute::ActivationLayerInfo();
 }
 
+inline arm_compute::ActivationLayerInfo
+ConvertLstmActivationFuncToAclLayerInfo(uint32_t activationFunction)
+{
+    // When preparing the ActivationLayerInfo object, five activation functions need to be considered.
+    switch (activationFunction)
+    {
+        case 0:
+            return arm_compute::ActivationLayerInfo(); // no activation, do nothing
+        case 1:
+            return arm_compute::ActivationLayerInfo(arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
+        case 3:
+            return arm_compute::ActivationLayerInfo(
+                arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0);
+        case 4:
+            return arm_compute::ActivationLayerInfo(
+                arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0, 1.0);
+        case 6:
+            return arm_compute::ActivationLayerInfo(
+                arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC);
+        default:
+            throw armnn::Exception("Wrong Type of Activation Function!");
+    }
+}
+
 inline arm_compute::ComparisonOperation ConvertComparisonOperationToAcl(const ComparisonDescriptor& descriptor)
 {
     switch (descriptor.m_Operation)
diff --git a/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp
index 66a26cc..c719472 100644
--- a/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp
@@ -17,6 +17,190 @@
 namespace {
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3>
+UnidirectionalSequenceLstmTimeMajorSingleBatchTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
+    const std::vector<T>& input,
+    const std::vector<T>& outputExpected,
+    const armnn::TensorShape& inputShape,
+    const armnn::TensorShape& outputExpectedShape,
+    float qScale = 0.0f,
+    int32_t qOffset = 0,
+    armnn::DataType constantDataType = armnn::DataType::Float32)
+{
+    IgnoreUnused(memoryManager);
+    unsigned int batchSize = armnn::numeric_cast<unsigned int>(inputShape[1]);
+    unsigned int inputSize = armnn::numeric_cast<unsigned int>(inputShape[2]);
+    unsigned int outputSize = armnn::numeric_cast<unsigned int>(outputExpectedShape[2]);
+    unsigned numUnits = outputSize;
+
+    armnn::TensorInfo inputTensorInfo({1, batchSize, inputSize}, ArmnnType, qScale, qOffset);
+    armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
+    armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
+
+    armnn::TensorInfo outputTensorInfo({1, batchSize, outputSize}, ArmnnType, qScale, qOffset);
+
+    std::vector<T> inputVector;
+    inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
+
+    std::vector<T> cellStateInVector(batchSize * numUnits, T());
+    std::vector<T> outputStateInVector(batchSize * outputSize, T());
+
+    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
+    std::vector<T> outputVector;
+    outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
+                                              tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
+                                              tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::UnidirectionalSequenceLstmQueueDescriptor data;
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
+    AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
+
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
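+    // The tensorInfo4/8/16 names reflect total element counts:
+    // 4 = {numUnits}, 8 = {numUnits, inputSize} and 16 = {numUnits, outputSize}.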
+    armnn::TensorInfo tensorInfo4({numUnits}, constantDataType, qScale, qOffset);
+    armnn::TensorInfo tensorInfo8({numUnits, 2}, constantDataType, qScale, qOffset);
+    armnn::TensorInfo tensorInfo16({numUnits, 4}, constantDataType, qScale, qOffset);
+
+    std::vector<float> inputToInputWeights = {-0.45018822f, -0.02338299f, -0.0870589f,
+                                              -0.34550029f, 0.04266912f, -0.15680569f,
+                                              -0.34856534f, 0.43890524f};
+
+    std::vector<float> inputToForgetWeights = { 0.09701663f, 0.20334584f, -0.50592935f,
+                                                -0.31343272f, -0.40032279f, 0.44781327f,
+                                                0.01387155f, -0.35593212f};
+
+    std::vector<float> inputToCellWeights = { -0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f,
+                                              -0.20583314f, 0.44344562f, 0.22077113f,
+                                              -0.29909778f};
+
+    std::vector<float> inputToOutputWeights = { -0.25065863f, -0.28290087f, 0.04613829f,
+                                                0.40525138f, 0.44272184f, 0.03897077f,
+                                                -0.1556896f, 0.19487578f};
+
+    std::vector<float> recurrentToInputWeights = {-0.0063535f, -0.2042388f, 0.31454784f,
+                                                  -0.35746509f, 0.28902304f, 0.08183324f,
+                                                  -0.16555229f, 0.02286911f, -0.13566875f,
+                                                  0.03034258f, 0.48091322f, -0.12528998f,
+                                                  0.24077177f, -0.51332325f, -0.33502164f,
+                                                  0.10629296f};
+
+    std::vector<float> recurrentToForgetWeights = { -0.48684245f, -0.06655136f, 0.42224967f,
+                                                    0.2112639f, 0.27654213f, 0.20864892f,
+                                                    -0.07646349f, 0.45877004f, 0.00141793f,
+                                                    -0.14609534f, 0.36447752f, 0.09196436f,
+                                                    0.28053468f, 0.01560611f, -0.20127171f,
+                                                    -0.01140004f};
+
+    std::vector<float> recurrentToCellWeights = { -0.3407414f, 0.24443203f, -0.2078532f,
+                                                  0.26320225f, 0.05695659f, -0.00123841f,
+                                                  -0.4744786f, -0.35869038f, -0.06418842f,
+                                                  -0.13502428f, -0.501764f, 0.22830659f,
+                                                  -0.46367589f, 0.26016325f, -0.03894562f,
+                                                  -0.16368064f};
+
+    std::vector<float> recurrentToOutputWeights = { 0.43385774f, -0.17194885f, 0.2718237f,
+                                                    0.09215671f, 0.24107647f, -0.39835793f,
+                                                    0.18212086f, 0.01301402f, 0.48572797f,
+                                                    -0.50656658f, 0.20047462f, -0.20607421f,
+                                                    -0.51818722f, -0.15390486f, 0.0468148f,
+                                                    0.39922136f};
+
+    std::vector<float> cellToInputWeights = {0., 0., 0., 0.};
+
+    std::vector<float> inputGateBias = {0., 0., 0., 0.};
+
+    std::vector<float> forgetGateBias = {1., 1., 1., 1.};
+
+    std::vector<float> cellBias = {0., 0., 0., 0.};
+
+    std::vector<float> outputGateBias = {0., 0., 0., 0.};
+
+    armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo8);
+    armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo8);
+    armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo8);
+    armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo8);
+    armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo16);
+    armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
+    armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
+    armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
+    armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo4);
+    armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
+    armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
+    armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
+    armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);
+
+    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
+    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
+    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
+    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
+
+    data.m_InputToInputWeights = &inputToInputWeightsTensor;
+    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
+    data.m_InputToCellWeights = &inputToCellWeightsTensor;
+    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
+    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
+    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
+    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
+    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
+    data.m_InputGateBias = &inputGateBiasTensor;
+    data.m_ForgetGateBias = &forgetGateBiasTensor;
+    data.m_CellBias = &cellBiasTensor;
+    data.m_OutputGateBias = &outputGateBiasTensor;
+
+    // Flags to set test configuration
+    data.m_Parameters.m_ActivationFunc = 4;
+    data.m_Parameters.m_CifgEnabled = false;
+    data.m_Parameters.m_PeepholeEnabled = false;
+    data.m_Parameters.m_ProjectionEnabled = false;
+    data.m_Parameters.m_ClippingThresCell = 10;
+    data.m_Parameters.m_ClippingThresProj = 0;
+    data.m_Parameters.m_TimeMajor = true;
+
+    std::unique_ptr<armnn::IWorkload> workload
+        = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
+    inputHandle->Allocate();
+    outputStateInHandle->Allocate();
+    cellStateInHandle->Allocate();
+
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
+    CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
+    CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+    return LayerTestResult<T, 3>(actualOutput,
+                                 outputVector,
+                                 outputHandle->GetShape(),
+                                 outputTensorInfo.GetShape());
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 3> UnidirectionalSequenceLstmLayerFloat32TestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -369,6 +553,40 @@
 
 } // anonymous namespace
 
+LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerFloat32TimeMajorSingleBatchTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    armnn::TensorInfo inputDesc({1, 2, 2}, armnn::DataType::Float32);
+    std::vector<float> input = {2., 3., 3., 4.};
+
+    armnn::TensorInfo outputDesc({1, 2, 4}, armnn::DataType::Float32);
+    std::vector<float> expectedOutput =
+                          {-0.02973187f, 0.1229473f,   0.20885126f, -0.15358765f,
+                           -0.0185422f,   0.11281417f,  0.24466537f, -0.1826292f};
+
+    return UnidirectionalSequenceLstmTimeMajorSingleBatchTestImpl<armnn::DataType::Float32>(
+        workloadFactory, memoryManager, tensorHandleFactory,
+        input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape());
+}
+
+LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerFloat32BatchMajorSingleBatchTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    armnn::TensorInfo inputInfo({3, 1, 3}, armnn::DataType::Float32);
+    std::vector<float> input = { 1., 2., 3., 4., 5., 4., 3., 2., 1. };
+
+    armnn::TensorInfo outputInfo({3, 1, 4}, armnn::DataType::Float32);
+    std::vector<float> expectedOutput = { -0.0714901f, -0.162117f, -0.175168f, -0.0232934f,
+                                          -0.0424661f, -0.231802f, -0.513374f, -0.00680323f,
+                                          -0.0668735f, 0.204078f, -0.42765f, -0.0312321f };
+    return UnidirectionalSequenceLstmLayerFloat32TestImpl<armnn::DataType::Float32>(
+        workloadFactory, memoryManager, tensorHandleFactory,
+        input, expectedOutput, inputInfo.GetShape(), outputInfo.GetShape());
+}
+
 LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
diff --git a/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.hpp
index 3a1d178..f303b28 100644
--- a/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.hpp
@@ -10,6 +10,16 @@
 #include <armnn/backends/IBackendInternal.hpp>
 #include <armnn/backends/WorkloadFactory.hpp>
 
+LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerFloat32TimeMajorSingleBatchTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerFloat32BatchMajorSingleBatchTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
 LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
diff --git a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
index 37dfab6..e190f33 100644
--- a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
@@ -7,6 +7,7 @@
 #include <cl/ClTensorHandle.hpp>
 #include <armnn/backends/TensorHandle.hpp>
 #include <cl/ClLayerSupport.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 
 #include <armnn/utility/NumericCast.hpp>
@@ -19,8 +20,8 @@
 {
 using namespace armcomputetensorutils;
 
-ClLstmFloatWorkload::ClLstmFloatWorkload(const LstmQueueDescriptor &descriptor,
-                                         const WorkloadInfo &info,
+ClLstmFloatWorkload::ClLstmFloatWorkload(const LstmQueueDescriptor& descriptor,
+                                         const WorkloadInfo& info,
                                          const arm_compute::CLCompileContext& clCompileContext)
         : FloatWorkload<LstmQueueDescriptor>(descriptor, info)
 {
@@ -28,7 +29,7 @@
     ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClLstmFloatWorkload_Construct",
                                          descriptor.m_Parameters,
                                          info,
-                                         this->GetGuid());
+                                         GetGuid());
 
     arm_compute::LSTMParams<arm_compute::ICLTensor> lstm_param;
 
@@ -163,35 +164,8 @@
     float projection_threshold = m_Data.m_Parameters.m_ClippingThresProj;
 
     // for preparing the object for the class ActivationLayerInfo, we need to consider 5 situations
-    arm_compute::ActivationLayerInfo activationLayerInfo;
-    if (m_Data.m_Parameters.m_ActivationFunc == 0)
-    {
-        // no activation, do nothing
-    }
-    else if (m_Data.m_Parameters.m_ActivationFunc == 1)
-    {
-        activationLayerInfo = arm_compute::ActivationLayerInfo(
-                arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
-    }
-    else if (m_Data.m_Parameters.m_ActivationFunc == 3)
-    {
-        activationLayerInfo = arm_compute::ActivationLayerInfo(
-                arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0);
-    }
-    else if (m_Data.m_Parameters.m_ActivationFunc == 4)
-    {
-        activationLayerInfo =  arm_compute::ActivationLayerInfo(
-                arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0, 1.0);
-    }
-    else if (m_Data.m_Parameters.m_ActivationFunc == 6)
-    {
-        activationLayerInfo =  arm_compute::ActivationLayerInfo(
-                arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC);
-    }
-    else
-    {
-        throw armnn::Exception("Wrong Type of Activation Function!");
-    }
+    arm_compute::ActivationLayerInfo activationLayerInfo =
+        ConvertLstmActivationFuncToAclLayerInfo(m_Data.m_Parameters.m_ActivationFunc);
 
     {
         ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClLstmFloatWorkload_configure");
@@ -263,7 +237,7 @@
 
 void ClLstmFloatWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClLstmFloatWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClLstmFloatWorkload_Execute", GetGuid());
     RunClFunction(m_LstmLayer, CHECK_LOCATION());
 }
 
@@ -354,35 +328,8 @@
     float projection_threshold = descriptor.m_ClippingThresProj;
 
     // for preparing the object for the class ActivationLayerInfo, we need to consider 5 situations
-    arm_compute::ActivationLayerInfo activationLayerInfo;
-    if (descriptor.m_ActivationFunc == 0)
-    {
-        // no activation, do nothing
-    }
-    else if (descriptor.m_ActivationFunc == 1)
-    {
-        activationLayerInfo = arm_compute::ActivationLayerInfo(
-                arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
-    }
-    else if (descriptor.m_ActivationFunc == 3)
-    {
-        activationLayerInfo = arm_compute::ActivationLayerInfo(
-                arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0);
-    }
-    else if (descriptor.m_ActivationFunc == 4)
-    {
-        activationLayerInfo =  arm_compute::ActivationLayerInfo(
-                arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0, 1.0);
-    }
-    else if (descriptor.m_ActivationFunc == 6)
-    {
-        activationLayerInfo =  arm_compute::ActivationLayerInfo(
-                arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC);
-    }
-    else
-    {
-        throw armnn::Exception("Wrong Type of Activation Function!");
-    }
+    arm_compute::ActivationLayerInfo activationLayerInfo =
+        ConvertLstmActivationFuncToAclLayerInfo(descriptor.m_ActivationFunc);
 
     if (descriptor.m_LayerNormEnabled)
     {
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 2b2229a..8901e47 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -76,6 +76,7 @@
 #include "workloads/NeonSubtractionWorkload.hpp"
 #include "workloads/NeonTransposeConvolution2dWorkload.hpp"
 #include "workloads/NeonTransposeWorkload.hpp"
+#include "workloads/NeonUnidirectionalSequenceLstmFloatWorkload.hpp"
 #endif
 
 namespace armnn
@@ -344,6 +345,17 @@
                                     *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
                                     lstmParamsInfo.value(),
                                     reasonIfUnsupported);
+        case LayerType::UnidirectionalSequenceLstm:
+            return IsUnidirectionalSequenceLstmSupported(infos[0],
+                                                         infos[1],
+                                                         infos[2],
+                                                         infos[3],
+                                                         infos[4],
+                                                         infos[5],
+                                                         *(PolymorphicDowncast<const
+                                                            UnidirectionalSequenceLstmDescriptor*>(&descriptor)),
+                                                         lstmParamsInfo.value(),
+                                                         reasonIfUnsupported);
         case LayerType::Maximum:
             return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
         case LayerType::Mean:
@@ -1421,4 +1433,26 @@
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
+bool NeonLayerSupport::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
+                                                             const TensorInfo& outputStateIn,
+                                                             const TensorInfo& cellStateIn,
+                                                             const TensorInfo& output,
+                                                             const Optional<TensorInfo>& hiddenStateOutput,
+                                                             const Optional<TensorInfo>& cellStateOutput,
+                                                             const UnidirectionalSequenceLstmDescriptor& descriptor,
+                                                             const LstmInputParamsInfo& paramsInfo,
+                                                             Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonUnidirectionalSequenceLstmFloatWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   outputStateIn,
+                                   cellStateIn,
+                                   output,
+                                   hiddenStateOutput,
+                                   cellStateOutput,
+                                   descriptor,
+                                   paramsInfo);
+}
+
 } // namespace armnn
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index afa9b41..1eef41f 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -336,6 +336,16 @@
                               const TransposeDescriptor& descriptor,
                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
+                                               const TensorInfo& outputStateIn,
+                                               const TensorInfo& cellStateIn,
+                                               const TensorInfo& output,
+                                               const Optional<TensorInfo>& hiddenStateOutput,
+                                               const Optional<TensorInfo>& cellStateOutput,
+                                               const UnidirectionalSequenceLstmDescriptor& descriptor,
+                                               const LstmInputParamsInfo& paramsInfo,
+                                               Optional<std::string&> reasonIfUnsupported) const override;
+
 private:
     const IBackendInternal::IBackendSpecificModelContextPtr m_ModelContextPtr;
 
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 19d322b..7d94daf 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -555,6 +555,11 @@
                                                                         info,
                                                                         m_MemoryManager->GetIntraLayerManager());
         }
+        case LayerType::UnidirectionalSequenceLstm :
+        {
+            auto desc = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
+            return MakeWorkloadHelper<NeonUnidirectionalSequenceLstmFloatWorkload, NullWorkload>(*desc, info);
+        }
         default:
             return nullptr;
     }
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 8ae50ac..d43426f 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -84,7 +84,8 @@
         workloads/NeonStridedSliceWorkload.cpp \
         workloads/NeonSubtractionWorkload.cpp \
         workloads/NeonTransposeConvolution2dWorkload.cpp \
-        workloads/NeonTransposeWorkload.cpp
+        workloads/NeonTransposeWorkload.cpp \
+        workloads/NeonUnidirectionalSequenceLstmFloatWorkload.cpp
 
 else
 
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 9648c16..231e2b0 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -907,6 +907,22 @@
 // QuantizedLstm
 ARMNN_AUTO_TEST_CASE_WITH_THF(QuantizedLstm, QuantizedLstmTest)
 
+// Unidirectional Sequence Lstm
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnidirectionalSequenceLstmLayerFloat32TimeMajorSingleBatch,
+                              UnidirectionalSequenceLstmLayerFloat32TimeMajorSingleBatchTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnidirectionalSequenceLstmLayerFloat32BatchMajorSingleBatch,
+                              UnidirectionalSequenceLstmLayerFloat32BatchMajorSingleBatchTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnidirectionalSequenceLstmLayerFloat32,
+                              UnidirectionalSequenceLstmLayerFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnidirectionalSequenceLstmLayerFloat32TimeMajor,
+                              UnidirectionalSequenceLstmLayerFloat32TimeMajorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjection,
+                              UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjectionTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjectionWithLayerNorm,
+                              UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjection,
+                              UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest)
+
 // Mean
 ARMNN_AUTO_TEST_CASE_WITH_THF(MeanSimpleFloat32, MeanSimpleTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(MeanSimpleAxisFloat32, MeanSimpleAxisTest<DataType::Float32>)
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 0c64a19..bae51b9 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -131,6 +131,8 @@
     NeonTransposeConvolution2dWorkload.hpp
     NeonTransposeWorkload.cpp
     NeonTransposeWorkload.hpp
+    NeonUnidirectionalSequenceLstmFloatWorkload.cpp
+    NeonUnidirectionalSequenceLstmFloatWorkload.hpp
     NeonWorkloads.hpp
     NeonWorkloadUtils.hpp
 )
diff --git a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
index 2f14ab9..19c85f7 100644
--- a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
@@ -6,7 +6,8 @@
 #include "NeonLstmFloatWorkload.hpp"
 #include "NeonWorkloadUtils.hpp"
 
-#include "aclCommon/ArmComputeTensorUtils.hpp"
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
 
 #include <armnn/utility/NumericCast.hpp>
 
@@ -16,14 +17,14 @@
 {
 using namespace armcomputetensorutils;
 
-NeonLstmFloatWorkload::NeonLstmFloatWorkload(const LstmQueueDescriptor &descriptor, const WorkloadInfo &info)
+NeonLstmFloatWorkload::NeonLstmFloatWorkload(const LstmQueueDescriptor& descriptor, const WorkloadInfo& info)
         : FloatWorkload<LstmQueueDescriptor>(descriptor, info)
 {
     // Report Profiling Details
     ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonLstmFloatWorkload_Construct",
                                          descriptor.m_Parameters,
                                          info,
-                                         this->GetGuid());
+                                         GetGuid());
 
     arm_compute::LSTMParams<arm_compute::ITensor> lstm_param;
 
@@ -160,36 +161,8 @@
     float projection_threshold = m_Data.m_Parameters.m_ClippingThresProj;
 
     // for preparing the object for the class ActivationLayerInfo, we need to consider 5 situations
-    arm_compute::ActivationLayerInfo activationLayerInfo;
-    if (m_Data.m_Parameters.m_ActivationFunc == 0)
-    {
-        // no activation, do nothing
-    }
-    else if (m_Data.m_Parameters.m_ActivationFunc == 1)
-    {
-        activationLayerInfo = arm_compute::ActivationLayerInfo(
-                arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
-    }
-    else if (m_Data.m_Parameters.m_ActivationFunc == 3)
-    {
-        activationLayerInfo = arm_compute::ActivationLayerInfo(
-                arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0);
-    }
-    else if (m_Data.m_Parameters.m_ActivationFunc == 4)
-    {
-        activationLayerInfo = arm_compute::ActivationLayerInfo(
-                arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0, 1.0);
-    }
-    else if (m_Data.m_Parameters.m_ActivationFunc == 6)
-    {
-        activationLayerInfo = arm_compute::ActivationLayerInfo(
-                arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC);
-    }
-    else
-    {
-        throw armnn::Exception("Wrong Type of Activation Function!");
-    }
-
+    arm_compute::ActivationLayerInfo activationLayerInfo =
+        ConvertLstmActivationFuncToAclLayerInfo(m_Data.m_Parameters.m_ActivationFunc);
 
     m_LstmLayer.configure(&input, m_InputToForgetWeightsTensor.get(), m_InputToCellWeightsTensor.get(),
                           m_InputToOutputWeightsTensor.get(), m_RecurrentToForgetWeightsTensor.get(),
@@ -273,7 +246,7 @@
 
 void NeonLstmFloatWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonLstmFloatWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonLstmFloatWorkload_Execute", GetGuid());
     m_LstmLayer.run();
 }
 
@@ -390,31 +363,8 @@
     float projection_threshold = descriptor.m_ClippingThresProj;
 
     // for preparing the object for the class ActivationLayerInfo, we need to consider 5 situations
-    arm_compute::ActivationLayerInfo activationLayerInfo;
-    switch (descriptor.m_ActivationFunc)
-    {
-        case 0:
-            // no activation, do nothing
-            break;
-        case 1:
-            activationLayerInfo = arm_compute::ActivationLayerInfo(
-                    arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
-            break;
-        case 3:
-            activationLayerInfo = arm_compute::ActivationLayerInfo(
-                    arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0);
-            break;
-        case 4:
-            activationLayerInfo = arm_compute::ActivationLayerInfo(
-                    arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0, 1.0);
-            break;
-        case 6:
-            activationLayerInfo = arm_compute::ActivationLayerInfo(
-                    arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC);
-            break;
-        default:
-            throw armnn::Exception("Wrong Type of Activation Function!");
-    }
+    arm_compute::ActivationLayerInfo activationLayerInfo =
+        ConvertLstmActivationFuncToAclLayerInfo(descriptor.m_ActivationFunc);
 
     return arm_compute::NELSTMLayer::validate(&aclInputInfo,
                                               &aclInputToForgetWeightsInfo,
diff --git a/src/backends/neon/workloads/NeonUnidirectionalSequenceLstmFloatWorkload.cpp b/src/backends/neon/workloads/NeonUnidirectionalSequenceLstmFloatWorkload.cpp
new file mode 100644
index 0000000..c911afb
--- /dev/null
+++ b/src/backends/neon/workloads/NeonUnidirectionalSequenceLstmFloatWorkload.cpp
@@ -0,0 +1,911 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonUnidirectionalSequenceLstmFloatWorkload.hpp"
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeUtils.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <armnn/utility/NumericCast.hpp>
+#include <armnnUtils/Permute.hpp>
+#include <neon/test/NeonWorkloadFactoryHelper.hpp>
+#include <backendsCommon/WorkloadUtils.hpp>
+
+#include "neon/NeonTensorHandle.hpp"
+
+namespace
+{
+unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int axis)
+{
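+    // ACL counts dimensions in reverse order to ArmNN, so convert the ArmNN axis index to its ACL equivalent.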
+    return (numDimensions - axis) - 1;
+}
+} //namespace
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+NeonUnidirectionalSequenceLstmFloatWorkload::NeonUnidirectionalSequenceLstmFloatWorkload
+    (const UnidirectionalSequenceLstmQueueDescriptor& descriptor, const WorkloadInfo& info)
+    : FloatWorkload<UnidirectionalSequenceLstmQueueDescriptor>(descriptor, info)
+{
+    // Report Profiling Details
+    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonUnidirectionalSequenceLstmFloatWorkload_Construct",
+                                         descriptor.m_Parameters,
+                                         info,
+                                         GetGuid());
+
+    const arm_compute::ITensor& input = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    TensorInfo inputInfo = info.m_InputTensorInfos[0];
+    TensorInfo outputInfo = info.m_OutputTensorInfos[0];
+
+    arm_compute::DataType armComputeDataType = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetDataType();
+    armnn::DataType armnnDataType = GetArmNNDataType(armComputeDataType);
+
+    TensorShape inputLayerShape = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetShape();
+    TensorShape cellStateLayerShape = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[2])->GetShape();
+    TensorShape outputLayerShape = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetShape();
+
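+    // The input is [maxTime, batchSize, inputSize] when time major, otherwise [batchSize, maxTime, inputSize].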
+    unsigned int maxTime = m_Data.m_Parameters.m_TimeMajor ? inputLayerShape[0] : inputLayerShape[1];
+    unsigned int batchSize = m_Data.m_Parameters.m_TimeMajor ? inputLayerShape[1] : inputLayerShape[0];
+    unsigned int inputSize = inputLayerShape[2];
+    unsigned int outputSize = outputLayerShape[2];
+    unsigned int numUnits = cellStateLayerShape[1];
+
+    const TensorShape timeMajorShapeInput({maxTime, batchSize, inputSize});
+    const TensorShape timeMajorShapeOutput({maxTime, batchSize, outputSize});
+
+    //
+    // Permute: performed if Unidirectional Sequence Layer inputs/outputs are in batch major format.
+    //
+    if (!m_Data.m_Parameters.m_TimeMajor)
+    {
+        std::unique_ptr<arm_compute::NEPermute> layer(new arm_compute::NEPermute());
+
+        TensorInfo permuteOutInfo = inputInfo;
+        permuteOutInfo.SetShape(timeMajorShapeInput);
+        BuildArmComputeTensor(m_PermuteFirstOut, permuteOutInfo);
+        armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_PermuteFirstOut);
+
+        // Permute to time major format.
+        layer->configure(&input, &m_PermuteFirstOut, arm_compute::PermutationVector(0U,2U,1U));
+        m_Permute1.reset(layer.release());
+    }
+
+    //
+    // Split and Concat Tensors
+    //
+    for (unsigned int i = 0; i < maxTime; ++i)
+    {
+        arm_compute::Tensor splitter_out;
+        arm_compute::Tensor concat_in;
+
+        auto splitterTensorInfo = inputInfo;
+        auto concatTensorInfo = outputInfo;
+        splitterTensorInfo.SetShape({batchSize, inputSize});
+        concatTensorInfo.SetShape({batchSize, outputSize});
+        BuildArmComputeTensor(splitter_out, splitterTensorInfo);
+        BuildArmComputeTensor(concat_in, concatTensorInfo);
+
+        armcomputetensorutils::InitialiseArmComputeTensorEmpty(splitter_out);
+        armcomputetensorutils::InitialiseArmComputeTensorEmpty(concat_in);
+
+        // append to std::vector<arm_compute::Tensor>
+        m_SplitterOutputsTensors.push_back(std::move(splitter_out));
+        m_ConcatInputsTensors.push_back(std::move(concat_in));
+    }
+
+    for (unsigned int i = 0; i < maxTime; ++i)
+    {
+        // append to std::vector<arm_compute::ITensor*>
+        m_SplitterOutputs.push_back(&m_SplitterOutputsTensors[i]);
+        m_ConcatInputs.push_back(&m_ConcatInputsTensors[i]);
+    }
+
+    //
+    // Split
+    //
+    unsigned int numberDimensions = 3;
+    unsigned int dimension = 0; // splitting on 0-dimension (i.e. maxTime dimension)
+
+    if (maxTime != 1) // ACL split does not work with only one element to split.
+    {
+        ViewsDescriptor splitterDesc(maxTime, numberDimensions);
+        unsigned int splitterDimSizes[3] = {1, batchSize, inputSize};
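+        // Each view selects a single time step of shape [1, batchSize, inputSize] along the maxTime dimension.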
+        for (unsigned int outputIdx = 0u; outputIdx < maxTime; ++outputIdx)
+        {
+            splitterDesc.SetViewOriginCoord(outputIdx, dimension, splitterDimSizes[dimension] * outputIdx);
+            for (unsigned int dimIdx = 0u; dimIdx < numberDimensions; ++dimIdx)
+            {
+                splitterDesc.SetViewSize(outputIdx, dimIdx, splitterDimSizes[dimIdx]);
+            }
+        }
+
+        std::set<unsigned int> splitAxis = ComputeSplitAxis(splitterDesc, timeMajorShapeInput);
+
+        std::unique_ptr<arm_compute::NESplit> split_layer(new arm_compute::NESplit());
+        unsigned int aclAxisSplit = CalcAclAxis(splitterDesc.GetNumDimensions(), *splitAxis.begin());
+        if (!m_Data.m_Parameters.m_TimeMajor)
+        {
+            split_layer->configure(&m_PermuteFirstOut, m_SplitterOutputs, aclAxisSplit);
+        }
+        else
+        {
+            split_layer->configure(&input, m_SplitterOutputs, aclAxisSplit);
+        }
+
+        split_layer->prepare();
+        m_Splitter.reset(split_layer.release());
+    }
+
+    //
+    // Lstm
+    //
+    arm_compute::LSTMParams<arm_compute::ITensor> lstm_param;
+
+    m_InputToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
+    BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo());
+
+    m_InputToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
+    BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo());
+
+    m_InputToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
+    BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo());
+
+    m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
+    BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTensorInfo());
+
+    m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
+    BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorInfo());
+
+    m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
+    BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTensorInfo());
+
+    m_ForgetGateBiasTensor = std::make_unique<arm_compute::Tensor>();
+    BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo());
+
+    m_CellBiasTensor = std::make_unique<arm_compute::Tensor>();
+    BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo());
+
+    m_OutputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
+    BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo());
+
+    // for future reference: check the AndroidNN API for the logic here
+    if (!m_Data.m_Parameters.m_CifgEnabled)
+    {
+        m_InputToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
+        BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo());
+
+        m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
+        BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTensorInfo());
+
+        m_CellToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
+        if (m_Data.m_CellToInputWeights != nullptr)
+        {
+            BuildArmComputeTensor(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights->GetTensorInfo());
+        }
+
+        m_InputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
+        BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo());
+
+        lstm_param.set_cifg_params(m_InputToInputWeightsTensor.get(),
+                                   m_RecurrentToInputWeightsTensor.get(),
+                                   m_Data.m_CellToInputWeights ? m_CellToInputWeightsTensor.get() : nullptr,
+                                   m_InputGateBiasTensor.get());
+    }
+
+    if (m_Data.m_Parameters.m_ProjectionEnabled)
+    {
+        m_ProjectionWeightsTensor = std::make_unique<arm_compute::Tensor>();
+        BuildArmComputeTensor(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights->GetTensorInfo());
+
+        m_ProjectionBiasTensor = std::make_unique<arm_compute::Tensor>();
+        if (m_Data.m_ProjectionBias != nullptr)
+        {
+            BuildArmComputeTensor(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias->GetTensorInfo());
+        }
+
+        lstm_param.set_projection_params(m_ProjectionWeightsTensor.get(),
+                                         m_Data.m_ProjectionBias ? m_ProjectionBiasTensor.get() : nullptr);
+    }
+
+    if (m_Data.m_Parameters.m_PeepholeEnabled)
+    {
+        m_CellToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
+        BuildArmComputeTensor(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights->GetTensorInfo());
+
+        m_CellToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
+        BuildArmComputeTensor(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights->GetTensorInfo());
+
+        lstm_param.set_peephole_params(m_CellToForgetWeightsTensor.get(), m_CellToOutputWeightsTensor.get());
+    }
+
+    if (m_Data.m_Parameters.m_LayerNormEnabled)
+    {
+        m_InputLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
+        if (!m_Data.m_Parameters.m_CifgEnabled)
+        {
+            BuildArmComputeTensor(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights->GetTensorInfo());
+        }
+
+        m_ForgetLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
+        BuildArmComputeTensor(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights->GetTensorInfo());
+
+        m_CellLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
+        BuildArmComputeTensor(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights->GetTensorInfo());
+
+        m_OutputLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
+        BuildArmComputeTensor(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights->GetTensorInfo());
+
+        auto inputNormWeightTensor = m_Data.m_Parameters.m_CifgEnabled ? nullptr : m_InputLayerNormWeightsTensor.get();
+        lstm_param.set_layer_normalization_params(inputNormWeightTensor,
+                                                  m_ForgetLayerNormWeightsTensor.get(),
+                                                  m_CellLayerNormWeightsTensor.get(),
+                                                  m_OutputLayerNormWeightsTensor.get());
+    }
+
+    arm_compute::ITensor& output_state_in = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& cell_state_in   = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
+
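+    // The output state and cell state are written back into the state input tensors,
+    // so each time step consumes the state produced by the previous one.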
+    arm_compute::ITensor& output_state_out = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& cell_state_out = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
+
+    m_ScratchBuffer = std::make_unique<arm_compute::Tensor>();
+    if (m_Data.m_Parameters.m_CifgEnabled)
+    {
+        // scratch_buffer [num_units * 3, batch_size] with CIFG
+        BuildArmComputeTensor(*m_ScratchBuffer, TensorInfo({batchSize, numUnits * 3}, armnnDataType));
+    }
+    else
+    {
+        // scratch_buffer [num_units * 4, batch_size] without CIFG
+        BuildArmComputeTensor(*m_ScratchBuffer, TensorInfo({batchSize, numUnits * 4}, armnnDataType));
+    }
+
+    // Need to be set to a negative threshold to be compatible with ACL
+    float cell_threshold       = m_Data.m_Parameters.m_ClippingThresCell;
+    float projection_threshold = m_Data.m_Parameters.m_ClippingThresProj;
+
+    // Prepare the ActivationLayerInfo object; five activation functions are supported.
+    arm_compute::ActivationLayerInfo activationLayerInfo =
+        ConvertLstmActivationFuncToAclLayerInfo(m_Data.m_Parameters.m_ActivationFunc);
+
+    for (unsigned int i = 0; i != maxTime; ++i)
+    {
+        // Set LSTM input and output ITensors depending on:
+        // input format (timeMajor) & number of LSTM batches (maxTime).
+        arm_compute::ITensor* outputLSTM;
+        arm_compute::ITensor* inputLSTM;
+
+        // If there is only one LSTM time major batch, we will not concat OR permute.
+        // Set input of LSTM to be first input ITensor.
+        // Set output of LSTM to be final output ITensor.
+        // LSTM input/output cannot be > 2 dimensions so need to resize its TensorInfo.
+        if (maxTime == 1 && m_Data.m_Parameters.m_TimeMajor)
+        {
+            TensorShape inputShape = GetTensorShape((&input)->info()->tensor_shape(), 1U);
+            TensorShape outputShape = GetTensorShape((&output)->info()->tensor_shape(), 1U);
+
+            TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
+            TensorShape outputShapeShrink({outputShape[1], outputShape[2]});
+
+            auto acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);
+            auto acl_output_shape_shrink = BuildArmComputeTensorShape(outputShapeShrink);
+
+            (&input)->info()->set_tensor_shape(acl_input_shape_shrink);
+            inputLSTM = const_cast<arm_compute::ITensor*>(&input);
+
+            (&output)->info()->set_tensor_shape(acl_output_shape_shrink);
+            outputLSTM = &output;
+        }
+        // If there is only one LSTM batch major batch, we will not concat, only permute.
+        // Set input of LSTM to be output of initial permute.
+        // Set output of LSTM to be first element of m_ConcatInputs & use that value later in permute.
+        // LSTM output cannot be > 2 dimensions so need to resize its TensorInfo.
+        else if (maxTime == 1 && !m_Data.m_Parameters.m_TimeMajor)
+        {
+            TensorShape inputShape = GetTensorShape(m_PermuteFirstOut.info()->tensor_shape(), 1U);
+            TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
+            auto acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);
+            m_PermuteFirstOut.info()->set_tensor_shape(acl_input_shape_shrink);
+            inputLSTM = &m_PermuteFirstOut;
+
+            outputLSTM = const_cast<arm_compute::ITensor*>(m_ConcatInputs[i]);
+        }
+        // Batch major AND/OR 2+ LSTM batches so will use concat AND/OR permute later on.
+        else
+        {
+            inputLSTM = m_SplitterOutputs[i];
+            outputLSTM = const_cast<arm_compute::ITensor*>(m_ConcatInputs[i]);
+        }
+
+        std::unique_ptr<arm_compute::NELSTMLayer> lstm_layer(new arm_compute::NELSTMLayer());
+        lstm_layer->configure(inputLSTM,
+                              m_InputToForgetWeightsTensor.get(),
+                              m_InputToCellWeightsTensor.get(),
+                              m_InputToOutputWeightsTensor.get(),
+                              m_RecurrentToForgetWeightsTensor.get(),
+                              m_RecurrentToCellWeightsTensor.get(),
+                              m_RecurrentToOutputWeightsTensor.get(),
+                              m_ForgetGateBiasTensor.get(),
+                              m_CellBiasTensor.get(),
+                              m_OutputGateBiasTensor.get(),
+                              &output_state_in,
+                              &cell_state_in,
+                              m_ScratchBuffer.get(),
+                              &output_state_out,
+                              &cell_state_out,
+                              outputLSTM,
+                              lstm_param,
+                              activationLayerInfo,
+                              cell_threshold,
+                              projection_threshold);
+
+        m_Layers.emplace_back(std::move(lstm_layer));
+    }
+
+    armcomputetensorutils::InitialiseArmComputeTensorEmpty(*m_ScratchBuffer);
+
+    InitializeArmComputeTensorData(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights);
+    InitializeArmComputeTensorData(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights);
+    InitializeArmComputeTensorData(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights);
+    InitializeArmComputeTensorData(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights);
+    InitializeArmComputeTensorData(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights);
+    InitializeArmComputeTensorData(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights);
+    InitializeArmComputeTensorData(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias);
+    InitializeArmComputeTensorData(*m_CellBiasTensor, m_Data.m_CellBias);
+    InitializeArmComputeTensorData(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias);
+
+    if (!m_Data.m_Parameters.m_CifgEnabled)
+    {
+        InitializeArmComputeTensorData(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights);
+        InitializeArmComputeTensorData(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights);
+        if (m_Data.m_CellToInputWeights != nullptr)
+        {
+            InitializeArmComputeTensorData(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights);
+        }
+        InitializeArmComputeTensorData(*m_InputGateBiasTensor, m_Data.m_InputGateBias);
+    }
+
+    if (m_Data.m_Parameters.m_ProjectionEnabled)
+    {
+        InitializeArmComputeTensorData(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights);
+        if (m_Data.m_ProjectionBias != nullptr)
+        {
+            InitializeArmComputeTensorData(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias);
+        }
+    }
+
+    if (m_Data.m_Parameters.m_PeepholeEnabled)
+    {
+        InitializeArmComputeTensorData(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights);
+        InitializeArmComputeTensorData(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights);
+    }
+
+    if (m_Data.m_Parameters.m_LayerNormEnabled)
+    {
+        if (!m_Data.m_Parameters.m_CifgEnabled)
+        {
+            InitializeArmComputeTensorData(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights);
+        }
+        InitializeArmComputeTensorData(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights);
+        InitializeArmComputeTensorData(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights);
+        InitializeArmComputeTensorData(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights);
+    }
+
+    // Force Compute Library to perform the necessary copying and reshaping, after which
+    // the input tensors that are no longer needed can be freed.
+    for (uint32_t i = 0; i < m_Layers.size(); ++i)
+    {
+        m_Layers[i]->prepare();
+    }
+
+    //
+    // Concat
+    //
+
+    // Expand the LSTM outputs with one extra singleton dimension so they can be used as concatenation inputs.
+    TensorShape shape = GetTensorShape(m_ConcatInputs[0]->info()->tensor_shape(), 1U);
+    TensorShape shapeExpandTimeMajor({1, shape[0], shape[1]});
+    TensorShape shapeExpandBatchMajor({shape[0], 1, shape[1]});
+
+    if (maxTime != 1) // ACL concat does not work with only one element to concatenate.
+    {
+        for (unsigned int i = 0; i < maxTime; ++i)
+        {
+            m_ConcatInputs[i]->info()->set_tensor_shape(BuildArmComputeTensorShape(shapeExpandTimeMajor));
+        }
+
+        ConcatDescriptor  concatDescriptor(maxTime, numberDimensions);  // maxTime = num inputs (aka. number of views).
+        for (unsigned int inputIdx = 0u; inputIdx < maxTime; ++inputIdx)
+        {
+            concatDescriptor.SetViewOriginCoord(inputIdx, dimension, inputIdx);
+            concatDescriptor.SetConcatAxis(dimension);
+        }
+
+        m_Concat.reset(new arm_compute::NEConcatenateLayer());
+        unsigned int aclAxisConcat = CalcAclAxis(concatDescriptor.GetNumDimensions(), concatDescriptor.GetConcatAxis());
+        if (!m_Data.m_Parameters.m_TimeMajor)
+        {
+            TensorInfo concatOutputTensorInfo = outputInfo;
+            concatOutputTensorInfo.SetShape(timeMajorShapeOutput);
+            BuildArmComputeTensor(concat_out, concatOutputTensorInfo);
+            armcomputetensorutils::InitialiseArmComputeTensorEmpty(concat_out);
+
+            m_Concat->configure(m_ConcatInputs, &concat_out, aclAxisConcat);
+        }
+        else
+        {
+            m_Concat->configure(m_ConcatInputs, &output, aclAxisConcat);
+        }
+
+        m_Concat->prepare();
+    }
+    // If only one LSTM batch, we do not concat and/or permute.
+    // Must ensure final output info is expanded to correct batch major dimensions.
+    else
+    {
+        if (!m_Data.m_Parameters.m_TimeMajor)
+        {
+            (&output)->info()->set_tensor_shape(BuildArmComputeTensorShape(shapeExpandBatchMajor));
+        }
+        else
+        {
+            (&output)->info()->set_tensor_shape(BuildArmComputeTensorShape(shapeExpandTimeMajor));
+        }
+    }
+
+    //
+    // Permute: only done if input/output are in batch major format.
+    //
+    if (!m_Data.m_Parameters.m_TimeMajor)
+    {
+        // Output now time major. Permute output back to batch major.
+        std::unique_ptr<arm_compute::NEPermute> layer(new arm_compute::NEPermute());
+        if (maxTime != 1)
+        {
+            layer->configure(&concat_out, &output, arm_compute::PermutationVector(0U, 2U, 1U));
+        }
+        else
+        {
+            layer->configure(m_ConcatInputs[0], &output, arm_compute::PermutationVector(0U, 2U, 1U));
+        }
+        m_Permute2.reset(layer.release());
+    }
+
+    FreeUnusedTensors();
+}
+
+void NeonUnidirectionalSequenceLstmFloatWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonUnidirectionalSequenceLstmFloatWorkload_Execute", GetGuid());
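+    // Run the stages in configure order: optional input permute, optional split into
+    // per-time-step tensors, one LSTM per time step, optional concat, optional output permute.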
+    if (m_Permute1)
+    {
+        m_Permute1->run();
+    }
+    if (m_Splitter)
+    {
+        m_Splitter->run();
+    }
+    for (uint32_t i = 0; i < m_Layers.size(); ++i)
+    {
+        m_Layers[i]->run();
+    }
+    if (m_Concat)
+    {
+        m_Concat->run();
+    }
+    if (m_Permute2)
+    {
+        m_Permute2->run();
+    }
+}
+
+arm_compute::Status
+NeonUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo& input,
+                                                    const TensorInfo& outputStateIn,
+                                                    const TensorInfo& cellStateIn,
+                                                    const TensorInfo& output,
+                                                    const Optional<TensorInfo>& hiddenStateOutput,
+                                                    const Optional<TensorInfo>& cellStateOutput,
+                                                    const UnidirectionalSequenceLstmDescriptor& descriptor,
+                                                    const LstmInputParamsInfo& paramsInfo)
+{
+    IgnoreUnused(hiddenStateOutput, cellStateOutput);
+
+    TensorShape inputLayerShape = input.GetShape();
+    TensorShape outputLayerShape = outputStateIn.GetShape();
+
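+    // The sequence input is either time major [maxTime, batchSize, inputSize] or
+    // batch major [batchSize, maxTime, inputSize]; intermediate stages are validated
+    // against the time major form.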
+    unsigned int maxTime = descriptor.m_TimeMajor ? inputLayerShape[0] : inputLayerShape[1];
+    unsigned int batchSize = descriptor.m_TimeMajor ? inputLayerShape[1] : inputLayerShape[0];
+    unsigned int inputSize = inputLayerShape[2];
+    unsigned int outputSize = outputLayerShape[2];
+
+    const TensorShape timeMajorShapeInput({maxTime, batchSize, inputSize});
+    const TensorShape timeMajorShapeOutput({maxTime, batchSize, outputSize});
+
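+    // Each stage's status starts as OK so that stages which are not exercised for this
+    // configuration (e.g. no permute when the input is already time major) do not affect
+    // the overall result.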
+    arm_compute::Status statusPermute1 = arm_compute::Status(arm_compute::ErrorCode::OK,
+                                                             "Permute1 status");
+    arm_compute::Status statusSplit = arm_compute::Status(arm_compute::ErrorCode::OK,
+                                                          "Split status");
+    arm_compute::Status statusLSTM = arm_compute::Status(arm_compute::ErrorCode::OK,
+                                                         "LSTM status");
+    arm_compute::Status statusConcat = arm_compute::Status(arm_compute::ErrorCode::OK,
+                                                           "Concat status");
+    arm_compute::Status statusPermute2 = arm_compute::Status(arm_compute::ErrorCode::OK,
+                                                             "Permute2 status");
+
+    const arm_compute::TensorInfo aclInputInfo  = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutputInfo  = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    //
+    // Permute validate
+    //
+    TensorInfo permuteOutInfo = TensorInfo(input);
+    arm_compute::TensorInfo aclPermuteOutInfo = armcomputetensorutils::BuildArmComputeTensorInfo(permuteOutInfo);
+    if (!descriptor.m_TimeMajor)
+    {
+        statusPermute1 =  arm_compute::NEPermute::validate(&aclInputInfo,
+                                                           &aclPermuteOutInfo,
+                                                           arm_compute::PermutationVector(0U, 2U, 1U));
+    }
+
+    //
+    // Split and Concat Tensors validate
+    //
+    std::vector<arm_compute::TensorInfo> splitterOutputsTensorInfos;
+    std::vector<arm_compute::TensorInfo> concatInputsTensorInfos;
+    std::vector<arm_compute::ITensorInfo*> splitterOutputsTensorInfosPtr;
+    std::vector<const arm_compute::ITensorInfo*> concatInputsTensorInfosPtr;
+    splitterOutputsTensorInfos.reserve(maxTime);
+    concatInputsTensorInfos.reserve(maxTime);
+    for (unsigned int i = 0; i < maxTime; ++i)
+    {
+        arm_compute::TensorInfo splitter_out;
+        arm_compute::TensorInfo concat_in;
+
+        auto splitterTensorInfo = TensorInfo(input);
+        auto concatTensorInfo   = TensorInfo(output);
+        splitterTensorInfo.SetShape({batchSize, inputSize});
+        concatTensorInfo.SetShape({batchSize, outputSize});
+
+        arm_compute::TensorInfo aclSplitterTensorInfo
+            = armcomputetensorutils::BuildArmComputeTensorInfo(splitterTensorInfo);
+        arm_compute::TensorInfo aclConcatTensorInfo
+            = armcomputetensorutils::BuildArmComputeTensorInfo(concatTensorInfo);
+
+        splitterOutputsTensorInfos.emplace_back(aclSplitterTensorInfo);
+        concatInputsTensorInfos.emplace_back(aclConcatTensorInfo);
+        splitterOutputsTensorInfosPtr.emplace_back(&splitterOutputsTensorInfos[i]);
+        concatInputsTensorInfosPtr.emplace_back(&concatInputsTensorInfos[i]);
+    }
+
+    //
+    // Split validate
+    //
+    unsigned int numberDimensions = 3;
+    unsigned int dimension = 0; // splitting on 0-dimension (i.e. maxTime dimension)
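+    // CalcAclAxis maps an ArmNN axis (slowest-varying dimension first) onto ACL's
+    // reversed dimension ordering.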
+    unsigned int aclAxisSplit = CalcAclAxis(numberDimensions, dimension);
+
+    if (maxTime != 1) // ACL split does not work with only one element to split.
+    {
+        if (!descriptor.m_TimeMajor)
+        {
+            statusSplit = arm_compute::NESplit::validate(&aclPermuteOutInfo,
+                                                         splitterOutputsTensorInfosPtr,
+                                                         aclAxisSplit);
+        }
+        else
+        {
+            statusSplit = arm_compute::NESplit::validate(&aclInputInfo, splitterOutputsTensorInfosPtr, aclAxisSplit);
+        }
+    }
+
+    //
+    // LSTM validate
+    //
+
+    arm_compute::LSTMParams<arm_compute::ITensorInfo> lstm_params_info;
+
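+    // NELSTMLayer also needs infos for the scratch buffer and the output/cell state outputs,
+    // so placeholder TensorInfos with the expected shapes and the input data type are built
+    // for validation.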
+    const TensorInfo scratchBuffer = TensorInfo(cellStateIn.GetShape(), input.GetDataType());
+    const TensorInfo outputStateOut = TensorInfo(outputStateIn.GetShape(), input.GetDataType());
+    const TensorInfo cellStateOut = TensorInfo(cellStateIn.GetShape(), input.GetDataType());
+
+    // The inputs and outputs
+    const arm_compute::TensorInfo aclOutputStateInInfo = BuildArmComputeTensorInfo(outputStateIn);
+    const arm_compute::TensorInfo aclCellStateInInfo = BuildArmComputeTensorInfo(cellStateIn);
+    const arm_compute::TensorInfo aclScratchBufferInfo = BuildArmComputeTensorInfo(scratchBuffer);
+    const arm_compute::TensorInfo aclOutputStateOutInfo = BuildArmComputeTensorInfo(outputStateOut);
+    const arm_compute::TensorInfo aclCellStateOutInfo = BuildArmComputeTensorInfo(cellStateOut);
+
+    // Basic parameters
+    const arm_compute::TensorInfo aclInputToForgetWeightsInfo
+                                      = BuildArmComputeTensorInfo(paramsInfo.GetInputToForgetWeights());
+    const arm_compute::TensorInfo aclInputToCellWeightsInfo
+                                      = BuildArmComputeTensorInfo(paramsInfo.GetInputToCellWeights());
+    const arm_compute::TensorInfo aclInputToOutputWeightsInfo
+                                      = BuildArmComputeTensorInfo(paramsInfo.GetInputToOutputWeights());
+    const arm_compute::TensorInfo aclRecurrentToForgetWeightsInfo
+                                      = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToForgetWeights());
+    const arm_compute::TensorInfo aclRecurrentToCellWeightsInfo
+                                      = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToCellWeights());
+    const arm_compute::TensorInfo aclRecurrentToOutputWeightsInfo
+                                      = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToOutputWeights());
+    const arm_compute::TensorInfo aclForgetGateBiasInfo
+                                      = BuildArmComputeTensorInfo(paramsInfo.GetForgetGateBias());
+    const arm_compute::TensorInfo aclCellBiasInfo
+                                      = BuildArmComputeTensorInfo(paramsInfo.GetCellBias());
+    const arm_compute::TensorInfo aclOutputGateBiasInfo
+                                      = BuildArmComputeTensorInfo(paramsInfo.GetOutputGateBias());
+
+    arm_compute::TensorInfo aclInputToInputWeightsInfo;
+    arm_compute::TensorInfo aclRecurrentToInputWeightsInfo;
+    arm_compute::TensorInfo aclCellToInputWeightsInfo;
+    arm_compute::TensorInfo aclInputGateBiasInfo;
+    arm_compute::TensorInfo aclProjectionWeightsInfo;
+    arm_compute::TensorInfo aclProjectionBiasInfo;
+    arm_compute::TensorInfo aclCellToForgetWeightsInfo;
+    arm_compute::TensorInfo aclCellToOutputWeightsInfo;
+
+    arm_compute::TensorInfo aclInputLayerNormWeightsInfo;
+    arm_compute::TensorInfo aclForgetLayerNormWeightsInfo;
+    arm_compute::TensorInfo aclCellLayerNormWeightsInfo;
+    arm_compute::TensorInfo aclOutputLayerNormWeightsInfo;
+
+    if (!descriptor.m_CifgEnabled)
+    {
+        if (descriptor.m_PeepholeEnabled)
+        {
+            aclCellToInputWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetCellToInputWeights());
+        }
+        aclInputToInputWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetInputToInputWeights());
+        aclRecurrentToInputWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToInputWeights());
+        aclInputGateBiasInfo = BuildArmComputeTensorInfo(paramsInfo.GetInputGateBias());
+
+        lstm_params_info.set_cifg_params(&aclInputToInputWeightsInfo,
+                                         &aclRecurrentToInputWeightsInfo,
+                                         descriptor.m_PeepholeEnabled ? &aclCellToInputWeightsInfo : nullptr,
+                                         &aclInputGateBiasInfo);
+    }
+
+    if (descriptor.m_ProjectionEnabled)
+    {
+        if (paramsInfo.m_ProjectionBias != nullptr)
+        {
+            aclProjectionBiasInfo = BuildArmComputeTensorInfo(paramsInfo.GetProjectionBias());
+        }
+        aclProjectionWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetProjectionWeights());
+
+        lstm_params_info.set_projection_params(&aclProjectionWeightsInfo,
+                                               paramsInfo.m_ProjectionBias ? &aclProjectionBiasInfo : nullptr);
+    }
+
+    if (descriptor.m_PeepholeEnabled)
+    {
+        aclCellToForgetWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetCellToForgetWeights());
+        aclCellToOutputWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetCellToOutputWeights());
+
+        lstm_params_info.set_peephole_params(&aclCellToForgetWeightsInfo, &aclCellToOutputWeightsInfo);
+    }
+
+    if (descriptor.m_LayerNormEnabled)
+    {
+        if (!descriptor.m_CifgEnabled)
+        {
+            aclInputLayerNormWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetInputLayerNormWeights());
+        }
+        aclForgetLayerNormWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetForgetLayerNormWeights());
+        aclCellLayerNormWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetCellLayerNormWeights());
+        aclOutputLayerNormWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetOutputLayerNormWeights());
+
+        lstm_params_info.set_layer_normalization_params(descriptor.m_CifgEnabled ? nullptr :
+                                                            &aclInputLayerNormWeightsInfo,
+                                                        &aclForgetLayerNormWeightsInfo,
+                                                        &aclCellLayerNormWeightsInfo,
+                                                        &aclOutputLayerNormWeightsInfo);
+    }
+
+    // Thresholds need to be set as negative values to be compatible with ACL
+    float cell_threshold = descriptor.m_ClippingThresCell;
+    float projection_threshold = descriptor.m_ClippingThresProj;
+
+    arm_compute::ActivationLayerInfo activationLayerInfo =
+        ConvertLstmActivationFuncToAclLayerInfo(descriptor.m_ActivationFunc);
+
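+    // Validate one NELSTMLayer per time step, mirroring the unrolling performed by the
+    // workload, and stop at the first failure.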
+    for (unsigned int i = 0; i != maxTime; ++i)
+    {
+
+        // Set LSTM input and output ITensorInfos depending on:
+        // input format (timeMajor) & number of LSTM batches (maxTime).
+        arm_compute::ITensorInfo* outputLSTM;
+        arm_compute::ITensorInfo* inputLSTM;
+
+        // If there is only one LSTM time major batch, we will not concat OR permute.
+        // Set input of LSTM to be first input ITensor.
+        // Set output of LSTM to be final output ITensor.
+        // LSTM input/output cannot be > 2 dimensions so need to resize its TensorInfo.
+        if (maxTime == 1 && descriptor.m_TimeMajor)
+        {
+            TensorShape inputShape = GetTensorShape(aclInputInfo.tensor_shape(), 1U);
+            TensorShape outputShape = GetTensorShape(aclOutputInfo.tensor_shape(), 1U);
+
+            TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
+            TensorShape outputShapeShrink({outputShape[1], outputShape[2]});
+
+            auto acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);
+            auto acl_output_shape_shrink = BuildArmComputeTensorShape(outputShapeShrink);
+
+            const_cast<arm_compute::TensorInfo*>(&aclInputInfo)->set_tensor_shape(acl_input_shape_shrink);
+            inputLSTM = const_cast<arm_compute::TensorInfo*>(&aclInputInfo);
+
+            const_cast<arm_compute::TensorInfo*>(&aclOutputInfo)->set_tensor_shape(acl_output_shape_shrink);
+            outputLSTM = const_cast<arm_compute::TensorInfo*>(&aclOutputInfo);
+        }
+        // If there is only one LSTM batch major batch, we will not concat, only permute.
+        // Set input of LSTM to be output of initial permute.
+        // Set output of LSTM to be first element of concatInputsTensorInfosPtr & use that value later in permute.
+        // LSTM output cannot be > 2 dimensions so need to resize its TensorInfo.
+        else if (maxTime == 1 && !descriptor.m_TimeMajor)
+        {
+            TensorShape inputShape = GetTensorShape(aclPermuteOutInfo.tensor_shape(), 1U);
+            TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
+            auto acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);
+            aclPermuteOutInfo.set_tensor_shape(acl_input_shape_shrink);
+            inputLSTM = &aclPermuteOutInfo;
+
+            outputLSTM = const_cast<arm_compute::ITensorInfo*>(concatInputsTensorInfosPtr[i]);
+        }
+        // Batch major AND/OR 2+ LSTM batches so will use concat AND/OR permute later on.
+        else
+        {
+            inputLSTM = splitterOutputsTensorInfosPtr[i];
+            outputLSTM = const_cast<arm_compute::ITensorInfo*>(concatInputsTensorInfosPtr[i]);
+        }
+
+        statusLSTM = arm_compute::NELSTMLayer::validate(inputLSTM,
+                                                        &aclInputToForgetWeightsInfo,
+                                                        &aclInputToCellWeightsInfo,
+                                                        &aclInputToOutputWeightsInfo,
+                                                        &aclRecurrentToForgetWeightsInfo,
+                                                        &aclRecurrentToCellWeightsInfo,
+                                                        &aclRecurrentToOutputWeightsInfo,
+                                                        &aclForgetGateBiasInfo,
+                                                        &aclCellBiasInfo,
+                                                        &aclOutputGateBiasInfo,
+                                                        &aclOutputStateInInfo,
+                                                        &aclCellStateInInfo,
+                                                        &aclScratchBufferInfo,
+                                                        &aclOutputStateOutInfo,
+                                                        &aclCellStateOutInfo,
+                                                        outputLSTM,
+                                                        lstm_params_info,
+                                                        activationLayerInfo,
+                                                        cell_threshold,
+                                                        projection_threshold);
+
+        if (statusLSTM.error_code() != arm_compute::ErrorCode::OK)
+        {
+            break;
+        }
+    }
+
+    //
+    // Concat validate
+    //
+
+    // Expand the LSTM outputs with one extra singleton dimension so they can be used as concatenation inputs.
+    TensorShape shape = GetTensorShape(concatInputsTensorInfosPtr[0]->tensor_shape(), 1U);
+    TensorShape shapeExpandTimeMajor({1, shape[0], shape[1]});
+    TensorShape shapeExpandBatchMajor({shape[0], 1, shape[1]});
+
+    TensorInfo concatOutputTensorInfo = TensorInfo(output);
+    concatOutputTensorInfo.SetShape(timeMajorShapeOutput);
+    arm_compute::TensorInfo aclConcatOutputTensorInfo = BuildArmComputeTensorInfo(concatOutputTensorInfo);
+
+    if (maxTime != 1) // ACL concat does not work with only one element to concatenate.
+    {
+        for (unsigned int i = 0; i < maxTime; ++i)
+        {
+            auto acl_shape_expand = BuildArmComputeTensorShape(shapeExpandTimeMajor);
+            concatInputsTensorInfos[i].set_tensor_shape(acl_shape_expand);
+        }
+
+        unsigned int aclAxisConcat = CalcAclAxis(numberDimensions, dimension);
+        if (!descriptor.m_TimeMajor)
+        {
+            statusConcat = arm_compute::NEConcatenateLayer::validate(concatInputsTensorInfosPtr,
+                                                                     &aclConcatOutputTensorInfo,
+                                                                     aclAxisConcat);
+        }
+        else
+        {
+            statusConcat = arm_compute::NEConcatenateLayer::validate(concatInputsTensorInfosPtr,
+                                                                     &aclOutputInfo,
+                                                                     aclAxisConcat);
+        }
+    }
+    // If only one LSTM batch, we do not concat and/or permute.
+    // Must ensure final output info is expanded to correct batch major dimensions.
+    else
+    {
+        if (!descriptor.m_TimeMajor)
+        {
+            const_cast<arm_compute::TensorInfo*>(&aclOutputInfo)->set_tensor_shape(
+                BuildArmComputeTensorShape(shapeExpandBatchMajor));
+        }
+        else
+        {
+            const_cast<arm_compute::TensorInfo*>(&aclOutputInfo)->set_tensor_shape(
+                BuildArmComputeTensorShape(shapeExpandTimeMajor));
+        }
+    }
+
+    //
+    // Permute validate
+    //
+    if (!descriptor.m_TimeMajor)
+    {
+        // Output now time major. Permute output back to batch major.
+        if (maxTime != 1)
+        {
+            statusPermute2 = arm_compute::NEPermute::validate(&aclConcatOutputTensorInfo,
+                                                              &aclOutputInfo,
+                                                              arm_compute::PermutationVector(0U, 2U, 1U));
+        }
+        else
+        {
+            statusPermute2 = arm_compute::NEPermute::validate(concatInputsTensorInfosPtr[0],
+                                                              &aclOutputInfo,
+                                                              arm_compute::PermutationVector(0U, 2U, 1U));
+        }
+    }
+
+    auto okCode = arm_compute::ErrorCode::OK;
+    if (statusPermute1.error_code() == okCode &&
+        statusSplit.error_code()    == okCode &&
+        statusLSTM.error_code()     == okCode &&
+        statusConcat.error_code()   == okCode &&
+        statusPermute2.error_code() == okCode)
+    {
+        return arm_compute::Status(arm_compute::ErrorCode::OK,
+                                   "All Unidirectional Sequence LSTM layer validate status OK.");
+    }
+    else
+    {
+        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR,
+                                   "Unidirectional Sequence LSTM layer validate status failed.");
+    }
+}
+
+void NeonUnidirectionalSequenceLstmFloatWorkload::FreeUnusedTensors()
+{
+    FreeTensorIfUnused(m_InputToInputWeightsTensor);
+    FreeTensorIfUnused(m_InputToForgetWeightsTensor);
+    FreeTensorIfUnused(m_InputToCellWeightsTensor);
+    FreeTensorIfUnused(m_InputToOutputWeightsTensor);
+    FreeTensorIfUnused(m_RecurrentToInputWeightsTensor);
+    FreeTensorIfUnused(m_RecurrentToForgetWeightsTensor);
+    FreeTensorIfUnused(m_RecurrentToCellWeightsTensor);
+    FreeTensorIfUnused(m_RecurrentToOutputWeightsTensor);
+    FreeTensorIfUnused(m_CellToInputWeightsTensor);
+    FreeTensorIfUnused(m_CellToForgetWeightsTensor);
+    FreeTensorIfUnused(m_CellToOutputWeightsTensor);
+    FreeTensorIfUnused(m_InputGateBiasTensor);
+    FreeTensorIfUnused(m_ForgetGateBiasTensor);
+    FreeTensorIfUnused(m_CellBiasTensor);
+    FreeTensorIfUnused(m_OutputGateBiasTensor);
+    FreeTensorIfUnused(m_ProjectionWeightsTensor);
+    FreeTensorIfUnused(m_ProjectionBiasTensor);
+    FreeTensorIfUnused(m_InputLayerNormWeightsTensor);
+    FreeTensorIfUnused(m_ForgetLayerNormWeightsTensor);
+    FreeTensorIfUnused(m_CellLayerNormWeightsTensor);
+    FreeTensorIfUnused(m_OutputLayerNormWeightsTensor);
+    FreeTensorIfUnused(m_ScratchBuffer);
+}
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonUnidirectionalSequenceLstmFloatWorkload.hpp b/src/backends/neon/workloads/NeonUnidirectionalSequenceLstmFloatWorkload.hpp
new file mode 100644
index 0000000..10c2ecb
--- /dev/null
+++ b/src/backends/neon/workloads/NeonUnidirectionalSequenceLstmFloatWorkload.hpp
@@ -0,0 +1,92 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Descriptors.hpp>
+#include <armnn/LstmParams.hpp>
+#include <armnn/backends/Workload.hpp>
+#include <armnn/backends/WorkloadData.hpp>
+
+#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/runtime/NEON/functions/NELSTMLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEPermute.h"
+#include "arm_compute/runtime/NEON/functions/NESplit.h"
+#include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"
+
+namespace armnn
+{
+
+class NeonUnidirectionalSequenceLstmFloatWorkload : public FloatWorkload<UnidirectionalSequenceLstmQueueDescriptor>
+{
+public:
+    NeonUnidirectionalSequenceLstmFloatWorkload(const UnidirectionalSequenceLstmQueueDescriptor& descriptor,
+                                                const WorkloadInfo& info);
+    virtual void Execute() const override;
+
+private:
+
+    //
+    // ACL layers required to fully form a Unidirectional Sequence LSTM layer.
+    //
+    mutable std::unique_ptr<arm_compute::NEPermute> m_Permute1;
+    mutable std::unique_ptr<arm_compute::IFunction> m_Splitter;
+    mutable std::vector<std::unique_ptr<arm_compute::NELSTMLayer>> m_Layers;
+    mutable std::unique_ptr<arm_compute::NEConcatenateLayer> m_Concat;
+    mutable std::unique_ptr<arm_compute::NEPermute> m_Permute2;
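+
+    // The permute, splitter and concat stages are only created when the input layout and
+    // sequence length require them; m_Layers always holds one NELSTMLayer per time step.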
+
+    //
+    // ACL LSTM arm_compute::Tensors.
+    //
+    std::unique_ptr<arm_compute::Tensor> m_InputToInputWeightsTensor;
+    std::unique_ptr<arm_compute::Tensor> m_InputToForgetWeightsTensor;
+    std::unique_ptr<arm_compute::Tensor> m_InputToCellWeightsTensor;
+    std::unique_ptr<arm_compute::Tensor> m_InputToOutputWeightsTensor;
+    std::unique_ptr<arm_compute::Tensor> m_RecurrentToInputWeightsTensor;
+    std::unique_ptr<arm_compute::Tensor> m_RecurrentToForgetWeightsTensor;
+    std::unique_ptr<arm_compute::Tensor> m_RecurrentToCellWeightsTensor;
+    std::unique_ptr<arm_compute::Tensor> m_RecurrentToOutputWeightsTensor;
+    std::unique_ptr<arm_compute::Tensor> m_CellToInputWeightsTensor;
+    std::unique_ptr<arm_compute::Tensor> m_CellToForgetWeightsTensor;
+    std::unique_ptr<arm_compute::Tensor> m_CellToOutputWeightsTensor;
+    std::unique_ptr<arm_compute::Tensor> m_InputGateBiasTensor;
+    std::unique_ptr<arm_compute::Tensor> m_ForgetGateBiasTensor;
+    std::unique_ptr<arm_compute::Tensor> m_CellBiasTensor;
+    std::unique_ptr<arm_compute::Tensor> m_OutputGateBiasTensor;
+    std::unique_ptr<arm_compute::Tensor> m_ProjectionWeightsTensor;
+    std::unique_ptr<arm_compute::Tensor> m_ProjectionBiasTensor;
+
+    std::unique_ptr<arm_compute::Tensor> m_ScratchBuffer;
+
+    std::unique_ptr<arm_compute::Tensor> m_InputLayerNormWeightsTensor;
+    std::unique_ptr<arm_compute::Tensor> m_ForgetLayerNormWeightsTensor;
+    std::unique_ptr<arm_compute::Tensor> m_CellLayerNormWeightsTensor;
+    std::unique_ptr<arm_compute::Tensor> m_OutputLayerNormWeightsTensor;
+
+    //
+    // Additional ACL arm_compute::Tensors and std::vector<arm_compute::Tensor>.
+    // Required to perform splitting, concatenation and permutations.
+    //
+    arm_compute::Tensor m_PermuteFirstOut;
+    std::vector<arm_compute::Tensor> m_SplitterOutputsTensors;
+    std::vector<arm_compute::Tensor> m_ConcatInputsTensors;
+    std::vector<arm_compute::ITensor*> m_SplitterOutputs;
+    std::vector<const arm_compute::ITensor*> m_ConcatInputs;
+    arm_compute::Tensor concat_out;
+
+    void FreeUnusedTensors();
+};
+
+arm_compute::Status
+NeonUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo& input,
+                                                    const TensorInfo& outputStateIn,
+                                                    const TensorInfo& cellStateIn,
+                                                    const TensorInfo& output,
+                                                    const Optional<TensorInfo>& hiddenStateOutput,
+                                                    const Optional<TensorInfo>& cellStateOutput,
+                                                    const UnidirectionalSequenceLstmDescriptor& descriptor,
+                                                    const LstmInputParamsInfo& paramsInfo);
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index a8134a1..4f5ba2d 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -68,3 +68,4 @@
 #include "NeonSubtractionWorkload.hpp"
 #include "NeonTransposeConvolution2dWorkload.hpp"
 #include "NeonTransposeWorkload.hpp"
+#include "NeonUnidirectionalSequenceLstmFloatWorkload.hpp"
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index d07aea1..aa8076e 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -2555,6 +2555,10 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMinNegativeAxisFloat32, ReduceMinNegativeAxisTest<DataType::Float32>)
 
 // Unidirectional Sequence Lstm
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnidirectionalSequenceLstmLayerFloat32TimeMajorSingleBatch,
+                              UnidirectionalSequenceLstmLayerFloat32TimeMajorSingleBatchTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnidirectionalSequenceLstmLayerFloat32BatchMajorSingleBatch,
+                              UnidirectionalSequenceLstmLayerFloat32BatchMajorSingleBatchTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(UnidirectionalSequenceLstmLayerFloat32,
                               UnidirectionalSequenceLstmLayerFloat32Test)
 ARMNN_AUTO_TEST_CASE_WITH_THF(UnidirectionalSequenceLstmLayerFloat32TimeMajor,