IVGCVSW-3342 Add CL backend support for Quantized_LSTM (16bit cell state)

!android-nn-driver:1685

Signed-off-by: Ferran Balaguer <ferran.balaguer@arm.com>
Signed-off-by: Matthew Bentham <matthew.bentham@arm.com>
Change-Id: I17278562f72d4b77e22c3af25bf7199b9150a765
diff --git a/src/backends/cl/workloads/ClQuantizedLstmWorkload.hpp b/src/backends/cl/workloads/ClQuantizedLstmWorkload.hpp
new file mode 100644
index 0000000..c7d8375
--- /dev/null
+++ b/src/backends/cl/workloads/ClQuantizedLstmWorkload.hpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+#include <arm_compute/runtime/CL/CLFunctions.h>
+
+namespace armnn
+{
+
+arm_compute::Status ClQuantizedLstmWorkloadValidate(const TensorInfo& input, const TensorInfo& previousCellStateIn,
+                                                    const TensorInfo& previousOutputIn, const TensorInfo& cellStateOut,
+                                                    const TensorInfo& output,
+                                                    const QuantizedLstmInputParamsInfo& paramsInfo);
+
+class ClQuantizedLstmWorkload : public BaseWorkload<QuantizedLstmQueueDescriptor>
+{
+public:
+    ClQuantizedLstmWorkload(const QuantizedLstmQueueDescriptor& descriptor, const WorkloadInfo& info);
+    void Execute() const override;
+
+private:
+    mutable arm_compute::CLLSTMLayerQuantized m_QuantizedLstmLayer;
+
+    std::unique_ptr<arm_compute::CLTensor> m_InputToInputWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_InputToForgetWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_InputToCellWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_InputToOutputWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_RecurrentToInputWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_RecurrentToForgetWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_RecurrentToCellWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_RecurrentToOutputWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_InputGateBiasTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_ForgetGateBiasTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_CellBiasTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_OutputGateBiasTensor;
+
+    void FreeUnusedTensors();
+};
+
+} //namespace armnn
+
+
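
For reference, a minimal sketch (not part of this patch) of how the new ClQuantizedLstmWorkloadValidate entry point might be queried before creating the workload. The include paths, the DataType spellings (QuantisedAsymm8 / QuantisedSymm16), and the pointer members of QuantizedLstmInputParamsInfo are assumptions tied to the Arm NN release this change targets and may differ in later versions; the quantization parameters follow the NNAPI QUANTIZED_16BIT_LSTM convention of 8-bit asymmetric activations and a 16-bit symmetric cell state.

// Hypothetical standalone check, not part of this change.
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <armnn/QuantizedLstmParams.hpp>

#include <cl/workloads/ClQuantizedLstmWorkload.hpp>

bool IsQuantizedLstmSupportedOnCl()
{
    using namespace armnn;

    constexpr unsigned int numBatches = 2;
    constexpr unsigned int inputSize  = 2;
    constexpr unsigned int outputSize = 4;

    const DataType u8  = DataType::QuantisedAsymm8;   // 8-bit asymmetric activations
    const DataType s16 = DataType::QuantisedSymm16;   // 16-bit symmetric cell state
    const DataType s32 = DataType::Signed32;          // 32-bit biases

    // Activations use scale 1/128, offset 128; the cell state uses scale 2^-11, offset 0.
    TensorInfo input          ({numBatches, inputSize},  u8,  1.0f / 128.0f, 128);
    TensorInfo prevCellStateIn({numBatches, outputSize}, s16, 1.0f / 2048.0f, 0);
    TensorInfo prevOutputIn   ({numBatches, outputSize}, u8,  1.0f / 128.0f, 128);
    TensorInfo cellStateOut   ({numBatches, outputSize}, s16, 1.0f / 2048.0f, 0);
    TensorInfo output         ({numBatches, outputSize}, u8,  1.0f / 128.0f, 128);

    const float weightsScale = 0.01f;
    TensorInfo inputWeights    ({outputSize, inputSize},  u8, weightsScale, 0);
    TensorInfo recurrentWeights({outputSize, outputSize}, u8, weightsScale, 0);
    TensorInfo bias            ({outputSize}, s32, weightsScale * input.GetQuantizationScale(), 0);

    // One TensorInfo per gate; the same dummy infos are reused here for brevity.
    QuantizedLstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToInputWeights      = &inputWeights;
    paramsInfo.m_InputToForgetWeights     = &inputWeights;
    paramsInfo.m_InputToCellWeights       = &inputWeights;
    paramsInfo.m_InputToOutputWeights     = &inputWeights;
    paramsInfo.m_RecurrentToInputWeights  = &recurrentWeights;
    paramsInfo.m_RecurrentToForgetWeights = &recurrentWeights;
    paramsInfo.m_RecurrentToCellWeights   = &recurrentWeights;
    paramsInfo.m_RecurrentToOutputWeights = &recurrentWeights;
    paramsInfo.m_InputGateBias            = &bias;
    paramsInfo.m_ForgetGateBias           = &bias;
    paramsInfo.m_CellBias                 = &bias;
    paramsInfo.m_OutputGateBias           = &bias;

    arm_compute::Status aclStatus = ClQuantizedLstmWorkloadValidate(
        input, prevCellStateIn, prevOutputIn, cellStateOut, output, paramsInfo);

    return aclStatus.error_code() == arm_compute::ErrorCode::OK;
}

In-tree, the same validate call would typically be forwarded from the CL backend's layer support so that unsupported shapes or data types are rejected before workload construction.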