IVGCVSW-1900 : CL backend folder structure

* moving backends/ClWorkloads to backends/cl
* and moving pure CL workload-related code to
  backends/cl/workloads

Change-Id: I019a3c6b4da5e7a23074bf03fb057e63199ad129
diff --git a/src/backends/cl/workloads/ClLstmFloatWorkload.hpp b/src/backends/cl/workloads/ClLstmFloatWorkload.hpp
new file mode 100644
index 0000000..352d774
--- /dev/null
+++ b/src/backends/cl/workloads/ClLstmFloatWorkload.hpp
@@ -0,0 +1,68 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backends/Workload.hpp>
+#include <backends/WorkloadData.hpp>
+
+#include <arm_compute/runtime/CL/CLFunctions.h>
+
+namespace armnn
+{
+
+// Float (FP32/FP16) LSTM workload for the OpenCL (CL) backend, backed by
+// arm_compute::CLLSTMLayer. Configuration happens in the constructor; Execute()
+// runs the configured layer.
+// NOTE(review): adding comments here changes the hunk line count in this patch
+// (@@ -0,0 +1,68 @@) — regenerate the diff if these are kept.
+class ClLstmFloatWorkload : public FloatWorkload<LstmQueueDescriptor>
+{
+public:
+    // Configures the underlying CLLSTMLayer from the queue descriptor's
+    // tensors and the workload info.
+    ClLstmFloatWorkload(const LstmQueueDescriptor& descriptor, const WorkloadInfo& info);
+    void Execute() const override;
+
+private:
+    // mutable so the layer can be run from the const Execute() override.
+    mutable arm_compute::CLLSTMLayer m_LstmLayer;
+
+    // Staging CL tensors for the LSTM weights and biases. The pointer-valued
+    // entries below (input-gate, peephole "CellTo*", and projection tensors)
+    // correspond to optional LSTM features — presumably only allocated when
+    // the descriptor enables them; confirm against the .cpp implementation.
+    std::unique_ptr<arm_compute::CLTensor> m_InputToInputWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_InputToForgetWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_InputToCellWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_InputToOutputWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_RecurrentToInputWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_RecurrentToForgetWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_RecurrentToCellWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_RecurrentToOutputWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_CellToInputWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_CellToForgetWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_CellToOutputWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_InputGateBiasTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_ForgetGateBiasTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_CellBiasTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_OutputGateBiasTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_ProjectionWeightsTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_ProjectionBiasTensor;
+
+    // Intermediate scratch storage required by CLLSTMLayer.
+    std::unique_ptr<arm_compute::CLTensor> m_ScratchBuffer;
+
+    // Releases staging tensors once they are no longer needed — presumably
+    // called after layer configuration; see the .cpp for the exact point.
+    void FreeUnusedTensors();
+};
+
+// Asks the Arm Compute Library whether CLLSTMLayer supports an LSTM with the
+// given tensor shapes/types and descriptor settings, without creating a
+// workload. Returns the library's validation status.
+// Reference parameters are mandatory tensors; pointer parameters are optional
+// (pass nullptr when the corresponding LSTM feature — CIFG input gate,
+// peephole, or projection — is not used).
+// NOTE(review): adding comments here changes the hunk line count in this patch
+// (@@ -0,0 +1,68 @@) — regenerate the diff if these are kept.
+arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo& input, const TensorInfo& outputStateIn,
+                                                const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
+                                                const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
+                                                const TensorInfo& output, const LstmDescriptor &descriptor,
+                                                const TensorInfo& inputToForgetWeights,
+                                                const TensorInfo& inputToCellWeights,
+                                                const TensorInfo& inputToOutputWeights,
+                                                const TensorInfo& recurrentToForgetWeights,
+                                                const TensorInfo& recurrentToCellWeights,
+                                                const TensorInfo& recurrentToOutputWeights,
+                                                const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
+                                                const TensorInfo& outputGateBias,
+                                                const TensorInfo* inputToInputWeights,
+                                                const TensorInfo* recurrentToInputWeights,
+                                                const TensorInfo* cellToInputWeights,
+                                                const TensorInfo* inputGateBias,
+                                                const TensorInfo* projectionWeights,
+                                                const TensorInfo* projectionBias,
+                                                const TensorInfo* cellToForgetWeights,
+                                                const TensorInfo* cellToOutputWeights);
+} //namespace armnn