IVGCVSW-4549 Add front end for new QLSTM layer

* Added new layer QLstm (Android R HAL 1.3)
* Made necessary updates to APIs
* Added unit tests
* This layer is functionally equivalent to the
  original unquantized LSTM layer with some
  additional quantization features added. Due
  to this, the original LstmParams are used for
  this layer.

Signed-off-by: James Conroy <james.conroy@arm.com>
Change-Id: I5b7f2d2fb6e17e81573b41a31bc55f49ae79608f
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index d688257..2fe38fc 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -58,6 +58,7 @@
         case LayerType::PreCompiled: return "PreCompiled";
         case LayerType::Prelu: return "Prelu";
         case LayerType::Quantize:  return "Quantize";
+        case LayerType::QLstm: return "QLstm";
         case LayerType::QuantizedLstm: return "QuantizedLstm";
         case LayerType::Reshape: return "Reshape";
         case LayerType::Resize: return "Resize";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 8dd9a9e..ee4a710 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -58,6 +58,7 @@
     PreCompiled,
     Prelu,
     Quantize,
+    QLstm,
     QuantizedLstm,
     Reshape,
     Resize,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 3c244b0..73e54b3 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -483,6 +483,23 @@
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsQuantizeSupported, input, output);
 }
 
+bool IsQLstmSupported(const BackendId& backend,
+                      const TensorInfo& input,
+                      const TensorInfo& previousOutputIn,
+                      const TensorInfo& previousCellStateIn,
+                      const TensorInfo& outputStateOut,
+                      const TensorInfo& cellStateOut,
+                      const TensorInfo& output,
+                      const QLstmDescriptor& descriptor,
+                      const LstmInputParamsInfo& paramsInfo,
+                      char* reasonIfUnsupported,
+                      size_t reasonIfUnsupportedMaxLength)
+
+{
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsQLstmSupported, input, previousOutputIn, previousCellStateIn,
+                               outputStateOut, cellStateOut, output, descriptor, paramsInfo);
+}
+
 bool IsQuantizedLstmSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& previousCellStateIn,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 4159f48..2054413 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -50,6 +50,7 @@
 #include "layers/PreCompiledLayer.hpp"
 #include "layers/PreluLayer.hpp"
 #include "layers/QuantizeLayer.hpp"
+#include "layers/QLstmLayer.hpp"
 #include "layers/QuantizedLstmLayer.hpp"
 #include "layers/ReshapeLayer.hpp"
 #include "layers/ResizeLayer.hpp"
@@ -137,6 +138,7 @@
 DECLARE_LAYER(PreCompiled)
 DECLARE_LAYER(Prelu)
 DECLARE_LAYER(Quantize)
+DECLARE_LAYER(QLstm)
 DECLARE_LAYER(QuantizedLstm)
 DECLARE_LAYER(Reshape)
 DECLARE_LAYER(Resize)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 9eef7b2..7a6fa8f 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1670,6 +1670,147 @@
     return layer;
 }
 
+IConnectableLayer* Network::AddQLstmLayer(const QLstmDescriptor&  descriptor,
+                                          const LstmInputParams& params,
+                                          const char* name)
+{
+    const auto layer = m_Graph->AddLayer<QLstmLayer>(descriptor, name);
+
+    // QLstm Basic Parameters
+    layer->m_BasicParameters.m_InputToForgetWeights =
+            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
+    layer->m_BasicParameters.m_InputToCellWeights =
+            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
+    layer->m_BasicParameters.m_InputToOutputWeights =
+            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
+    layer->m_BasicParameters.m_RecurrentToForgetWeights =
+            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
+    layer->m_BasicParameters.m_RecurrentToCellWeights =
+            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
+    layer->m_BasicParameters.m_RecurrentToOutputWeights =
+            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
+    layer->m_BasicParameters.m_ForgetGateBias =
+            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
+    layer->m_BasicParameters.m_CellBias =
+            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
+    layer->m_BasicParameters.m_OutputGateBias =
+            std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));
+
+    // QLstm Cifg parameters
+    if(!descriptor.m_CifgEnabled)
+    {
+        if(params.m_InputToInputWeights == nullptr)
+        {
+            throw InvalidArgumentException("AddQLstmLayer: Input To Input Weights cannot be NULL");
+        }
+
+        if(params.m_RecurrentToInputWeights == nullptr)
+        {
+            throw InvalidArgumentException(
+                    "AddQLstmLayer: Recurrent To Input Weights cannot be NULL");
+        }
+
+        if(params.m_InputGateBias == nullptr)
+        {
+            throw InvalidArgumentException("AddQLstmLayer: Input Gate Bias cannot be NULL");
+        }
+
+        layer->m_CifgParameters.m_InputToInputWeights =
+                std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
+        layer->m_CifgParameters.m_RecurrentToInputWeights =
+                std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
+        layer->m_CifgParameters.m_InputGateBias =
+                std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
+    }
+
+    // QLstm Projection parameters
+    if(descriptor.m_ProjectionEnabled)
+    {
+        if(params.m_ProjectionWeights == nullptr)
+        {
+            throw InvalidArgumentException("AddQLstmLayer: Projection Weights cannot be NULL");
+        }
+
+        if(params.m_ProjectionBias == nullptr)
+        {
+            throw InvalidArgumentException("AddQLstmLayer: Projection Biases cannot be NULL");
+        }
+
+        layer->m_ProjectionParameters.m_ProjectionWeights =
+                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
+        layer->m_ProjectionParameters.m_ProjectionBias =
+                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
+    }
+
+    // QLstm Peephole params
+    if(descriptor.m_PeepholeEnabled)
+    {
+        if(params.m_CellToForgetWeights == nullptr)
+        {
+            throw InvalidArgumentException("AddQLstmLayer: Cell To Forget Weights cannot be NULL");
+        }
+
+        if(params.m_CellToOutputWeights == nullptr)
+        {
+            throw InvalidArgumentException("AddQLstmLayer: Cell To Output Weights cannot be NULL");
+        }
+
+        if(!descriptor.m_CifgEnabled)
+        {
+            if(params.m_CellToInputWeights == nullptr)
+            {
+                throw InvalidArgumentException("AddQLstmLayer: Cell To Input Weights cannot be NULL");
+            }
+
+            layer->m_PeepholeParameters.m_CellToInputWeights =
+                    std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
+        }
+
+        layer->m_PeepholeParameters.m_CellToForgetWeights =
+                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
+        layer->m_PeepholeParameters.m_CellToOutputWeights =
+                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
+    }
+
+    // QLstm Layer Normalization params
+    if(descriptor.m_LayerNormEnabled)
+    {
+        if(params.m_ForgetLayerNormWeights == nullptr)
+        {
+            throw InvalidArgumentException("AddQLstmLayer: Forget layer normalization weights cannot be NULL");
+        }
+
+        if(params.m_CellLayerNormWeights == nullptr)
+        {
+            throw InvalidArgumentException("AddQLstmLayer: Cell layer normalization weights cannot be NULL");
+        }
+
+        if(params.m_OutputLayerNormWeights == nullptr)
+        {
+            throw InvalidArgumentException("AddQLstmLayer: Output layer normalization weights cannot be NULL");
+        }
+
+        if(!descriptor.m_CifgEnabled)
+        {
+            if(params.m_InputLayerNormWeights == nullptr)
+            {
+                throw InvalidArgumentException("AddQLstmLayer: Input layer normalization weights cannot be NULL");
+            }
+
+            layer->m_LayerNormParameters.m_InputLayerNormWeights =
+                    std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
+        }
+
+        layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
+                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
+        layer->m_LayerNormParameters.m_CellLayerNormWeights =
+                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
+        layer->m_LayerNormParameters.m_OutputLayerNormWeights =
+                std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
+    }
+    return layer;
+}
+
 void Network::Accept(ILayerVisitor& visitor) const
 {
     for (auto layer : GetGraph())
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 17eacba..df4a35f 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -234,6 +234,10 @@
     IConnectableLayer* AddStandInLayer(const StandInDescriptor& descriptor,
                                        const char* name = nullptr) override;
 
+    IConnectableLayer* AddQLstmLayer(const QLstmDescriptor& descriptor,
+                                     const LstmInputParams& params,
+                                     const char* name = nullptr) override;
+
     IConnectableLayer* AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
                                              const char* name = nullptr) override;
 
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
new file mode 100644
index 0000000..393a702
--- /dev/null
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -0,0 +1,512 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "QLstmLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/LstmParams.hpp>
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+QLstmLayer::QLstmLayer(const QLstmDescriptor& param, const char* name)
+        : LayerWithParameters(3, 3, LayerType::QLstm, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> QLstmLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+    QLstmQueueDescriptor descriptor;
+
+    // Basic parameters
+    descriptor.m_InputToForgetWeights = m_BasicParameters.m_InputToForgetWeights.get();
+    descriptor.m_InputToCellWeights = m_BasicParameters.m_InputToCellWeights.get();
+    descriptor.m_InputToOutputWeights = m_BasicParameters.m_InputToOutputWeights.get();
+    descriptor.m_RecurrentToForgetWeights = m_BasicParameters.m_RecurrentToForgetWeights.get();
+    descriptor.m_RecurrentToCellWeights = m_BasicParameters.m_RecurrentToCellWeights.get();
+    descriptor.m_RecurrentToOutputWeights = m_BasicParameters.m_RecurrentToOutputWeights.get();
+    descriptor.m_ForgetGateBias = m_BasicParameters.m_ForgetGateBias.get();
+    descriptor.m_CellBias = m_BasicParameters.m_CellBias.get();
+    descriptor.m_OutputGateBias = m_BasicParameters.m_OutputGateBias.get();
+
+    // CIFG parameters
+    if (!m_Param.m_CifgEnabled)
+    {
+        descriptor.m_InputToInputWeights     = m_CifgParameters.m_InputToInputWeights.get();
+        descriptor.m_RecurrentToInputWeights = m_CifgParameters.m_RecurrentToInputWeights.get();
+        descriptor.m_InputGateBias           = m_CifgParameters.m_InputGateBias.get();
+    }
+
+    // Projection parameters
+    if (m_Param.m_ProjectionEnabled)
+    {
+        descriptor.m_ProjectionWeights = m_ProjectionParameters.m_ProjectionWeights.get();
+        descriptor.m_ProjectionBias    = m_ProjectionParameters.m_ProjectionBias.get();
+    }
+
+    // Peephole parameters
+    if (m_Param.m_PeepholeEnabled)
+    {
+        if (!m_Param.m_CifgEnabled)
+        {
+            descriptor.m_CellToInputWeights = m_PeepholeParameters.m_CellToInputWeights.get();
+        }
+
+        descriptor.m_CellToForgetWeights = m_PeepholeParameters.m_CellToForgetWeights.get();
+        descriptor.m_CellToOutputWeights = m_PeepholeParameters.m_CellToOutputWeights.get();
+    }
+
+    // Layer normalisation parameters
+    if(m_Param.m_LayerNormEnabled)
+    {
+        if (!m_Param.m_CifgEnabled)
+        {
+            descriptor.m_InputLayerNormWeights = m_LayerNormParameters.m_InputLayerNormWeights.get();
+        }
+        descriptor.m_ForgetLayerNormWeights = m_LayerNormParameters.m_ForgetLayerNormWeights.get();
+        descriptor.m_CellLayerNormWeights   = m_LayerNormParameters.m_CellLayerNormWeights.get();
+        descriptor.m_OutputLayerNormWeights = m_LayerNormParameters.m_OutputLayerNormWeights.get();
+    }
+
+    return factory.CreateQLstm(descriptor, PrepInfoAndDesc(descriptor));
+}
+
+QLstmLayer* QLstmLayer::Clone(Graph& graph) const
+{
+    auto layer = CloneBase<QLstmLayer>(graph, m_Param, GetName());
+
+    layer->m_BasicParameters.m_InputToForgetWeights = m_BasicParameters.m_InputToForgetWeights ?
+            std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_InputToForgetWeights) : nullptr;
+    layer->m_BasicParameters.m_InputToCellWeights = m_BasicParameters.m_InputToCellWeights ?
+            std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_InputToCellWeights) : nullptr;
+    layer->m_BasicParameters.m_InputToOutputWeights = m_BasicParameters.m_InputToOutputWeights ?
+            std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_InputToOutputWeights) : nullptr;
+    layer->m_BasicParameters.m_RecurrentToForgetWeights = m_BasicParameters.m_RecurrentToForgetWeights ?
+            std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_RecurrentToForgetWeights) : nullptr;
+    layer->m_BasicParameters.m_RecurrentToCellWeights = m_BasicParameters.m_RecurrentToCellWeights ?
+            std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_RecurrentToCellWeights) : nullptr;
+    layer->m_BasicParameters.m_RecurrentToOutputWeights = m_BasicParameters.m_RecurrentToOutputWeights ?
+            std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_RecurrentToOutputWeights) : nullptr;
+    layer->m_BasicParameters.m_ForgetGateBias = m_BasicParameters.m_ForgetGateBias ?
+            std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_ForgetGateBias) : nullptr;
+    layer->m_BasicParameters.m_CellBias = m_BasicParameters.m_CellBias ?
+            std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_CellBias) : nullptr;
+    layer->m_BasicParameters.m_OutputGateBias = m_BasicParameters.m_OutputGateBias ?
+            std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_OutputGateBias) : nullptr;
+
+    if (!m_Param.m_CifgEnabled)
+    {
+        layer->m_CifgParameters.m_InputToInputWeights = m_CifgParameters.m_InputToInputWeights ?
+                std::make_unique<ScopedCpuTensorHandle>(*m_CifgParameters.m_InputToInputWeights) : nullptr;
+        layer->m_CifgParameters.m_RecurrentToInputWeights = m_CifgParameters.m_RecurrentToInputWeights ?
+                std::make_unique<ScopedCpuTensorHandle>(*m_CifgParameters.m_RecurrentToInputWeights) : nullptr;
+        layer->m_CifgParameters.m_InputGateBias = m_CifgParameters.m_InputGateBias ?
+                std::make_unique<ScopedCpuTensorHandle>(*m_CifgParameters.m_InputGateBias) : nullptr;
+    }
+
+    if (m_Param.m_ProjectionEnabled)
+    {
+        layer->m_ProjectionParameters.m_ProjectionWeights = m_ProjectionParameters.m_ProjectionWeights ?
+                std::make_unique<ScopedCpuTensorHandle>(*m_ProjectionParameters.m_ProjectionWeights) : nullptr;
+        layer->m_ProjectionParameters.m_ProjectionBias = m_ProjectionParameters.m_ProjectionBias ?
+                std::make_unique<ScopedCpuTensorHandle>(*m_ProjectionParameters.m_ProjectionBias) : nullptr;
+    }
+
+    if (m_Param.m_PeepholeEnabled)
+    {
+        if (!m_Param.m_CifgEnabled) {
+            layer->m_PeepholeParameters.m_CellToInputWeights = m_PeepholeParameters.m_CellToInputWeights ?
+                    std::make_unique<ScopedCpuTensorHandle>(*m_PeepholeParameters.m_CellToInputWeights) : nullptr;
+        }
+
+        layer->m_PeepholeParameters.m_CellToForgetWeights = m_PeepholeParameters.m_CellToForgetWeights ?
+                std::make_unique<ScopedCpuTensorHandle>(*m_PeepholeParameters.m_CellToForgetWeights) : nullptr;
+        layer->m_PeepholeParameters.m_CellToOutputWeights = m_PeepholeParameters.m_CellToOutputWeights ?
+                std::make_unique<ScopedCpuTensorHandle>(*m_PeepholeParameters.m_CellToOutputWeights) : nullptr;
+    }
+
+    if (m_Param.m_LayerNormEnabled)
+    {
+        if (!m_Param.m_CifgEnabled) {
+            layer->m_LayerNormParameters.m_InputLayerNormWeights = m_LayerNormParameters.m_InputLayerNormWeights ?
+                    std::make_unique<ScopedCpuTensorHandle>(*m_LayerNormParameters.m_InputLayerNormWeights) : nullptr;
+        }
+
+        layer->m_LayerNormParameters.m_ForgetLayerNormWeights = m_LayerNormParameters.m_ForgetLayerNormWeights ?
+                std::make_unique<ScopedCpuTensorHandle>(*m_LayerNormParameters.m_ForgetLayerNormWeights) : nullptr;
+        layer->m_LayerNormParameters.m_CellLayerNormWeights = m_LayerNormParameters.m_CellLayerNormWeights ?
+                std::make_unique<ScopedCpuTensorHandle>(*m_LayerNormParameters.m_CellLayerNormWeights) : nullptr;
+        layer->m_LayerNormParameters.m_OutputLayerNormWeights = m_LayerNormParameters.m_OutputLayerNormWeights ?
+                std::make_unique<ScopedCpuTensorHandle>(*m_LayerNormParameters.m_OutputLayerNormWeights) : nullptr;
+    }
+
+    return std::move(layer);
+}
+
+std::vector<TensorShape> QLstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    BOOST_ASSERT(inputShapes.size() == 3);
+
+    // Get input values for validation
+    unsigned int batchSize = inputShapes[0][0];
+    unsigned int outputSize = inputShapes[1][1];
+    unsigned int numUnits = inputShapes[2][1];
+
+    std::vector<TensorShape> outShapes;
+    outShapes.push_back(TensorShape({ batchSize, outputSize })); // outputStateOut
+    outShapes.push_back(TensorShape({ batchSize, numUnits })); // cellStateOut
+    outShapes.push_back(TensorShape({ batchSize, outputSize })); // output
+
+    return outShapes;
+}
+
+void QLstmLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(3, CHECK_LOCATION());
+
+    auto inferredShapes = InferOutputShapes(
+    {
+        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), // input
+        GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape(), // previousOutputIn
+        GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape() //  previousCellStateIn
+    });
+
+    BOOST_ASSERT(inferredShapes.size() == 3);
+
+    // Check if the weights are nullptr for basic params
+    BOOST_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
+            "QLstmLayer: m_BasicParameters.m_InputToForgetWeights should not be null.");
+    BOOST_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
+            "QLstmLayer: m_BasicParameters.m_InputToCellWeights should not be null.");
+    BOOST_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
+            "QLstmLayer: m_BasicParameters.m_InputToOutputWeights should not be null.");
+    BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
+            "QLstmLayer: m_BasicParameters.m_RecurrentToForgetWeights should not be null.");
+    BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
+            "QLstmLayer: m_BasicParameters.m_RecurrentToCellWeights should not be null.");
+    BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
+            "QLstmLayer: m_BasicParameters.m_RecurrentToOutputWeights should not be null.");
+    BOOST_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
+            "QLstmLayer: m_BasicParameters.m_ForgetGateBias should not be null.");
+    BOOST_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
+            "QLstmLayer: m_BasicParameters.m_CellBias should not be null.");
+    BOOST_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
+            "QLstmLayer: m_BasicParameters.m_OutputGateBias should not be null.");
+
+    if (!m_Param.m_CifgEnabled)
+    {
+        BOOST_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
+                "QLstmLayer: m_CifgParameters.m_InputToInputWeights should not be null.");
+        BOOST_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
+                "QLstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not be null.");
+        BOOST_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
+                "QLstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
+
+        ConditionalThrowIfNotEqual<LayerValidationException>(
+                "QLstmLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+                GetOutputSlot(0).GetTensorInfo().GetShape(),
+                inferredShapes[0]);
+    }
+    else
+    {
+        BOOST_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
+                "QLstmLayer: m_CifgParameters.m_InputToInputWeights should not have a value when CIFG is enabled.");
+        BOOST_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
+                "QLstmLayer: m_CifgParameters.m_RecurrentToInputWeights should "
+                             "not have a value when CIFG is enabled.");
+        BOOST_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
+                "QLstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled.");
+
+        ConditionalThrowIfNotEqual<LayerValidationException>(
+                "QLstmLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+                GetOutputSlot(0).GetTensorInfo().GetShape(),
+                inferredShapes[0]);
+    }
+
+    if (m_Param.m_ProjectionEnabled)
+    {
+        BOOST_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
+                         "QLstmLayer: m_ProjectionParameters.m_ProjectionWeights should not be null.");
+        BOOST_ASSERT_MSG(m_ProjectionParameters.m_ProjectionBias != nullptr,
+                         "QLstmLayer: m_ProjectionParameters.m_ProjectionBias should not be null.");
+    }
+
+    if (m_Param.m_PeepholeEnabled)
+    {
+        if (!m_Param.m_CifgEnabled) {
+            BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
+                    "QLstmLayer: m_PeepholeParameters.m_CellToInputWeights should not be null "
+                    "when Peephole is enabled and CIFG is disabled.");
+        }
+
+        BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
+                         "QLstmLayer: m_PeepholeParameters.m_CellToForgetWeights should not be null.");
+        BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
+                         "QLstmLayer: m_PeepholeParameters.m_CellToOutputWeights should not be null.");
+    }
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+            "QLstmLayer: TensorShape set on OutputSlot[1] does not match the inferred shape.",
+            GetOutputSlot(1).GetTensorInfo().GetShape(),
+            inferredShapes[1]);
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+            "QLstmLayer: TensorShape set on OutputSlot[2] does not match the inferred shape.",
+            GetOutputSlot(2).GetTensorInfo().GetShape(),
+            inferredShapes[2]);
+
+    if (m_Param.m_LayerNormEnabled)
+    {
+        if(!m_Param.m_CifgEnabled)
+        {
+            BOOST_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
+                             "QLstmLayer: m_LayerNormParameters.m_InputLayerNormWeights should not be null.");
+        }
+        BOOST_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
+                         "QLstmLayer: m_LayerNormParameters.m_ForgetLayerNormWeights should not be null.");
+        BOOST_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
+                         "QLstmLayer: m_LayerNormParameters.m_CellLayerNormWeights should not be null.");
+        BOOST_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
+                         "QLstmLayer: m_LayerNormParameters.m_OutputLayerNormWeights should not be null.");
+    }
+}
+
+Layer::ConstantTensors QLstmLayer::GetConstantTensorsByRef()
+{
+    return {m_BasicParameters.m_InputToForgetWeights,
+            m_BasicParameters.m_InputToCellWeights,
+            m_BasicParameters.m_InputToOutputWeights,
+            m_BasicParameters.m_RecurrentToForgetWeights,
+            m_BasicParameters.m_RecurrentToCellWeights,
+            m_BasicParameters.m_RecurrentToOutputWeights,
+            m_BasicParameters.m_ForgetGateBias,
+            m_BasicParameters.m_CellBias,
+            m_BasicParameters.m_OutputGateBias,
+
+            // Cifg parameters
+            m_CifgParameters.m_InputToInputWeights,
+            m_CifgParameters.m_RecurrentToInputWeights,
+            m_CifgParameters.m_InputGateBias,
+
+            // Projection parameters
+            m_ProjectionParameters.m_ProjectionWeights,
+            m_ProjectionParameters.m_ProjectionBias,
+
+            // Peephole parameters
+            m_PeepholeParameters.m_CellToInputWeights,
+            m_PeepholeParameters.m_CellToForgetWeights,
+            m_PeepholeParameters.m_CellToOutputWeights,
+
+            // Layer normalisation parameters
+            m_LayerNormParameters.m_InputLayerNormWeights,
+            m_LayerNormParameters.m_ForgetLayerNormWeights,
+            m_LayerNormParameters.m_CellLayerNormWeights,
+            m_LayerNormParameters.m_OutputLayerNormWeights};
+}
+
+void QLstmLayer::Accept(ILayerVisitor& visitor) const
+{
+    LstmInputParams inputParams;
+
+    ConstTensor inputToInputWeightsTensor;
+    if (m_CifgParameters.m_InputToInputWeights != nullptr)
+    {
+        ConstTensor inputToInputWeightsTensorCopy(m_CifgParameters.m_InputToInputWeights->GetTensorInfo(),
+                                                  m_CifgParameters.m_InputToInputWeights->Map(true));
+        inputToInputWeightsTensor = inputToInputWeightsTensorCopy;
+        inputParams.m_InputToInputWeights = &inputToInputWeightsTensor;
+    }
+
+    ConstTensor inputToForgetWeightsTensor;
+    if (m_BasicParameters.m_InputToForgetWeights != nullptr)
+    {
+        ConstTensor inputToForgetWeightsTensorCopy(m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(),
+                                                   m_BasicParameters.m_InputToForgetWeights->Map(true));
+        inputToForgetWeightsTensor = inputToForgetWeightsTensorCopy;
+        inputParams.m_InputToForgetWeights = &inputToForgetWeightsTensor;
+    }
+
+    ConstTensor inputToCellWeightsTensor;
+    if (m_BasicParameters.m_InputToCellWeights != nullptr)
+    {
+        ConstTensor inputToCellWeightsTensorCopy(m_BasicParameters.m_InputToCellWeights->GetTensorInfo(),
+                                                 m_BasicParameters.m_InputToCellWeights->Map(true));
+        inputToCellWeightsTensor = inputToCellWeightsTensorCopy;
+        inputParams.m_InputToCellWeights = &inputToCellWeightsTensor;
+    }
+
+    ConstTensor inputToOutputWeightsTensor;
+    if (m_BasicParameters.m_InputToOutputWeights != nullptr)
+    {
+        ConstTensor inputToOutputWeightsTensorCopy(m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(),
+                                                   m_BasicParameters.m_InputToOutputWeights->Map(true));
+        inputToOutputWeightsTensor = inputToOutputWeightsTensorCopy;
+        inputParams.m_InputToOutputWeights = &inputToOutputWeightsTensor;
+    }
+
+    ConstTensor recurrentToInputWeightsTensor;
+    if (m_CifgParameters.m_RecurrentToInputWeights != nullptr)
+    {
+        ConstTensor recurrentToInputWeightsTensorCopy(
+                m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(),
+                m_CifgParameters.m_RecurrentToInputWeights->Map(true));
+        recurrentToInputWeightsTensor = recurrentToInputWeightsTensorCopy;
+        inputParams.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
+    }
+
+    ConstTensor recurrentToForgetWeightsTensor;
+    if (m_BasicParameters.m_RecurrentToForgetWeights != nullptr)
+    {
+        ConstTensor recurrentToForgetWeightsTensorCopy(
+                m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(),
+                m_BasicParameters.m_RecurrentToForgetWeights->Map(true));
+        recurrentToForgetWeightsTensor = recurrentToForgetWeightsTensorCopy;
+        inputParams.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
+    }
+
+    ConstTensor recurrentToCellWeightsTensor;
+    if (m_BasicParameters.m_RecurrentToCellWeights != nullptr)
+    {
+        ConstTensor recurrentToCellWeightsTensorCopy(
+                m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(),
+                m_BasicParameters.m_RecurrentToCellWeights->Map(true));
+        recurrentToCellWeightsTensor = recurrentToCellWeightsTensorCopy;
+        inputParams.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
+    }
+
+    ConstTensor recurrentToOutputWeightsTensor;
+    if (m_BasicParameters.m_RecurrentToOutputWeights != nullptr)
+    {
+        ConstTensor recurrentToOutputWeightsTensorCopy(
+                m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(),
+                m_BasicParameters.m_RecurrentToOutputWeights->Map(true));
+        recurrentToOutputWeightsTensor = recurrentToOutputWeightsTensorCopy;
+        inputParams.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
+    }
+
+    ConstTensor cellToInputWeightsTensor;
+    if (m_PeepholeParameters.m_CellToInputWeights != nullptr)
+    {
+        ConstTensor cellToInputWeightsTensorCopy(m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
+                                                 m_PeepholeParameters.m_CellToInputWeights->Map(true));
+        cellToInputWeightsTensor = cellToInputWeightsTensorCopy;
+        inputParams.m_CellToInputWeights = &cellToInputWeightsTensor;
+    }
+
+    ConstTensor cellToForgetWeightsTensor;
+    if (m_PeepholeParameters.m_CellToForgetWeights != nullptr)
+    {
+        ConstTensor cellToForgetWeightsTensorCopy(m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(),
+                                                  m_PeepholeParameters.m_CellToForgetWeights->Map(true));
+        cellToForgetWeightsTensor = cellToForgetWeightsTensorCopy;
+        inputParams.m_CellToForgetWeights = &cellToForgetWeightsTensor;
+    }
+
+    ConstTensor cellToOutputWeightsTensor;
+    if (m_PeepholeParameters.m_CellToOutputWeights != nullptr)
+    {
+        ConstTensor cellToOutputWeightsTensorCopy(m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(),
+                                                  m_PeepholeParameters.m_CellToOutputWeights->Map(true));
+        cellToOutputWeightsTensor = cellToOutputWeightsTensorCopy;
+        inputParams.m_CellToOutputWeights = &cellToOutputWeightsTensor;
+    }
+
+    ConstTensor inputGateBiasTensor;
+    if (m_CifgParameters.m_InputGateBias != nullptr)
+    {
+        ConstTensor inputGateBiasTensorCopy(m_CifgParameters.m_InputGateBias->GetTensorInfo(),
+                                            m_CifgParameters.m_InputGateBias->Map(true));
+        inputGateBiasTensor = inputGateBiasTensorCopy;
+        inputParams.m_InputGateBias = &inputGateBiasTensor;
+    }
+
+    ConstTensor forgetGateBiasTensor;
+    if (m_BasicParameters.m_ForgetGateBias != nullptr)
+    {
+        ConstTensor forgetGateBiasTensorCopy(m_BasicParameters.m_ForgetGateBias->GetTensorInfo(),
+                                             m_BasicParameters.m_ForgetGateBias->Map(true));
+        forgetGateBiasTensor = forgetGateBiasTensorCopy;
+        inputParams.m_ForgetGateBias = &forgetGateBiasTensor;
+    }
+
+    ConstTensor cellBiasTensor;
+    if (m_BasicParameters.m_CellBias != nullptr)
+    {
+        ConstTensor cellBiasTensorCopy(m_BasicParameters.m_CellBias->GetTensorInfo(),
+                                       m_BasicParameters.m_CellBias->Map(true));
+        cellBiasTensor = cellBiasTensorCopy;
+        inputParams.m_CellBias = &cellBiasTensor;
+    }
+
+    ConstTensor outputGateBias;
+    if (m_BasicParameters.m_OutputGateBias != nullptr)
+    {
+        ConstTensor outputGateBiasCopy(m_BasicParameters.m_OutputGateBias->GetTensorInfo(),
+                                       m_BasicParameters.m_OutputGateBias->Map(true));
+        outputGateBias = outputGateBiasCopy;
+        inputParams.m_OutputGateBias = &outputGateBias;
+    }
+
+    ConstTensor projectionWeightsTensor;
+    if (m_ProjectionParameters.m_ProjectionWeights != nullptr)
+    {
+        ConstTensor projectionWeightsTensorCopy(m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(),
+                                                m_ProjectionParameters.m_ProjectionWeights->Map(true));
+        projectionWeightsTensor = projectionWeightsTensorCopy;
+        inputParams.m_ProjectionWeights = &projectionWeightsTensor;
+    }
+
+    ConstTensor projectionBiasTensor;
+    if (m_ProjectionParameters.m_ProjectionBias != nullptr)
+    {
+        ConstTensor projectionBiasTensorCopy(m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(),
+                                             m_ProjectionParameters.m_ProjectionBias->Map(true));
+        projectionBiasTensor = projectionBiasTensorCopy;
+        inputParams.m_ProjectionBias = &projectionBiasTensor;
+    }
+
+    ConstTensor inputLayerNormTensor;
+    if (m_LayerNormParameters.m_InputLayerNormWeights != nullptr)
+    {
+        ConstTensor inputLayerNormTensorCopy(m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(),
+                                             m_LayerNormParameters.m_InputLayerNormWeights->Map(true));
+        inputLayerNormTensor = inputLayerNormTensorCopy;
+        inputParams.m_InputLayerNormWeights = &inputLayerNormTensor;
+    }
+
+    ConstTensor forgetLayerNormTensor;
+    if (m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr)
+    {
+        ConstTensor forgetLayerNormTensorCopy(m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(),
+                                              m_LayerNormParameters.m_ForgetLayerNormWeights->Map(true));
+        forgetLayerNormTensor = forgetLayerNormTensorCopy;
+        inputParams.m_ForgetLayerNormWeights = &forgetLayerNormTensor;
+    }
+
+    ConstTensor cellLayerNormTensor;
+    if (m_LayerNormParameters.m_CellLayerNormWeights != nullptr)
+    {
+        ConstTensor cellLayerNormTensorCopy(m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(),
+                                            m_LayerNormParameters.m_CellLayerNormWeights->Map(true));
+        cellLayerNormTensor = cellLayerNormTensorCopy;
+        inputParams.m_CellLayerNormWeights = &cellLayerNormTensor;
+    }
+
+    ConstTensor outputLayerNormTensor;
+    if (m_LayerNormParameters.m_OutputLayerNormWeights != nullptr)
+    {
+        ConstTensor outputLayerNormTensorCopy(m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(),
+                                              m_LayerNormParameters.m_OutputLayerNormWeights->Map(true));
+        outputLayerNormTensor = outputLayerNormTensorCopy;
+        inputParams.m_OutputLayerNormWeights = &outputLayerNormTensor;
+    }
+
+
+    visitor.VisitQLstmLayer(this, GetParameters(), inputParams, GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/QLstmLayer.hpp b/src/armnn/layers/QLstmLayer.hpp
new file mode 100644
index 0000000..2d40b7e
--- /dev/null
+++ b/src/armnn/layers/QLstmLayer.hpp
@@ -0,0 +1,124 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+class ScopedCpuTensorHandle;
+
+struct QLstmBasicParameters
+{
+    /// A unique pointer to represent 2D weights tensor with dimensions [num_units, inputSize] (QSymmS8).
+    std::unique_ptr<ScopedCpuTensorHandle> m_InputToForgetWeights;
+    /// A unique pointer to represent 2D weights tensor with dimensions [num_units, inputSize] (QSymmS8).
+    std::unique_ptr<ScopedCpuTensorHandle> m_InputToCellWeights;
+    /// A unique pointer to represent 2D weights tensor with dimensions [num_units, inputSize] (QSymmS8).
+    std::unique_ptr<ScopedCpuTensorHandle> m_InputToOutputWeights;
+
+    /// A unique pointer to represent 2D weights tensor with dimensions [num_units, outputSize] (QSymmS8).
+    std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToForgetWeights;
+    /// A unique pointer to represent 2D weights tensor with dimensions [num_units, outputSize] (QSymmS8).
+    std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToCellWeights;
+    /// A unique pointer to represent 2D weights tensor with dimensions [num_units, outputSize] (QSymmS8).
+    std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToOutputWeights;
+
+    /// A unique pointer to represent 1D bias tensor with dimensions [num_units] (int32).
+    std::unique_ptr<ScopedCpuTensorHandle> m_ForgetGateBias;
+    /// A unique pointer to represent 1D bias tensor with dimensions [num_units] (int32).
+    std::unique_ptr<ScopedCpuTensorHandle> m_CellBias;
+    /// A unique pointer to represent 1D bias tensor with dimensions [num_units] (int32).
+    std::unique_ptr<ScopedCpuTensorHandle> m_OutputGateBias;
+};
+
+struct QLstmOptProjectionParameters
+{
+    /// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units] (QSymmS8).
+    std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionWeights;
+    /// A unique pointer to represent 1D bias tensor with dimensions [output_size] (int32).
+    std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionBias;
+};
+
+struct QLstmOptPeepholeParameters
+{
+    /// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
+    std::unique_ptr<ScopedCpuTensorHandle> m_CellToInputWeights;
+    /// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
+    std::unique_ptr<ScopedCpuTensorHandle> m_CellToForgetWeights;
+    /// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
+    std::unique_ptr<ScopedCpuTensorHandle> m_CellToOutputWeights;
+};
+
+struct QLstmOptCifgParameters
+{
+    /// A unique pointer to represent 2D weights tensor with dimensions [num_units, inputSize] (QSymmS8).
+    std::unique_ptr<ScopedCpuTensorHandle> m_InputToInputWeights;
+    /// A unique pointer to represent 2D weights tensor with dimensions [num_units, outputSize] (QSymmS8).
+    std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToInputWeights;
+    /// A unique pointer to represent 1D bias tensor with dimensions [num_units] (int32).
+    std::unique_ptr<ScopedCpuTensorHandle> m_InputGateBias;
+};
+
+struct QLstmOptLayerNormParameters
+{
+    /// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
+    std::unique_ptr<ScopedCpuTensorHandle> m_InputLayerNormWeights;
+    /// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
+    std::unique_ptr<ScopedCpuTensorHandle> m_ForgetLayerNormWeights;
+    /// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
+    std::unique_ptr<ScopedCpuTensorHandle> m_CellLayerNormWeights;
+    /// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
+    std::unique_ptr<ScopedCpuTensorHandle> m_OutputLayerNormWeights;
+};
+
+/// This layer represents a QLstm operation.
+class QLstmLayer : public LayerWithParameters<QLstmDescriptor>
+{
+public:
+
+    QLstmBasicParameters m_BasicParameters;
+    QLstmOptCifgParameters m_CifgParameters;
+    QLstmOptProjectionParameters m_ProjectionParameters;
+    QLstmOptPeepholeParameters m_PeepholeParameters;
+    QLstmOptLayerNormParameters m_LayerNormParameters;
+
+    /// Makes a workload for the QLstm type.
+    /// The layer's own descriptor and constant tensors supply the remaining inputs.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    QLstmLayer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref QLstmLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+    /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+    /// otherwise infers the output shapes from given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes layer has.
+    /// @return A vector to the inferred output shape.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create a QLstmLayer.
+    /// @param [in] name Optional name for the layer.
+    QLstmLayer(const QLstmDescriptor& param, const char* name);
+
+    /// Default destructor
+    ~QLstmLayer() = default;
+
+    /// Retrieve the handles to the constant values stored by the layer.
+    /// @return A vector of the constant tensors stored by this layer.
+    Layer::ConstantTensors GetConstantTensorsByRef() override;
+};
+
+} // namespace armnn
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index ada665e..7ef3dd2 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.cpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -107,6 +107,104 @@
     CheckConstTensorPtrs("CellBias", m_InputParams.m_CellBias, inputParams.m_CellBias);
 }
 
+void TestQLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
+                                                 const ConstTensor* expected,
+                                                 const ConstTensor* actual)
+{
+    if (expected == nullptr)
+    {
+        BOOST_CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
+    }
+    else
+    {
+        BOOST_CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
+        if (actual != nullptr)
+        {
+            CheckConstTensors(*expected, *actual);
+        }
+    }
+}
+
+void TestQLstmLayerVisitor::CheckDescriptor(const QLstmDescriptor& descriptor)
+{
+    BOOST_CHECK(m_Descriptor.m_CellClip == descriptor.m_CellClip);
+    BOOST_CHECK(m_Descriptor.m_ProjectionClip == descriptor.m_ProjectionClip);
+    BOOST_CHECK(m_Descriptor.m_CifgEnabled == descriptor.m_CifgEnabled);
+    BOOST_CHECK(m_Descriptor.m_PeepholeEnabled == descriptor.m_PeepholeEnabled);
+    BOOST_CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
+}
+
+void TestQLstmLayerVisitor::CheckInputParameters(const LstmInputParams& inputParams)
+{
+    CheckConstTensorPtrs("InputToInputWeights",
+                         m_InputParams.m_InputToInputWeights,
+                         inputParams.m_InputToInputWeights);
+
+    CheckConstTensorPtrs("InputToForgetWeights",
+                         m_InputParams.m_InputToForgetWeights,
+                         inputParams.m_InputToForgetWeights);
+
+    CheckConstTensorPtrs("InputToCellWeights",
+                         m_InputParams.m_InputToCellWeights,
+                         inputParams.m_InputToCellWeights);
+
+    CheckConstTensorPtrs("InputToOutputWeights",
+                         m_InputParams.m_InputToOutputWeights,
+                         inputParams.m_InputToOutputWeights);
+
+    CheckConstTensorPtrs("RecurrentToInputWeights",
+                         m_InputParams.m_RecurrentToInputWeights,
+                         inputParams.m_RecurrentToInputWeights);
+
+    CheckConstTensorPtrs("RecurrentToForgetWeights",
+                         m_InputParams.m_RecurrentToForgetWeights,
+                         inputParams.m_RecurrentToForgetWeights);
+
+    CheckConstTensorPtrs("RecurrentToCellWeights",
+                         m_InputParams.m_RecurrentToCellWeights,
+                         inputParams.m_RecurrentToCellWeights);
+
+    CheckConstTensorPtrs("RecurrentToOutputWeights",
+                         m_InputParams.m_RecurrentToOutputWeights,
+                         inputParams.m_RecurrentToOutputWeights);
+
+    CheckConstTensorPtrs("CellToInputWeights",
+                         m_InputParams.m_CellToInputWeights,
+                         inputParams.m_CellToInputWeights);
+
+    CheckConstTensorPtrs("CellToForgetWeights",
+                         m_InputParams.m_CellToForgetWeights,
+                         inputParams.m_CellToForgetWeights);
+
+    CheckConstTensorPtrs("CellToOutputWeights",
+                         m_InputParams.m_CellToOutputWeights,
+                         inputParams.m_CellToOutputWeights);
+
+    CheckConstTensorPtrs("ProjectionWeights", m_InputParams.m_ProjectionWeights, inputParams.m_ProjectionWeights);
+    CheckConstTensorPtrs("ProjectionBias", m_InputParams.m_ProjectionBias, inputParams.m_ProjectionBias);
+
+    CheckConstTensorPtrs("InputGateBias",  m_InputParams.m_InputGateBias,  inputParams.m_InputGateBias);
+    CheckConstTensorPtrs("ForgetGateBias", m_InputParams.m_ForgetGateBias, inputParams.m_ForgetGateBias);
+    CheckConstTensorPtrs("CellBias",       m_InputParams.m_CellBias,       inputParams.m_CellBias);
+    CheckConstTensorPtrs("OutputGateBias", m_InputParams.m_OutputGateBias, inputParams.m_OutputGateBias);
+
+    CheckConstTensorPtrs("InputLayerNormWeights",
+                         m_InputParams.m_InputLayerNormWeights,
+                         inputParams.m_InputLayerNormWeights);
+
+    CheckConstTensorPtrs("ForgetLayerNormWeights",
+                         m_InputParams.m_ForgetLayerNormWeights,
+                         inputParams.m_ForgetLayerNormWeights);
+
+    CheckConstTensorPtrs("CellLayerNormWeights",
+                         m_InputParams.m_CellLayerNormWeights,
+                         inputParams.m_CellLayerNormWeights);
+
+    CheckConstTensorPtrs("OutputLayerNormWeights",
+                         m_InputParams.m_OutputLayerNormWeights,
+                         inputParams.m_OutputLayerNormWeights);
+}
+
 void TestQuantizedLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
                                                          const ConstTensor* expected,
                                                          const ConstTensor* actual)
@@ -891,7 +989,7 @@
 
     Network net;
 
-    IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params, layerName);
+    IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
     layer->Accept(visitor);
 }
 
@@ -978,7 +1076,7 @@
 
     Network net;
 
-    IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params);
+    IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
     layer->Accept(visitor);
 }
 
@@ -1065,7 +1163,7 @@
 
     Network net;
 
-    IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params, layerName);
+    IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
     layer->Accept(visitor);
 }
 
@@ -1152,7 +1250,7 @@
 
     Network net;
 
-    IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params);
+    IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
     layer->Accept(visitor);
 }
 
@@ -1239,52 +1337,713 @@
 
     Network net;
 
-    IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params, layerName);
+    IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
     layer->Accept(visitor);
 }
 
+BOOST_AUTO_TEST_CASE(CheckQLstmLayerBasic)
+{
+    QLstmDescriptor descriptor;
+    descriptor.m_ProjectionClip = 0.5f;
+    descriptor.m_CellClip = 0.3f;
+    descriptor.m_CifgEnabled = true;
+
+    // Basic params ONLY
+    std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToForgetWeights(
+            TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8), inputToForgetWeightsData);
+
+    std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToCellWeights(
+            TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8), inputToCellWeightsData);
+
+    std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToOutputWeights(
+            TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8), inputToOutputWeightsData);
+
+    std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToForgetWeights(TensorInfo(
+            4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8), recurrentToForgetWeightsData);
+
+    std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToCellWeights(TensorInfo(
+            4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8), recurrentToCellWeightsData);
+
+    std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToOutputWeights(TensorInfo(
+            4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8), recurrentToOutputWeightsData);
+
+    std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
+    ConstTensor forgetGateBias(TensorInfo(
+            4, forgetGateBiasDimensions.data(), DataType::Signed32), forgetGateBiasData);
+
+    std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
+    ConstTensor cellBias(TensorInfo(
+            4, cellBiasDimensions.data(), DataType::Signed32), cellBiasData);
+
+    std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
+    ConstTensor outputGateBias(TensorInfo(
+            4, outputGateBiasDimensions.data(), DataType::Signed32), outputGateBiasData);
+
+    LstmInputParams params;
+    params.m_InputToForgetWeights     = &inputToForgetWeights;
+    params.m_InputToCellWeights       = &inputToCellWeights;
+    params.m_InputToOutputWeights     = &inputToOutputWeights;
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+    params.m_ForgetGateBias           = &forgetGateBias;
+    params.m_CellBias                 = &cellBias;
+    params.m_OutputGateBias           = &outputGateBias;
+
+    TestQLstmLayerVisitor visitor(descriptor, params);
+
+    Network net;
+
+    IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
+    layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckNamedQLstmLayerBasic)
+{
+    const char* layerName = "QLstmLayer";
+    QLstmDescriptor descriptor;
+    descriptor.m_ProjectionClip = 0.5f;
+    descriptor.m_CellClip = 0.3f;
+    descriptor.m_CifgEnabled = true;
+
+    // Basic params ONLY
+    std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToForgetWeights(
+            TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8), inputToForgetWeightsData);
+
+    std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToCellWeights(
+            TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8), inputToCellWeightsData);
+
+    std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToOutputWeights(
+            TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8), inputToOutputWeightsData);
+
+    std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToForgetWeights(TensorInfo(
+            4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8), recurrentToForgetWeightsData);
+
+    std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToCellWeights(TensorInfo(
+            4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8), recurrentToCellWeightsData);
+
+    std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToOutputWeights(TensorInfo(
+            4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8), recurrentToOutputWeightsData);
+
+    std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
+    ConstTensor forgetGateBias(TensorInfo(
+            4, forgetGateBiasDimensions.data(), DataType::Signed32), forgetGateBiasData);
+
+    std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
+    ConstTensor cellBias(TensorInfo(
+            4, cellBiasDimensions.data(), DataType::Signed32), cellBiasData);
+
+    std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
+    ConstTensor outputGateBias(TensorInfo(
+            4, outputGateBiasDimensions.data(), DataType::Signed32), outputGateBiasData);
+
+    LstmInputParams params;
+    params.m_InputToForgetWeights     = &inputToForgetWeights;
+    params.m_InputToCellWeights       = &inputToCellWeights;
+    params.m_InputToOutputWeights     = &inputToOutputWeights;
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+    params.m_ForgetGateBias           = &forgetGateBias;
+    params.m_CellBias                 = &cellBias;
+    params.m_OutputGateBias           = &outputGateBias;
+
+    TestQLstmLayerVisitor visitor(descriptor, params, layerName);
+
+    Network net;
+
+    IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params, layerName);
+    layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgDisabled)
+{
+    QLstmDescriptor descriptor;
+    descriptor.m_ProjectionClip = 0.5f;
+    descriptor.m_CellClip = 0.3f;
+    descriptor.m_CifgEnabled = false;
+
+    // Basic params
+    std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToForgetWeights(
+            TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8), inputToForgetWeightsData);
+
+    std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToCellWeights(
+            TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8), inputToCellWeightsData);
+
+    std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToOutputWeights(
+            TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8), inputToOutputWeightsData);
+
+    std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToForgetWeights(TensorInfo(
+            4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8), recurrentToForgetWeightsData);
+
+    std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToCellWeights(TensorInfo(
+            4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8), recurrentToCellWeightsData);
+
+    std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToOutputWeights(TensorInfo(
+            4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8), recurrentToOutputWeightsData);
+
+    std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
+    ConstTensor forgetGateBias(TensorInfo(
+            4, forgetGateBiasDimensions.data(), DataType::Signed32), forgetGateBiasData);
+
+    std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
+    ConstTensor cellBias(TensorInfo(
+            4, cellBiasDimensions.data(), DataType::Signed32), cellBiasData);
+
+    std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
+    ConstTensor outputGateBias(TensorInfo(
+            4, outputGateBiasDimensions.data(), DataType::Signed32), outputGateBiasData);
+
+    // CIFG disabled params
+    std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToInputWeights(
+            TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8), inputToInputWeightsData);
+
+    std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToInputWeights(TensorInfo(
+            4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8), recurrentToInputWeightsData);
+
+    std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
+    ConstTensor inputGateBias(
+            TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32), inputGateBiasData);
+
+    LstmInputParams params;
+
+    // Basic params
+    params.m_InputToForgetWeights     = &inputToForgetWeights;
+    params.m_InputToCellWeights       = &inputToCellWeights;
+    params.m_InputToOutputWeights     = &inputToOutputWeights;
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+    params.m_ForgetGateBias           = &forgetGateBias;
+    params.m_CellBias                 = &cellBias;
+    params.m_OutputGateBias           = &outputGateBias;
+
+    // CIFG disabled params
+    params.m_InputToInputWeights     = &inputToInputWeights;
+    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
+    params.m_InputGateBias           = &inputGateBias;
+
+    TestQLstmLayerVisitor visitor(descriptor, params);
+
+    Network net;
+
+    IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
+    layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgDisabledPeepholeEnabled)
+{
+    QLstmDescriptor descriptor;
+    descriptor.m_ProjectionClip = 0.5f;
+    descriptor.m_CellClip = 0.3f;
+    descriptor.m_CifgEnabled = false;
+    descriptor.m_PeepholeEnabled = true;
+
+    // Basic params
+    std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToForgetWeights(
+            TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8), inputToForgetWeightsData);
+
+    std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToCellWeights(
+            TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8), inputToCellWeightsData);
+
+    std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToOutputWeights(
+            TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8), inputToOutputWeightsData);
+
+    std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToForgetWeights(TensorInfo(
+            4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8), recurrentToForgetWeightsData);
+
+    std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToCellWeights(TensorInfo(
+            4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8), recurrentToCellWeightsData);
+
+    std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToOutputWeights(TensorInfo(
+            4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8), recurrentToOutputWeightsData);
+
+    std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
+    ConstTensor forgetGateBias(TensorInfo(
+            4, forgetGateBiasDimensions.data(), DataType::Signed32), forgetGateBiasData);
+
+    std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
+    ConstTensor cellBias(TensorInfo(
+            4, cellBiasDimensions.data(), DataType::Signed32), cellBiasData);
+
+    std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
+    ConstTensor outputGateBias(TensorInfo(
+            4, outputGateBiasDimensions.data(), DataType::Signed32), outputGateBiasData);
+
+    // CIFG disabled params
+    std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToInputWeights(
+            TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8), inputToInputWeightsData);
+
+    std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToInputWeights(TensorInfo(
+            4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8), recurrentToInputWeightsData);
+
+    std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
+    ConstTensor inputGateBias(
+            TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32), inputGateBiasData);
+
+    // Peephole enabled, CIFG disabled params
+    std::vector<int16_t> cellToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> cellToInputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor cellToInputWeights(
+            TensorInfo(4, cellToInputWeightsDimensions.data(), DataType::QSymmS16), cellToInputWeightsData);
+
+    std::vector<int16_t> cellToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor cellToForgetWeights(
+            TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::QSymmS16), cellToForgetWeightsData);
+
+    std::vector<int16_t> cellToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor cellToOutputWeights(
+            TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::QSymmS16), cellToOutputWeightsData);
+
+    LstmInputParams params;
+
+    // Basic params
+    params.m_InputToForgetWeights     = &inputToForgetWeights;
+    params.m_InputToCellWeights       = &inputToCellWeights;
+    params.m_InputToOutputWeights     = &inputToOutputWeights;
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+    params.m_ForgetGateBias           = &forgetGateBias;
+    params.m_CellBias                 = &cellBias;
+    params.m_OutputGateBias           = &outputGateBias;
+
+    // CIFG disabled params
+    params.m_InputToInputWeights     = &inputToInputWeights;
+    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
+    params.m_InputGateBias           = &inputGateBias;
+
+    // Peephole enabled, CIFG disabled params
+    params.m_CellToInputWeights  = &cellToInputWeights;
+    params.m_CellToForgetWeights = &cellToForgetWeights;
+    params.m_CellToOutputWeights = &cellToOutputWeights;
+
+    TestQLstmLayerVisitor visitor(descriptor, params);
+
+    Network net;
+
+    IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
+    layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgEnabledPeepholeEnabled)
+{
+    QLstmDescriptor descriptor;
+    descriptor.m_ProjectionClip = 0.5f;
+    descriptor.m_CellClip = 0.3f;
+    descriptor.m_CifgEnabled = true;
+    descriptor.m_PeepholeEnabled = true;
+
+    // Basic params
+    std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToForgetWeights(
+            TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8), inputToForgetWeightsData);
+
+    std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToCellWeights(
+            TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8), inputToCellWeightsData);
+
+    std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToOutputWeights(
+            TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8), inputToOutputWeightsData);
+
+    std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToForgetWeights(TensorInfo(
+            4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8), recurrentToForgetWeightsData);
+
+    std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToCellWeights(TensorInfo(
+            4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8), recurrentToCellWeightsData);
+
+    std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToOutputWeights(TensorInfo(
+            4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8), recurrentToOutputWeightsData);
+
+    std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
+    ConstTensor forgetGateBias(TensorInfo(
+            4, forgetGateBiasDimensions.data(), DataType::Signed32), forgetGateBiasData);
+
+    std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
+    ConstTensor cellBias(TensorInfo(
+            4, cellBiasDimensions.data(), DataType::Signed32), cellBiasData);
+
+    std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
+    ConstTensor outputGateBias(TensorInfo(
+            4, outputGateBiasDimensions.data(), DataType::Signed32), outputGateBiasData);
+
+    // Peephole enabled and CIFG enabled params
+    std::vector<int16_t> cellToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor cellToForgetWeights(
+            TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::QSymmS16), cellToForgetWeightsData);
+
+    std::vector<int16_t> cellToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor cellToOutputWeights(
+            TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::QSymmS16), cellToOutputWeightsData);
+
+    LstmInputParams params;
+
+    // Basic params
+    params.m_InputToForgetWeights     = &inputToForgetWeights;
+    params.m_InputToCellWeights       = &inputToCellWeights;
+    params.m_InputToOutputWeights     = &inputToOutputWeights;
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+    params.m_ForgetGateBias           = &forgetGateBias;
+    params.m_CellBias                 = &cellBias;
+    params.m_OutputGateBias           = &outputGateBias;
+
+    // Peephole enabled and CIFG enabled params
+    params.m_CellToForgetWeights = &cellToForgetWeights;
+    params.m_CellToOutputWeights = &cellToOutputWeights;
+
+    TestQLstmLayerVisitor visitor(descriptor, params);
+
+    Network net;
+
+    IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
+    layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckQLstmLayerProjectionEnabled)
+{
+    QLstmDescriptor descriptor;
+    descriptor.m_ProjectionClip = 0.5f;
+    descriptor.m_CellClip = 0.3f;
+    descriptor.m_CifgEnabled = true;
+    descriptor.m_ProjectionEnabled = true;
+
+    // Basic params ONLY
+    std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToForgetWeights(
+            TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8), inputToForgetWeightsData);
+
+    std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToCellWeights(
+            TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8), inputToCellWeightsData);
+
+    std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToOutputWeights(
+            TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8), inputToOutputWeightsData);
+
+    std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToForgetWeights(TensorInfo(
+            4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8), recurrentToForgetWeightsData);
+
+    std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToCellWeights(TensorInfo(
+            4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8), recurrentToCellWeightsData);
+
+    std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToOutputWeights(TensorInfo(
+            4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8), recurrentToOutputWeightsData);
+
+    std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
+    ConstTensor forgetGateBias(TensorInfo(
+            4, forgetGateBiasDimensions.data(), DataType::Signed32), forgetGateBiasData);
+
+    std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
+    ConstTensor cellBias(TensorInfo(
+            4, cellBiasDimensions.data(), DataType::Signed32), cellBiasData);
+
+    std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
+    ConstTensor outputGateBias(TensorInfo(
+            4, outputGateBiasDimensions.data(), DataType::Signed32), outputGateBiasData);
+
+    // Projection enabled params
+    std::vector<uint8_t> projectionWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> projectionWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor projectionWeights(TensorInfo(
+            4, projectionWeightsDimensions.data(), DataType::QSymmS8), projectionWeightsData);
+
+    std::vector<int32_t> projectionBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> projectionBiasDimensions = {1, 1, 3, 3};
+    ConstTensor projectionBias(TensorInfo(
+            4, projectionBiasDimensions.data(), DataType::Signed32), projectionBiasData);
+
+    LstmInputParams params;
+
+    // Basic params
+    params.m_InputToForgetWeights     = &inputToForgetWeights;
+    params.m_InputToCellWeights       = &inputToCellWeights;
+    params.m_InputToOutputWeights     = &inputToOutputWeights;
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+    params.m_ForgetGateBias           = &forgetGateBias;
+    params.m_CellBias                 = &cellBias;
+    params.m_OutputGateBias           = &outputGateBias;
+
+    // Projection enabled params
+    params.m_ProjectionWeights = &projectionWeights;
+    params.m_ProjectionBias    = &projectionBias;
+
+    TestQLstmLayerVisitor visitor(descriptor, params);
+
+    Network net;
+
+    IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
+    layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgDisabledLayerNormEnabled)
+{
+    QLstmDescriptor descriptor;
+    descriptor.m_ProjectionClip = 0.5f;
+    descriptor.m_CellClip = 0.3f;
+    descriptor.m_CifgEnabled = false;
+    descriptor.m_LayerNormEnabled = true;
+
+    // Basic params
+    std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToForgetWeights(
+            TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8), inputToForgetWeightsData);
+
+    std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToCellWeights(
+            TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8), inputToCellWeightsData);
+
+    std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToOutputWeights(
+            TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8), inputToOutputWeightsData);
+
+    std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToForgetWeights(TensorInfo(
+            4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8), recurrentToForgetWeightsData);
+
+    std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToCellWeights(TensorInfo(
+            4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8), recurrentToCellWeightsData);
+
+    std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToOutputWeights(TensorInfo(
+            4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8), recurrentToOutputWeightsData);
+
+    std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
+    ConstTensor forgetGateBias(TensorInfo(
+            4, forgetGateBiasDimensions.data(), DataType::Signed32), forgetGateBiasData);
+
+    std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
+    ConstTensor cellBias(TensorInfo(
+            4, cellBiasDimensions.data(), DataType::Signed32), cellBiasData);
+
+    std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
+    ConstTensor outputGateBias(TensorInfo(
+            4, outputGateBiasDimensions.data(), DataType::Signed32), outputGateBiasData);
+
+    // CIFG disabled params
+    std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputToInputWeights(
+            TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8), inputToInputWeightsData);
+
+    std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor recurrentToInputWeights(TensorInfo(
+            4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8), recurrentToInputWeightsData);
+
+    std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
+    ConstTensor inputGateBias(
+            TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32), inputGateBiasData);
+
+    // Layer Norm enabled, CIFG disabled params
+    std::vector<int16_t> inputLayerNormWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> inputLayerNormWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor inputLayerNormWeights(
+            TensorInfo(4, inputLayerNormWeightsDimensions.data(), DataType::QSymmS16), inputLayerNormWeightsData);
+
+    std::vector<int16_t> forgetLayerNormWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> forgetLayerNormWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor forgetLayerNormWeights(
+            TensorInfo(4, forgetLayerNormWeightsDimensions.data(), DataType::QSymmS16), forgetLayerNormWeightsData);
+
+    std::vector<int16_t> cellLayerNormWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> cellLayerNormWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor cellLayerNormWeights(
+            TensorInfo(4, cellLayerNormWeightsDimensions.data(), DataType::QSymmS16), cellLayerNormWeightsData);
+
+    std::vector<int16_t> outputLayerNormWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<unsigned int> outputLayerNormWeightsDimensions = {1, 1, 3, 3};
+    ConstTensor outputLayerNormWeights(
+            TensorInfo(4, outputLayerNormWeightsDimensions.data(), DataType::QSymmS16), outputLayerNormWeightsData);
+
+    LstmInputParams params;
+
+    // Basic params
+    params.m_InputToForgetWeights     = &inputToForgetWeights;
+    params.m_InputToCellWeights       = &inputToCellWeights;
+    params.m_InputToOutputWeights     = &inputToOutputWeights;
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+    params.m_ForgetGateBias           = &forgetGateBias;
+    params.m_CellBias                 = &cellBias;
+    params.m_OutputGateBias           = &outputGateBias;
+
+    // CIFG disabled params
+    params.m_InputToInputWeights     = &inputToInputWeights;
+    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
+    params.m_InputGateBias           = &inputGateBias;
+
+    // Layer Norm enabled, CIFG disabled params
+    params.m_InputLayerNormWeights  = &inputLayerNormWeights;
+    params.m_ForgetLayerNormWeights = &forgetLayerNormWeights;
+    params.m_CellLayerNormWeights   = &cellLayerNormWeights;
+    params.m_OutputLayerNormWeights = &outputLayerNormWeights;
+
+    TestQLstmLayerVisitor visitor(descriptor, params);
+
+    Network net;
+
+    IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
+    layer->Accept(visitor);
+}
+
+
 BOOST_AUTO_TEST_CASE(CheckQuantizedLstmLayer)
 {
     std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
     ConstTensor inputToInputWeights(
-            TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QAsymmU8), inputToInputWeightsData);
+            TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8), inputToInputWeightsData);
 
     std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
     ConstTensor inputToForgetWeights(
-            TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QAsymmU8), inputToForgetWeightsData);
+            TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8), inputToForgetWeightsData);
 
     std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
     ConstTensor inputToCellWeights(
-            TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QAsymmU8), inputToCellWeightsData);
+            TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8), inputToCellWeightsData);
 
     std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
     ConstTensor inputToOutputWeights(
-            TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QAsymmU8), inputToOutputWeightsData);
+            TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8), inputToOutputWeightsData);
 
 
     std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
     ConstTensor recurrentToInputWeights(TensorInfo(
-            4, recurrentToInputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToInputWeightsData);
+            4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8), recurrentToInputWeightsData);
 
     std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
     ConstTensor recurrentToForgetWeights(TensorInfo(
-            4, recurrentToForgetWeightsDimensions.data(), DataType::QAsymmU8), recurrentToForgetWeightsData);
+            4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8), recurrentToForgetWeightsData);
 
     std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
     ConstTensor recurrentToCellWeights(TensorInfo(
-            4, recurrentToCellWeightsDimensions.data(), DataType::QAsymmU8), recurrentToCellWeightsData);
+            4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8), recurrentToCellWeightsData);
 
     std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
     ConstTensor recurrentToOutputWeights(TensorInfo(
-            4, recurrentToOutputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToOutputWeightsData);
+            4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8), recurrentToOutputWeightsData);
 
 
     std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
diff --git a/src/armnn/test/ConstTensorLayerVisitor.hpp b/src/armnn/test/ConstTensorLayerVisitor.hpp
index 203c5fd..e423e0f 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.hpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.hpp
@@ -221,6 +221,38 @@
     LstmInputParams m_InputParams;
 };
 
+class TestQLstmLayerVisitor : public TestLayerVisitor
+{
+public:
+    explicit TestQLstmLayerVisitor(const QLstmDescriptor& descriptor,
+                                   const LstmInputParams& params,
+                                   const char* name = nullptr)
+            : TestLayerVisitor(name)
+            , m_Descriptor(descriptor)
+            , m_InputParams(params)
+    {}
+
+    void VisitQLstmLayer(const IConnectableLayer* layer,
+                         const QLstmDescriptor& descriptor,
+                         const LstmInputParams& params,
+                         const char* name = nullptr)
+    {
+        CheckLayerPointer(layer);
+        CheckLayerName(name);
+        CheckDescriptor(descriptor);
+        CheckInputParameters(params);
+    }
+
+protected:
+    void CheckDescriptor(const QLstmDescriptor& descriptor);
+    void CheckInputParameters(const LstmInputParams& inputParams);
+    void CheckConstTensorPtrs(const std::string& name, const ConstTensor* expected, const ConstTensor* actual);
+
+private:
+    QLstmDescriptor m_Descriptor;
+    LstmInputParams m_InputParams;
+};
+
 
 class TestQuantizedLstmLayerVisitor : public TestLayerVisitor
 {
diff --git a/src/armnn/test/InferOutputTests.cpp b/src/armnn/test/InferOutputTests.cpp
index 3293cef..015ab67 100644
--- a/src/armnn/test/InferOutputTests.cpp
+++ b/src/armnn/test/InferOutputTests.cpp
@@ -46,6 +46,9 @@
 // TransposeConvolution2D
 ARMNN_SIMPLE_TEST_CASE(TransposeConvolution2dInferOutputShape, TransposeConvolution2dInferOutputShapeTest)
 
+// QLstm
+ARMNN_SIMPLE_TEST_CASE(QLstmInferOutputShape, QLstmInferOutputShapeTest)
+
 // QuantizedLstm
 ARMNN_SIMPLE_TEST_CASE(QuantizedLstmInferOutputShape, QuantizedLstmInferOutputShapeTest)
 
diff --git a/src/armnn/test/InferOutputTests.hpp b/src/armnn/test/InferOutputTests.hpp
index b03449b..70afbc9 100644
--- a/src/armnn/test/InferOutputTests.hpp
+++ b/src/armnn/test/InferOutputTests.hpp
@@ -7,7 +7,6 @@
 
 #include "TestUtils.hpp"
 
-
 #include <Graph.hpp>
 #include <layers/ArgMinMaxLayer.hpp>
 #include <layers/BatchToSpaceNdLayer.hpp>
@@ -530,6 +529,63 @@
     BOOST_CHECK(expectedOutputShape == depthwiseConvolution2dLayer->InferOutputShapes(shapes).at(0));
 }
 
+// QLstm
+void QLstmInferOutputShapeImpl(const armnn::QLstmDescriptor& descriptor,
+                               const std::vector<armnn::TensorShape>& inputShapes,
+                               std::vector<armnn::TensorShape>& outputShapes)
+{
+    armnn::Graph graph;
+    armnn::QLstmLayer* const qLstmLayer = graph.AddLayer<armnn::QLstmLayer>(descriptor, "qLstm");
+    outputShapes = qLstmLayer->InferOutputShapes(inputShapes);
+}
+
+void QLstmInferOutputShapeTest()
+{
+    armnn::QLstmDescriptor descriptor;
+    descriptor.m_PeepholeEnabled = true;
+    descriptor.m_CifgEnabled = false;
+    descriptor.m_ProjectionEnabled = false;
+
+    // Input shapes
+    const std::vector<unsigned int> inputShape{ 2, 5 };
+    const std::vector<unsigned int> previousOutputInShape{ 2, 4 };
+    const std::vector<unsigned int> previousCellStateInShape{ 2, 4 };
+
+    armnn::TensorShape inputTensorShape(2, inputShape.data());
+    armnn::TensorShape previousOutputInTensorShape(2, previousOutputInShape.data());
+    armnn::TensorShape previousCellStateInTensorShape(2, previousCellStateInShape.data());
+
+    std::vector<armnn::TensorShape> inShapes
+    {
+        inputTensorShape,
+        previousOutputInTensorShape,
+        previousCellStateInTensorShape
+    };
+
+    // Output shapes
+    const std::vector<unsigned int> outputStateOutShape{ 2, 4 };
+    const std::vector<unsigned int> cellStateOutShape{ 2, 4 };
+    const std::vector<unsigned int> outputShape{ 2, 4 };
+    armnn::TensorShape outputStateOutTensorShape(2, outputStateOutShape.data());
+    armnn::TensorShape cellStateOutTensorShape(2, cellStateOutShape.data());
+    armnn::TensorShape outputTensorShape(2, outputShape.data());
+
+    std::vector<armnn::TensorShape> expectedOutShapes
+    {
+        outputStateOutTensorShape,
+        cellStateOutTensorShape,
+        outputTensorShape
+    };
+
+    std::vector<armnn::TensorShape> actualOutShapes;
+    BOOST_CHECK_NO_THROW(QLstmInferOutputShapeImpl(descriptor, inShapes, actualOutShapes));
+
+    BOOST_CHECK(actualOutShapes.size() == 3);
+    BOOST_CHECK(expectedOutShapes[0] == actualOutShapes[0]);
+    BOOST_CHECK(expectedOutShapes[1] == actualOutShapes[1]);
+    BOOST_CHECK(expectedOutShapes[2] == actualOutShapes[2]);
+}
+
 // QuantizedLstm
 void QuantizedLstmInferOutputShapeImpl(const std::vector<armnn::TensorShape>& inputShapes,
                                        std::vector<armnn::TensorShape>& outputShapes)