IVGCVSW-4893 Refactor ILayerVisitor to use a unified IStrategy interface.

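Replaces the per-layer ILayerVisitor callbacks with a single
IStrategy::ExecuteStrategy entry point that receives the layer, its
descriptor (as a BaseDescriptor reference), any constant tensors, the
layer name and an optional LayerBindingId. As a minimal sketch of a
client of the new interface (illustrative only, not part of this patch;
assumes <map> and armnn/utility/IgnoreUnused.hpp are included):

    // Counts layers by type using the unified interface.
    class LayerCountingStrategy : public armnn::IStrategy
    {
    public:
        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                             const armnn::BaseDescriptor& descriptor,
                             const std::vector<armnn::ConstTensor>& constants,
                             const char* name,
                             const armnn::LayerBindingId id = 0) override
        {
            armnn::IgnoreUnused(descriptor, constants, name, id);
            ++m_Counts[layer->GetType()];
        }

        std::map<armnn::LayerType, std::size_t> m_Counts;
    };
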
Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Change-Id: Id7bc8255a8e3f9e5aac65d510bec8a559bf37246
diff --git a/Android.mk b/Android.mk
index 6ada126..aa89ff9 100644
--- a/Android.mk
+++ b/Android.mk
@@ -426,7 +426,10 @@
         src/profiling/test/TimelinePacketTests.cpp \
         src/profiling/test/TimelineUtilityMethodsTests.cpp \
         src/armnnSerializer/test/ActivationSerializationTests.cpp \
-        src/armnnSerializer/test/SerializerTests.cpp
+        src/armnnSerializer/test/ComparisonSerializationTests.cpp \
+        src/armnnSerializer/test/LstmSerializationTests.cpp \
+        src/armnnSerializer/test/SerializerTests.cpp \
+        src/armnnSerializer/test/SerializerTestUtils.cpp
 
 ifeq ($(ARMNN_REF_ENABLED),1)
 LOCAL_SRC_FILES += \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c862c55..4e75c28 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -247,6 +247,7 @@
     include/armnn/INetwork.hpp
     include/armnn/IProfiler.hpp
     include/armnn/IRuntime.hpp
+    include/armnn/IStrategy.hpp
     include/armnn/LayerSupport.hpp
     include/armnn/LayerVisitorBase.hpp
     include/armnn/Logging.hpp
@@ -412,8 +413,8 @@
     src/armnn/Descriptors.cpp
     src/armnn/DeviceSpec.hpp
     src/armnn/DllExport.hpp
-    src/armnn/DynamicQuantizationVisitor.cpp
-    src/armnn/DynamicQuantizationVisitor.hpp
+    src/armnn/DynamicQuantizationStrategy.cpp
+    src/armnn/DynamicQuantizationStrategy.hpp
     src/armnn/Exceptions.cpp
     src/armnn/ExecutionFrame.cpp
     src/armnn/ExecutionFrame.hpp
@@ -456,8 +457,8 @@
     src/armnn/ProfilingEvent.cpp
     src/armnn/ProfilingEvent.hpp
     src/armnn/Profiling.hpp
-    src/armnn/QuantizerVisitor.cpp
-    src/armnn/QuantizerVisitor.hpp
+    src/armnn/QuantizerStrategy.cpp
+    src/armnn/QuantizerStrategy.hpp
     src/armnn/Runtime.cpp
     src/armnn/Runtime.hpp
     src/armnn/RangeTracker.cpp
@@ -465,8 +466,8 @@
     src/armnn/ResolveType.hpp
     src/armnn/SerializeLayerParameters.cpp
     src/armnn/SerializeLayerParameters.hpp
-    src/armnn/StaticRangeVisitor.cpp
-    src/armnn/StaticRangeVisitor.hpp
+    src/armnn/StaticRangeStrategy.cpp
+    src/armnn/StaticRangeStrategy.hpp
     src/armnn/SubgraphView.cpp
     src/armnn/SubgraphView.hpp
     src/armnn/SubgraphViewSelector.cpp
@@ -909,7 +910,11 @@
         enable_language(ASM)
         list(APPEND unittest_sources
             src/armnnSerializer/test/ActivationSerializationTests.cpp
+            src/armnnSerializer/test/ComparisonSerializationTests.cpp
+            src/armnnSerializer/test/LstmSerializationTests.cpp
             src/armnnSerializer/test/SerializerTests.cpp
+            src/armnnSerializer/test/SerializerTestUtils.cpp
+            src/armnnSerializer/test/SerializerTestUtils.hpp
             src/armnnDeserializer/test/DeserializeAbs.cpp
             src/armnnDeserializer/test/DeserializeActivation.cpp
             src/armnnDeserializer/test/DeserializeAdd.cpp
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index a8e68aa..20511ab 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -9,6 +9,8 @@
 
 #include <cstdint>
 #include <initializer_list>
+#include <iostream>
+#include <sstream>
 
 #include "Tensor.hpp"
 #include "Types.hpp"
@@ -16,8 +18,11 @@
 namespace armnn
 {
 
+/// Base class for all descriptors.
+struct BaseDescriptor {};
+
 /// An ActivationDescriptor for the ActivationLayer.
-struct ActivationDescriptor
+struct ActivationDescriptor : BaseDescriptor
 {
     ActivationDescriptor()
         : m_Function(ActivationFunction::Sigmoid)
@@ -48,7 +53,7 @@
 };
 
 /// An ArgMinMaxDescriptor for ArgMinMaxLayer
-struct ArgMinMaxDescriptor
+struct ArgMinMaxDescriptor : BaseDescriptor
 {
     ArgMinMaxDescriptor()
         : m_Function(ArgMinMaxFunction::Min)
@@ -70,7 +75,7 @@
 };
 
 /// A ComparisonDescriptor for the ComparisonLayer
-struct ComparisonDescriptor
+struct ComparisonDescriptor : BaseDescriptor
 {
     ComparisonDescriptor()
         : ComparisonDescriptor(ComparisonOperation::Equal)
@@ -90,7 +95,7 @@
 };
 
 /// A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer
-struct ElementwiseUnaryDescriptor
+struct ElementwiseUnaryDescriptor : BaseDescriptor
 {
     ElementwiseUnaryDescriptor()
         : ElementwiseUnaryDescriptor(UnaryOperation::Abs)
@@ -110,7 +115,7 @@
 };
 
 /// A PermuteDescriptor for the PermuteLayer.
-struct PermuteDescriptor
+struct PermuteDescriptor : BaseDescriptor
 {
     PermuteDescriptor()
         : m_DimMappings{}
@@ -131,7 +136,7 @@
 };
 
 /// A SoftmaxDescriptor for the SoftmaxLayer.
-struct SoftmaxDescriptor
+struct SoftmaxDescriptor : BaseDescriptor
 {
     SoftmaxDescriptor()
         : m_Beta(1.0f)
@@ -155,7 +160,7 @@
 /// @brief An OriginsDescriptor for the ConcatLayer.
 /// Descriptor to configure the concatenation process. Number of views must be equal to the number of inputs, and
 /// their order must match - e.g. first view corresponds to the first input, second view to the second input, etc.
-struct OriginsDescriptor
+struct OriginsDescriptor : BaseDescriptor
 {
     OriginsDescriptor();
     OriginsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
@@ -198,7 +203,7 @@
 /// @brief A ViewsDescriptor for the SplitterLayer.
 /// Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and
 /// their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
-struct ViewsDescriptor
+struct ViewsDescriptor : BaseDescriptor
 {
     ViewsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
     ViewsDescriptor(const ViewsDescriptor& other);
@@ -321,7 +326,7 @@
 }
 
 /// A Pooling2dDescriptor for the Pooling2dLayer.
-struct Pooling2dDescriptor
+struct Pooling2dDescriptor : BaseDescriptor
 {
     Pooling2dDescriptor()
         : m_PoolType(PoolingAlgorithm::Max)
@@ -381,7 +386,7 @@
 };
 
 /// A FullyConnectedDescriptor for the FullyConnectedLayer.
-struct FullyConnectedDescriptor
+struct FullyConnectedDescriptor : BaseDescriptor
 {
     FullyConnectedDescriptor()
         : m_BiasEnabled(false)
@@ -400,7 +405,7 @@
 };
 
 /// A Convolution2dDescriptor for the Convolution2dLayer.
-struct Convolution2dDescriptor
+struct Convolution2dDescriptor : BaseDescriptor
 {
     Convolution2dDescriptor()
         : m_PadLeft(0)
@@ -452,7 +457,7 @@
 };
 
 /// A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
-struct DepthwiseConvolution2dDescriptor
+struct DepthwiseConvolution2dDescriptor : BaseDescriptor
 {
     DepthwiseConvolution2dDescriptor()
         : m_PadLeft(0)
@@ -503,7 +508,7 @@
     DataLayout m_DataLayout;
 };
 
-struct DetectionPostProcessDescriptor
+struct DetectionPostProcessDescriptor : BaseDescriptor
 {
     DetectionPostProcessDescriptor()
         : m_MaxDetections(0)
@@ -559,7 +564,7 @@
 };
 
 /// A NormalizationDescriptor for the NormalizationLayer.
-struct NormalizationDescriptor
+struct NormalizationDescriptor : BaseDescriptor
 {
     NormalizationDescriptor()
         : m_NormChannelType(NormalizationAlgorithmChannel::Across)
@@ -599,7 +604,7 @@
 };
 
 /// A L2NormalizationDescriptor for the L2NormalizationLayer.
-struct L2NormalizationDescriptor
+struct L2NormalizationDescriptor : BaseDescriptor
 {
     L2NormalizationDescriptor()
         : m_Eps(1e-12f)
@@ -618,7 +623,7 @@
 };
 
 /// A BatchNormalizationDescriptor for the BatchNormalizationLayer.
-struct BatchNormalizationDescriptor
+struct BatchNormalizationDescriptor : BaseDescriptor
 {
     BatchNormalizationDescriptor()
         : m_Eps(0.0001f)
@@ -637,7 +642,7 @@
 };
 
 /// An InstanceNormalizationDescriptor for InstanceNormalizationLayer
-struct InstanceNormalizationDescriptor
+struct InstanceNormalizationDescriptor : BaseDescriptor
 {
     InstanceNormalizationDescriptor()
         : m_Gamma(1.0f)
@@ -665,7 +670,7 @@
 };
 
 /// A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
-struct BatchToSpaceNdDescriptor
+struct BatchToSpaceNdDescriptor : BaseDescriptor
 {
     BatchToSpaceNdDescriptor()
         : m_BlockShape({1, 1})
@@ -696,7 +701,7 @@
 };
 
 /// A FakeQuantizationDescriptor for the FakeQuantizationLayer.
-struct FakeQuantizationDescriptor
+struct FakeQuantizationDescriptor : BaseDescriptor
 {
         FakeQuantizationDescriptor()
         : m_Min(-6.0f)
@@ -715,7 +720,7 @@
 };
 
 /// A FillDescriptor for the FillLayer
-struct FillDescriptor
+struct FillDescriptor : BaseDescriptor
 {
     FillDescriptor()
     : m_Value(0)
@@ -734,7 +739,7 @@
 };
 
 /// A GatherDescriptor for the GatherLayer.
-struct GatherDescriptor
+struct GatherDescriptor : BaseDescriptor
 {
     GatherDescriptor()
         : m_Axis(0)
@@ -754,7 +759,7 @@
 };
 
 /// A ResizeBilinearDescriptor for the ResizeBilinearLayer.
-struct ResizeBilinearDescriptor
+struct ResizeBilinearDescriptor : BaseDescriptor
 {
     ResizeBilinearDescriptor()
         : m_TargetWidth(0)
@@ -764,6 +769,15 @@
         , m_HalfPixelCenters(false)
     {}
 
+    bool operator ==(const ResizeBilinearDescriptor& rhs) const
+    {
+        return m_TargetWidth          == rhs.m_TargetWidth &&
+               m_TargetHeight         == rhs.m_TargetHeight &&
+               m_DataLayout           == rhs.m_DataLayout &&
+               m_AlignCorners         == rhs.m_AlignCorners &&
+               m_HalfPixelCenters     == rhs.m_HalfPixelCenters;
+    }
+
     /// Target width value.
     uint32_t          m_TargetWidth;
     /// Target height value.
@@ -777,7 +791,7 @@
 };
 
 /// A ResizeDescriptor for the ResizeLayer.
-struct ResizeDescriptor
+struct ResizeDescriptor : BaseDescriptor
 {
     ResizeDescriptor()
         : m_TargetWidth(0)
@@ -815,7 +829,7 @@
 
 
 /// A ReshapeDescriptor for the ReshapeLayer.
-struct ReshapeDescriptor
+struct ReshapeDescriptor : BaseDescriptor
 {
     ReshapeDescriptor()
         : m_TargetShape()
@@ -835,7 +849,7 @@
 };
 
 /// A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
-struct SpaceToBatchNdDescriptor
+struct SpaceToBatchNdDescriptor : BaseDescriptor
 {
     SpaceToBatchNdDescriptor()
         : m_BlockShape({1, 1})
@@ -867,7 +881,7 @@
 };
 
 /// A SpaceToDepthDescriptor for the SpaceToDepthLayer
-struct SpaceToDepthDescriptor
+struct SpaceToDepthDescriptor : BaseDescriptor
 {
     SpaceToDepthDescriptor()
         : SpaceToDepthDescriptor(1u, DataLayout::NHWC)
@@ -894,7 +908,7 @@
 using DepthToSpaceDescriptor = SpaceToDepthDescriptor;
 
 /// An LstmDescriptor for the LstmLayer.
-struct LstmDescriptor
+struct LstmDescriptor : BaseDescriptor
 {
     LstmDescriptor()
         : m_ActivationFunc(1) // 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid
@@ -934,7 +948,7 @@
 };
 
 /// A MeanDescriptor for the MeanLayer.
-struct MeanDescriptor
+struct MeanDescriptor : BaseDescriptor
 {
     MeanDescriptor()
         : m_Axis()
@@ -958,7 +972,7 @@
 };
 
 /// A PadDescriptor for the PadLayer.
-struct PadDescriptor
+struct PadDescriptor : BaseDescriptor
 {
     PadDescriptor() : m_PadValue(0)
     {}
@@ -984,7 +998,7 @@
 };
 
 /// A SliceDescriptor for the SliceLayer.
-struct SliceDescriptor
+struct SliceDescriptor : BaseDescriptor
 {
     SliceDescriptor(const std::vector<unsigned int>& begin, const std::vector<unsigned int>& size)
         : m_Begin(begin)
@@ -1007,7 +1021,7 @@
 };
 
 /// A StackDescriptor for the StackLayer.
-struct StackDescriptor
+struct StackDescriptor : BaseDescriptor
 {
     StackDescriptor()
         : m_Axis(0)
@@ -1037,7 +1051,7 @@
 };
 
 /// A StandInDescriptor for the StandIn layer
-struct StandInDescriptor
+struct StandInDescriptor : BaseDescriptor
 {
     StandInDescriptor() {};
 
@@ -1059,7 +1073,7 @@
 };
 
 /// A StridedSliceDescriptor for the StridedSliceLayer.
-struct StridedSliceDescriptor
+struct StridedSliceDescriptor : BaseDescriptor
 {
     StridedSliceDescriptor(const std::vector<int>& begin,
                            const std::vector<int>& end,
@@ -1123,7 +1137,7 @@
 };
 
 /// A PreCompiledDescriptor for the PreCompiledLayer.
-struct PreCompiledDescriptor
+struct PreCompiledDescriptor : BaseDescriptor
 {
     PreCompiledDescriptor(unsigned int numInputSlots = 1u, unsigned int numOutputSlots = 1u)
         : m_NumInputSlots(numInputSlots), m_NumOutputSlots(numOutputSlots)
@@ -1136,7 +1150,7 @@
 };
 
 /// A QLstmDescriptor for the QLstmLayer.
-struct QLstmDescriptor
+struct QLstmDescriptor : BaseDescriptor
 {
     QLstmDescriptor()
             : m_CellClip(0.0)
@@ -1196,7 +1210,7 @@
 };
 
 /// A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
-struct TransposeConvolution2dDescriptor
+struct TransposeConvolution2dDescriptor : BaseDescriptor
 {
     TransposeConvolution2dDescriptor() :
         m_PadLeft(0),
@@ -1246,7 +1260,7 @@
 };
 
 /// A TransposeDescriptor for the TransposeLayer.
-struct TransposeDescriptor
+struct TransposeDescriptor : BaseDescriptor
 {
     TransposeDescriptor()
             : m_DimMappings{}
@@ -1267,7 +1281,7 @@
 };
 
 /// A LogicalBinaryDescriptor for the LogicalBinaryLayer
-struct LogicalBinaryDescriptor
+struct LogicalBinaryDescriptor : BaseDescriptor
 {
     LogicalBinaryDescriptor()
         : LogicalBinaryDescriptor(LogicalBinaryOperation::LogicalAnd)
@@ -1287,7 +1301,7 @@
 };
 
 /// A ReduceDescriptor for the REDUCE operators.
-struct ReduceDescriptor
+struct ReduceDescriptor : BaseDescriptor
 {
     ReduceDescriptor()
         : m_KeepDims(false)
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index 054ce51..4e7082e 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -7,6 +7,7 @@
 
 namespace armnn
 {
+struct BaseDescriptor;
 
 struct ActivationDescriptor;
 struct ArgMinMaxDescriptor;
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index ca1b725..c667d9c 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -8,6 +8,7 @@
 #include <armnn/Deprecated.hpp>
 #include <armnn/DescriptorsFwd.hpp>
 #include <armnn/ILayerVisitor.hpp>
+#include <armnn/IStrategy.hpp>
 #include <armnn/NetworkFwd.hpp>
 #include <armnn/Optional.hpp>
 #include <armnn/TensorFwd.hpp>
@@ -91,8 +92,15 @@
     /// Apply a visitor to this layer
     virtual void Accept(ILayerVisitor& visitor) const = 0;
 
+    /// Apply a strategy to this layer
+    virtual void ExecuteStrategy(IStrategy& strategy) const = 0;
+
     /// Provide a hint for the optimizer as to which backend to prefer for this layer
     virtual void BackendSelectionHint(Optional<BackendId> backend) = 0;
+
+    /// Returns the armnn::LayerType of this layer
+    virtual LayerType GetType() const = 0;
+
 protected:
       /// Objects are not deletable via the handle
     ~IConnectableLayer() {}
@@ -600,6 +608,8 @@
 
     virtual void Accept(ILayerVisitor& visitor) const = 0;
 
+    virtual void ExecuteStrategy(IStrategy& strategy) const = 0;
+
 protected:
     ~INetwork() {}
 };
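For context, a hedged usage sketch of the additions above (assumes
'network' is an armnn::INetworkPtr and MyStrategy derives from
armnn::IStrategy; not part of the patch):

    MyStrategy strategy;
    network->ExecuteStrategy(strategy); // calls ExecuteStrategy on each layer
    strategy.FinishStrategy();          // optional post-traversal hook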
diff --git a/include/armnn/IStrategy.hpp b/include/armnn/IStrategy.hpp
new file mode 100644
index 0000000..8d29565
--- /dev/null
+++ b/include/armnn/IStrategy.hpp
@@ -0,0 +1,34 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/DescriptorsFwd.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/Types.hpp>
+
+#include <vector>
+
+namespace armnn
+{
+
+class IConnectableLayer;
+
+class IStrategy
+{
+protected:
+    IStrategy() {}
+    virtual ~IStrategy() {}
+
+public:
+    virtual void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                                 const armnn::BaseDescriptor& descriptor,
+                                 const std::vector<armnn::ConstTensor>& constants,
+                                 const char* name,
+                                 const armnn::LayerBindingId id = 0) = 0;
+
+    virtual void FinishStrategy() {}
+};
+
+} // namespace armnn
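Because ExecuteStrategy receives the descriptor as a plain BaseDescriptor
reference, an implementation recovers the concrete descriptor type by
switching on layer->GetType() and downcasting, as DynamicQuantizationStrategy
does further below. A minimal sketch of that dispatch pattern (MyStrategy is
a hypothetical implementation):

    void MyStrategy::ExecuteStrategy(const armnn::IConnectableLayer* layer,
                                     const armnn::BaseDescriptor& descriptor,
                                     const std::vector<armnn::ConstTensor>& constants,
                                     const char* name,
                                     const armnn::LayerBindingId id)
    {
        switch (layer->GetType())
        {
            case armnn::LayerType::Activation:
            {
                const auto& desc =
                    static_cast<const armnn::ActivationDescriptor&>(descriptor);
                // ... inspect desc.m_Function, desc.m_A, desc.m_B ...
                break;
            }
            default:
                break; // layer types this strategy does not handle
        }
    }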
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index 22004bd..e1ff46b 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -344,6 +344,89 @@
 
 } // namespace profiling
 
+/// This list uses the X macro technique.
+/// See https://en.wikipedia.org/wiki/X_Macro for more info
+#define LIST_OF_LAYER_TYPE \
+    X(Activation) \
+    X(Addition) \
+    X(ArgMinMax) \
+    X(BatchNormalization) \
+    X(BatchToSpaceNd) \
+    X(Comparison) \
+    X(Concat) \
+    X(Constant) \
+    X(ConvertBf16ToFp32) \
+    X(ConvertFp16ToFp32) \
+    X(ConvertFp32ToBf16) \
+    X(ConvertFp32ToFp16) \
+    X(Convolution2d) \
+    X(Debug) \
+    X(DepthToSpace) \
+    X(DepthwiseConvolution2d) \
+    X(Dequantize) \
+    X(DetectionPostProcess) \
+    X(Division) \
+    X(ElementwiseUnary) \
+    X(FakeQuantization) \
+    X(Fill) \
+    X(Floor) \
+    X(FullyConnected) \
+    X(Gather) \
+    X(Input) \
+    X(InstanceNormalization) \
+    X(L2Normalization) \
+    X(LogicalBinary) \
+    X(LogSoftmax) \
+    X(Lstm) \
+    X(QLstm) \
+    X(Map) \
+    X(Maximum) \
+    X(Mean) \
+    X(MemCopy) \
+    X(MemImport) \
+    X(Merge) \
+    X(Minimum) \
+    X(Multiplication) \
+    X(Normalization) \
+    X(Output) \
+    X(Pad) \
+    X(Permute) \
+    X(Pooling2d) \
+    X(PreCompiled) \
+    X(Prelu) \
+    X(Quantize) \
+    X(QuantizedLstm) \
+    X(Reshape) \
+    X(Rank) \
+    X(Resize) \
+    X(Reduce) \
+    X(Slice) \
+    X(Softmax) \
+    X(SpaceToBatchNd) \
+    X(SpaceToDepth) \
+    X(Splitter) \
+    X(Stack) \
+    X(StandIn) \
+    X(StridedSlice) \
+    X(Subtraction) \
+    X(Switch) \
+    X(Transpose) \
+    X(TransposeConvolution2d) \
+    X(Unmap)
+
+/// When adding a new layer, also adapt the LastLayer enum value in the
+/// enum class LayerType below.
+enum class LayerType
+{
+#define X(name) name,
+    LIST_OF_LAYER_TYPE
+#undef X
+    FirstLayer = Activation,
+    LastLayer = Unmap
+};
+
+const char* GetLayerTypeAsCString(LayerType type);
+
 } // namespace armnn
 
 
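The X macro list above expands once into the LayerType enum and can be
re-expanded wherever a parallel table is needed. A sketch of the conventional
expansion behind GetLayerTypeAsCString (the definition is expected to remain
in src/armnn/InternalTypes.cpp):

    const char* GetLayerTypeAsCString(LayerType type)
    {
        switch (type)
        {
    #define X(name) case LayerType::name: return #name;
            LIST_OF_LAYER_TYPE
    #undef X
            default: return "Unknown";
        }
    }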
diff --git a/src/armnn/DynamicQuantizationStrategy.cpp b/src/armnn/DynamicQuantizationStrategy.cpp
new file mode 100644
index 0000000..d354a0e
--- /dev/null
+++ b/src/armnn/DynamicQuantizationStrategy.cpp
@@ -0,0 +1,276 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "DynamicQuantizationStrategy.hpp"
+#include "NetworkUtils.hpp"
+
+#include <armnn/Descriptors.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+#include <armnn/Types.hpp>
+
+#include <limits>
+
+namespace armnn
+{
+DynamicQuantizationStrategy::DynamicQuantizationStrategy(RangeTracker& rangeTracker, Graph& graph)
+        : m_RangeTracker(rangeTracker),
+          m_Graph(graph)
+{}
+
+void DynamicQuantizationStrategy::SetRange(const IConnectableLayer* layer, unsigned int outputIdx, float min, float max)
+{
+    m_RangeTracker.SetRange(layer, outputIdx, min, max);
+}
+
+void DynamicQuantizationStrategy::ForwardParentParameters(const IConnectableLayer* layer)
+{
+    for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
+    {
+        const IOutputSlot *outputSlot = layer->GetInputSlot(i).GetConnection();
+        LayerGuid previousLayerId = outputSlot->GetOwningLayerGuid();
+        unsigned int ownerIndex = outputSlot->CalculateIndexOnOwner();
+        const auto parentRange = m_RangeTracker.GetRange(previousLayerId, ownerIndex);
+        SetRange(layer, i, parentRange.first, parentRange.second);
+    }
+}
+
+void DynamicQuantizationStrategy::AddToCalibratedLayers(const IConnectableLayer* layer)
+{
+    m_LayersToCalibrate.push_back(layer);
+}
+
+void DynamicQuantizationStrategy::AddToNonCalibratedLayers(const IConnectableLayer* layer)
+{
+    m_LayersNotToCalibrate.push_back(layer);
+}
+
+void DynamicQuantizationStrategy::FinishStrategy()
+{
+    for (const IConnectableLayer* layer : m_LayersToCalibrate)
+    {
+        std::vector<DebugLayer*> newDebugLayers = InsertDebugLayerAfter(
+            m_Graph, *PolymorphicDowncast<Layer*>(const_cast<IConnectableLayer*>(layer)));
+        // record them so we can take them out again efficiently afterward
+        m_DebugLayers.insert(std::end(m_DebugLayers), std::begin(newDebugLayers), std::end(newDebugLayers));
+    }
+}
+
+void DynamicQuantizationStrategy::RemoveDebugLayers()
+{
+    for (DebugLayer* debugLayer : m_DebugLayers)
+    {
+        OutputSlot& proceedingOutputSlot = *debugLayer->GetInputSlot(0).GetConnectedOutputSlot();
+        proceedingOutputSlot.Disconnect(debugLayer->GetInputSlot(0));
+
+        for (InputSlot* succeedingInputSlot : debugLayer->GetOutputSlot(0).GetConnections())
+        {
+            debugLayer->GetOutputSlot(0).Disconnect(*succeedingInputSlot);
+            proceedingOutputSlot.Connect(*succeedingInputSlot);
+        }
+        m_Graph.EraseLayer(debugLayer);
+    }
+    m_DebugLayers.clear();
+}
+
+void DynamicQuantizationStrategy::VisitNonCalibratedLayers() {
+    RemoveDebugLayers();
+    for (const IConnectableLayer* layer : m_LayersNotToCalibrate)
+    {
+        ForwardParentParameters(layer);
+    }
+}
+
+
+void DynamicQuantizationStrategy::ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                                                  const BaseDescriptor& descriptor,
+                                                  const std::vector<armnn::ConstTensor>& constants,
+                                                  const char* name,
+                                                  const armnn::LayerBindingId id)
+{
+    IgnoreUnused(name);
+    IgnoreUnused(id);
+
+    switch (layer->GetType())
+    {
+        case armnn::LayerType::Activation :
+        {
+            const ActivationDescriptor& activationDescriptor = static_cast<const ActivationDescriptor&>(descriptor);
+            switch (activationDescriptor.m_Function)
+            {
+                // Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
+                case ActivationFunction::Abs:
+                case ActivationFunction::Linear:
+                case ActivationFunction::ReLu:
+                case ActivationFunction::SoftReLu:
+                    SetRange(layer, 0, 0.f, 15.f);
+                    break;
+                case ActivationFunction::BoundedReLu:
+                    SetRange(layer, 0, 0.f, activationDescriptor.m_A);
+                    break;
+                case ActivationFunction::TanH:
+                    SetRange(layer, 0, -1.f, 1.f);
+                    break;
+                case ActivationFunction::LeakyReLu:
+                    SetRange(layer, 0, -5.f, 15.f);
+                    break;
+                default:
+                    SetRange(layer, 0, -15.f, 15.f);
+                    break;
+            }
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Addition :
+        {
+            SetRange(layer, 0, -20.f, 20.f);
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::ArgMinMax :
+        {
+            AddToNonCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::BatchNormalization :
+        {
+            SetRange(layer, 0, -15.0f, 15.0f);
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Normalization:
+        {
+            SetRange(layer, 0, -15.0f, 15.0f);
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Convolution2d:
+        {
+            SetRange(layer, 0, -15.0f, 15.0f);
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::DepthwiseConvolution2d:
+        {
+            SetRange(layer, 0, -15.0f, 15.0f);
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::FullyConnected :
+        {
+            SetRange(layer, 0, -15.0f, 15.0f);
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Permute :
+        {
+            AddToNonCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::SpaceToBatchNd :
+        {
+            AddToNonCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Pooling2d :
+        {
+            AddToNonCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Softmax :
+        {
+            SetRange(layer, 0, 0.f, 1.f);
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Constant :
+        {
+            if (constants[0].GetDataType() != DataType::Float32)
+            {
+                throw InvalidArgumentException("Quantization is supported only for FP32 tensors");
+            }
+
+            // Work out the range based on the input constants
+            unsigned int inputNumElements = constants[0].GetNumElements();
+            const float* inputData = reinterpret_cast<const float*>(constants[0].GetMemoryArea());
+
+            float min = std::numeric_limits<float>::max();
+            float max = std::numeric_limits<float>::lowest();
+
+            for (unsigned int i = 0; i < inputNumElements; i++)
+            {
+                const float inputValue = inputData[i];
+
+                min = std::min(min, inputValue);
+                max = std::max(max, inputValue);
+            }
+            SetRange(layer, 0, min, max);
+            break;
+        }
+        case armnn::LayerType::Concat :
+        {
+            float min = std::numeric_limits<float>::max();
+            float max = std::numeric_limits<float>::lowest();
+            for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
+            {
+                const IOutputSlot* outputSlot = layer->GetInputSlot(i).GetConnection();
+                LayerGuid layerId = outputSlot->GetOwningLayerGuid();
+                unsigned int slotIndex = outputSlot->CalculateIndexOnOwner();
+                RangeTracker::MinMaxRange range = m_RangeTracker.GetRange(layerId, slotIndex);
+                min = std::min(min, range.first);
+                max = std::max(max, range.second);
+            }
+            SetRange(layer, 0, min, max);
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Reshape :
+        {
+            AddToNonCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Splitter :
+        {
+            AddToNonCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Resize :
+        {
+            AddToNonCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::StridedSlice :
+        {
+            AddToNonCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::BatchToSpaceNd :
+        {
+            AddToNonCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Input :
+        {
+            SetRange(layer, 0, -0.0f, 0.0f);
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Output :
+        {
+            AddToNonCalibratedLayers(layer);
+            m_OutputLayers.push_back(id);
+            break;
+        }
+        default:
+        {}
+    }
+}
+
+const std::vector<LayerBindingId>& DynamicQuantizationStrategy::GetOutputLayers()
+{
+    return m_OutputLayers;
+}
+
+} //namespace armnn
diff --git a/src/armnn/DynamicQuantizationStrategy.hpp b/src/armnn/DynamicQuantizationStrategy.hpp
new file mode 100644
index 0000000..aa77a4b
--- /dev/null
+++ b/src/armnn/DynamicQuantizationStrategy.hpp
@@ -0,0 +1,59 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "armnn/LayerVisitorBase.hpp"
+#include "RangeTracker.hpp"
+#include "layers/DebugLayer.hpp"
+
+#include <armnn/INetwork.hpp>
+#include <armnnQuantizer/INetworkQuantizer.hpp>
+
+namespace armnn
+{
+
+/// Strategy class to establish min/max ranges based on the type of the layer.
+class DynamicQuantizationStrategy : public armnn::IStrategy
+{
+public:
+
+    DynamicQuantizationStrategy(RangeTracker& rangeTracker, Graph& graph);
+    ~DynamicQuantizationStrategy() = default;
+
+    virtual void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                                 const armnn::BaseDescriptor& descriptor,
+                                 const std::vector<armnn::ConstTensor>& constants,
+                                 const char* name,
+                                 const armnn::LayerBindingId id = 0) override;
+
+    const std::vector<armnn::LayerBindingId>& GetOutputLayers();
+    void VisitNonCalibratedLayers();
+    void FinishStrategy() override;
+
+
+private:
+    /// Set the range for an output slot on a layer
+    void SetRange(const IConnectableLayer* layer, unsigned int outputIdx, float min, float max);
+
+    void ForwardParentParameters(const IConnectableLayer* layer);
+
+    /// Mapping from a layer Guid to an array of ranges for outputs
+    RangeTracker& m_RangeTracker;
+
+    Graph& m_Graph;
+
+    std::vector<const IConnectableLayer*> m_LayersToCalibrate;
+    std::vector<const IConnectableLayer*> m_LayersNotToCalibrate;
+    std::vector<DebugLayer*> m_DebugLayers;
+
+    std::vector<armnn::LayerBindingId> m_OutputLayers;
+    void AddToCalibratedLayers(const IConnectableLayer* layer);
+    void AddToNonCalibratedLayers(const IConnectableLayer* layer);
+    void RemoveDebugLayers();
+
+
+};
+} //namespace armnn
diff --git a/src/armnn/DynamicQuantizationVisitor.cpp b/src/armnn/DynamicQuantizationVisitor.cpp
deleted file mode 100644
index 02e7699..0000000
--- a/src/armnn/DynamicQuantizationVisitor.cpp
+++ /dev/null
@@ -1,364 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "DynamicQuantizationVisitor.hpp"
-#include "NetworkUtils.hpp"
-
-#include <armnn/Descriptors.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
-#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <armnn/Types.hpp>
-
-#include <limits>
-
-namespace armnn
-{
-
-DynamicQuantizationVisitor::DynamicQuantizationVisitor(RangeTracker& rangeTracker, Graph& graph)
-        : m_RangeTracker(rangeTracker),
-          m_Graph(graph)
-{}
-
-void DynamicQuantizationVisitor::SetRange(const IConnectableLayer* layer, unsigned int outputIdx, float min, float max)
-{
-    m_RangeTracker.SetRange(layer, outputIdx, min, max);
-}
-
-void DynamicQuantizationVisitor::ForwardParentParameters(const IConnectableLayer* layer)
-{
-    for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
-    {
-        const IOutputSlot *outputSlot = layer->GetInputSlot(i).GetConnection();
-        LayerGuid previousLayerId = outputSlot->GetOwningLayerGuid();
-        unsigned int ownerIndex = outputSlot->CalculateIndexOnOwner();
-        const auto parentRange = m_RangeTracker.GetRange(previousLayerId, ownerIndex);
-        SetRange(layer, i, parentRange.first, parentRange.second);
-    }
-}
-
-void DynamicQuantizationVisitor::AddToCalibratedLayers(const IConnectableLayer* layer)
-{
-    m_LayersToCalibrate.push_back(layer);
-}
-
-void DynamicQuantizationVisitor::AddToNonCalibratedLayers(const IConnectableLayer* layer)
-{
-    m_LayersNotToCalibrate.push_back(layer);
-}
-
-void DynamicQuantizationVisitor::FinishVisit()
-{
-    for (const IConnectableLayer* layer : m_LayersToCalibrate)
-    {
-        std::vector<DebugLayer*> newDebugLayers = InsertDebugLayerAfter(
-            m_Graph, *PolymorphicDowncast<Layer*>(const_cast<IConnectableLayer*>(layer)));
-        // record them so we can take them out again efficiently afterward
-        m_DebugLayers.insert(std::end(m_DebugLayers), std::begin(newDebugLayers), std::end(newDebugLayers));
-    }
-}
-
-void DynamicQuantizationVisitor::RemoveDebugLayers()
-{
-    for (DebugLayer* debugLayer : m_DebugLayers)
-    {
-        OutputSlot& proceedingOutputSlot = *debugLayer->GetInputSlot(0).GetConnectedOutputSlot();
-        proceedingOutputSlot.Disconnect(debugLayer->GetInputSlot(0));
-
-        for (InputSlot* succeedingInputSlot : debugLayer->GetOutputSlot(0).GetConnections())
-        {
-            debugLayer->GetOutputSlot(0).Disconnect(*succeedingInputSlot);
-            proceedingOutputSlot.Connect(*succeedingInputSlot);
-        }
-        m_Graph.EraseLayer(debugLayer);
-    }
-    m_DebugLayers.clear();
-}
-
-void DynamicQuantizationVisitor::VisitNonCalibratedLayers() {
-    RemoveDebugLayers();
-    for (const IConnectableLayer* layer : m_LayersNotToCalibrate)
-    {
-        ForwardParentParameters(layer);
-    }
-}
-
-void DynamicQuantizationVisitor::VisitAdditionLayer(const IConnectableLayer* layer,
-                                                    const char* name)
-{
-    IgnoreUnused(name);
-    SetRange(layer, 0, -20.f, 20.f);
-    AddToCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitAbsLayer(const IConnectableLayer* layer,
-                                               const char* name)
-{
-    IgnoreUnused(name);
-    SetRange(layer, 0, -20.f, 20.f);
-    AddToCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitArgMinMaxLayer(const IConnectableLayer* layer,
-                                                     const ArgMinMaxDescriptor& desc,
-                                                     const char* name)
-{
-    IgnoreUnused(name);
-    IgnoreUnused(desc);
-    AddToNonCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitBatchNormalizationLayer(const IConnectableLayer* layer,
-                                                              const BatchNormalizationDescriptor& desc,
-                                                              const ConstTensor& mean,
-                                                              const ConstTensor& variance,
-                                                              const ConstTensor& beta,
-                                                              const ConstTensor& gamma,
-                                                              const char* name)
-{
-    IgnoreUnused(desc);
-    IgnoreUnused(mean);
-    IgnoreUnused(variance);
-    IgnoreUnused(beta);
-    IgnoreUnused(gamma);
-    IgnoreUnused(name);
-    SetRange(layer, 0, -15.0f, 15.0f);
-    AddToCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitNormalizationLayer(const IConnectableLayer* layer,
-                                 const NormalizationDescriptor& desc,
-                                 const char* name)
-{
-    IgnoreUnused(desc);
-    IgnoreUnused(name);
-    SetRange(layer, 0, -15.0f, 15.0f);
-    AddToCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer,
-                                                         const Convolution2dDescriptor& convolution2dDescriptor,
-                                                         const ConstTensor& weights,
-                                                         const Optional<ConstTensor>& biases,
-                                                         const char* name)
-{
-    IgnoreUnused(convolution2dDescriptor);
-    IgnoreUnused(weights);
-    IgnoreUnused(biases);
-    IgnoreUnused(name);
-    SetRange(layer, 0, -15.0f, 15.0f);
-    AddToCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
-                                                                  const DepthwiseConvolution2dDescriptor& desc,
-                                                                  const ConstTensor& weights,
-                                                                  const Optional<ConstTensor>& biases,
-                                                                  const char* name)
-{
-    IgnoreUnused(desc);
-    IgnoreUnused(weights);
-    IgnoreUnused(biases);
-    IgnoreUnused(name);
-    SetRange(layer, 0, -15.0f, 15.0f);
-    AddToCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitActivationLayer(const IConnectableLayer* layer,
-                                                      const ActivationDescriptor& activationDescriptor,
-                                                      const char* name)
-{
-    IgnoreUnused(name, activationDescriptor);
-    switch (activationDescriptor.m_Function)
-    {
-        // Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
-        case ActivationFunction::Abs:
-        case ActivationFunction::Linear:
-        case ActivationFunction::ReLu:
-        case ActivationFunction::SoftReLu:
-            SetRange(layer, 0, 0.f, 15.f);
-            break;
-        case ActivationFunction::BoundedReLu:
-            SetRange(layer, 0, 0.f, activationDescriptor.m_A);
-            break;
-        case ActivationFunction::TanH:
-            SetRange(layer, 0, -1.f, 1.f);
-            break;
-        case ActivationFunction::LeakyReLu:
-            SetRange(layer, 0, -5.f, 15.f);
-            break;
-        default:
-            SetRange(layer, 0, -15.f, 15.f);
-            break;
-    }
-    AddToCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitFullyConnectedLayer(const IConnectableLayer *layer,
-                                                          const FullyConnectedDescriptor& desc,
-                                                          const ConstTensor& weights,
-                                                          const Optional<ConstTensor>& biases,
-                                                          const char *name)
-{
-    IgnoreUnused(desc);
-    IgnoreUnused(weights);
-    IgnoreUnused(biases);
-    IgnoreUnused(name);
-    SetRange(layer, 0, -15.0f, 15.0f);
-    AddToCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitPermuteLayer(const IConnectableLayer* layer,
-                                                   const PermuteDescriptor& permuteDescriptor,
-                                                   const char* name)
-{
-    IgnoreUnused(permuteDescriptor);
-    IgnoreUnused(name);
-    AddToNonCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
-                                                          const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
-                                                          const char* name)
-{
-    IgnoreUnused(spaceToBatchNdDescriptor);
-    IgnoreUnused(name);
-    AddToNonCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitPooling2dLayer(const IConnectableLayer* layer,
-                                                     const Pooling2dDescriptor& pooling2dDescriptor,
-                                                     const char* name)
-{
-    IgnoreUnused(pooling2dDescriptor);
-    IgnoreUnused(name);
-    AddToNonCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitSoftmaxLayer(const IConnectableLayer* layer,
-                                                   const SoftmaxDescriptor& softmaxDescriptor,
-                                                   const char* name)
-{
-    IgnoreUnused(softmaxDescriptor);
-    IgnoreUnused(name);
-    SetRange(layer, 0, 0.f, 1.f);
-    AddToCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitConstantLayer(const IConnectableLayer* layer,
-                                                    const ConstTensor& input,
-                                                    const char* name)
-{
-    IgnoreUnused(name);
-
-    if (input.GetDataType() != DataType::Float32)
-    {
-        throw InvalidArgumentException("Quantization is supported only for FP32 tensors");
-    }
-
-    // Work out the range based on the input constants
-    unsigned int inputNumElements = input.GetNumElements();
-    const float* inputData = reinterpret_cast<const float*>(input.GetMemoryArea());
-
-    float min = std::numeric_limits<float>::max();
-    float max = std::numeric_limits<float>::lowest();
-
-    for (unsigned int i = 0; i < inputNumElements; i++)
-    {
-        const float inputValue = inputData[i];
-
-        min = std::min(min, inputValue);
-        max = std::max(max, inputValue);
-    }
-    SetRange(layer, 0, min, max);
-}
-
-void DynamicQuantizationVisitor::VisitConcatLayer(const IConnectableLayer* layer,
-                                                  const ConcatDescriptor& originsDescriptor,
-                                                  const char* name)
-{
-    IgnoreUnused(name);
-    IgnoreUnused(originsDescriptor);
-    float min = std::numeric_limits<float>::max();
-    float max = std::numeric_limits<float>::lowest();
-    for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
-    {
-        const IOutputSlot* outputSlot = layer->GetInputSlot(i).GetConnection();
-        LayerGuid layerId = outputSlot->GetOwningLayerGuid();
-        unsigned int slotIndex = outputSlot->CalculateIndexOnOwner();
-        RangeTracker::MinMaxRange range = m_RangeTracker.GetRange(layerId, slotIndex);
-        min = std::min(min, range.first);
-        max = std::max(max, range.second);
-    }
-    SetRange(layer, 0, min, max);
-    AddToCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitReshapeLayer(const IConnectableLayer* layer,
-                                                   const ReshapeDescriptor& reshapeDescriptor,
-                                                   const char* name)
-{
-    IgnoreUnused(reshapeDescriptor);
-    IgnoreUnused(name);
-    AddToNonCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitSplitterLayer(const IConnectableLayer* layer,
-                                                    const SplitterDescriptor& splitterDescriptor,
-                                                    const char* name)
-{
-    IgnoreUnused(splitterDescriptor);
-    IgnoreUnused(name);
-    AddToNonCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitResizeBilinearLayer(const IConnectableLayer* layer,
-                                                          const ResizeBilinearDescriptor& resizeDesc,
-                                                          const char* name)
-{
-    IgnoreUnused(resizeDesc);
-    IgnoreUnused(name);
-    AddToNonCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitStridedSliceLayer(const IConnectableLayer* layer,
-                                                        const StridedSliceDescriptor& stridedSliceDescriptor,
-                                                        const char* name)
-{
-    IgnoreUnused(stridedSliceDescriptor);
-    IgnoreUnused(name);
-    AddToNonCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitBatchToSpaceNdLayer(const IConnectableLayer* layer,
-                                                          const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
-                                                          const char* name)
-{
-    IgnoreUnused(batchToSpaceNdDescriptor);
-    IgnoreUnused(name);
-    AddToNonCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
-{
-    IgnoreUnused(id);
-    IgnoreUnused(name);
-    SetRange(layer, 0, -0.0f, 0.0f);
-    AddToCalibratedLayers(layer);
-}
-
-void DynamicQuantizationVisitor::VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
-{
-    IgnoreUnused(id);
-    IgnoreUnused(name);
-    AddToNonCalibratedLayers(layer);
-    m_OutputLayers.push_back(id);
-}
-
-const std::vector<LayerBindingId>& DynamicQuantizationVisitor::GetOutputLayers()
-{
-    return m_OutputLayers;
-}
-
-} //namespace armnn
diff --git a/src/armnn/DynamicQuantizationVisitor.hpp b/src/armnn/DynamicQuantizationVisitor.hpp
deleted file mode 100644
index 358e471..0000000
--- a/src/armnn/DynamicQuantizationVisitor.hpp
+++ /dev/null
@@ -1,149 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "armnn/LayerVisitorBase.hpp"
-#include "RangeTracker.hpp"
-#include "layers/DebugLayer.hpp"
-
-#include <armnn/INetwork.hpp>
-#include <armnnQuantizer/INetworkQuantizer.hpp>
-
-namespace armnn
-{
-
-/// Visitor class to establish min/max ranges based on the type of the layer
-class DynamicQuantizationVisitor : public LayerVisitorBase<VisitorThrowingPolicy>
-{
-public:
-    DynamicQuantizationVisitor(RangeTracker& rangeTracker, Graph& graph);
-    ~DynamicQuantizationVisitor() = default;
-
-    /// Functions to set the Range on a per-layer-type basis
-    void VisitAbsLayer(const IConnectableLayer* layer,
-                       const char* name = nullptr) override;
-
-    void VisitAdditionLayer(const IConnectableLayer* layer,
-                            const char* name = nullptr) override;
-
-    void VisitArgMinMaxLayer(const IConnectableLayer* layer,
-                             const ArgMinMaxDescriptor& desc,
-                             const char* name = nullptr) override;
-
-    void VisitNormalizationLayer(const IConnectableLayer* layer,
-                                 const NormalizationDescriptor& desc,
-                                 const char* name = nullptr) override ;
-
-    void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
-                                      const BatchNormalizationDescriptor& desc,
-                                      const ConstTensor& mean,
-                                      const ConstTensor& variance,
-                                      const ConstTensor& beta,
-                                      const ConstTensor& gamma,
-                                      const char* name = nullptr) override;
-
-    void VisitConvolution2dLayer(const IConnectableLayer* layer,
-                                 const Convolution2dDescriptor& convolution2dDescriptor,
-                                 const ConstTensor& weights,
-                                 const Optional<ConstTensor>& biases,
-                                 const char* name = nullptr) override;
-
-    void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
-                                          const DepthwiseConvolution2dDescriptor& desc,
-                                          const ConstTensor& weights,
-                                          const Optional<ConstTensor>& biases,
-                                          const char* name = nullptr) override;
-
-    void VisitActivationLayer(const IConnectableLayer* layer,
-                              const ActivationDescriptor& activationDescriptor,
-                              const char* name = nullptr) override;
-
-    void VisitFullyConnectedLayer(const IConnectableLayer *layer,
-                                  const FullyConnectedDescriptor& desc,
-                                  const ConstTensor& weights,
-                                  const Optional<ConstTensor>& biases,
-                                  const char *name) override;
-
-    void VisitPermuteLayer(const IConnectableLayer* layer,
-                           const PermuteDescriptor& permuteDescriptor,
-                           const char* name) override;
-
-    void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
-                                  const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
-                                  const char* name = nullptr) override;
-
-    void VisitPooling2dLayer(const IConnectableLayer* layer,
-                             const Pooling2dDescriptor& pooling2dDescriptor,
-                             const char* name) override;
-
-    void VisitSoftmaxLayer(const IConnectableLayer* layer,
-                           const SoftmaxDescriptor& softmaxDescriptor,
-                           const char* name = nullptr) override;
-
-    void VisitConcatLayer(const IConnectableLayer* layer,
-                          const ConcatDescriptor& originsDescriptor,
-                          const char* name = nullptr) override;
-
-    void VisitConstantLayer(const IConnectableLayer* layer,
-                            const ConstTensor& input,
-                            const char* name = nullptr) override;
-
-    void VisitReshapeLayer(const IConnectableLayer* layer,
-                           const ReshapeDescriptor& reshapeDescriptor,
-                           const char* name = nullptr) override;
-
-    void VisitSplitterLayer(const IConnectableLayer* layer,
-                            const SplitterDescriptor& splitterDescriptor,
-                            const char* name = nullptr) override;
-
-    void VisitResizeBilinearLayer(const IConnectableLayer* layer,
-                                  const ResizeBilinearDescriptor& resizeDesc,
-                                  const char* name = nullptr) override;
-
-    void VisitStridedSliceLayer(const IConnectableLayer* layer,
-                                const StridedSliceDescriptor& stridedSliceDescriptor,
-                                const char* name = nullptr) override;
-
-    void VisitBatchToSpaceNdLayer(const IConnectableLayer* layer,
-                                  const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
-                                  const char* name = nullptr) override;
-
-    void VisitInputLayer(const IConnectableLayer* layer,
-                         LayerBindingId id,
-                         const char* name = nullptr) override;
-
-    void VisitOutputLayer(const IConnectableLayer* layer,
-                          LayerBindingId id,
-                          const char* name = nullptr) override;
-
-    void FinishVisit() override;
-    void VisitNonCalibratedLayers();
-
-    const std::vector<armnn::LayerBindingId>& GetOutputLayers();
-
-private:
-    /// Set the range for an output slot on a layer
-    void SetRange(const IConnectableLayer* layer, unsigned int outputIdx, float min, float max);
-
-    void ForwardParentParameters(const IConnectableLayer* layer);
-
-    /// Mapping from a layer Guid to an array of ranges for outputs
-    RangeTracker& m_RangeTracker;
-
-    Graph& m_Graph;
-
-    std::vector<const IConnectableLayer*> m_LayersToCalibrate;
-    std::vector<const IConnectableLayer*> m_LayersNotToCalibrate;
-    std::vector<DebugLayer*> m_DebugLayers;
-
-    std::vector<armnn::LayerBindingId> m_OutputLayers;
-
-    void AddToCalibratedLayers(const IConnectableLayer* layer);
-    void AddToNonCalibratedLayers(const IConnectableLayer* layer);
-    void RemoveDebugLayers();
-};
-
-} //namespace armnn
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 6e65591..9850520 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -8,93 +8,9 @@
 
 #include <array>
 
-
-/// This list uses X macro technique.
-/// See https://en.wikipedia.org/wiki/X_Macro for more info
-#define LIST_OF_LAYER_TYPE \
-    X(Activation) \
-    X(Addition) \
-    X(ArgMinMax) \
-    X(BatchNormalization) \
-    X(BatchToSpaceNd) \
-    X(Comparison) \
-    X(Concat) \
-    X(Constant) \
-    X(ConvertBf16ToFp32) \
-    X(ConvertFp16ToFp32) \
-    X(ConvertFp32ToBf16) \
-    X(ConvertFp32ToFp16) \
-    X(Convolution2d) \
-    X(Debug) \
-    X(DepthToSpace) \
-    X(DepthwiseConvolution2d) \
-    X(Dequantize) \
-    X(DetectionPostProcess) \
-    X(Division) \
-    X(ElementwiseUnary) \
-    X(FakeQuantization) \
-    X(Fill) \
-    X(Floor) \
-    X(FullyConnected) \
-    X(Gather) \
-    X(Input) \
-    X(InstanceNormalization) \
-    X(L2Normalization) \
-    X(LogicalBinary) \
-    X(LogSoftmax) \
-    X(Lstm) \
-    X(QLstm) \
-    X(Map) \
-    X(Maximum) \
-    X(Mean) \
-    X(MemCopy) \
-    X(MemImport) \
-    X(Merge) \
-    X(Minimum) \
-    X(Multiplication) \
-    X(Normalization) \
-    X(Output) \
-    X(Pad) \
-    X(Permute) \
-    X(Pooling2d) \
-    X(PreCompiled) \
-    X(Prelu) \
-    X(Quantize) \
-    X(QuantizedLstm) \
-    X(Reshape) \
-    X(Rank) \
-    X(Reduce) \
-    X(Resize) \
-    X(Slice) \
-    X(Softmax) \
-    X(SpaceToBatchNd) \
-    X(SpaceToDepth) \
-    X(Splitter) \
-    X(Stack) \
-    X(StandIn) \
-    X(StridedSlice) \
-    X(Subtraction) \
-    X(Switch) \
-    X(Transpose) \
-    X(TransposeConvolution2d) \
-    X(Unmap)
-
-/// When adding a new layer, adapt also the LastLayer enum value in the
-/// enum class LayerType below
 namespace armnn
 {
 
-enum class LayerType
-{
-#define X(name) name,
-  LIST_OF_LAYER_TYPE
-#undef X
-  FirstLayer = Activation,
-  LastLayer = Unmap
-};
-
-const char* GetLayerTypeAsCString(LayerType type);
-
 using Coordinates = std::array<unsigned int, MaxNumOfTensorDimensions>;
 using Dimensions  = std::array<unsigned int, MaxNumOfTensorDimensions>;
 
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index 9a526a0..c9733e8 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -473,4 +473,10 @@
     }
 }
 
+// Default implementation of ExecuteStrategy: forwards an empty descriptor and no constants.
+void Layer::ExecuteStrategy(IStrategy& strategy) const
+{
+    strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
+}
+
 } // namespace armnn
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index ef0f8c3..2f5cacc 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -214,6 +214,9 @@
     Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name);
     Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, DataLayout layout, const char* name);
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
+
     const std::string& GetNameStr() const
     {
         return m_LayerName;
@@ -259,7 +262,7 @@
     void ResetPriority() const;
     LayerPriority GetPriority() const;
 
-    LayerType GetType() const { return m_Type; }
+    LayerType GetType() const override { return m_Type; }
 
     DataType GetDataType() const;
 
@@ -440,6 +443,11 @@
 
     LayerBindingId GetBindingId() const { return m_Id; };
 
+    void ExecuteStrategy(IStrategy& strategy) const override
+    {
+        strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName(), GetBindingId());
+    }
+
 protected:
     ~BindableLayer() = default;
 
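Concrete layers that carry parameters are expected to override ExecuteStrategy
and forward their own descriptor instead of the empty BaseDescriptor used by
the default above; a hypothetical example following that pattern:

    void ActivationLayer::ExecuteStrategy(IStrategy& strategy) const
    {
        strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
    }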
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index f8b0675..bf7a056 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -2021,6 +2021,14 @@
     };
 }
 
+void Network::ExecuteStrategy(IStrategy& strategy) const
+{
+    for (auto layer : GetGraph())
+    {
+        layer->ExecuteStrategy(strategy);
+    }
+}
+
 OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
     : m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid())
 {
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 1205bd8..cffade5 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -258,6 +258,8 @@
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
 private:
     IConnectableLayer* AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
diff --git a/src/armnn/NetworkQuantizer.cpp b/src/armnn/NetworkQuantizer.cpp
index e6becee..eed3f41 100644
--- a/src/armnn/NetworkQuantizer.cpp
+++ b/src/armnn/NetworkQuantizer.cpp
@@ -8,9 +8,9 @@
 #include "Graph.hpp"
 #include "Layer.hpp"
 #include "Network.hpp"
-#include "DynamicQuantizationVisitor.hpp"
-#include "StaticRangeVisitor.hpp"
-#include "QuantizerVisitor.hpp"
+#include "DynamicQuantizationStrategy.hpp"
+#include "StaticRangeStrategy.hpp"
+#include "QuantizerStrategy.hpp"
 #include "OverrideInputRangeVisitor.hpp"
 
 #include <TensorIOUtils.hpp>
@@ -60,9 +60,9 @@
 
 void NetworkQuantizer::Refine(const InputTensors& inputTensors)
 {
-    // The first time Refine is called the m_Runtime and the DynamicQuantizationVisitor
+    // The first time Refine is called the m_Runtime and the DynamicQuantizationStrategy
     // will not have been created. Need to get the environment set up, Runtime loaded,
-    // DynamicQuantizationVisitor created and run over the network to initialise itself
+    // DynamicQuantizationStrategy created and run over the network to initialise itself
     // and the RangeTracker the Debug callback registered and an initial inference
     // done to set up the first min/max values
     if (!m_Runtime)
@@ -71,15 +71,15 @@
         m_Ranges.SetDynamicMode(true);
         const Graph& cGraph = PolymorphicDowncast<const Network*>(m_InputNetwork)->GetGraph().TopologicalSort();
 
-        // need to insert Debug layers in the DynamicQuantizationVisitor
+        // need to insert Debug layers in the DynamicQuantizationStrategy
         Graph& graph = const_cast<Graph&>(cGraph);
 
         // Initialize RangeTracker to the default values for each layer.
         // The default values are overwritten by the min/max that is
         // recorded during the first dataset min/max calibration. This
         // initialisation is only required for the first call of Refine().
-        m_DynamicQuantizationVisitor = DynamicQuantizationVisitor(m_Ranges, graph);
-        VisitLayers(cGraph, m_DynamicQuantizationVisitor.value());
+        m_DynamicQuantizationStrategy = DynamicQuantizationStrategy(m_Ranges, graph);
+        ApplyStrategyToLayers(cGraph, m_DynamicQuantizationStrategy.value());
 
         IRuntime::CreationOptions options;
         m_Runtime = IRuntime::Create(options);
@@ -119,7 +119,7 @@
 
     // Create output tensor for EnqueueWorkload
     std::vector<armnn::BindingPointInfo> outputBindings;
-    auto outputLayers = m_DynamicQuantizationVisitor.value().GetOutputLayers();
+    auto outputLayers = m_DynamicQuantizationStrategy.value().GetOutputLayers();
     std::vector<TContainer> outputVectors;
     for (auto outputLayerBindingId : outputLayers)
     {
@@ -144,16 +144,16 @@
     if (!m_Runtime)
     {
         m_Ranges.SetDynamicMode(false);
-        StaticRangeVisitor rangeVisitor(m_Ranges);
-        VisitLayers(graph, rangeVisitor);
+        StaticRangeStrategy rangeStrategy(m_Ranges);
+        ApplyStrategyToLayers(graph, rangeStrategy);
     }
     else
     {
         // Set min/max range of non-calibrated layers to parent layer's range
-        m_DynamicQuantizationVisitor.value().VisitNonCalibratedLayers();
+        m_DynamicQuantizationStrategy.value().VisitNonCalibratedLayers();
-        // now tear down the runtime and the dynamic visitor.
+        // Now tear down the runtime and the dynamic strategy.
         m_Runtime.reset(nullptr);
-        m_DynamicQuantizationVisitor = EmptyOptional();
+        m_DynamicQuantizationStrategy = EmptyOptional();
         m_RefineCount = 0;
     }
 
@@ -177,8 +177,8 @@
             throw InvalidArgumentException("Unsupported quantization target");
     }
 
-    QuantizerVisitor quantizerVisitor(m_Ranges, quantizationScheme.get(), m_Options.m_PreserveType);
-    VisitLayers(graph, quantizerVisitor);
+    QuantizerStrategy quantizerStrategy(m_Ranges, quantizationScheme.get(), m_Options.m_PreserveType);
+    ApplyStrategyToLayers(graph, quantizerStrategy);
 
     // clear the ranges
     m_Ranges.Reset();
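For orientation, the refactored flow of this file is unchanged from the visitor version: Refine() runs calibration inferences through the DynamicQuantizationStrategy to collect min/max ranges, and ExportNetwork() replays them through the QuantizerStrategy. A hedged end-to-end sketch via the public API, assuming INetworkQuantizer::Create/Refine/ExportNetwork from armnnQuantizer/INetworkQuantizer.hpp:

#include <armnnQuantizer/INetworkQuantizer.hpp>

armnn::INetworkPtr QuantizeNetwork(armnn::INetwork* network,
                                   const std::vector<armnn::InputTensors>& calibrationBatches)
{
    armnn::INetworkQuantizerPtr quantizer = armnn::INetworkQuantizer::Create(network);

    // Each Refine() call runs one inference and widens the tracked ranges.
    for (const auto& batch : calibrationBatches)
    {
        quantizer->Refine(batch);
    }

    // ExportNetwork() applies the QuantizerStrategy over the tracked ranges.
    return quantizer->ExportNetwork();
}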
diff --git a/src/armnn/NetworkQuantizer.hpp b/src/armnn/NetworkQuantizer.hpp
index d384bdc..a07ac88 100644
--- a/src/armnn/NetworkQuantizer.hpp
+++ b/src/armnn/NetworkQuantizer.hpp
@@ -11,7 +11,7 @@
 #include <armnn/Types.hpp>
 #include <armnn/Optional.hpp>
 
-#include "DynamicQuantizationVisitor.hpp"
+#include "DynamicQuantizationStrategy.hpp"
 #include "RangeTracker.hpp"
 
 namespace armnn
@@ -44,7 +44,7 @@
     // the runtime between invocations of the Refine method.
     IRuntimePtr m_Runtime;
 
-    Optional<DynamicQuantizationVisitor> m_DynamicQuantizationVisitor;
+    Optional<DynamicQuantizationStrategy> m_DynamicQuantizationStrategy;
 
     // counts the number of times refine is called
     unsigned int m_RefineCount;
diff --git a/src/armnn/NetworkQuantizerUtils.hpp b/src/armnn/NetworkQuantizerUtils.hpp
index dd274f9..5497e1b 100644
--- a/src/armnn/NetworkQuantizerUtils.hpp
+++ b/src/armnn/NetworkQuantizerUtils.hpp
@@ -10,6 +10,7 @@
 #include <armnn/Tensor.hpp>
 #include <armnn/TypesUtils.hpp>
 #include <armnn/ILayerVisitor.hpp>
+#include <armnn/IStrategy.hpp>
 #include <armnn/utility/Assert.hpp>
 
 #include <utility>
@@ -56,4 +57,14 @@
     visitor.FinishVisit();
 }
 
+template <typename LayerContainer>
+void ApplyStrategyToLayers(const LayerContainer& layerContainer, IStrategy& strategy)
+{
+    for (auto layer : layerContainer)
+    {
+        layer->ExecuteStrategy(strategy);
+    }
+    strategy.FinishStrategy();
+}
+
 } // namespace armnn
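ApplyStrategyToLayers mirrors the VisitLayers helper above it: it dispatches every layer in the container, then fires FinishStrategy() exactly once at the end. For example, with the hypothetical LayerNameLogger sketched earlier:

template <typename LayerContainer>
void LogAllLayers(const LayerContainer& layers)
{
    LayerNameLogger logger;
    armnn::ApplyStrategyToLayers(layers, logger);  // per-layer dispatch, then FinishStrategy()
}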
diff --git a/src/armnn/OverrideInputRangeVisitor.hpp b/src/armnn/OverrideInputRangeVisitor.hpp
index 511c851..196a3aa 100644
--- a/src/armnn/OverrideInputRangeVisitor.hpp
+++ b/src/armnn/OverrideInputRangeVisitor.hpp
@@ -13,6 +13,56 @@
 
 namespace armnn
 {
+class OverrideInputRangeStrategy : public IStrategy
+{
+private:
+    using MinMaxRange  = RangeTracker::MinMaxRange;
+public:
+    OverrideInputRangeStrategy(RangeTracker& ranges,
+                               LayerBindingId layerId,
+                               const MinMaxRange& minMaxRange)
+        : m_Ranges(ranges)
+        , m_LayerId(layerId)
+        , m_MinMaxRange(minMaxRange) {}
+
+    ~OverrideInputRangeStrategy() = default;
+
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id) override
+    {
+        IgnoreUnused(name, constants, descriptor);
+
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Input :
+            {
+                if (m_LayerId == id)
+                {
+                    m_Ranges.SetRange(layer, 0, m_MinMaxRange.first, m_MinMaxRange.second);
+                }
+                break;
+            }
+            default:
+            {
+                break; // only Input layers are relevant when overriding an input range
+            }
+        }
+    }
+
+private:
+    /// Mapping from a layer Guid to an array of ranges for outputs
+    RangeTracker& m_Ranges;
+
+    /// The id of the input layer of which to override the input range
+    LayerBindingId m_LayerId;
+
+    /// The new input range to be applied to the input layer
+    MinMaxRange m_MinMaxRange;
+};
+
 
 /// Visitor object for overriding the input range of the quantized input layers in a network
 class OverrideInputRangeVisitor : public LayerVisitorBase<VisitorNoThrowPolicy>
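OverrideInputRangeStrategy is the strategy counterpart of the visitor below: it pins the tracked min/max of one input layer, matched by binding id, and ignores every other layer. A usage sketch (the binding id and range are illustrative values only):

void OverrideRange(const armnn::Graph& graph, armnn::RangeTracker& ranges)
{
    // Pin input layer 0 to the range [-1, 1] before quantization.
    armnn::OverrideInputRangeStrategy overrideStrategy(ranges, 0, { -1.0f, 1.0f });
    armnn::ApplyStrategyToLayers(graph, overrideStrategy);
}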
diff --git a/src/armnn/QuantizerStrategy.cpp b/src/armnn/QuantizerStrategy.cpp
new file mode 100644
index 0000000..df20749
--- /dev/null
+++ b/src/armnn/QuantizerStrategy.cpp
@@ -0,0 +1,519 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "QuantizerStrategy.hpp"
+#include "armnn/utility/PolymorphicDowncast.hpp"
+
+namespace armnn
+{
+
+QuantizerStrategy::QuantizerStrategy(const RangeTracker& rangeTracker,
+                                     const IQuantizationScheme* quantizationScheme,
+                                     bool preserveType)
+        : m_Ranges(rangeTracker)
+        , m_QuantizedNetwork(INetwork::Create())
+        , m_QuantizationScheme(quantizationScheme)
+        , m_PreserveType(preserveType)
+{
+}
+
+void QuantizerStrategy::SetQuantizedInputConnections(const IConnectableLayer* srcLayer,
+                                                     IConnectableLayer* quantizedLayer)
+{
+    ARMNN_ASSERT(srcLayer);
+    for (unsigned int i = 0; i < srcLayer->GetNumInputSlots(); i++)
+    {
+        const IInputSlot& srcInputSlot = srcLayer->GetInputSlot(i);
+        const InputSlot* inputSlot = static_cast<const InputSlot*>(&srcInputSlot);
+        ARMNN_ASSERT(inputSlot);
+        const OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot();
+
+        ARMNN_ASSERT(outputSlot);
+        unsigned int slotIdx = outputSlot->CalculateIndexOnOwner();
+        Layer& layerToFind = outputSlot->GetOwningLayer();
+
+        auto found = m_OriginalToQuantizedGuidMap.find(layerToFind.GetGuid());
+        if (found == m_OriginalToQuantizedGuidMap.end())
+        {
+            // Error in graph traversal order
+            ARMNN_ASSERT_MSG(false, "Error in graph traversal");
+            return;
+        }
+
+        // Connect the slots in the quantized model
+        IConnectableLayer* prevQuantizedLayer = m_QuantizedGuidToLayerMap[found->second];
+        IInputSlot& newInputSlot = quantizedLayer->GetInputSlot(i);
+        IOutputSlot& newOutputSlot = prevQuantizedLayer->GetOutputSlot(slotIdx);
+        newOutputSlot.Connect(newInputSlot);
+        TensorInfo info(outputSlot->GetTensorInfo());
+
+        // Only try to set quantization params on tensors that can be quantized
+        if (inputSlot->GetConnectedOutputSlot()->GetTensorInfo().GetDataType() != DataType::Boolean &&
+            inputSlot->GetConnectedOutputSlot()->GetTensorInfo().GetDataType() != DataType::Signed32 &&
+            inputSlot->GetConnectedOutputSlot()->GetTensorInfo().GetDataType() != DataType::Signed64)
+        {
+            // Fetch the min/max ranges that were computed earlier
+            auto range = m_Ranges.GetRange(layerToFind.GetGuid(), slotIdx);
+            OffsetScalePair qParams = m_QuantizationScheme->ComputeScheme(range.first, range.second);
+            info.SetDataType(m_QuantizationScheme->GetDataType());
+            info.SetQuantizationOffset(qParams.second);
+            info.SetQuantizationScale(qParams.first);
+        }
+        newOutputSlot.SetTensorInfo(info);
+    }
+}
+
+ConstTensor QuantizerStrategy::CreateQuantizedBias(const IConnectableLayer* srcLayer,
+                                                   const ConstTensor& weights,
+                                                   const Optional<ConstTensor>& biases,
+                                                   std::vector<int32_t>& backing)
+{
+    ARMNN_ASSERT(srcLayer);
+    const IInputSlot& srcInputSlot = srcLayer->GetInputSlot(0);
+    auto inputSlot = static_cast<const InputSlot*>(&srcInputSlot);
+    ARMNN_ASSERT(inputSlot);
+    const OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot();
+
+    ARMNN_ASSERT(outputSlot);
+    unsigned int slotIdx = outputSlot->CalculateIndexOnOwner();
+    Layer& layerToFind = outputSlot->GetOwningLayer();
+
+    auto found = m_OriginalToQuantizedGuidMap.find(layerToFind.GetGuid());
+    if (found == m_OriginalToQuantizedGuidMap.end())
+    {
+        // Error in graph traversal order
+        ARMNN_ASSERT_MSG(false, "Error in graph traversal");
+        return biases.value();
+    }
+
+    // Fetch the min/max ranges that were computed earlier
+    auto range = m_Ranges.GetRange(layerToFind.GetGuid(), slotIdx);
+    OffsetScalePair qParams = m_QuantizationScheme->ComputeScheme(range.first, range.second);
+
+    // Get the quantization scale based on input and weight scale
+    float scale = qParams.first * weights.GetInfo().GetQuantizationScale();
+
+    // Set up quantized bias tensor info and allocate space
+    TensorInfo qInfo(biases.value().GetInfo().GetShape(), DataType::Signed32, scale, 0);
+    backing.resize(biases.value().GetInfo().GetNumElements());
+
+    // Convert values to int32
+    for (size_t i = 0; i < backing.size(); ++i)
+    {
+        float fp32Value = static_cast<const float*>(biases.value().GetMemoryArea())[i];
+        backing[i] = armnn::numeric_cast<int32_t>(fp32Value * ( 1 / scale ));
+    }
+
+    return ConstTensor(qInfo, backing);
+}
+
+void QuantizerStrategy::RecordLayer(const IConnectableLayer* srcLayer, IConnectableLayer* quantizedLayer)
+{
+    m_OriginalToQuantizedGuidMap.insert(std::make_pair(srcLayer->GetGuid(), quantizedLayer->GetGuid()));
+    m_QuantizedGuidToLayerMap.insert(std::make_pair(quantizedLayer->GetGuid(), quantizedLayer));
+}
+
+void QuantizerStrategy::ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                                        const BaseDescriptor& descriptor,
+                                        const std::vector<armnn::ConstTensor>& constants,
+                                        const char* name,
+                                        const armnn::LayerBindingId id)
+{
+    IgnoreUnused(id);
+
+    IConnectableLayer* newLayer;
+
+    switch (layer->GetType())
+    {
+        case armnn::LayerType::Addition :
+        {
+            newLayer = m_QuantizedNetwork->AddAdditionLayer(name);
+            break;
+        }
+        case armnn::LayerType::Activation :
+        {
+            const ActivationDescriptor& activationDescriptor = static_cast<const ActivationDescriptor&>(descriptor);
+            newLayer = m_QuantizedNetwork->AddActivationLayer(activationDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::ArgMinMax :
+        {
+            ArgMinMaxDescriptor argMinMaxDescriptor = static_cast<const ArgMinMaxDescriptor&>(descriptor);
+            newLayer = m_QuantizedNetwork->AddArgMinMaxLayer(argMinMaxDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::BatchNormalization :
+        {
+
+            BatchNormalizationDescriptor batchNormalizationDescriptor =
+                    static_cast<const BatchNormalizationDescriptor&>(descriptor);
+            std::vector<uint8_t> meanBacking;
+            ConstTensor qMean = CreateQuantizedConst(constants[0], meanBacking);
+
+            std::vector<uint8_t> varianceBacking;
+            ConstTensor qVariance = CreateQuantizedConst(constants[1], varianceBacking);
+
+            std::vector<uint8_t> betaBacking;
+            ConstTensor qBeta = CreateQuantizedConst(constants[2], betaBacking);
+
+            std::vector<uint8_t> gammaBacking;
+            ConstTensor qGamma = CreateQuantizedConst(constants[3], gammaBacking);
+
+            newLayer = m_QuantizedNetwork->AddBatchNormalizationLayer(batchNormalizationDescriptor,
+                                                                      qMean,
+                                                                      qVariance,
+                                                                      qBeta,
+                                                                      qGamma,
+                                                                      name);
+            break;
+        }
+        case armnn::LayerType::BatchToSpaceNd :
+        {
+            BatchToSpaceNdDescriptor batchToSpaceNdDescriptor =
+                    static_cast<const BatchToSpaceNdDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddBatchToSpaceNdLayer(batchToSpaceNdDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Comparison :
+        {
+            ComparisonDescriptor comparisonDescriptor = static_cast<const ComparisonDescriptor&>(descriptor);
+            newLayer = m_QuantizedNetwork->AddComparisonLayer(comparisonDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Concat :
+        {
+            OriginsDescriptor originsDescriptor = static_cast<const OriginsDescriptor&>(descriptor);
+            newLayer = m_QuantizedNetwork->AddConcatLayer(originsDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Constant :
+        {
+            std::vector<uint8_t> inputBacking;
+            ConstTensor qInput = CreateQuantizedConst(constants[0], inputBacking);
+
+            newLayer = m_QuantizedNetwork->AddConstantLayer(qInput, name);
+            break;
+        }
+        case armnn::LayerType::Convolution2d :
+        {
+            const armnn::Optional<ConstTensor> biases = constants.size() == 1 ?
+                    armnn::Optional<ConstTensor>{} :
+                    armnn::Optional<ConstTensor>(constants[1]);
+
+            std::vector<uint8_t> weightsBacking;
+            ConstTensor qWeights = CreateQuantizedConst(constants[0], weightsBacking);
+            Optional<ConstTensor> optionalQBiases;
+            std::vector<int32_t> biasesBacking;
+
+            if (biases.has_value())
+            {
+                ConstTensor qBiases = CreateQuantizedBias(layer, qWeights, biases, biasesBacking);
+                optionalQBiases = Optional<ConstTensor>(qBiases);
+            }
+            Convolution2dDescriptor convolution2dDescriptor = static_cast<const Convolution2dDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddConvolution2dLayer(convolution2dDescriptor,
+                                                                 qWeights,
+                                                                 optionalQBiases,
+                                                                 name);
+            break;
+        }
+        case armnn::LayerType::DepthToSpace :
+        {
+            DepthToSpaceDescriptor depthToSpaceDescriptor = static_cast<const DepthToSpaceDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddDepthToSpaceLayer(depthToSpaceDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::DepthwiseConvolution2d :
+        {
+            DepthwiseConvolution2dDescriptor depthwiseConvolution2dDescriptor =
+                    static_cast<const DepthwiseConvolution2dDescriptor&>(descriptor);
+
+            const armnn::Optional<ConstTensor> biases = constants.size() == 1 ?
+                                                        armnn::Optional<ConstTensor>{} :
+                                                        armnn::Optional<ConstTensor>(constants[1]);
+
+            std::vector<uint8_t> weightsBacking;
+            ConstTensor qWeights = CreateQuantizedConst(constants[0], weightsBacking);
+            Optional<ConstTensor> optionalQBiases;
+            std::vector<int32_t> biasesBacking;
+
+            if (biases.has_value())
+            {
+                ConstTensor qBiases = CreateQuantizedBias(layer, qWeights, biases, biasesBacking);
+                optionalQBiases = Optional<ConstTensor>(qBiases);
+            }
+
+            newLayer = m_QuantizedNetwork->AddDepthwiseConvolution2dLayer(
+                    depthwiseConvolution2dDescriptor,
+                    qWeights,
+                    optionalQBiases,
+                    name);
+            break;
+        }
+        case armnn::LayerType::ElementwiseUnary :
+        {
+            ElementwiseUnaryDescriptor elementwiseUnaryDescriptor =
+                    static_cast<const ElementwiseUnaryDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddElementwiseUnaryLayer(elementwiseUnaryDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Fill :
+        {
+            FillDescriptor fillDescriptor = static_cast<const FillDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddFillLayer(fillDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::FullyConnected :
+        {
+            FullyConnectedDescriptor fullyConnectedDescriptor =
+                    static_cast<const FullyConnectedDescriptor&>(descriptor);
+
+            const armnn::Optional<ConstTensor> biases = constants.size() == 1 ?
+                                                        armnn::Optional<ConstTensor>{} :
+                                                        armnn::Optional<ConstTensor>(constants[1]);
+
+            std::vector<uint8_t> weightsBacking;
+            ConstTensor qWeights = CreateQuantizedConst(constants[0], weightsBacking);
+            Optional<ConstTensor> optionalQBiases;
+            std::vector<int32_t> biasesBacking;
+
+            if (biases.has_value())
+            {
+                ConstTensor qBiases = CreateQuantizedBias(layer, qWeights, biases, biasesBacking);
+                optionalQBiases = Optional<ConstTensor>(qBiases);
+            }
+
+            newLayer = m_QuantizedNetwork->AddFullyConnectedLayer(fullyConnectedDescriptor,
+                                                                  qWeights,
+                                                                  optionalQBiases,
+                                                                  name);
+            break;
+        }
+        case armnn::LayerType::Input :
+        {
+            const DataType dataType = layer->GetOutputSlot(0).GetTensorInfo().GetDataType();
+            IConnectableLayer* inputLayer = m_QuantizedNetwork->AddInputLayer(id, name);
+
+            if (m_PreserveType && (dataType == DataType::Float32 || dataType == DataType::Float16))
+            {
+                IConnectableLayer* quantizeLayer = m_QuantizedNetwork->AddQuantizeLayer();
+                inputLayer->GetOutputSlot(0).Connect(quantizeLayer->GetInputSlot(0));
+                inputLayer->GetOutputSlot(0).SetTensorInfo(layer->GetOutputSlot(0).GetTensorInfo());
+                RecordLayer(layer, quantizeLayer);
+                return;
+            }
+            else
+            {
+                RecordLayer(layer, inputLayer);
+                return;
+            }
+        }
+        case armnn::LayerType::InstanceNormalization :
+        {
+            InstanceNormalizationDescriptor instanceNormalizationDescriptor =
+                    static_cast<const InstanceNormalizationDescriptor&>(descriptor);
+
+            newLayer =
+                    m_QuantizedNetwork->AddInstanceNormalizationLayer(instanceNormalizationDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::LogSoftmax :
+        {
+            LogSoftmaxDescriptor logSoftmaxDescriptor = static_cast<const LogSoftmaxDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddLogSoftmaxLayer(logSoftmaxDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Mean :
+        {
+            MeanDescriptor meanDescriptor = static_cast<const MeanDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddMeanLayer(meanDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Multiplication :
+        {
+            newLayer = m_QuantizedNetwork->AddMultiplicationLayer(name);
+            break;
+        }
+        case armnn::LayerType::Normalization :
+        {
+            NormalizationDescriptor normalizationDescriptor = static_cast<const NormalizationDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddNormalizationLayer(normalizationDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Output :
+        {
+            const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const DataType& dataType = info.GetDataType();
+            newLayer = m_QuantizedNetwork->AddOutputLayer(id, name);
+
+            if (m_PreserveType  && (dataType == DataType::Float32 || dataType == DataType::Float16))
+            {
+                IConnectableLayer* dequantizeLayer = m_QuantizedNetwork->AddDequantizeLayer();
+                RecordLayer(layer, dequantizeLayer);
+                SetQuantizedInputConnections(layer, dequantizeLayer);
+                dequantizeLayer->GetOutputSlot(0).Connect(newLayer->GetInputSlot(0));
+                dequantizeLayer->GetOutputSlot(0).SetTensorInfo(info);
+                return;
+            }
+            else
+            {
+                break;
+            }
+        }
+        case armnn::LayerType::Pad :
+        {
+            PadDescriptor padDescriptor = static_cast<const PadDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddPadLayer(padDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Permute :
+        {
+            PermuteDescriptor permuteDescriptor = static_cast<const PermuteDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddPermuteLayer(permuteDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Pooling2d :
+        {
+            Pooling2dDescriptor pooling2dDescriptor = static_cast<const Pooling2dDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddPooling2dLayer(pooling2dDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Prelu :
+        {
+            newLayer = m_QuantizedNetwork->AddPreluLayer(name);
+            break;
+        }
+        case armnn::LayerType::Reshape :
+        {
+            ReshapeDescriptor reshapeDescriptor = static_cast<const ReshapeDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddReshapeLayer(reshapeDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Resize :
+        {
+
+            ResizeBilinearDescriptor resizeBilinearDescriptor =
+                    static_cast<const ResizeBilinearDescriptor&>(descriptor);
+
+            ResizeDescriptor resizeDescriptor;
+            resizeDescriptor.m_Method       = ResizeMethod::Bilinear;
+            resizeDescriptor.m_TargetWidth  = resizeBilinearDescriptor.m_TargetWidth;
+            resizeDescriptor.m_TargetHeight = resizeBilinearDescriptor.m_TargetHeight;
+            resizeDescriptor.m_DataLayout   = resizeBilinearDescriptor.m_DataLayout;
+
+            newLayer = m_QuantizedNetwork->AddResizeLayer(resizeDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Slice :
+        {
+            SliceDescriptor sliceDescriptor = static_cast<const SliceDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddSliceLayer(sliceDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Softmax :
+        {
+            SoftmaxDescriptor softmaxDescriptor = static_cast<const SoftmaxDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddSoftmaxLayer(softmaxDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::SpaceToBatchNd :
+        {
+            SpaceToBatchNdDescriptor spaceToBatchNdDescriptor =
+                    static_cast<const SpaceToBatchNdDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddSpaceToBatchNdLayer(spaceToBatchNdDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::SpaceToDepth :
+        {
+            SpaceToDepthDescriptor spaceToDepthDescriptor = static_cast<const SpaceToDepthDescriptor&>(descriptor);
+            newLayer = m_QuantizedNetwork->AddSpaceToDepthLayer(spaceToDepthDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Splitter :
+        {
+            SplitterDescriptor splitterDescriptor = static_cast<const SplitterDescriptor&>(descriptor);
+            newLayer = m_QuantizedNetwork->AddSplitterLayer(splitterDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Stack :
+        {
+            StackDescriptor stackDescriptor = static_cast<const StackDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddStackLayer(stackDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::StridedSlice :
+        {
+            StridedSliceDescriptor stridedSliceDescriptor = static_cast<const StridedSliceDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddStridedSliceLayer(stridedSliceDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Subtraction :
+        {
+            newLayer = m_QuantizedNetwork->AddSubtractionLayer(name);
+            break;
+        }
+        case armnn::LayerType::TransposeConvolution2d :
+        {
+
+            const armnn::Optional<ConstTensor> biases = constants.size() == 1 ?
+                                                        armnn::Optional<ConstTensor>{} :
+                                                        armnn::Optional<ConstTensor>(constants[1]);
+            // quantize weights
+            std::vector<uint8_t> weightsBacking;
+            ConstTensor qWeights = CreateQuantizedConst(constants[0], weightsBacking);
+
+            // quantize biases
+            std::vector<int32_t> biasesBacking;
+            Optional<ConstTensor> optionalQBiases;
+            if (biases.has_value())
+            {
+                ConstTensor qBiases = CreateQuantizedBias(layer, qWeights, biases, biasesBacking);
+                optionalQBiases = Optional<ConstTensor>(qBiases);
+            }
+
+            TransposeConvolution2dDescriptor transposeConvolution2dDescriptor =
+                    static_cast<const TransposeConvolution2dDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddTransposeConvolution2dLayer(transposeConvolution2dDescriptor,
+                                                                          qWeights,
+                                                                          optionalQBiases,
+                                                                          name);
+            break;
+        }
+        case armnn::LayerType::Transpose :
+        {
+            TransposeDescriptor transposeDescriptor = static_cast<const TransposeDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddTransposeLayer(transposeDescriptor, name);
+            break;
+        }
+        default:
+        {
+            throw UnimplementedException("Unimplemented layer encountered");
+        }
+    }
+    RecordLayer(layer, newLayer);
+    SetQuantizedInputConnections(layer, newLayer);
+}
+
+} // namespace armnn
+
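CreateQuantizedBias above encodes the standard integer-convolution bias rule: bias scale = input scale * weight scale, offset = 0, and each FP32 value is rescaled into Signed32 (the source's numeric_cast adds range checking on top of the plain cast shown here). A small numeric illustration with made-up values:

float inputScale  = 0.5f;   // from the tracked range of the preceding output slot
float weightScale = 0.02f;  // from the already-quantized weight tensor
float biasScale   = inputScale * weightScale;                         // 0.01f

float   fp32Bias = 0.25f;
int32_t qBias    = static_cast<int32_t>(fp32Bias * (1 / biasScale));  // 25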
diff --git a/src/armnn/QuantizerStrategy.hpp b/src/armnn/QuantizerStrategy.hpp
new file mode 100644
index 0000000..f782959
--- /dev/null
+++ b/src/armnn/QuantizerStrategy.hpp
@@ -0,0 +1,63 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Network.hpp"
+#include "NetworkQuantizerUtils.hpp"
+#include "StaticRangeStrategy.hpp"
+
+#include <armnn/utility/NumericCast.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+namespace armnn
+{
+class QuantizerStrategy : public IStrategy
+{
+public:
+    QuantizerStrategy(const RangeTracker& rangeTracker,
+                      const IQuantizationScheme* quantizationScheme,
+                      bool preserveType);
+
+    ~QuantizerStrategy() = default;
+
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id) override;
+
+    /// Extract the quantized network
+    INetworkPtr RetrieveFinalNetwork() { return std::move(m_QuantizedNetwork); }
+
+private:
+    /// Connects the layer to preceding layers and sets the quantization parameters based on recorded ranges
+    void SetQuantizedInputConnections(const IConnectableLayer* srcLayer, IConnectableLayer* quantizedLayer);
+
+    /// Record the guids so we can easily find the layers later
+    void RecordLayer(const IConnectableLayer* srcLayer, IConnectableLayer* qLayer);
+
+    /// Sets the bias quantization scale based on input and weight scales
+    ConstTensor CreateQuantizedBias(const IConnectableLayer* srcLayer,
+                                    const ConstTensor& weights,
+                                    const Optional<ConstTensor>& biases,
+                                    std::vector<int32_t>& biasesBacking);
+
+    /// Reference to the static range visitor used to retrieve the quantization ranges
+    const RangeTracker& m_Ranges;
+
+    /// Quantized version of the model we are building up
+    INetworkPtr m_QuantizedNetwork;
+
+    /// Mapping from input network guids to quantized network guids
+    std::unordered_map<LayerGuid, LayerGuid> m_OriginalToQuantizedGuidMap;
+
+    /// Mapping from guid to layer in quantized network
+    std::unordered_map<LayerGuid, IConnectableLayer*> m_QuantizedGuidToLayerMap;
+
+    const IQuantizationScheme* m_QuantizationScheme;
+
+    const bool m_PreserveType;
+};
+
+} //namespace armnn
\ No newline at end of file
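Together with StaticRangeStrategy, this class preserves the old two-pass flow: pass one records ranges, pass two rebuilds the network with quantized tensors (and, when preserveType is set, inserts Quantize/Dequantize layers so float inputs and outputs survive). A sketch of that internal sequence, mirroring NetworkQuantizer::ExportNetwork and assuming the QAsymmU8QuantizationScheme from NetworkQuantizationScheme.hpp; the graph must be topologically sorted, since QuantizerStrategy expects parents to be processed first:

armnn::INetworkPtr QuantizeGraph(const armnn::Graph& graph)  // graph: topologically sorted
{
    armnn::RangeTracker ranges;

    // Pass 1: record a static min/max estimate for every output slot.
    armnn::StaticRangeStrategy rangeStrategy(ranges);
    armnn::ApplyStrategyToLayers(graph, rangeStrategy);

    // Pass 2: rebuild each layer with quantized weights and activations.
    armnn::QAsymmU8QuantizationScheme scheme;
    armnn::QuantizerStrategy quantizerStrategy(ranges, &scheme, /*preserveType=*/false);
    armnn::ApplyStrategyToLayers(graph, quantizerStrategy);

    return quantizerStrategy.RetrieveFinalNetwork();
}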
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
deleted file mode 100644
index 0e9d224..0000000
--- a/src/armnn/QuantizerVisitor.cpp
+++ /dev/null
@@ -1,589 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "Network.hpp"
-#include "NetworkQuantizerUtils.hpp"
-#include "QuantizerVisitor.hpp"
-#include "StaticRangeVisitor.hpp"
-
-#include <armnn/utility/NumericCast.hpp>
-#include <armnn/utility/PolymorphicDowncast.hpp>
-
-namespace armnn
-{
-
-QuantizerVisitor::QuantizerVisitor(const RangeTracker& rangeTracker,
-                                   const IQuantizationScheme* quantizationScheme,
-                                   bool preserveType)
-    : m_Ranges(rangeTracker)
-    , m_QuantizedNetwork(INetwork::Create())
-    , m_QuantizationScheme(quantizationScheme)
-    , m_PreserveType(preserveType)
-{
-}
-
-void QuantizerVisitor::SetQuantizedInputConnections(const IConnectableLayer* srcLayer,
-                                                    IConnectableLayer* quantizedLayer)
-{
-    ARMNN_ASSERT(srcLayer);
-    for (unsigned int i = 0; i < srcLayer->GetNumInputSlots(); i++)
-    {
-        const IInputSlot& srcInputSlot = srcLayer->GetInputSlot(i);
-        const InputSlot* inputSlot = PolymorphicDowncast<const InputSlot*>(&srcInputSlot);
-        ARMNN_ASSERT(inputSlot);
-        const OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot();
-
-        ARMNN_ASSERT(outputSlot);
-        unsigned int slotIdx = outputSlot->CalculateIndexOnOwner();
-        Layer& layerToFind = outputSlot->GetOwningLayer();
-
-        auto found = m_OriginalToQuantizedGuidMap.find(layerToFind.GetGuid());
-        if (found == m_OriginalToQuantizedGuidMap.end())
-        {
-            // Error in graph traversal order
-            ARMNN_ASSERT_MSG(false, "Error in graph traversal");
-            return;
-        }
-
-        // Connect the slots in the quantized model
-        IConnectableLayer* prevQuantizedLayer = m_QuantizedGuidToLayerMap[found->second];
-        IInputSlot& newInputSlot = quantizedLayer->GetInputSlot(i);
-        IOutputSlot& newOutputSlot = prevQuantizedLayer->GetOutputSlot(slotIdx);
-        newOutputSlot.Connect(newInputSlot);
-        TensorInfo info(outputSlot->GetTensorInfo());
-
-        // Only try to set quantization params on tensors that can be quantized
-        if (inputSlot->GetConnectedOutputSlot()->GetTensorInfo().GetDataType() != DataType::Boolean &&
-            inputSlot->GetConnectedOutputSlot()->GetTensorInfo().GetDataType() != DataType::Signed32 &&
-            inputSlot->GetConnectedOutputSlot()->GetTensorInfo().GetDataType() != DataType::Signed64)
-        {
-            // Fetch the min/max ranges that were computed earlier
-            auto range = m_Ranges.GetRange(layerToFind.GetGuid(), slotIdx);
-            OffsetScalePair qParams = m_QuantizationScheme->ComputeScheme(range.first, range.second);
-            info.SetDataType(m_QuantizationScheme->GetDataType());
-            info.SetQuantizationOffset(qParams.second);
-            info.SetQuantizationScale(qParams.first);
-        }
-        newOutputSlot.SetTensorInfo(info);
-    }
-}
-
-ConstTensor QuantizerVisitor::CreateQuantizedBias(const IConnectableLayer* srcLayer,
-                                                  const ConstTensor& weights,
-                                                  const Optional<ConstTensor>& biases,
-                                                  std::vector<int32_t>& backing)
-{
-    ARMNN_ASSERT(srcLayer);
-    const IInputSlot& srcInputSlot = srcLayer->GetInputSlot(0);
-    auto inputSlot = PolymorphicDowncast<const InputSlot*>(&srcInputSlot);
-    ARMNN_ASSERT(inputSlot);
-    const OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot();
-
-    ARMNN_ASSERT(outputSlot);
-    unsigned int slotIdx = outputSlot->CalculateIndexOnOwner();
-    Layer& layerToFind = outputSlot->GetOwningLayer();
-
-    auto found = m_OriginalToQuantizedGuidMap.find(layerToFind.GetGuid());
-    if (found == m_OriginalToQuantizedGuidMap.end())
-    {
-        // Error in graph traversal order
-        ARMNN_ASSERT_MSG(false, "Error in graph traversal");
-        return biases.value();
-    }
-
-    // Fetch the min/max ranges that were computed earlier
-    auto range = m_Ranges.GetRange(layerToFind.GetGuid(), slotIdx);
-    OffsetScalePair qParams = m_QuantizationScheme->ComputeScheme(range.first, range.second);
-
-    // Get the quantization scale based on input and weight scale
-    float scale = qParams.first * weights.GetInfo().GetQuantizationScale();
-
-    // Set up quantized bias tensor info and allocate space
-    TensorInfo qInfo(biases.value().GetInfo().GetShape(), DataType::Signed32, scale, 0);
-    backing.resize(biases.value().GetInfo().GetNumElements());
-
-    // Convert values to int32
-    for (size_t i = 0; i < backing.size(); ++i)
-    {
-        float fp32Value = static_cast<const float*>(biases.value().GetMemoryArea())[i];
-        backing[i] = armnn::numeric_cast<int32_t>(fp32Value * ( 1 / scale ));
-    }
-
-    return ConstTensor(qInfo, backing);
-}
-
-void QuantizerVisitor::RecordLayer(const IConnectableLayer* srcLayer, IConnectableLayer* quantizedLayer)
-{
-    m_OriginalToQuantizedGuidMap.insert(std::make_pair(srcLayer->GetGuid(), quantizedLayer->GetGuid()));
-    m_QuantizedGuidToLayerMap.insert(std::make_pair(quantizedLayer->GetGuid(), quantizedLayer));
-}
-
-void QuantizerVisitor::VisitAbsLayer(const IConnectableLayer* layer, const char* name)
-{
-    VisitElementwiseUnaryLayer(layer, ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
-}
-
-void QuantizerVisitor::VisitActivationLayer(const IConnectableLayer* layer,
-                                            const ActivationDescriptor& activationDescriptor,
-                                            const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddActivationLayer(activationDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddAdditionLayer(name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitArgMinMaxLayer(const IConnectableLayer* layer,
-                                           const ArgMinMaxDescriptor& argMinMaxDescriptor,
-                                           const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddArgMinMaxLayer(argMinMaxDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitBatchNormalizationLayer(const IConnectableLayer* layer,
-                                                    const BatchNormalizationDescriptor& desc,
-                                                    const ConstTensor& mean,
-                                                    const ConstTensor& variance,
-                                                    const ConstTensor& beta,
-                                                    const ConstTensor& gamma,
-                                                    const char* name)
-{
-    std::vector<uint8_t> meanBacking;
-    ConstTensor qMean = CreateQuantizedConst(mean, meanBacking);
-
-    std::vector<uint8_t> varianceBacking;
-    ConstTensor qVariance = CreateQuantizedConst(variance, varianceBacking);
-
-    std::vector<uint8_t> betaBacking;
-    ConstTensor qBeta = CreateQuantizedConst(beta, betaBacking);
-
-    std::vector<uint8_t> gammaBacking;
-    ConstTensor qGamma = CreateQuantizedConst(gamma, gammaBacking);
-
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddBatchNormalizationLayer(desc,
-                                                                                 qMean,
-                                                                                 qVariance,
-                                                                                 qBeta,
-                                                                                 qGamma,
-                                                                                 name);
-
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitBatchToSpaceNdLayer(const IConnectableLayer* layer,
-                                                const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
-                                                const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddBatchToSpaceNdLayer(batchToSpaceNdDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitComparisonLayer(const IConnectableLayer* layer,
-                                            const ComparisonDescriptor& comparisonDescriptor,
-                                            const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddComparisonLayer(comparisonDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitConcatLayer(const IConnectableLayer* layer,
-                                        const OriginsDescriptor& originsDescriptor,
-                                        const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddConcatLayer(originsDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitConstantLayer(const IConnectableLayer* layer,
-                                          const ConstTensor& input,
-                                          const char* name)
-{
-    std::vector<uint8_t> inputBacking;
-    ConstTensor qInput = CreateQuantizedConst(input, inputBacking);
-
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddConstantLayer(qInput, name);
-    RecordLayer(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer,
-                                               const Convolution2dDescriptor& convolution2dDescriptor,
-                                               const ConstTensor& weights,
-                                               const Optional<ConstTensor>& biases,
-                                               const char* name)
-{
-    std::vector<uint8_t> weightsBacking;
-    ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
-    Optional<ConstTensor> optionalQBiases;
-    std::vector<int32_t> biasesBacking;
-
-    if (biases.has_value())
-    {
-        ConstTensor qBiases = CreateQuantizedBias(layer, qWeights, biases, biasesBacking);
-        optionalQBiases = Optional<ConstTensor>(qBiases);
-    }
-
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddConvolution2dLayer(convolution2dDescriptor,
-                                                                            qWeights,
-                                                                            optionalQBiases,
-                                                                            name);
-
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitDepthToSpaceLayer(const IConnectableLayer* layer,
-                                              const DepthToSpaceDescriptor& descriptor,
-                                              const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddDepthToSpaceLayer(descriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
-                                                        const DepthwiseConvolution2dDescriptor& desc,
-                                                        const ConstTensor& weights,
-                                                        const Optional<ConstTensor>& biases,
-                                                        const char* name)
-{
-    std::vector<uint8_t> weightsBacking;
-    ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
-    Optional<ConstTensor> optionalQBiases;
-    std::vector<int32_t> biasesBacking;
-
-    if (biases.has_value())
-    {
-        ConstTensor qBiases = CreateQuantizedBias(layer, qWeights, biases, biasesBacking);
-        optionalQBiases = Optional<ConstTensor>(qBiases);
-    }
-
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddDepthwiseConvolution2dLayer(desc,
-                                                                                     qWeights,
-                                                                                     optionalQBiases,
-                                                                                     name);
-
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitElementwiseUnaryLayer(const IConnectableLayer* layer,
-                                                  const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
-                                                  const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddElementwiseUnaryLayer(elementwiseUnaryDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitFillLayer(const IConnectableLayer* layer,
-                                      const FillDescriptor& desc,
-                                      const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddFillLayer(desc, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitFullyConnectedLayer(const IConnectableLayer *layer,
-                                                const FullyConnectedDescriptor& desc,
-                                                const ConstTensor& weights,
-                                                const Optional<ConstTensor>& biases,
-                                                const char *name)
-{
-    std::vector<uint8_t> weightsBacking;
-    ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
-    Optional<ConstTensor> optionalQBiases;
-    std::vector<int32_t> biasesBacking;
-
-    if (biases.has_value())
-    {
-        ConstTensor qBiases = CreateQuantizedBias(layer, qWeights, biases, biasesBacking);
-        optionalQBiases = Optional<ConstTensor>(qBiases);
-    }
-
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddFullyConnectedLayer(desc,
-                                                                             qWeights,
-                                                                             optionalQBiases,
-                                                                             name);
-
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitInputLayer(const IConnectableLayer *layer, LayerBindingId id, const char *name)
-{
-    const DataType dataType = layer->GetOutputSlot(0).GetTensorInfo().GetDataType();
-    IConnectableLayer* inputLayer = m_QuantizedNetwork->AddInputLayer(id, name);
-
-    if (m_PreserveType && (dataType == DataType::Float32 || dataType == DataType::Float16))
-    {
-        IConnectableLayer* quantizeLayer = m_QuantizedNetwork->AddQuantizeLayer();
-        inputLayer->GetOutputSlot(0).Connect(quantizeLayer->GetInputSlot(0));
-        inputLayer->GetOutputSlot(0).SetTensorInfo(layer->GetOutputSlot(0).GetTensorInfo());
-        RecordLayer(layer, quantizeLayer);
-    }
-    else
-    {
-        RecordLayer(layer, inputLayer);
-    }
-}
-
-void QuantizerVisitor::VisitInstanceNormalizationLayer(const IConnectableLayer* layer,
-                                                       const InstanceNormalizationDescriptor& descriptor,
-                                                       const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddInstanceNormalizationLayer(descriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitLogSoftmaxLayer(const IConnectableLayer* layer,
-                                            const LogSoftmaxDescriptor& logSoftmaxDescriptor,
-                                            const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddLogSoftmaxLayer(logSoftmaxDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitMeanLayer(const IConnectableLayer* layer,
-                                      const MeanDescriptor& meanDescriptor,
-                                      const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddMeanLayer(meanDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitMultiplicationLayer(const IConnectableLayer* layer,
-                                                const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddMultiplicationLayer(name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitNormalizationLayer(const armnn::IConnectableLayer* layer,
-                                               const armnn::NormalizationDescriptor& normalizationDescriptor,
-                                               const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddNormalizationLayer(normalizationDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
-{
-    const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
-    const DataType& dataType = info.GetDataType();
-    IConnectableLayer* outputLayer = m_QuantizedNetwork->AddOutputLayer(id, name);
-
-    if (m_PreserveType  && (dataType == DataType::Float32 || dataType == DataType::Float16))
-    {
-        IConnectableLayer* dequantizeLayer = m_QuantizedNetwork->AddDequantizeLayer();
-        RecordLayer(layer, dequantizeLayer);
-        SetQuantizedInputConnections(layer, dequantizeLayer);
-        dequantizeLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-        dequantizeLayer->GetOutputSlot(0).SetTensorInfo(info);
-    }
-    else
-    {
-        RecordLayer(layer, outputLayer);
-        SetQuantizedInputConnections(layer, outputLayer);
-    }
-}
-
-void QuantizerVisitor::VisitPadLayer(const IConnectableLayer* layer,
-                                     const PadDescriptor& padDescriptor,
-                                     const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddPadLayer(padDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitPermuteLayer(const IConnectableLayer* layer,
-                                         const PermuteDescriptor& permuteDescriptor,
-                                         const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddPermuteLayer(permuteDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitPooling2dLayer(const IConnectableLayer* layer,
-                                           const Pooling2dDescriptor& pooling2dDescriptor,
-                                           const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddPooling2dLayer(pooling2dDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitPreluLayer(const IConnectableLayer* layer,
-                                       const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddPreluLayer(name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitReshapeLayer(const IConnectableLayer* layer,
-                                         const ReshapeDescriptor& reshapeDescriptor,
-                                         const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddReshapeLayer(reshapeDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitResizeBilinearLayer(const IConnectableLayer* layer,
-                                                const ResizeBilinearDescriptor& resizeBilinearDescriptor,
-                                                const char* name)
-{
-    ResizeDescriptor resizeDescriptor;
-    resizeDescriptor.m_Method       = ResizeMethod::Bilinear;
-    resizeDescriptor.m_TargetWidth  = resizeBilinearDescriptor.m_TargetWidth;
-    resizeDescriptor.m_TargetHeight = resizeBilinearDescriptor.m_TargetHeight;
-    resizeDescriptor.m_DataLayout   = resizeBilinearDescriptor.m_DataLayout;
-
-    VisitResizeLayer(layer, resizeDescriptor, name);
-}
-
-void QuantizerVisitor::VisitResizeLayer(const IConnectableLayer* layer,
-                                        const ResizeDescriptor& resizeDescriptor,
-                                        const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddResizeLayer(resizeDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitRsqrtLayer(const IConnectableLayer* layer, const char* name)
-{
-    VisitElementwiseUnaryLayer(layer, ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
-}
-
-void QuantizerVisitor::VisitSliceLayer(const IConnectableLayer* layer,
-                                       const SliceDescriptor& sliceDescriptor,
-                                       const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddSliceLayer(sliceDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitSoftmaxLayer(const IConnectableLayer* layer,
-                                         const SoftmaxDescriptor& softmaxDescriptor,
-                                         const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddSoftmaxLayer(softmaxDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
-                                                const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
-                                                const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddSpaceToBatchNdLayer(spaceToBatchNdDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitSpaceToDepthLayer(const IConnectableLayer* layer,
-                                              const SpaceToDepthDescriptor& spaceToDepthDescriptor,
-                                              const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddSpaceToDepthLayer(spaceToDepthDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitSplitterLayer(const IConnectableLayer* layer,
-                                          const SplitterDescriptor& splitterDescriptor,
-                                          const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddSplitterLayer(splitterDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitStackLayer(const IConnectableLayer* layer,
-                                       const StackDescriptor& stackDescriptor,
-                                       const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddStackLayer(stackDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitStridedSliceLayer(const IConnectableLayer* layer,
-                                              const StridedSliceDescriptor& stridedSliceDescriptor,
-                                              const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddStridedSliceLayer(stridedSliceDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitSubtractionLayer(const IConnectableLayer* layer,
-                                                const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddSubtractionLayer(name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitTransposeConvolution2dLayer(const IConnectableLayer* layer,
-                                                        const TransposeConvolution2dDescriptor& descriptor,
-                                                        const ConstTensor& weights,
-                                                        const Optional<ConstTensor>& biases,
-                                                        const char* name)
-{
-    // quantize weights
-    std::vector<uint8_t> weightsBacking;
-    ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
-
-    // quantize biases
-    std::vector<int32_t> biasesBacking;
-    Optional<ConstTensor> optionalQBiases;
-    if (biases.has_value())
-    {
-        ConstTensor qBiases = CreateQuantizedBias(layer, qWeights, biases, biasesBacking);
-        optionalQBiases = Optional<ConstTensor>(qBiases);
-    }
-
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddTransposeConvolution2dLayer(descriptor,
-                                                                                     qWeights,
-                                                                                     optionalQBiases,
-                                                                                     name);
-
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitTransposeLayer(const IConnectableLayer* layer,
-                                           const TransposeDescriptor& transposeDescriptor,
-                                           const char* name)
-{
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddTransposeLayer(transposeDescriptor, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-} //namespace armnn
diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp
deleted file mode 100644
index 65bd671..0000000
--- a/src/armnn/QuantizerVisitor.hpp
+++ /dev/null
@@ -1,231 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "armnn/LayerVisitorBase.hpp"
-#include "StaticRangeVisitor.hpp"
-#include "NetworkQuantizationScheme.hpp"
-
-#include <armnn/INetwork.hpp>
-#include <armnn/Types.hpp>
-#include <armnnQuantizer/INetworkQuantizer.hpp>
-
-#include <unordered_map>
-
-namespace armnn
-{
-
-// Forward declaration
-class StaticRangeVisitor;
-
-/// Visitor object for quantizing layers in a network
-class QuantizerVisitor : public LayerVisitorBase<VisitorThrowingPolicy>
-{
-public:
-    QuantizerVisitor(const RangeTracker& rangeTracker,
-                     const IQuantizationScheme* quantizationScheme,
-                     bool preserveType = false);
-
-    ~QuantizerVisitor() = default;
-
-    /// Functions to quantize the individual layers, overridden from ILayerVisitor
-    ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
-    void VisitAbsLayer(const IConnectableLayer* layer, const char* name = nullptr) override;
-
-    void VisitActivationLayer(const IConnectableLayer* layer,
-                              const ActivationDescriptor& activationDescriptor,
-                              const char* name = nullptr) override;
-
-    void VisitAdditionLayer(const IConnectableLayer* layer, const char* name = nullptr) override;
-
-    void VisitArgMinMaxLayer(const IConnectableLayer* layer,
-                             const ArgMinMaxDescriptor& argMinMaxDescriptor,
-                             const char* name = nullptr) override;
-
-    void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
-                                      const BatchNormalizationDescriptor& desc,
-                                      const ConstTensor& mean,
-                                      const ConstTensor& variance,
-                                      const ConstTensor& beta,
-                                      const ConstTensor& gamma,
-                                      const char* name = nullptr) override;
-
-    void VisitBatchToSpaceNdLayer(const IConnectableLayer* layer,
-                                  const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
-                                  const char* name = nullptr) override;
-
-    void VisitComparisonLayer(const IConnectableLayer* layer,
-                              const ComparisonDescriptor& comparisonDescriptor,
-                              const char* name = nullptr) override;
-
-    void VisitConcatLayer(const IConnectableLayer* layer,
-                          const OriginsDescriptor& originsDescriptor,
-                          const char* name = nullptr) override;
-
-    void VisitConstantLayer(const IConnectableLayer* layer,
-                            const ConstTensor& input,
-                            const char* name = nullptr) override;
-
-    void VisitConvolution2dLayer(const IConnectableLayer* layer,
-                                 const Convolution2dDescriptor& convolution2dDescriptor,
-                                 const ConstTensor& weights,
-                                 const Optional<ConstTensor>& biases,
-                                 const char* name = nullptr) override;
-
-    void VisitDepthToSpaceLayer(const IConnectableLayer* layer,
-                                const DepthToSpaceDescriptor& depthToSpaceDescriptor,
-                                const char* name = nullptr) override;
-
-    void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
-                                          const DepthwiseConvolution2dDescriptor& desc,
-                                          const ConstTensor& weights,
-                                          const Optional<ConstTensor>& biases,
-                                          const char* name = nullptr) override;
-
-    void VisitElementwiseUnaryLayer(const IConnectableLayer* layer,
-                                    const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
-                                    const char* name = nullptr) override;
-
-    void VisitFillLayer(const IConnectableLayer* layer,
-                        const FillDescriptor& desc,
-                        const char* name) override;
-
-    void VisitFullyConnectedLayer(const IConnectableLayer *layer,
-                                  const FullyConnectedDescriptor& desc,
-                                  const ConstTensor& weights,
-                                  const Optional<ConstTensor>& biases,
-                                  const char *name = nullptr)  override;
-
-    void VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override;
-
-    void VisitInstanceNormalizationLayer(const IConnectableLayer* layer,
-                                         const InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
-                                         const char* name = nullptr) override;
-
-    void VisitLogSoftmaxLayer(const IConnectableLayer* layer,
-                              const LogSoftmaxDescriptor& logSoftmaxDescriptor,
-                              const char* name = nullptr) override;
-
-    void VisitMeanLayer(const IConnectableLayer* layer,
-                        const MeanDescriptor& meanDescriptor,
-                        const char* name = nullptr) override;
-
-    void VisitMultiplicationLayer(const IConnectableLayer* layer,
-                                  const char* name = nullptr) override;
-
-    void VisitNormalizationLayer(const IConnectableLayer* layer,
-                                 const NormalizationDescriptor& normalizationDescriptor,
-                                 const char* name = nullptr) override;
-
-    void VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr)  override;
-
-    void VisitPadLayer(const IConnectableLayer*,
-                       const PadDescriptor&,
-                       const char* name = nullptr) override;
-
-    void VisitPermuteLayer(const IConnectableLayer* layer,
-                           const PermuteDescriptor& permuteDescriptor,
-                           const char* name = nullptr) override;
-
-    void VisitPooling2dLayer(const IConnectableLayer* layer,
-                             const Pooling2dDescriptor& pooling2dDescriptor,
-                             const char* name = nullptr) override;
-
-    void VisitPreluLayer(const IConnectableLayer* layer,
-                         const char* name = nullptr) override;
-
-    void VisitReshapeLayer(const IConnectableLayer* layer,
-                           const ReshapeDescriptor& reshapeDescriptor,
-                           const char* name = nullptr) override;
-
-    void VisitResizeLayer(const IConnectableLayer* layer,
-                          const ResizeDescriptor& resizeDescriptor,
-                          const char* name = nullptr) override;
-
-    ARMNN_DEPRECATED_MSG("Use VisitResizeLayer instead")
-    void VisitResizeBilinearLayer(const IConnectableLayer* layer,
-                                  const ResizeBilinearDescriptor& resizeDesc,
-                                  const char* name = nullptr) override;
-
-    ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
-    void VisitRsqrtLayer(const IConnectableLayer*,
-                         const char* name = nullptr) override;
-
-    void VisitSliceLayer(const IConnectableLayer* layer,
-                         const SliceDescriptor& sliceDescriptor,
-                         const char* name = nullptr) override;
-
-    void VisitSoftmaxLayer(const IConnectableLayer* layer,
-                           const SoftmaxDescriptor& softmaxDescriptor,
-                           const char* name = nullptr) override;
-
-    void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
-                                  const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
-                                  const char* name = nullptr) override;
-
-    void VisitSpaceToDepthLayer(const IConnectableLayer* layer,
-                                const SpaceToDepthDescriptor& spaceToDepthDescriptor,
-                                const char* name = nullptr) override;
-
-    void VisitSplitterLayer(const IConnectableLayer* layer,
-                            const SplitterDescriptor& splitterDescriptor,
-                            const char* name = nullptr) override;
-
-    void VisitStackLayer(const IConnectableLayer* layer,
-                         const StackDescriptor& stackDescriptor,
-                         const char* name = nullptr) override;
-
-    void VisitStridedSliceLayer(const IConnectableLayer* layer,
-                                const StridedSliceDescriptor& stridedSliceDescriptor,
-                                const char* name = nullptr) override;
-
-    void VisitSubtractionLayer(const IConnectableLayer* layer,
-                               const char* name = nullptr) override;
-
-    void VisitTransposeConvolution2dLayer(const IConnectableLayer* layer,
-                                          const TransposeConvolution2dDescriptor& descriptor,
-                                          const ConstTensor& weights,
-                                          const Optional<ConstTensor>& biases,
-                                          const char* name = nullptr) override;
-
-    void VisitTransposeLayer(const IConnectableLayer* layer,
-                             const TransposeDescriptor& descriptor,
-                             const char* name = nullptr) override;
-
-    /// Extract the quantized network
-    INetworkPtr RetrieveFinalNetwork() { return std::move(m_QuantizedNetwork); }
-
-private:
-    /// Connects the layer to preceeding layers and sets the quantization parameters based on recorded ranges
-    void SetQuantizedInputConnections(const IConnectableLayer* srcLayer, IConnectableLayer* quantizedLayer);
-
-    /// Record the guids so we can easily find the layers later
-    void RecordLayer(const IConnectableLayer* srcLayer, IConnectableLayer* qLayer);
-
-    /// Sets the bias quantization scale based on input and weight scales
-    ConstTensor CreateQuantizedBias(const IConnectableLayer* srcLayer,
-                                    const ConstTensor& weights,
-                                    const Optional<ConstTensor>& biases,
-                                    std::vector<int32_t>& weightsBacking);
-
-    /// Reference to the static range visitor used to retrieve the quantization ranges
-    const RangeTracker& m_Ranges;
-
-    /// Quantized version of the model we are building up
-    INetworkPtr m_QuantizedNetwork;
-
-    /// Mapping from input network guids to quantized network guids
-    std::unordered_map<LayerGuid, LayerGuid> m_OriginalToQuantizedGuidMap;
-
-    /// Mapping from guid to layer in quantized network
-    std::unordered_map<LayerGuid, IConnectableLayer*> m_QuantizedGuidToLayerMap;
-
-    const IQuantizationScheme* m_QuantizationScheme;
-
-    const bool m_PreserveType;
-};
-
-} //namespace armnn
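
Note on the refactor: the dozens of per-layer Visit* overrides removed above collapse into the single IStrategy::ExecuteStrategy entry point. As a minimal sketch of what a client now implements (LayerCountStrategy is a hypothetical example, not part of this patch), one method replaces the whole override set:

    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>
    #include <armnn/IStrategy.hpp>
    #include <armnn/Types.hpp>
    #include <armnn/utility/IgnoreUnused.hpp>

    #include <map>
    #include <vector>

    class LayerCountStrategy : public armnn::IStrategy
    {
    public:
        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                             const armnn::BaseDescriptor& descriptor,
                             const std::vector<armnn::ConstTensor>& constants,
                             const char* name,
                             const armnn::LayerBindingId id) override
        {
            armnn::IgnoreUnused(descriptor, constants, name, id);
            ++m_Counts[layer->GetType()]; // a single entry point sees every layer type
        }

    private:
        std::map<armnn::LayerType, unsigned int> m_Counts;
    };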
diff --git a/src/armnn/StaticRangeStrategy.cpp b/src/armnn/StaticRangeStrategy.cpp
new file mode 100644
index 0000000..84b8d24
--- /dev/null
+++ b/src/armnn/StaticRangeStrategy.cpp
@@ -0,0 +1,193 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "StaticRangeStrategy.hpp"
+
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/Descriptors.hpp>
+#include <armnn/Types.hpp>
+
+#include <limits>
+
+namespace armnn
+{
+
+StaticRangeStrategy::StaticRangeStrategy(RangeTracker& rangeTracker)
+    : m_RangeTracker(rangeTracker)
+{}
+
+void StaticRangeStrategy::SetRange(const IConnectableLayer* layer, unsigned int outputIdx, float min, float max)
+{
+    m_RangeTracker.SetRange(layer, outputIdx, min, max);
+}
+
+void StaticRangeStrategy::ForwardParentParameters(const IConnectableLayer* layer)
+{
+    const auto parentRange = m_RangeTracker.GetRange(layer->GetInputSlot(0).GetConnection()->GetOwningLayerGuid(), 0);
+    SetRange(layer, 0, parentRange.first, parentRange.second);
+}
+
+
+void StaticRangeStrategy::ExecuteStrategy(const armnn::IConnectableLayer *layer,
+                                          const BaseDescriptor &descriptor,
+                                          const std::vector<armnn::ConstTensor> &constants,
+                                          const char *name,
+                                          const armnn::LayerBindingId id)
+{
+    IgnoreUnused(id, name);
+
+    switch (layer->GetType())
+    {
+        case armnn::LayerType::Activation:
+        {
+            const ActivationDescriptor& activationDescriptor = static_cast<const ActivationDescriptor&>(descriptor);
+
+            switch (activationDescriptor.m_Function)
+            {
+                // Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
+                case ActivationFunction::Abs:
+                case ActivationFunction::Linear:
+                case ActivationFunction::ReLu:
+                case ActivationFunction::SoftReLu:
+                    SetRange(layer, 0, 0.f, 15.f);
+                    break;
+                case ActivationFunction::BoundedReLu:
+                    SetRange(layer, 0, 0.f, activationDescriptor.m_A);
+                    break;
+                case ActivationFunction::TanH:
+                    SetRange(layer, 0, -1.f, 1.f);
+                    break;
+                case ActivationFunction::LeakyReLu:
+                    SetRange(layer, 0, -5.f, 15.f);
+                    break;
+                default:
+                    SetRange(layer, 0, -15.f, 15.f);
+                    break;
+            }
+            break;
+        }
+        case armnn::LayerType::Addition:
+        {
+            SetRange(layer, 0, -20.f, 20.f);
+            break;
+        }
+        case armnn::LayerType::ArgMinMax:
+        {
+            ForwardParentParameters(layer);
+            break;
+        }
+        case armnn::LayerType::BatchToSpaceNd:
+        {
+            ForwardParentParameters(layer);
+            break;
+        }
+        case armnn::LayerType::BatchNormalization:
+        {
+            SetRange(layer, 0, -15.0f, 15.0f);
+            break;
+        }
+        case armnn::LayerType::Concat:
+        {
+            float min = std::numeric_limits<float>::max();
+            float max = std::numeric_limits<float>::lowest();
+            for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
+            {
+                const IOutputSlot* outputSlot = layer->GetInputSlot(i).GetConnection();
+                LayerGuid layerId = outputSlot->GetOwningLayerGuid();
+                unsigned int slotIndex = outputSlot->CalculateIndexOnOwner();
+                RangeTracker::MinMaxRange range = m_RangeTracker.GetRange(layerId, slotIndex);
+                min = std::min(min, range.first);
+                max = std::max(max, range.second);
+            }
+            SetRange(layer, 0, min, max);
+            break;
+        }
+        case armnn::LayerType::Constant:
+        {
+            if (constants[0].GetDataType() != DataType::Float32)
+            {
+                throw InvalidArgumentException("Quantization is supported only for FP32 tensors");
+            }
+
+            // Work out the range based on the input constants
+            unsigned int inputNumElements = constants[0].GetNumElements();
+            const float* inputData = reinterpret_cast<const float*>(constants[0].GetMemoryArea());
+
+            float min = std::numeric_limits<float>::max();
+            float max = std::numeric_limits<float>::lowest();
+
+            for (unsigned int i = 0; i < inputNumElements; i++)
+            {
+                const float inputValue = inputData[i];
+
+                min = std::min(min, inputValue);
+                max = std::max(max, inputValue);
+            }
+            SetRange(layer, 0, min, max);
+            break;
+        }
+        case armnn::LayerType::Convolution2d:
+        {
+            SetRange(layer, 0, -15.0f, 15.0f);
+            break;
+        }
+        case armnn::LayerType::DepthwiseConvolution2d:
+        {
+            SetRange(layer, 0, -15.0f, 15.0f);
+            break;
+        }
+        case armnn::LayerType::FullyConnected:
+        {
+            SetRange(layer, 0, -15.0f, 15.0f);
+            break;
+        }
+        case armnn::LayerType::Permute:
+        {
+            ForwardParentParameters(layer);
+            break;
+        }
+        case armnn::LayerType::Pooling2d:
+        {
+            ForwardParentParameters(layer);
+            break;
+        }
+        case armnn::LayerType::Reshape:
+        {
+            ForwardParentParameters(layer);
+            break;
+        }
+        case armnn::LayerType::Resize:
+        {
+            ForwardParentParameters(layer);
+            break;
+        }
+        case armnn::LayerType::Splitter:
+        {
+            ForwardParentParameters(layer);
+            break;
+        }
+        case armnn::LayerType::SpaceToBatchNd:
+        {
+            ForwardParentParameters(layer);
+            break;
+        }
+        case armnn::LayerType::Softmax:
+        {
+            SetRange(layer, 0, 0.f, 1.f);
+            break;
+        }
+        case armnn::LayerType::StridedSlice:
+        {
+            ForwardParentParameters(layer);
+            break;
+        }
+        default:
+        {
+            break;
+        }
+    }
+}
+
+} //namespace armnn
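
The strategy is driven by the layers themselves: each layer class below gains an ExecuteStrategy(IStrategy&) const override that calls back with its own descriptor and constant tensors. A rough usage sketch, assuming the caller already holds the layers in topological order (ApplyToLayers and EstablishRanges are illustrative helpers, not armnn API; the internal headers are those added in this patch):

    #include "RangeTracker.hpp"
    #include "StaticRangeStrategy.hpp"

    #include <armnn/INetwork.hpp>

    #include <vector>

    // Illustrative driver: hand the strategy to each layer in order.
    void ApplyToLayers(const std::vector<const armnn::IConnectableLayer*>& layers,
                       armnn::IStrategy& strategy)
    {
        for (const armnn::IConnectableLayer* layer : layers)
        {
            layer->ExecuteStrategy(strategy); // the layer dispatches back with descriptor + constants
        }
    }

    void EstablishRanges(const std::vector<const armnn::IConnectableLayer*>& layers)
    {
        armnn::RangeTracker ranges;
        armnn::StaticRangeStrategy staticRangeStrategy(ranges);
        ApplyToLayers(layers, staticRangeStrategy); // ranges now holds per-output min/max
    }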
diff --git a/src/armnn/StaticRangeStrategy.hpp b/src/armnn/StaticRangeStrategy.hpp
new file mode 100644
index 0000000..ed7cf27
--- /dev/null
+++ b/src/armnn/StaticRangeStrategy.hpp
@@ -0,0 +1,41 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "armnn/LayerVisitorBase.hpp"
+#include "RangeTracker.hpp"
+
+#include <armnn/INetwork.hpp>
+#include <armnnQuantizer/INetworkQuantizer.hpp>
+
+
+namespace armnn
+{
+
+class StaticRangeStrategy : public IStrategy
+{
+public:
+    StaticRangeStrategy(RangeTracker& rangeTracker);
+    ~StaticRangeStrategy() = default;
+
+    void ExecuteStrategy(const armnn::IConnectableLayer *layer,
+                         const BaseDescriptor &descriptor,
+                         const std::vector<armnn::ConstTensor> &constants,
+                         const char *name,
+                         const armnn::LayerBindingId id) override;
+
+private:
+    /// Set the range for an output slot on a layer
+    void SetRange(const IConnectableLayer* layer, unsigned int outputIdx, float min, float max);
+
+    void ForwardParentParameters(const IConnectableLayer* layer);
+
+    /// Mapping from a layer Guid to an array of ranges for outputs
+    RangeTracker& m_RangeTracker;
+
+};
+
+} //namespace armnn
diff --git a/src/armnn/StaticRangeVisitor.cpp b/src/armnn/StaticRangeVisitor.cpp
deleted file mode 100644
index 210c666..0000000
--- a/src/armnn/StaticRangeVisitor.cpp
+++ /dev/null
@@ -1,270 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "StaticRangeVisitor.hpp"
-
-#include <armnn/utility/IgnoreUnused.hpp>
-#include <armnn/Descriptors.hpp>
-#include <armnn/Types.hpp>
-
-#include <limits>
-
-namespace armnn
-{
-
-StaticRangeVisitor::StaticRangeVisitor(RangeTracker& rangeTracker)
-    : m_RangeTracker(rangeTracker)
-{}
-
-void StaticRangeVisitor::SetRange(const IConnectableLayer* layer, unsigned int outputIdx, float min, float max)
-{
-    m_RangeTracker.SetRange(layer, outputIdx, min, max);
-}
-
-void StaticRangeVisitor::ForwardParentParameters(const IConnectableLayer* layer)
-{
-    const auto parentRange = m_RangeTracker.GetRange(layer->GetInputSlot(0).GetConnection()->GetOwningLayerGuid(), 0);
-    SetRange(layer, 0, parentRange.first, parentRange.second);
-}
-
-void StaticRangeVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
-{
-    IgnoreUnused(name);
-    SetRange(layer, 0, -20.f, 20.f);
-}
-
-void StaticRangeVisitor::VisitBatchNormalizationLayer(const IConnectableLayer* layer,
-                                                      const BatchNormalizationDescriptor& desc,
-                                                      const ConstTensor& mean,
-                                                      const ConstTensor& variance,
-                                                      const ConstTensor& beta,
-                                                      const ConstTensor& gamma,
-                                                      const char* name)
-{
-    IgnoreUnused(desc);
-    IgnoreUnused(mean);
-    IgnoreUnused(variance);
-    IgnoreUnused(beta);
-    IgnoreUnused(gamma);
-    IgnoreUnused(name);
-    SetRange(layer, 0, -15.0f, 15.0f);
-}
-
-void StaticRangeVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer,
-                                                 const Convolution2dDescriptor& convolution2dDescriptor,
-                                                 const ConstTensor& weights,
-                                                 const Optional<ConstTensor>& biases,
-                                                 const char* name)
-{
-    IgnoreUnused(convolution2dDescriptor);
-    IgnoreUnused(weights);
-    IgnoreUnused(biases);
-    IgnoreUnused(name);
-    SetRange(layer, 0, -15.0f, 15.0f);
-}
-
-void StaticRangeVisitor::VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
-                                                          const DepthwiseConvolution2dDescriptor& desc,
-                                                          const ConstTensor& weights,
-                                                          const Optional<ConstTensor>& biases,
-                                                          const char* name)
-{
-    IgnoreUnused(desc);
-    IgnoreUnused(weights);
-    IgnoreUnused(biases);
-    IgnoreUnused(name);
-    SetRange(layer, 0, -15.0f, 15.0f);
-}
-
-void StaticRangeVisitor::VisitActivationLayer(const IConnectableLayer* layer,
-                                              const ActivationDescriptor& activationDescriptor,
-                                              const char* name)
-{
-    IgnoreUnused(name);
-    switch (activationDescriptor.m_Function)
-    {
-        // Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
-        case ActivationFunction::Abs:
-        case ActivationFunction::Linear:
-        case ActivationFunction::ReLu:
-        case ActivationFunction::SoftReLu:
-            SetRange(layer, 0, 0.f, 15.f);
-            break;
-        case ActivationFunction::BoundedReLu:
-            SetRange(layer, 0, 0.f, activationDescriptor.m_A);
-            break;
-        case ActivationFunction::TanH:
-            SetRange(layer, 0, -1.f, 1.f);
-            break;
-        case ActivationFunction::LeakyReLu:
-            SetRange(layer, 0, -5.f, 15.f);
-            break;
-        default:
-            SetRange(layer, 0, -15.f, 15.f);
-            break;
-    }
-}
-
-void StaticRangeVisitor::VisitArgMinMaxLayer(const armnn::IConnectableLayer* layer,
-                                             const armnn::ArgMinMaxDescriptor& argMinMaxDescriptor,
-                                             const char* name)
-{
-    IgnoreUnused(argMinMaxDescriptor);
-    IgnoreUnused(name);
-    ForwardParentParameters(layer);
-}
-
-void StaticRangeVisitor::VisitFullyConnectedLayer(const IConnectableLayer* layer,
-                                                  const FullyConnectedDescriptor& desc,
-                                                  const ConstTensor& weights,
-                                                  const Optional<ConstTensor>& biases,
-                                                  const char* name)
-{
-    IgnoreUnused(desc);
-    IgnoreUnused(weights);
-    IgnoreUnused(biases);
-    IgnoreUnused(name);
-    SetRange(layer, 0, -15.0f, 15.0f);
-}
-
-void StaticRangeVisitor::VisitPermuteLayer(const IConnectableLayer* layer,
-                                           const PermuteDescriptor& permuteDescriptor,
-                                           const char* name)
-{
-    IgnoreUnused(permuteDescriptor);
-    IgnoreUnused(name);
-    ForwardParentParameters(layer);
-}
-
-void StaticRangeVisitor::VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
-                                                  const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
-                                                  const char* name)
-{
-    IgnoreUnused(spaceToBatchNdDescriptor);
-    IgnoreUnused(name);
-    ForwardParentParameters(layer);
-}
-
-void StaticRangeVisitor::VisitPooling2dLayer(const IConnectableLayer* layer,
-                                             const Pooling2dDescriptor& pooling2dDescriptor,
-                                             const char* name)
-{
-    IgnoreUnused(pooling2dDescriptor);
-    IgnoreUnused(name);
-    ForwardParentParameters(layer);
-}
-
-void StaticRangeVisitor::VisitSoftmaxLayer(const IConnectableLayer* layer,
-                                           const SoftmaxDescriptor& softmaxDescriptor,
-                                           const char* name)
-{
-    IgnoreUnused(softmaxDescriptor);
-    IgnoreUnused(name);
-    SetRange(layer, 0, 0.f, 1.f);
-}
-
-void StaticRangeVisitor::VisitConcatLayer(const IConnectableLayer* layer,
-                                          const OriginsDescriptor& originsDescriptor,
-                                          const char* name)
-{
-    IgnoreUnused(originsDescriptor);
-    IgnoreUnused(name);
-    float min = std::numeric_limits<float>::max();
-    float max = std::numeric_limits<float>::lowest();
-    for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
-    {
-        const IOutputSlot* outputSlot = layer->GetInputSlot(i).GetConnection();
-        LayerGuid layerId = outputSlot->GetOwningLayerGuid();
-        unsigned int slotIndex = outputSlot->CalculateIndexOnOwner();
-        RangeTracker::MinMaxRange range = m_RangeTracker.GetRange(layerId, slotIndex);
-        min = std::min(min, range.first);
-        max = std::max(max, range.second);
-    }
-    SetRange(layer, 0, min, max);
-}
-
-void StaticRangeVisitor::VisitConstantLayer(const IConnectableLayer* layer,
-                                            const ConstTensor& input,
-                                            const char* name)
-{
-    IgnoreUnused(name);
-
-    if (input.GetDataType() != DataType::Float32)
-    {
-        throw InvalidArgumentException("Quantization is supported only for FP32 tensors");
-    }
-
-    // Work out the range based on the input constants
-    unsigned int inputNumElements = input.GetNumElements();
-    const float* inputData = reinterpret_cast<const float*>(input.GetMemoryArea());
-
-    float min = std::numeric_limits<float>::max();
-    float max = std::numeric_limits<float>::lowest();
-
-    for (unsigned int i = 0; i < inputNumElements; i++)
-    {
-        const float inputValue = inputData[i];
-
-        min = std::min(min, inputValue);
-        max = std::max(max, inputValue);
-    }
-    SetRange(layer, 0, min, max);
-}
-
-void StaticRangeVisitor::VisitReshapeLayer(const IConnectableLayer* layer,
-                                           const ReshapeDescriptor& reshapeDescriptor,
-                                           const char* name)
-{
-    IgnoreUnused(reshapeDescriptor);
-    IgnoreUnused(name);
-    ForwardParentParameters(layer);
-}
-
-void StaticRangeVisitor::VisitSplitterLayer(const IConnectableLayer* layer,
-                                            const SplitterDescriptor& splitterDescriptor,
-                                            const char* name)
-{
-    IgnoreUnused(splitterDescriptor);
-    IgnoreUnused(name);
-    ForwardParentParameters(layer);
-}
-
-void StaticRangeVisitor::VisitResizeBilinearLayer(const IConnectableLayer* layer,
-                                                  const ResizeBilinearDescriptor& resizeDesc,
-                                                  const char* name)
-{
-    IgnoreUnused(resizeDesc);
-    IgnoreUnused(name);
-    ForwardParentParameters(layer);
-}
-
-void StaticRangeVisitor::VisitResizeLayer(const IConnectableLayer* layer,
-                                          const ResizeDescriptor& resizeDescriptor,
-                                          const char* name)
-{
-    IgnoreUnused(resizeDescriptor);
-    IgnoreUnused(name);
-    ForwardParentParameters(layer);
-}
-
-void StaticRangeVisitor::VisitStridedSliceLayer(const IConnectableLayer* layer,
-                                                const StridedSliceDescriptor& stridedSliceDescriptor,
-                                                const char* name)
-{
-    IgnoreUnused(stridedSliceDescriptor);
-    IgnoreUnused(name);
-    ForwardParentParameters(layer);
-}
-
-void StaticRangeVisitor::VisitBatchToSpaceNdLayer(const IConnectableLayer* layer,
-                                                  const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
-                                                  const char* name)
-{
-    IgnoreUnused(batchToSpaceNdDescriptor);
-    IgnoreUnused(name);
-    ForwardParentParameters(layer);
-}
-
-} //namespace armnn
diff --git a/src/armnn/StaticRangeVisitor.hpp b/src/armnn/StaticRangeVisitor.hpp
deleted file mode 100644
index 20e3cb0..0000000
--- a/src/armnn/StaticRangeVisitor.hpp
+++ /dev/null
@@ -1,120 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "armnn/LayerVisitorBase.hpp"
-#include "RangeTracker.hpp"
-
-#include <armnn/INetwork.hpp>
-#include <armnnQuantizer/INetworkQuantizer.hpp>
-
-
-namespace armnn
-{
-
-/// Visitor class to establish min/max ranges based on the type of the layer
-class StaticRangeVisitor : public LayerVisitorBase<VisitorNoThrowPolicy>
-{
-public:
-    StaticRangeVisitor(RangeTracker& rangeTracker);
-    ~StaticRangeVisitor() = default;
-
-    /// Functions to set the Range on a per-layer-type basis
-    void VisitAdditionLayer(const IConnectableLayer* layer, const char* name = nullptr) override;
-
-    void VisitArgMinMaxLayer(const IConnectableLayer* layer,
-                             const ArgMinMaxDescriptor& desc,
-                             const char* name = nullptr) override;
-
-    void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
-                                      const BatchNormalizationDescriptor& desc,
-                                      const ConstTensor& mean,
-                                      const ConstTensor& variance,
-                                      const ConstTensor& beta,
-                                      const ConstTensor& gamma,
-                                      const char* name = nullptr) override;
-
-    void VisitConvolution2dLayer(const IConnectableLayer* layer,
-                                 const Convolution2dDescriptor& convolution2dDescriptor,
-                                 const ConstTensor& weights,
-                                 const Optional<ConstTensor>& biases,
-                                 const char* name = nullptr) override;
-
-    void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
-                                          const DepthwiseConvolution2dDescriptor& desc,
-                                          const ConstTensor& weights,
-                                          const Optional<ConstTensor>& biases,
-                                          const char* name = nullptr) override;
-
-    void VisitActivationLayer(const IConnectableLayer* layer,
-                              const ActivationDescriptor& activationDescriptor,
-                              const char* name = nullptr) override;
-
-    void VisitFullyConnectedLayer(const IConnectableLayer *layer,
-                                  const FullyConnectedDescriptor& desc,
-                                  const ConstTensor& weights,
-                                  const Optional<ConstTensor>& biases,
-                                  const char *name) override;
-
-    void VisitPermuteLayer(const IConnectableLayer* layer,
-                           const PermuteDescriptor& permuteDescriptor,
-                           const char* name) override;
-
-    void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
-                                  const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
-                                  const char* name = nullptr) override;
-
-    void VisitPooling2dLayer(const IConnectableLayer* layer,
-                             const Pooling2dDescriptor& pooling2dDescriptor,
-                             const char* name) override;
-
-    void VisitSoftmaxLayer(const IConnectableLayer* layer,
-                           const SoftmaxDescriptor& softmaxDescriptor,
-                           const char* name = nullptr) override;
-
-    void VisitConcatLayer(const IConnectableLayer* layer,
-                          const OriginsDescriptor& originsDescriptor,
-                          const char* name = nullptr) override;
-
-    void VisitConstantLayer(const IConnectableLayer* layer,
-                            const ConstTensor& input,
-                            const char* name = nullptr) override;
-
-    void VisitReshapeLayer(const IConnectableLayer* layer,
-                           const ReshapeDescriptor& reshapeDescriptor,
-                           const char* name = nullptr) override;
-
-    void VisitSplitterLayer(const IConnectableLayer* layer,
-                            const SplitterDescriptor& splitterDescriptor,
-                            const char* name = nullptr) override;
-
-    void VisitResizeBilinearLayer(const IConnectableLayer* layer,
-                                  const ResizeBilinearDescriptor& resizeDesc,
-                                  const char* name = nullptr) override;
-
-    void VisitResizeLayer(const IConnectableLayer* layer,
-                          const ResizeDescriptor& resizeDescriptor,
-                          const char* name = nullptr) override;
-
-    void VisitStridedSliceLayer(const IConnectableLayer* layer,
-                                const StridedSliceDescriptor& stridedSliceDescriptor,
-                                const char* name = nullptr) override;
-
-    void VisitBatchToSpaceNdLayer(const IConnectableLayer* layer,
-                                  const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
-                                  const char* name = nullptr) override;
-
-private:
-    /// Set the range for an output slot on a layer
-    void SetRange(const IConnectableLayer* layer, unsigned int outputIdx, float min, float max);
-
-    void ForwardParentParameters(const IConnectableLayer* layer);
-
-    /// Mapping from a layer Guid to an array of ranges for outputs
-    RangeTracker& m_RangeTracker;
-};
-
-} //namespace armnn
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index ce351a4..6df5195 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -80,4 +80,14 @@
             this, GetParameters(), meanTensor, varianceTensor, betaTensor, gammaTensor, GetName());
 }
 
+void BatchNormalizationLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    std::vector<armnn::ConstTensor> constTensors { {m_Mean->GetTensorInfo(), m_Mean->Map(true)},
+                                                   {m_Variance->GetTensorInfo(), m_Variance->Map(true)},
+                                                   {m_Beta->GetTensorInfo(), m_Beta->Map(true)},
+                                                   {m_Gamma->GetTensorInfo(), m_Gamma->Map(true)} };
+
+    strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
+}
+
 } // namespace armnn
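
The packing order fixed here — mean, variance, beta, gamma — is the contract the receiving strategy has to mirror when it unpacks constTensors. A hedged sketch of the consuming side (BatchNormConstants and UnpackBatchNormConstants are illustrative names, not armnn API):

    #include <armnn/Tensor.hpp>

    #include <vector>

    struct BatchNormConstants
    {
        armnn::ConstTensor m_Mean;
        armnn::ConstTensor m_Variance;
        armnn::ConstTensor m_Beta;
        armnn::ConstTensor m_Gamma;
    };

    // Mirrors the packing order used by BatchNormalizationLayer::ExecuteStrategy above.
    BatchNormConstants UnpackBatchNormConstants(const std::vector<armnn::ConstTensor>& constants)
    {
        return { constants[0], constants[1], constants[2], constants[3] };
    }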
diff --git a/src/armnn/layers/BatchNormalizationLayer.hpp b/src/armnn/layers/BatchNormalizationLayer.hpp
index 3915897..dab75d1 100644
--- a/src/armnn/layers/BatchNormalizationLayer.hpp
+++ b/src/armnn/layers/BatchNormalizationLayer.hpp
@@ -41,6 +41,8 @@
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
 protected:
     /// Constructor to create a BatchNormalizationLayer.
     /// @param [in] param BatchNormalizationDescriptor to configure the batch normalization operation.
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp
index 76b9997..31e9e97 100644
--- a/src/armnn/layers/ConstantLayer.cpp
+++ b/src/armnn/layers/ConstantLayer.cpp
@@ -68,4 +68,10 @@
     visitor.VisitConstantLayer(this, layerOutputTensor, GetName());
 }
 
+void ConstantLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    std::vector<armnn::ConstTensor> constTensors { {m_LayerOutput->GetTensorInfo(), m_LayerOutput->Map(true)} };
+    strategy.ExecuteStrategy(this, BaseDescriptor(), constTensors, GetName());
+}
+
 } // namespace armnn
diff --git a/src/armnn/layers/ConstantLayer.hpp b/src/armnn/layers/ConstantLayer.hpp
index 36fa1f9..9d91551 100644
--- a/src/armnn/layers/ConstantLayer.hpp
+++ b/src/armnn/layers/ConstantLayer.hpp
@@ -41,6 +41,8 @@
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
     std::unique_ptr<ScopedCpuTensorHandle> m_LayerOutput;
 
 protected:
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index 18557bf..0c3040e 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -157,4 +157,16 @@
     visitor.VisitConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
 }
 
+void Convolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    std::vector<armnn::ConstTensor> constTensors { {m_Weight->GetTensorInfo(), m_Weight->Map(true)} };
+
+    if (GetParameters().m_BiasEnabled)
+    {
+        constTensors.emplace_back(ConstTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true)));
+    }
+
+    strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
+}
+
 } // namespace armnn
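
Convolution2d packs its weights as constants[0] and appends the bias only when the descriptor enables it (DepthwiseConvolution2d and FullyConnected below follow the same convention), so a consumer must check m_BiasEnabled before indexing past the weights. A sketch under that assumption (UnpackConvolutionConstants is an illustrative name):

    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/utility/IgnoreUnused.hpp>

    #include <vector>

    void UnpackConvolutionConstants(const armnn::Convolution2dDescriptor& descriptor,
                                    const std::vector<armnn::ConstTensor>& constants)
    {
        const armnn::ConstTensor& weights = constants[0];
        armnn::Optional<armnn::ConstTensor> biases;
        if (descriptor.m_BiasEnabled)
        {
            biases = armnn::Optional<armnn::ConstTensor>(constants[1]); // present only when enabled
        }
        // weights/biases can now be re-quantized, serialized, etc.
        armnn::IgnoreUnused(weights, biases);
    }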
diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp
index 4dd1497..440c80d 100644
--- a/src/armnn/layers/Convolution2dLayer.hpp
+++ b/src/armnn/layers/Convolution2dLayer.hpp
@@ -44,6 +44,8 @@
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
     void SerializeLayerParameters(ParameterStringifyFunction& fn) const override;
 
 protected:
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index ff9ceba..1871b7d 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -165,4 +165,16 @@
     visitor.VisitDepthwiseConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
 }
 
+void DepthwiseConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    std::vector<armnn::ConstTensor> constTensors { {m_Weight->GetTensorInfo(), m_Weight->Map(true)} };
+
+    if (GetParameters().m_BiasEnabled)
+    {
+        constTensors.emplace_back(ConstTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true)));
+    }
+
+    strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
+}
+
 } // namespace armnn
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
index dd0b0e6..7388cbc 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
@@ -43,6 +43,8 @@
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
     void SerializeLayerParameters(ParameterStringifyFunction& fn) const override;
 
 protected:
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index d54bf26..356377a 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -84,4 +84,11 @@
     visitor.VisitDetectionPostProcessLayer(this, GetParameters(), anchorTensor, GetName());
 }
 
+void DetectionPostProcessLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    std::vector<armnn::ConstTensor> constTensors { {m_Anchors->GetTensorInfo(), m_Anchors->GetConstTensor<void>()} };
+
+    strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
+}
+
 } // namespace armnn
diff --git a/src/armnn/layers/DetectionPostProcessLayer.hpp b/src/armnn/layers/DetectionPostProcessLayer.hpp
index 374eef5..b0d5858 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.hpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.hpp
@@ -36,6 +36,8 @@
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
 protected:
     /// Constructor to create a DetectionPostProcessLayer.
     /// @param [in] param DetectionPostProcessDescriptor to configure the detection postprocess.
diff --git a/src/armnn/layers/ElementwiseBaseLayer.cpp b/src/armnn/layers/ElementwiseBaseLayer.cpp
index 631e08c..a169d31 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.cpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.cpp
@@ -82,4 +82,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
 }
 
+void ElementwiseBaseLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
+}
+
 } // namespace armnn
diff --git a/src/armnn/layers/ElementwiseBaseLayer.hpp b/src/armnn/layers/ElementwiseBaseLayer.hpp
index 3893dcd..17e8b44 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.hpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.hpp
@@ -27,6 +27,8 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
 protected:
     /// @param numInputSlots The number of input slots for the layer.
     /// @param numOutputSlots The number of output slots for the layer.
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index a316b2b..102a672 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -52,4 +52,10 @@
     throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
 }
 
+void FakeQuantizationLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    IgnoreUnused(strategy);
+    throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
+}
+
 } // namespace armnn
diff --git a/src/armnn/layers/FakeQuantizationLayer.hpp b/src/armnn/layers/FakeQuantizationLayer.hpp
index 09bd530..78e49e6 100644
--- a/src/armnn/layers/FakeQuantizationLayer.hpp
+++ b/src/armnn/layers/FakeQuantizationLayer.hpp
@@ -30,6 +30,8 @@
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
 protected:
     /// Constructor to create a FakeQuantizationLayer.
     /// @param [in] param FakeQuantizationDescriptor to configure the fake quantization operation.
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index ca7a0cc..0e5e594 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -101,4 +101,16 @@
     visitor.VisitFullyConnectedLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
 }
 
+void FullyConnectedLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    std::vector<armnn::ConstTensor> constTensors { {m_Weight->GetTensorInfo(), m_Weight->Map(true)} };
+
+    if (GetParameters().m_BiasEnabled)
+    {
+        constTensors.emplace_back(ConstTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true)));
+    }
+
+    strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
+}
+
 } // namespace armnn
diff --git a/src/armnn/layers/FullyConnectedLayer.hpp b/src/armnn/layers/FullyConnectedLayer.hpp
index bbacd25..4a9cbe1 100644
--- a/src/armnn/layers/FullyConnectedLayer.hpp
+++ b/src/armnn/layers/FullyConnectedLayer.hpp
@@ -43,6 +43,8 @@
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
 protected:
     /// Constructor to create a FullyConnectedLayer.
     /// @param [in] param FullyConnectedDescriptor to configure the fully connected operation.
diff --git a/src/armnn/layers/LayerWithParameters.hpp b/src/armnn/layers/LayerWithParameters.hpp
index 3f3bdd8..952eff6 100644
--- a/src/armnn/layers/LayerWithParameters.hpp
+++ b/src/armnn/layers/LayerWithParameters.hpp
@@ -48,6 +48,11 @@
 
     /// The parameters for the layer (not including tensor-valued weights etc.).
     Parameters m_Param;
+
+    void ExecuteStrategy(IStrategy& strategy) const override
+    {
+        strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
+    }
 };
 
 } // namespace
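
This catch-all forwards the typed descriptor through the BaseDescriptor& parameter with an empty constants vector, so a strategy recovers the concrete descriptor by switching on the layer type and downcasting — the same idiom StaticRangeStrategy uses for Activation above. A minimal sketch (InspectSoftmax is illustrative):

    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>
    #include <armnn/utility/IgnoreUnused.hpp>

    void InspectSoftmax(const armnn::IConnectableLayer* layer,
                        const armnn::BaseDescriptor& descriptor)
    {
        if (layer->GetType() == armnn::LayerType::Softmax)
        {
            // The layer type tells us which concrete descriptor was forwarded.
            const auto& softmaxDescriptor = static_cast<const armnn::SoftmaxDescriptor&>(descriptor);
            armnn::IgnoreUnused(softmaxDescriptor); // e.g. read softmaxDescriptor.m_Beta here
        }
    }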
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 8e396ab..ebc408a 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -480,4 +480,150 @@
     visitor.VisitLstmLayer(this, GetParameters(), inputParams, GetName());
 }
 
+void LstmLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    std::vector<ConstTensor> constTensors;
+
+    LstmDescriptor descriptor = GetParameters();
+
+    // First add mandatory/basic parameters
+    if (m_BasicParameters.m_InputToForgetWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(),
+                                              m_BasicParameters.m_InputToForgetWeights->Map(true)));
+    }
+    if (m_BasicParameters.m_InputToCellWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_BasicParameters.m_InputToCellWeights->GetTensorInfo(),
+                                              m_BasicParameters.m_InputToCellWeights->Map(true)));
+    }
+    if (m_BasicParameters.m_InputToOutputWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(),
+                                              m_BasicParameters.m_InputToOutputWeights->Map(true)));
+    }
+    if (m_BasicParameters.m_RecurrentToForgetWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(
+                m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(),
+                m_BasicParameters.m_RecurrentToForgetWeights->Map(true)));
+    }
+    if (m_BasicParameters.m_RecurrentToCellWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(
+                m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(),
+                m_BasicParameters.m_RecurrentToCellWeights->Map(true)));
+    }
+    if (m_BasicParameters.m_RecurrentToOutputWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(
+                m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(),
+                m_BasicParameters.m_RecurrentToOutputWeights->Map(true)));
+    }
+    if (m_BasicParameters.m_ForgetGateBias != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_BasicParameters.m_ForgetGateBias->GetTensorInfo(),
+                                              m_BasicParameters.m_ForgetGateBias->Map(true)));
+    }
+    if (m_BasicParameters.m_CellBias != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_BasicParameters.m_CellBias->GetTensorInfo(),
+                                              m_BasicParameters.m_CellBias->Map(true)));
+    }
+    if (m_BasicParameters.m_OutputGateBias != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_BasicParameters.m_OutputGateBias->GetTensorInfo(),
+                                              m_BasicParameters.m_OutputGateBias->Map(true)));
+    }
+
+    // Add cifg parameters
+    if (!descriptor.m_CifgEnabled)
+    {
+        if (m_CifgParameters.m_InputToInputWeights != nullptr)
+        {
+            constTensors.emplace_back(ConstTensor(m_CifgParameters.m_InputToInputWeights->GetTensorInfo(),
+                                                  m_CifgParameters.m_InputToInputWeights->Map(true)));
+        }
+        if (m_CifgParameters.m_RecurrentToInputWeights != nullptr)
+        {
+            constTensors.emplace_back(ConstTensor(
+                    m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(),
+                    m_CifgParameters.m_RecurrentToInputWeights->Map(true)));
+        }
+        if (m_CifgParameters.m_InputGateBias != nullptr)
+        {
+            constTensors.emplace_back(ConstTensor(m_CifgParameters.m_InputGateBias->GetTensorInfo(),
+                                                  m_CifgParameters.m_InputGateBias->Map(true)));
+        }
+    }
+
+    // Add peephole parameters
+    if (descriptor.m_PeepholeEnabled)
+    {
+        if (!descriptor.m_CifgEnabled)
+        {
+            if (m_PeepholeParameters.m_CellToInputWeights != nullptr)
+            {
+                constTensors.emplace_back(ConstTensor(m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
+                                                      m_PeepholeParameters.m_CellToInputWeights->Map(true)));
+            }
+        }
+        if (m_PeepholeParameters.m_CellToForgetWeights != nullptr)
+        {
+            constTensors.emplace_back(ConstTensor(m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(),
+                                                  m_PeepholeParameters.m_CellToForgetWeights->Map(true)));
+        }
+        if (m_PeepholeParameters.m_CellToOutputWeights != nullptr)
+        {
+            constTensors.emplace_back(ConstTensor(m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(),
+                                                  m_PeepholeParameters.m_CellToOutputWeights->Map(true)));
+        }
+    }
+
+    // Add projection parameters
+    if (descriptor.m_ProjectionEnabled)
+    {
+        if (m_ProjectionParameters.m_ProjectionWeights != nullptr)
+        {
+            constTensors.emplace_back(ConstTensor(m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(),
+                                                  m_ProjectionParameters.m_ProjectionWeights->Map(true)));
+        }
+        if (m_ProjectionParameters.m_ProjectionBias != nullptr)
+        {
+            constTensors.emplace_back(ConstTensor(m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(),
+                                                  m_ProjectionParameters.m_ProjectionBias->Map(true)));
+        }
+    }
+
+    // Add norm parameters
+    if (descriptor.m_LayerNormEnabled)
+    {
+        if (!descriptor.m_CifgEnabled)
+        {
+            if (m_LayerNormParameters.m_InputLayerNormWeights != nullptr)
+            {
+                constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(),
+                                                      m_LayerNormParameters.m_InputLayerNormWeights->Map(true)));
+            }
+        }
+        if (m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr)
+        {
+            constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(),
+                                                  m_LayerNormParameters.m_ForgetLayerNormWeights->Map(true)));
+        }
+        if (m_LayerNormParameters.m_CellLayerNormWeights != nullptr)
+        {
+            constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(),
+                                                  m_LayerNormParameters.m_CellLayerNormWeights->Map(true)));
+        }
+        if (m_LayerNormParameters.m_OutputLayerNormWeights != nullptr)
+        {
+            constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(),
+                                                  m_LayerNormParameters.m_OutputLayerNormWeights->Map(true)));
+        }
+    }
+
+    strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
+}
+
 } // namespace armnn
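
Since the LSTM constants are appended conditionally, the consuming side has to walk constTensors with a cursor, applying the same descriptor flags in the same order. A hedged sketch, assuming all nine basic tensors are populated (UnpackLstmConstants and its cursor helper are illustrative, not armnn API):

    #include <armnn/Descriptors.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/utility/IgnoreUnused.hpp>

    #include <array>
    #include <cstddef>
    #include <vector>

    void UnpackLstmConstants(const armnn::LstmDescriptor& descriptor,
                             const std::vector<armnn::ConstTensor>& constants)
    {
        std::size_t cursor = 0;
        auto next = [&]() -> const armnn::ConstTensor& { return constants[cursor++]; };

        // Nine mandatory basic tensors come first, in the order packed above:
        // input-to-{forget,cell,output} weights, recurrent-to-{forget,cell,output}
        // weights, then the forget-gate, cell and output-gate biases.
        std::array<armnn::ConstTensor, 9> basicParameters;
        for (armnn::ConstTensor& tensor : basicParameters)
        {
            tensor = next();
        }

        if (!descriptor.m_CifgEnabled)
        {
            const armnn::ConstTensor& inputToInputWeights     = next();
            const armnn::ConstTensor& recurrentToInputWeights = next();
            const armnn::ConstTensor& inputGateBias           = next();
            armnn::IgnoreUnused(inputToInputWeights, recurrentToInputWeights, inputGateBias);
        }
        // Peephole, projection and layer-norm tensors follow, gated by
        // m_PeepholeEnabled, m_ProjectionEnabled and m_LayerNormEnabled.
        armnn::IgnoreUnused(basicParameters);
    }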
diff --git a/src/armnn/layers/LstmLayer.hpp b/src/armnn/layers/LstmLayer.hpp
index 51348d7..30f952e 100644
--- a/src/armnn/layers/LstmLayer.hpp
+++ b/src/armnn/layers/LstmLayer.hpp
@@ -107,6 +107,8 @@
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
 protected:
     /// Constructor to create a LstmLayer.
     /// @param [in] param LstmDescriptor to configure the lstm operation.
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index d9a802c..40c1b98 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -55,4 +55,10 @@
     throw armnn::Exception("MemCopyLayer should not appear in an input graph");
 }
 
+void MemCopyLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    IgnoreUnused(strategy);
+    throw armnn::Exception("MemCopyLayer should not appear in an input graph");
+}
+
 } // namespace armnn
diff --git a/src/armnn/layers/MemCopyLayer.hpp b/src/armnn/layers/MemCopyLayer.hpp
index 996d687..b913c52 100644
--- a/src/armnn/layers/MemCopyLayer.hpp
+++ b/src/armnn/layers/MemCopyLayer.hpp
@@ -30,6 +30,8 @@
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
 protected:
     /// Constructor to create a MemCopyLayer.
     /// @param [in] name Optional name for the layer.
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index 3d1c702..c96f92b 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -55,4 +55,10 @@
     throw armnn::Exception("MemImportLayer should not appear in an input graph");
 }
 
+void MemImportLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    IgnoreUnused(strategy);
+    throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
+}
+
 } // namespace armnn
diff --git a/src/armnn/layers/MemImportLayer.hpp b/src/armnn/layers/MemImportLayer.hpp
index 1cbdaac..4737970 100644
--- a/src/armnn/layers/MemImportLayer.hpp
+++ b/src/armnn/layers/MemImportLayer.hpp
@@ -30,6 +30,8 @@
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
 protected:
     /// Constructor to create a MemImportLayer.
     /// @param [in] name Optional name for the layer.
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index dbbc1fd..75c1e46 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -55,4 +55,10 @@
     throw armnn::Exception("PreCompiledLayer should not appear in an input graph");
 }
 
+void PreCompiledLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    IgnoreUnused(strategy);
+    throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
+}
+
 } // namespace armnn
diff --git a/src/armnn/layers/PreCompiledLayer.hpp b/src/armnn/layers/PreCompiledLayer.hpp
index a4851c7..2ed8757 100644
--- a/src/armnn/layers/PreCompiledLayer.hpp
+++ b/src/armnn/layers/PreCompiledLayer.hpp
@@ -35,6 +35,8 @@
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
 private:
     PreCompiledLayer(const PreCompiledLayer& other) = delete;
     PreCompiledLayer& operator=(const PreCompiledLayer& other) = delete;
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index 85f99bd..d957bbb 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -503,4 +503,130 @@
     visitor.VisitQLstmLayer(this, GetParameters(), inputParams, GetName());
 }
 
+void QLstmLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    std::vector<ConstTensor> constTensors;
+
+    // First add mandatory/basic parameters
+    if (m_BasicParameters.m_InputToForgetWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(),
+                                              m_BasicParameters.m_InputToForgetWeights->Map(true)));
+    }
+    if (m_BasicParameters.m_InputToCellWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_BasicParameters.m_InputToCellWeights->GetTensorInfo(),
+                                              m_BasicParameters.m_InputToCellWeights->Map(true)));
+    }
+    if (m_BasicParameters.m_InputToOutputWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(),
+                                              m_BasicParameters.m_InputToOutputWeights->Map(true)));
+    }
+    if (m_BasicParameters.m_RecurrentToForgetWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(
+                m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(),
+                m_BasicParameters.m_RecurrentToForgetWeights->Map(true)));
+    }
+    if (m_BasicParameters.m_RecurrentToCellWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(
+                m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(),
+                m_BasicParameters.m_RecurrentToCellWeights->Map(true)));
+    }
+    if (m_BasicParameters.m_RecurrentToOutputWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(
+                m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(),
+                m_BasicParameters.m_RecurrentToOutputWeights->Map(true)));
+    }
+    if (m_BasicParameters.m_ForgetGateBias != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_BasicParameters.m_ForgetGateBias->GetTensorInfo(),
+                                              m_BasicParameters.m_ForgetGateBias->Map(true)));
+    }
+    if (m_BasicParameters.m_CellBias != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_BasicParameters.m_CellBias->GetTensorInfo(),
+                                              m_BasicParameters.m_CellBias->Map(true)));
+    }
+    if (m_BasicParameters.m_OutputGateBias != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_BasicParameters.m_OutputGateBias->GetTensorInfo(),
+                                              m_BasicParameters.m_OutputGateBias->Map(true)));
+    }
+
+    // Add cifg parameters
+    if (m_CifgParameters.m_InputToInputWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_CifgParameters.m_InputToInputWeights->GetTensorInfo(),
+                                              m_CifgParameters.m_InputToInputWeights->Map(true)));
+    }
+    if (m_CifgParameters.m_RecurrentToInputWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(
+                m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(),
+                m_CifgParameters.m_RecurrentToInputWeights->Map(true)));
+    }
+    if (m_CifgParameters.m_InputGateBias != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_CifgParameters.m_InputGateBias->GetTensorInfo(),
+                                              m_CifgParameters.m_InputGateBias->Map(true)));
+    }
+
+    // Add peephole parameters
+    if (m_PeepholeParameters.m_CellToInputWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
+                                              m_PeepholeParameters.m_CellToInputWeights->Map(true)));
+    }
+    if (m_PeepholeParameters.m_CellToForgetWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(),
+                                              m_PeepholeParameters.m_CellToForgetWeights->Map(true)));
+    }
+    if (m_PeepholeParameters.m_CellToOutputWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(),
+                                              m_PeepholeParameters.m_CellToOutputWeights->Map(true)));
+    }
+
+    // Add projection parameters
+    if (m_ProjectionParameters.m_ProjectionWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(),
+                                              m_ProjectionParameters.m_ProjectionWeights->Map(true)));
+    }
+    if (m_ProjectionParameters.m_ProjectionBias != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(),
+                                              m_ProjectionParameters.m_ProjectionBias->Map(true)));
+    }
+
+    // Add norm parameters
+    if (m_LayerNormParameters.m_InputLayerNormWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(),
+                                              m_LayerNormParameters.m_InputLayerNormWeights->Map(true)));
+    }
+    if (m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(),
+                                              m_LayerNormParameters.m_ForgetLayerNormWeights->Map(true)));
+    }
+    if (m_LayerNormParameters.m_CellLayerNormWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(),
+                                              m_LayerNormParameters.m_CellLayerNormWeights->Map(true)));
+    }
+    if (m_LayerNormParameters.m_OutputLayerNormWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(),
+                                              m_LayerNormParameters.m_OutputLayerNormWeights->Map(true)));
+    }
+    strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
+}
+
 } // namespace armnn
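
(Note that the order of constTensors above is load-bearing: basic weights and
biases are appended first, then the CIFG, peephole, projection and
layer-normalisation tensors, each gated on the same descriptor flags. A
consumer rebuilding the parameters has to walk the vector in the same order.
A hypothetical sketch, not part of this change:)

    #include <armnn/Tensor.hpp>
    #include <armnn/utility/IgnoreUnused.hpp>

    #include <cstddef>
    #include <vector>

    // Hypothetical consumer: relies on the append order used above.
    void ConsumeQLstmConstants(const std::vector<armnn::ConstTensor>& constants)
    {
        std::size_t index = 0;
        auto next = [&]() -> const armnn::ConstTensor& { return constants[index++]; };

        const armnn::ConstTensor& inputToForgetWeights = next();
        const armnn::ConstTensor& inputToCellWeights   = next();
        const armnn::ConstTensor& inputToOutputWeights = next();
        // Recurrent weights and gate biases follow in the same order they
        // were appended; CIFG, peephole, projection and layer-norm tensors
        // appear afterwards only when the matching QLstmDescriptor flags are
        // set, so the same flags must gate the reads.
        armnn::IgnoreUnused(inputToForgetWeights, inputToCellWeights, inputToOutputWeights);
    }
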
diff --git a/src/armnn/layers/QLstmLayer.hpp b/src/armnn/layers/QLstmLayer.hpp
index 5757ef6..70cc4f2 100644
--- a/src/armnn/layers/QLstmLayer.hpp
+++ b/src/armnn/layers/QLstmLayer.hpp
@@ -109,6 +109,8 @@
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
 protected:
     /// Constructor to create a QLstmLayer.
     /// @param [in] name Optional name for the layer.
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index 624e443..578d9eb 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -291,4 +291,91 @@
     visitor.VisitQuantizedLstmLayer(this, inputParams, GetName());
 }
 
+void QuantizedLstmLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    std::vector<ConstTensor> constTensors;
+
+    // InputToX weight tensors
+    if (m_QuantizedLstmParameters.m_InputToInputWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo(),
+                                              m_QuantizedLstmParameters.m_InputToInputWeights->Map(true)));
+    }
+
+    if (m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo(),
+                                              m_QuantizedLstmParameters.m_InputToForgetWeights->Map(true)));
+    }
+
+    if (m_QuantizedLstmParameters.m_InputToCellWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo(),
+                                              m_QuantizedLstmParameters.m_InputToCellWeights->Map(true)));
+    }
+
+    if (m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo(),
+                                              m_QuantizedLstmParameters.m_InputToOutputWeights->Map(true)));
+    }
+
+    // RecurrentToX weight tensors
+    if (m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(
+                m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo(),
+                m_QuantizedLstmParameters.m_RecurrentToInputWeights->Map(true)));
+    }
+
+    if (m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(
+                m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo(),
+                m_QuantizedLstmParameters.m_RecurrentToForgetWeights->Map(true)));
+    }
+
+    if (m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(
+                m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo(),
+                m_QuantizedLstmParameters.m_RecurrentToCellWeights->Map(true)));
+    }
+
+    if (m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(
+                m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo(),
+                m_QuantizedLstmParameters.m_RecurrentToOutputWeights->Map(true)));
+    }
+
+    // Bias tensors
+    if (m_QuantizedLstmParameters.m_InputGateBias != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo(),
+                                              m_QuantizedLstmParameters.m_InputGateBias->Map(true)));
+    }
+
+    if (m_QuantizedLstmParameters.m_ForgetGateBias != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo(),
+                                              m_QuantizedLstmParameters.m_ForgetGateBias->Map(true)));
+    }
+
+    if (m_QuantizedLstmParameters.m_CellBias != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_CellBias->GetTensorInfo(),
+                                              m_QuantizedLstmParameters.m_CellBias->Map(true)));
+    }
+
+    if (m_QuantizedLstmParameters.m_OutputGateBias != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo(),
+                                              m_QuantizedLstmParameters.m_OutputGateBias->Map(true)));
+    }
+
+    strategy.ExecuteStrategy(this, BaseDescriptor(), constTensors, GetName());
+}
+
 } // namespace armnn
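
(QuantizedLstmLayer carries no descriptor of its own, so it forwards a
default-constructed BaseDescriptor. A strategy should therefore dispatch on
the layer type before downcasting the descriptor, as the reworked tests below
do. An illustrative fragment; the function name is hypothetical:)

    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>
    #include <armnn/Types.hpp>
    #include <armnn/utility/IgnoreUnused.hpp>

    void OnLayer(const armnn::IConnectableLayer* layer,
                 const armnn::BaseDescriptor& descriptor)
    {
        if (layer->GetType() == armnn::LayerType::Activation)
        {
            // Safe: Activation layers always pass an ActivationDescriptor.
            const auto& desc = static_cast<const armnn::ActivationDescriptor&>(descriptor);
            armnn::IgnoreUnused(desc); // desc.m_Function, m_A and m_B usable here
        }
        // For parameterless layers such as QuantizedLstm or Rank, the
        // descriptor is a plain BaseDescriptor and no downcast is valid.
    }
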
diff --git a/src/armnn/layers/QuantizedLstmLayer.hpp b/src/armnn/layers/QuantizedLstmLayer.hpp
index bfe86a4..544acbd 100644
--- a/src/armnn/layers/QuantizedLstmLayer.hpp
+++ b/src/armnn/layers/QuantizedLstmLayer.hpp
@@ -71,6 +71,8 @@
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
 protected:
     /// Constructor to create a QuantizedLstmLayer.
     /// @param [in] name Optional name for the layer.
diff --git a/src/armnn/layers/RankLayer.cpp b/src/armnn/layers/RankLayer.cpp
index 2b0dffe..3b14ef0 100644
--- a/src/armnn/layers/RankLayer.cpp
+++ b/src/armnn/layers/RankLayer.cpp
@@ -46,4 +46,9 @@
     visitor.VisitRankLayer(this, GetName());
 }
 
+void RankLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
+}
+
 } //namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/RankLayer.hpp b/src/armnn/layers/RankLayer.hpp
index f4f1ec9..fbd2824 100644
--- a/src/armnn/layers/RankLayer.hpp
+++ b/src/armnn/layers/RankLayer.hpp
@@ -24,7 +24,9 @@
 
         void Accept(ILayerVisitor& visitor) const override;
 
-    protected:
+        void ExecuteStrategy(IStrategy& strategy) const override;
+
+    protected:
         RankLayer(const char* name);
         ~RankLayer() = default;
 };
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index 189e5f6..bd8cb09 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -135,4 +135,16 @@
     visitor.VisitTransposeConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
 }
 
+void TransposeConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    std::vector<armnn::ConstTensor> constTensors { {m_Weight->GetTensorInfo(), m_Weight->Map(true)} };
+
+    if (GetParameters().m_BiasEnabled)
+    {
+        constTensors.emplace_back(ConstTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true)));
+    }
+
+    strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
+}
+
 } // namespace armnn
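
(The constants vector follows the convention shared by the weighted layers in
this patch: index 0 holds the weights; index 1 holds the bias and is present
only when the descriptor enables it. A consumer branches on constants.size();
a hypothetical sketch, not part of this change:)

    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/utility/IgnoreUnused.hpp>

    #include <vector>

    void HandleTransposeConv2d(const armnn::TransposeConvolution2dDescriptor& desc,
                               const std::vector<armnn::ConstTensor>& constants)
    {
        const armnn::ConstTensor& weights = constants[0];   // always present
        armnn::Optional<armnn::ConstTensor> biases;         // empty unless enabled
        if (desc.m_BiasEnabled && constants.size() == 2)
        {
            biases = armnn::Optional<armnn::ConstTensor>(constants[1]);
        }
        // weights/biases can now be handed to a serializer or backend.
        armnn::IgnoreUnused(weights, biases);
    }
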
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.hpp b/src/armnn/layers/TransposeConvolution2dLayer.hpp
index 1ee984d..903c957 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.hpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.hpp
@@ -42,6 +42,8 @@
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
 protected:
     /// Constructor to create a TransposeConvolution2dLayer.
     /// @param [in] param TransposeConvolution2dDescriptor to configure the 2D transpose convolution operation.
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index da85029..67d0f95 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -7,10 +7,8 @@
 #include "../Network.hpp"
 #include "../NetworkQuantizerUtils.hpp"
 #include "../OverrideInputRangeVisitor.hpp"
-#include "../RangeTracker.hpp"
 
 #include <armnn/INetwork.hpp>
-#include <armnn/LayerVisitorBase.hpp>
 #include <armnn/Tensor.hpp>
 #include <armnn/Types.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
@@ -37,45 +35,332 @@
 
 BOOST_AUTO_TEST_SUITE(Quantizer)
 
-class TestQuantization : public LayerVisitorBase<VisitorThrowingPolicy>
+class TestQuantization : public IStrategy
 {
 public:
-    TestQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-    : LayerVisitorBase<VisitorThrowingPolicy>()
-    , m_InputShape(inputShape)
-    , m_OutputShape(outputShape)
-    , m_QuantizerOptions(QuantizerOptions()) {}
+    TestQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
+    : m_InputShape(inputShape)
+    , m_OutputShape(outputShape)
+    , m_QuantizerOptions(QuantizerOptions()) {}
 
     TestQuantization(const QuantizerOptions& options, const TensorShape& inputShape, const TensorShape& outputShape)
-    : LayerVisitorBase<VisitorThrowingPolicy>()
-    , m_InputShape(inputShape)
+    : m_InputShape(inputShape)
     , m_OutputShape(outputShape)
     , m_QuantizerOptions(options) {}
 
-    void VisitInputLayer(const IConnectableLayer* layer,
-                         LayerBindingId id,
-                         const char* name = nullptr) override
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id) override
     {
         IgnoreUnused(id, name);
+
+        if (layer->GetType() == armnn::LayerType::Output)
+        {
+            const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
+            BOOST_TEST(m_OutputShape == info.GetShape());
+            return;
+        }
+
         const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
-        BOOST_TEST(m_InputShape == info.GetShape());
-        // Based off current default [-15.0f, 15.0f]
-        TestQuantizationParams(info, {30.0f / g_AsymmU8QuantizationBase, 128},
-                                     {30.0f / g_AsymmS8QuantizationBase, 0},
-                                     {15.0f / g_SymmS8QuantizationBase , 0},
-                                     {15.0f / g_SymmS16QuantizationBase, 0});
+
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::BatchToSpaceNd :
+            case armnn::LayerType::Permute :
+            case armnn::LayerType::Pooling2d :
+            case armnn::LayerType::Reshape :
+            case armnn::LayerType::Resize :
+            case armnn::LayerType::SpaceToBatchNd :
+            case armnn::LayerType::Splitter :
+            case armnn::LayerType::StridedSlice :
+            {
+                CheckDefaultQuantizationSettings(info);
+                break;
+            }
+            case armnn::LayerType::Addition :
+            {
+                // Based off default static range [-20.0f, 20.0f]
+                TestQuantizationParams(info, {40.0f / g_AsymmU8QuantizationBase, 128},
+                                       {40.0f / g_AsymmS8QuantizationBase, 0},
+                                       {20.0f / g_SymmS8QuantizationBase,  0},
+                                       {20.0f / g_SymmS16QuantizationBase, 0});
+                break;
+            }
+            case armnn::LayerType::Activation :
+            {
+                const ActivationDescriptor& activationDescriptor = static_cast<const ActivationDescriptor&>(descriptor);
+
+                switch (activationDescriptor.m_Function)
+                {
+                    case ActivationFunction::BoundedReLu :
+                    {
+                        // Based off default static range [0.0f, 3.5f]
+                        TestQuantizationParams(info, {3.5f / g_AsymmU8QuantizationBase, 0},
+                                               {3.5f / g_AsymmS8QuantizationBase, -128},
+                                               {3.5f / g_SymmS8QuantizationBase,  0},
+                                               {3.5f / g_SymmS16QuantizationBase, 0});
+                        break;
+                    }
+                    case ActivationFunction::Elu :
+                    {
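+                        // Based off default static range [-15.0f, 15.0f]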
+                        TestQuantizationParams(
+                                info, {30.0f / g_AsymmU8QuantizationBase, 128},
+                                {30.0f / g_AsymmS8QuantizationBase, 0},
+                                {15.0f / g_SymmS8QuantizationBase,  0},
+                                {15.0f / g_SymmS16QuantizationBase, 0});
+                        break;
+                    }
+                    case ActivationFunction::HardSwish :
+                    {
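+                        // Based off default static range [-15.0f, 15.0f]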
+                        TestQuantizationParams(info, {30.0f / g_AsymmU8QuantizationBase, 128},
+                                               {30.0f / g_AsymmS8QuantizationBase, 0},
+                                               {15.0f / g_SymmS8QuantizationBase, 0},
+                                               {15.0f / g_SymmS16QuantizationBase, 0});
+                        break;
+                    }
+                    case ActivationFunction::LeakyReLu :
+                    {
+                        // Based off default static range [-5.0f, 15.0f]
+                        TestQuantizationParams(info, {20.0f / g_AsymmU8QuantizationBase, 64},
+                                               {20.0f / g_AsymmS8QuantizationBase,-64},
+                                               {15.0f / g_SymmS8QuantizationBase ,  0},
+                                               {15.0f / g_SymmS16QuantizationBase,  0});
+                        break;
+                    }
+                    case ActivationFunction::TanH :
+                    {
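+                        // Based off default static range [-1.0f, 1.0f]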
+                        TestQuantizationParams(info, {2.0f / g_AsymmU8QuantizationBase, 128},
+                                               {2.0f / g_AsymmS8QuantizationBase,   0},
+                                               {1.0f / g_SymmS8QuantizationBase ,   0},
+                                               {1.0f / g_SymmS16QuantizationBase,   0});
+                        break;
+                    }
+                    default:
+                    {
+                        // Based off default static range [0.0f, 15.0f]
+                        TestQuantizationParams(info, {15.0f / g_AsymmU8QuantizationBase, 0},
+                                               {15.0f / g_AsymmS8QuantizationBase, -128},
+                                               {15.0f / g_SymmS8QuantizationBase, 0},
+                                               {15.0f / g_SymmS16QuantizationBase, 0});
+                        break;
+                    }
+                }
+                break;
+            }
+            case armnn::LayerType::ArgMinMax :
+            {
+                const ArgMinMaxDescriptor& argMinMaxDescriptor = static_cast<const ArgMinMaxDescriptor&>(descriptor);
+
+                if(argMinMaxDescriptor.m_Function == ArgMinMaxFunction::Max)
+                {
+                    break;
+                }
+                TestQuantizationParams(info,
+                                       { 30.0f / g_AsymmU8QuantizationBase, 128 },
+                                       { 30.0f / g_AsymmS8QuantizationBase,  0},
+                                       { 15.0f / g_SymmS8QuantizationBase,  0},
+                                       { 15.0f / g_SymmS16QuantizationBase, 0 });
+                break;
+            }
+            case armnn::LayerType::BatchNormalization :
+            {
+                // Based off default static range [-15.0f, 15.0f]
+                TestQuantizationParams(
+                        info, {30.0f / g_AsymmU8QuantizationBase, 128},
+                        {30.0f / g_AsymmS8QuantizationBase, 0},
+                        {15.0f / g_SymmS8QuantizationBase, 0},
+                        {15.0f / g_SymmS16QuantizationBase, 0});
+
+                // Test constants
+                TestConstantQuantizationParams(constants[0].GetInfo(), {3.0f / g_AsymmU8QuantizationBase, 85});
+                TestConstantQuantizationParams(constants[1].GetInfo(), {3.0f / g_AsymmU8QuantizationBase, 85});
+                TestConstantQuantizationParams(constants[2].GetInfo(), {3.0f / g_AsymmU8QuantizationBase, 85});
+                TestConstantQuantizationParams(constants[3].GetInfo(), {3.0f / g_AsymmU8QuantizationBase, 85});
+                break;
+            }
+            case armnn::LayerType::Comparison :
+            {
+                const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
+                const OffsetScalePair qAsymmS8Params { 30.0f / g_AsymmS8QuantizationBase,  0};
+                const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase,  0};
+                const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 };
+
+                TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params);
+
+                break;
+            }
+            case armnn::LayerType::Constant :
+            {
+                // Based off the range of values in the const tensor used for the test: [-2.0f, 6.0f]
+                TestQuantizationParams(info, {8.0f / g_AsymmU8QuantizationBase, 64},
+                                       {8.0f / g_AsymmS8QuantizationBase, -64},
+                                       {6.0f / g_SymmS8QuantizationBase,  0},
+                                       {6.0f / g_SymmS16QuantizationBase, 0});
+
+                break;
+            }
+            case armnn::LayerType::Convolution2d :
+            {
+                if (constants.size() == 1)
+                {
+                    TestQuantizationOnLayersWithBiases(layer, constants[0], armnn::EmptyOptional());
+                }
+                else if (constants.size() == 2)
+                {
+                    TestQuantizationOnLayersWithBiases(layer, constants[0], constants[1]);
+                }
+                break;
+            }
+            case armnn::LayerType::DepthwiseConvolution2d :
+            {
+                if (constants.size() == 2)
+                {
+                    TestQuantizationOnLayersWithBiases(layer, constants[0], constants[1]);
+                }
+                else if (constants.size() == 1)
+                {
+                    TestQuantizationOnLayersWithBiases(layer, constants[0], armnn::EmptyOptional());
+                }
+                break;
+            }
+            case armnn::LayerType::DepthToSpace :
+            {
+                const OffsetScalePair qAsymmU8Params{30.0f / g_AsymmU8QuantizationBase, 128};
+                const OffsetScalePair qAsymmS8Params{30.0f / g_AsymmS8QuantizationBase, 0};
+                const OffsetScalePair qSymmS8Params{15.0f / g_SymmS8QuantizationBase, 0};
+                const OffsetScalePair qSymmS16Params{15.0f / g_SymmS16QuantizationBase, 0};
+
+                TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params);
+                break;
+            }
+            case armnn::LayerType::FullyConnected :
+            {
+                if (constants.size() == 2)
+                {
+                    TestQuantizationOnLayersWithBiases(layer, constants[0], constants[1]);
+                }
+                else if (constants.size() == 1)
+                {
+                    TestQuantizationOnLayersWithBiases(layer, constants[0], armnn::EmptyOptional());
+                }
+
+                break;
+            }
+            case armnn::LayerType::Fill :
+            {
+                const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
+                const OffsetScalePair qAsymmS8Params { 30.0f / g_AsymmS8QuantizationBase,  0};
+                const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase,  0};
+                const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 };
+
+                TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params);
+                break;
+            }
+            case armnn::LayerType::Input :
+            {
+                BOOST_TEST(m_InputShape == info.GetShape());
+                // Based off current default [-15.0f, 15.0f]
+                TestQuantizationParams(info, {30.0f / g_AsymmU8QuantizationBase, 128},
+                                       {30.0f / g_AsymmS8QuantizationBase, 0},
+                                       {15.0f / g_SymmS8QuantizationBase, 0},
+                                       {15.0f / g_SymmS16QuantizationBase, 0});
+                break;
+            }
+            case armnn::LayerType::InstanceNormalization :
+            {
+                const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
+                const OffsetScalePair qAsymmS8Params { 30.0f / g_AsymmS8QuantizationBase,  0};
+                const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase,  0};
+                const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 };
+
+                TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params);
+                break;
+            }
+            case armnn::LayerType::LogSoftmax :
+            {
+                const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
+                const OffsetScalePair qAsymmS8Params { 30.0f / g_AsymmS8QuantizationBase,  0};
+                const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase,  0};
+                const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 };
+
+                TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params);
+                break;
+            }
+            case armnn::LayerType::Slice :
+            {
+                const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
+                const OffsetScalePair qAsymmS8Params{ 30.0f / g_AsymmS8QuantizationBase, 0 };
+                const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase,  0 };
+                const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 };
+
+                TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params);
+                break;
+            }
+            case armnn::LayerType::Softmax :
+            {
+                // Based off default static range [0.0f, 1.0f]
+                TestQuantizationParams(info, {1.0f / g_AsymmU8QuantizationBase, 0},
+                                       {1.0f / g_AsymmS8QuantizationBase, -128},
+                                       {1.0f / g_SymmS8QuantizationBase,  0},
+                                       {1.0f / g_SymmS16QuantizationBase, 0});
+                break;
+            }
+            case armnn::LayerType::SpaceToDepth :
+            {
+                TestQuantizationParams(info,
+                                       { 30.0f / g_AsymmU8QuantizationBase, 128 },
+                                       { 30.0f / g_AsymmS8QuantizationBase, 0   },
+                                       { 15.0f / g_SymmS8QuantizationBase,  0   },
+                                       { 15.0f / g_SymmS16QuantizationBase, 0   });
+
+                break;
+            }
+            case armnn::LayerType::Stack :
+            {
+                TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
+
+                TestQuantizationParams(outputInfo,
+                                       { 30.0f / g_AsymmU8QuantizationBase, 128 },
+                                       { 30.0f / g_AsymmS8QuantizationBase, 0},
+                                       { 15.0f / g_SymmS8QuantizationBase,  0},
+                                       { 15.0f / g_SymmS16QuantizationBase, 0 });
+                break;
+            }
+            case armnn::LayerType::TransposeConvolution2d :
+            {
+                if (constants.size() == 2)
+                {
+                    TestQuantizationOnLayersWithBiases(layer, constants[0], constants[1]);
+                }
+                else if (constants.size() == 1)
+                {
+                    TestQuantizationOnLayersWithBiases(layer, constants[0], armnn::EmptyOptional());
+                }
+                break;
+            }
+            default:
+            {
+                throw UnimplementedException("Unimplemented layer encountered");
+            }
+        }
     }
 
-    void VisitOutputLayer(const IConnectableLayer* layer,
-                          LayerBindingId id,
-                          const char* name = nullptr) override
-    {
-        IgnoreUnused(id, name);
-        const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
-        BOOST_TEST(m_OutputShape == info.GetShape());
-    }
 
 protected:
+
+    void CheckDefaultQuantizationSettings(const TensorInfo& info)
+    {
+        TestQuantizationParams(info, {20.0f / g_AsymmU8QuantizationBase, 64},
+                               {20.0f / g_AsymmS8QuantizationBase,-64},
+                               {15.0f / g_SymmS8QuantizationBase,   0},
+                               {15.0f / g_SymmS16QuantizationBase,  0});
+    }
+
     void TestQuantizationParams(const TensorInfo& info,
                                 const OffsetScalePair& qAsymmU8Params,
                                 const OffsetScalePair& qAsymmS8Params,
@@ -188,39 +473,41 @@
     QuantizerOptions m_QuantizerOptions;
 };
 
-void VisitLayersTopologically(const INetwork* inputNetwork, ILayerVisitor& visitor)
+void VisitLayersTopologically(const INetwork* inputNetwork, IStrategy& strategy)
 {
     auto network = PolymorphicDowncast<const Network*>(inputNetwork);
     auto graph = network->GetGraph().TopologicalSort();
 
-    VisitLayers(graph, visitor);
+    ApplyStrategyToLayers(graph, strategy);
 }
 
-class TestAdditionQuantization : public TestQuantization
+void TestNetwork(INetwork* network, const TensorShape& inShape, const TensorShape& outShape)
 {
-public:
-    TestAdditionQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-    : TestQuantization(inputShape, outputShape) {}
+    const QuantizerOptions qAsymmU8Options(DataType::QAsymmU8);
+    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network, qAsymmU8Options)->ExportNetwork();
+    TestQuantization validatorQAsymmU8(inShape, outShape);
+    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
 
-    TestAdditionQuantization(const QuantizerOptions& options,
-                             const TensorShape& inputShape,
-                             const TensorShape& outputShape)
-    : TestQuantization(options, inputShape, outputShape) {}
+    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
+    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network, qAsymmS8Options)->ExportNetwork();
+    TestQuantization validatorQAsymmS8(qAsymmS8Options, inShape, outShape);
+    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
 
-    void VisitAdditionLayer(const IConnectableLayer* layer,
-                            const char* name = nullptr) override
-    {
-        IgnoreUnused(name);
-        TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
+    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network, qSymmS8Options)->ExportNetwork();
+    TestQuantization validatorQSymmS8(qSymmS8Options, inShape, outShape);
+    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
 
-        // Based off default static range [-20.0f, 20.0f]
-        TestQuantizationParams(info, {40.0f / g_AsymmU8QuantizationBase, 128},
-                                     {40.0f / g_AsymmS8QuantizationBase, 0},
-                                     {20.0f / g_SymmS8QuantizationBase,  0},
-                                     {20.0f / g_SymmS16QuantizationBase, 0});
-    }
-};
+    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
+    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network, qSymmS16options)->ExportNetwork();
+    TestQuantization validatorQSymmS16(qSymmS16options, inShape, outShape);
+    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+}
 
+void TestNetwork(INetwork* network, const TensorShape& shape)
+{
+    TestNetwork(network, shape, shape);
+}
 
 BOOST_AUTO_TEST_CASE(QuantizeAddition)
 {
@@ -244,54 +531,9 @@
     input1->GetOutputSlot(0).SetTensorInfo(info);
     addition->GetOutputSlot(0).SetTensorInfo(info);
 
-    const QuantizerOptions qAsymmU8Options(DataType::QAsymmU8);
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get(), qAsymmU8Options)->ExportNetwork();
-    TestAdditionQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestAdditionQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestAdditionQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestAdditionQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
-class TestActivationQuantization : public TestQuantization
-{
-public:
-    TestActivationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-    : TestQuantization(inputShape, outputShape) {}
-
-    TestActivationQuantization(const QuantizerOptions& options,
-                               const TensorShape& inputShape,
-                               const TensorShape& outputShape)
-    : TestQuantization(options, inputShape, outputShape) {}
-
-    void VisitActivationLayer(const IConnectableLayer* layer,
-                              const ActivationDescriptor& descriptor,
-                              const char* name = nullptr) override
-    {
-        IgnoreUnused(descriptor, name);
-
-        TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-        // Based off default static range [0.0f, 15.0f]
-        TestQuantizationParams(info, {15.0f / g_AsymmU8QuantizationBase, 0},
-                                     {15.0f / g_AsymmS8QuantizationBase, -128},
-                                     {15.0f / g_SymmS8QuantizationBase, 0},
-                                     {15.0f / g_SymmS16QuantizationBase, 0});
-    }
-};
-
 INetworkPtr CreateNetworkWithActivationLayer(const ActivationDescriptor& descriptor, const TensorShape& shape)
 {
     INetworkPtr network = INetwork::Create();
@@ -313,28 +555,6 @@
     return network;
 }
 
-class TestArgMinMaxQuantization : public TestQuantization
-{
-public:
-    TestArgMinMaxQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-            : TestQuantization(inputShape, outputShape) {}
-
-    TestArgMinMaxQuantization(const QuantizerOptions& options,
-                              const TensorShape& inputShape,
-                              const TensorShape& outputShape)
-            : TestQuantization(options, inputShape, outputShape) {}
-
-    void VisitArgMinMaxLayer(const IConnectableLayer* layer,
-                             const ArgMinMaxDescriptor&,
-                             const char* name = nullptr) override
-    {
-        IgnoreUnused(name);
-        TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-        BOOST_CHECK(info.GetDataType() == DataType::Signed32);
-    }
-};
-
 INetworkPtr CreateNetworkWithArgMinMaxLayer(const ArgMinMaxDescriptor& descriptor, const TensorShape& shape)
 {
     INetworkPtr network = INetwork::Create();
@@ -417,34 +637,47 @@
     std::unique_ptr<IQuantizationScheme> quantizationScheme = std::make_unique<QAsymmU8QuantizationScheme>();
     OffsetScalePair qParams = quantizationScheme->ComputeScheme(-77.0, 98.0);
 
-    class TestOutputLayerVisitor : public LayerVisitorBase<VisitorNoThrowPolicy>
-    {
-    public:
-        TestOutputLayerVisitor(const OffsetScalePair& offsetScalePair, const DataType& dataType) :
+class TestOutputStrategy : public IStrategy
+{
+public:
+    TestOutputStrategy(const OffsetScalePair& offsetScalePair, const DataType& dataType) :
             m_OffsetScalePair(offsetScalePair), m_DataType(dataType) {}
 
-        void VisitOutputLayer(const IConnectableLayer* layer,
-                                      LayerBindingId id,
-                                      const char* name = nullptr) override
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id) override
+    {
+        IgnoreUnused(name, constants, id, descriptor);
+
+        switch (layer->GetType())
         {
-            IgnoreUnused(id, name);
-            const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
-            BOOST_CHECK_MESSAGE(info.GetDataType() == m_DataType,
-                                std::string(armnn::GetDataTypeName(info.GetDataType()))
-                                        .append(" == ").append(armnn::GetDataTypeName(m_DataType)));
-            // int_32t
-            BOOST_CHECK(info.GetQuantizationOffset() == m_OffsetScalePair.second);
-            // float
-            BOOST_TEST(info.GetQuantizationScale() == m_OffsetScalePair.first, boost::test_tools::tolerance(0.001));
+            case armnn::LayerType::Output :
+            {
+                const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
+                BOOST_CHECK_MESSAGE(info.GetDataType() == m_DataType,
+                                    std::string(armnn::GetDataTypeName(info.GetDataType()))
+                                            .append(" == ").append(armnn::GetDataTypeName(m_DataType)));
+                // int_32t
+                BOOST_CHECK(info.GetQuantizationOffset() == m_OffsetScalePair.second);
+                // float
+                BOOST_TEST(info.GetQuantizationScale() == m_OffsetScalePair.first,
+                           boost::test_tools::tolerance(0.001));
+                break;
+            }
+            default:
+            {}
         }
+    }
 
-    private:
-        const OffsetScalePair m_OffsetScalePair;
-        const DataType m_DataType;
-    };
+private:
+    const OffsetScalePair m_OffsetScalePair;
+    const DataType m_DataType;
+};
 
-    TestOutputLayerVisitor visitor(qParams, quantizationScheme->GetDataType());
-    quantizedNetwork->Accept(visitor);
+    TestOutputStrategy strategy(qParams, quantizationScheme->GetDataType());
+    quantizedNetwork->ExecuteStrategy(strategy);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeAbsActivation)
@@ -457,25 +690,7 @@
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
 
-    const QuantizerOptions qAsymmU8Options(DataType::QAsymmU8);
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get(), qAsymmU8Options)->ExportNetwork();
-    TestActivationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeArgMax)
@@ -486,25 +701,7 @@
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithArgMinMaxLayer(descriptor, shape);
 
-    const QuantizerOptions qAsymmU8Options(DataType::QAsymmU8);
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get(), qAsymmU8Options)->ExportNetwork();
-    TestArgMinMaxQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestArgMinMaxQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestArgMinMaxQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestArgMinMaxQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeLinearActivation)
@@ -517,24 +714,8 @@
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestActivationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
 
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeReLuActivation)
@@ -547,24 +728,7 @@
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestActivationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeSoftReLuActivation)
@@ -577,54 +741,11 @@
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestActivationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeBoundedReluActivation)
 {
-    class TestBoundedReluActivationQuantization : public TestQuantization
-    {
-    public:
-        TestBoundedReluActivationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestBoundedReluActivationQuantization(const QuantizerOptions& options,
-                                              const TensorShape& inputShape,
-                                              const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitActivationLayer(const IConnectableLayer* layer,
-                                  const ActivationDescriptor& descriptor,
-                                  const char* name = nullptr) override
-        {
-            IgnoreUnused(descriptor, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            // Based off default static range [0.0f, 3.5f]
-            TestQuantizationParams(info, {3.5f / g_AsymmU8QuantizationBase, 0},
-                                         {3.5f / g_AsymmS8QuantizationBase, -128},
-                                         {3.5f / g_SymmS8QuantizationBase,  0},
-                                         {3.5f / g_SymmS16QuantizationBase, 0});
-        }
-    };
-
     ActivationDescriptor descriptor;
     descriptor.m_Function = ActivationFunction::BoundedReLu;
     descriptor.m_A        = 3.5f;
@@ -633,55 +754,11 @@
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestBoundedReluActivationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestBoundedReluActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestBoundedReluActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestBoundedReluActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeTanHActivation)
 {
-    class TestTanHActivationQuantization : public TestQuantization
-    {
-    public:
-        TestTanHActivationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestTanHActivationQuantization(const QuantizerOptions& options,
-                                       const TensorShape& inputShape,
-                                       const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitActivationLayer(const IConnectableLayer* layer,
-                                  const ActivationDescriptor& descriptor,
-                                  const char* name = nullptr) override
-        {
-            IgnoreUnused(descriptor, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            // Based off default static range [-1.0f, 1.0f]
-            TestQuantizationParams(
-                info, {2.0f / g_AsymmU8QuantizationBase, 128},
-                      {2.0f / g_AsymmS8QuantizationBase,   0},
-                      {1.0f / g_SymmS8QuantizationBase ,   0},
-                      {1.0f / g_SymmS16QuantizationBase,   0});
-        }
-    };
-
     ActivationDescriptor descriptor;
     descriptor.m_Function = ActivationFunction::TanH;
     descriptor.m_A        = 3.5f;
@@ -690,64 +767,9 @@
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestTanHActivationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestTanHActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestTanHActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestTanHActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
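
The (scale, offset) pairs the deleted validators hard-coded all follow one rule: asymmetric targets map the layer's static range [min, max] onto the full integer range, while symmetric targets map max onto the positive half and keep a zero offset. For TanH's default static range [-1.0f, 1.0f], and assuming g_AsymmU8QuantizationBase is 255.0f, the arithmetic behind the deleted {2.0f / g_AsymmU8QuantizationBase, 128} assertion works out as follows.

#include <cmath>

// Illustration only; mirrors the expected-parameter arithmetic encoded in the
// assertions deleted above.
void ExpectedTanHQuantizationParams()
{
    const float min = -1.0f; // TanH default static range
    const float max =  1.0f;
    const float asymmScale  = (max - min) / 255.0f;                             // == 2.0f / g_AsymmU8QuantizationBase
    const int   asymmOffset = static_cast<int>(std::round(-min / asymmScale));  // 127.5f rounds to 128
    // Symmetric targets use scale = max / base with offset 0, hence the
    // {1.0f / g_SymmS16QuantizationBase, 0} pair in the removed validator.
    (void)asymmScale;
    (void)asymmOffset;
}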
 
-class TestLeakyReLuActivationQuantization : public TestQuantization
-{
-public:
-    TestLeakyReLuActivationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-    : TestQuantization(inputShape, outputShape) {}
-
-    TestLeakyReLuActivationQuantization(const QuantizerOptions& options,
-                                        const TensorShape& inputShape,
-                                        const TensorShape& outputShape)
-    : TestQuantization(options, inputShape, outputShape) {}
-
-    void VisitActivationLayer(const IConnectableLayer* layer,
-                              const ActivationDescriptor& descriptor,
-                              const char* name = nullptr) override
-    {
-        IgnoreUnused(descriptor, name);
-        TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-        // Based off default static range [-5.0f, 15.0f]
-        TestQuantizationParams(info, {20.0f / g_AsymmU8QuantizationBase, 64},
-                                     {20.0f / g_AsymmS8QuantizationBase,-64},
-                                     {15.0f / g_SymmS8QuantizationBase ,  0},
-                                     {15.0f / g_SymmS16QuantizationBase,  0});
-    }
-
-protected:
-    // Used by the descendant classes which test layers
-    // that are forwarding their parent layer settings
-    void CheckForwardedQuantizationSettings(const IConnectableLayer* layer)
-    {
-        TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-        TestQuantizationParams(info, {20.0f / g_AsymmU8QuantizationBase, 64},
-                                     {20.0f / g_AsymmS8QuantizationBase,-64},
-                                     {15.0f / g_SymmS8QuantizationBase,   0},
-                                     {15.0f / g_SymmS16QuantizationBase,  0});
-    }
-};
-
 BOOST_AUTO_TEST_CASE(QuantizeLeakyReLuActivation)
 {
     ActivationDescriptor descriptor;
@@ -758,176 +780,34 @@
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestLeakyReLuActivationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestLeakyReLuActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestLeakyReLuActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestLeakyReLuActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 
 BOOST_AUTO_TEST_CASE(QuantizeELuActivation)
 {
-    class TestEluActivationQuantization : public TestQuantization
-    {
-    public:
-        TestEluActivationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestEluActivationQuantization(const QuantizerOptions& options,
-                                       const TensorShape& inputShape,
-                                       const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitActivationLayer(const IConnectableLayer* layer,
-                                  const ActivationDescriptor& descriptor,
-                                  const char* name = nullptr) override
-        {
-            IgnoreUnused(descriptor, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            // Based off default static range [-15.0f, 15.0f]
-            TestQuantizationParams(
-                info, {30.0f / g_AsymmU8QuantizationBase, 128},
-                      {30.0f / g_AsymmS8QuantizationBase, 0},
-                      {15.0f / g_SymmS8QuantizationBase,  0},
-                      {15.0f / g_SymmS16QuantizationBase, 0});
-        }
-    };
-
     ActivationDescriptor descriptor;
     descriptor.m_Function = ActivationFunction::Elu;
 
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestEluActivationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestEluActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestEluActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestEluActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 BOOST_AUTO_TEST_CASE(QuantizeHardSwishActivation)
 {
-    class TestHardSwishActivationQuantization : public TestQuantization
-    {
-    public:
-        TestHardSwishActivationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-            : TestQuantization(inputShape, outputShape) {}
-
-        TestHardSwishActivationQuantization(const QuantizerOptions& options,
-                                      const TensorShape& inputShape,
-                                      const TensorShape& outputShape)
-            : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitActivationLayer(const IConnectableLayer* layer,
-                                  const ActivationDescriptor& descriptor,
-                                  const char* name = nullptr) override
-        {
-            IgnoreUnused(descriptor, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            // Based off default static range [-15.0f, 15.0f]
-            TestQuantizationParams(
-                info, {30.0f / g_AsymmU8QuantizationBase, 128},
-                {30.0f / g_AsymmS8QuantizationBase, 0},
-                {15.0f / g_SymmS8QuantizationBase,  0},
-                {15.0f / g_SymmS16QuantizationBase, 0});
-        }
-    };
-
     ActivationDescriptor descriptor;
     descriptor.m_Function = ActivationFunction::HardSwish;
 
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestHardSwishActivationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestHardSwishActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestHardSwishActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestHardSwishActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 
 BOOST_AUTO_TEST_CASE(QuantizeBatchNorm)
 {
-    class TestBatchNormalizationQuantization : public TestQuantization
-    {
-    public:
-        TestBatchNormalizationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestBatchNormalizationQuantization(const QuantizerOptions& options,
-                                           const TensorShape& inputShape,
-                                           const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
-                                          const BatchNormalizationDescriptor& desc,
-                                          const ConstTensor& mean,
-                                          const ConstTensor& variance,
-                                          const ConstTensor& beta,
-                                          const ConstTensor& gamma,
-                                          const char* name = nullptr) override
-        {
-            IgnoreUnused(desc, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            // Based off default static range [-15.0f, 15.0f]
-            TestQuantizationParams(
-                info, {30.0f / g_AsymmU8QuantizationBase, 128},
-                      {30.0f / g_AsymmS8QuantizationBase,  0},
-                      {15.0f / g_SymmS8QuantizationBase,  0},
-                      {15.0f / g_SymmS16QuantizationBase, 0});
-
-            // Test constants
-            TestConstantQuantizationParams(mean.GetInfo(), {3.0f / g_AsymmU8QuantizationBase, 85});
-            TestConstantQuantizationParams(variance.GetInfo(), {3.0f / g_AsymmU8QuantizationBase, 85});
-            TestConstantQuantizationParams(beta.GetInfo(), {3.0f / g_AsymmU8QuantizationBase, 85});
-            TestConstantQuantizationParams(gamma.GetInfo(), {3.0f / g_AsymmU8QuantizationBase, 85});
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     const TensorShape shape{3U};
@@ -958,55 +838,11 @@
     input0->GetOutputSlot(0).SetTensorInfo(info);
     batchNorm->GetOutputSlot(0).SetTensorInfo(info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestBatchNormalizationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestBatchNormalizationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestBatchNormalizationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions QQsymm16Options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), QQsymm16Options)->ExportNetwork();
-    TestBatchNormalizationQuantization validatorQSymmS16(QQsymm16Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeDepthToSpace)
 {
-    class TestDepthToSpaceQuantization : public TestQuantization
-    {
-    public:
-        TestDepthToSpaceQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-            : TestQuantization(inputShape, outputShape) {}
-
-        TestDepthToSpaceQuantization(const QuantizerOptions& options,
-                                     const TensorShape& inputShape,
-                                     const TensorShape& outputShape)
-            : TestQuantization(options, inputShape, outputShape) {}
-
-        virtual void VisitDepthToSpaceLayer(const IConnectableLayer* layer,
-                                            const DepthToSpaceDescriptor& desc,
-                                            const char* name = nullptr)
-        {
-            IgnoreUnused(desc, name);
-            const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
-            const OffsetScalePair qAsymmS8Params{ 30.0f / g_AsymmS8QuantizationBase, 0 };
-            const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase,  0 };
-            const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 };
-
-            TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params);
-        }
-    };
-
     const TensorShape inputShape { 1, 2, 2, 4 };
     const TensorShape outputShape{ 1, 4, 4, 1 };
 
@@ -1026,28 +862,7 @@
     inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
     depthToSpaceLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
-    // test QAsymmU8 quantization
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestDepthToSpaceQuantization validatorQAsymmU8(inputShape, outputShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    // test QAsymmS8 quantization
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestDepthToSpaceQuantization validatorQAsymmS8(qAsymmS8Options, inputShape, outputShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    // test QSymmS8 quantization
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestDepthToSpaceQuantization validatorQSymmS8(qSymmS8Options, inputShape, outputShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    // test QSymmS16 quantization
-    const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
-    TestDepthToSpaceQuantization validatorQSymmS16(Qsymm16Options, inputShape, outputShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), inputShape, outputShape);
 }
 
 BOOST_AUTO_TEST_CASE(OverrideInputRangeEmptyNetwork)
@@ -1058,8 +873,8 @@
     Network network; // Empty network
     auto inputLayers = network.GetGraph().GetInputLayers(); // Empty list of input layers
 
-    OverrideInputRangeVisitor overrideInputRangeVisitor(ranges, 0, minMaxRange);
-    VisitLayers(inputLayers, overrideInputRangeVisitor);
+    OverrideInputRangeStrategy overrideInputRangeStrategy(ranges, 0, minMaxRange);
+    ApplyStrategyToLayers(inputLayers, overrideInputRangeStrategy);
 
     BOOST_CHECK(ranges.IsEmpty()); // Check that the map of ranges remained untouched
 }
@@ -1073,8 +888,8 @@
     network.AddAdditionLayer(); // Network with no input layers
     auto inputLayers = network.GetGraph().GetInputLayers(); // Empty list of input layers
 
-    OverrideInputRangeVisitor overrideInputRangeVisitor(ranges, 0, minMaxRange);
-    VisitLayers(inputLayers, overrideInputRangeVisitor);
+    OverrideInputRangeStrategy overrideInputRangeStrategy(ranges, 0, minMaxRange);
+    ApplyStrategyToLayers(inputLayers, overrideInputRangeStrategy);
 
     BOOST_CHECK(ranges.IsEmpty()); // Check that the map of ranges remained untouched
 }
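
The substantive change in these OverrideInputRange hunks is the traversal helper: VisitLayers drove the old per-layer visitor double dispatch, whereas ApplyStrategyToLayers only needs the single unified entry point. A plausible minimal form, assuming the refactor gives layers an ExecuteStrategy method (the real helper ships with this change):

// Sketch under the assumption of an ExecuteStrategy() entry point on layers.
template <typename LayerContainer>
void ApplyStrategyToLayers(const LayerContainer& layers, IStrategy& strategy)
{
    for (auto layer : layers)
    {
        layer->ExecuteStrategy(strategy);
    }
}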
@@ -1107,15 +922,15 @@
     auto inputLayers = network.GetGraph().GetInputLayers(); // List of input layers
 
     // Trying to override the input range for the input layer with binding id 3 (does not exist in the network)
-    OverrideInputRangeVisitor overrideInputRangeVisitorLayer3(ranges, 3, minMaxRange);
-    VisitLayers(inputLayers, overrideInputRangeVisitorLayer3);
+    OverrideInputRangeStrategy overrideInputRangeStrategy3(ranges, 3, minMaxRange);
+    ApplyStrategyToLayers(inputLayers, overrideInputRangeStrategy3);
 
     // Check that the map of ranges remained untouched
     BOOST_CHECK(ranges.IsEmpty());
 
     // Override the input range for the input layer with binding id 1
-    OverrideInputRangeVisitor overrideInputRangeVisitorLayer1(ranges, 1, minMaxRange);
-    VisitLayers(inputLayers, overrideInputRangeVisitorLayer1);
+    OverrideInputRangeStrategy overrideInputRangeStrategy1(ranges, 1, minMaxRange);
+    ApplyStrategyToLayers(inputLayers, overrideInputRangeStrategy1);
 
     // Check that the map of ranges has been populated
     BOOST_CHECK(!ranges.IsEmpty());
@@ -1170,80 +985,14 @@
 
 void ValidateFullyConnectedLayer(const bool biasEnabled)
 {
-    class TestFullyConnectedQuantization : public TestQuantization
-    {
-    public:
-        TestFullyConnectedQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestFullyConnectedQuantization(const QuantizerOptions& options,
-                                       const TensorShape& inputShape,
-                                       const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitFullyConnectedLayer(const IConnectableLayer* layer,
-                                      const FullyConnectedDescriptor& desc,
-                                      const ConstTensor& weights,
-                                      const Optional<ConstTensor>& biases,
-                                      const char* name = nullptr) override
-        {
-            IgnoreUnused(desc, name);
-            TestQuantizationOnLayersWithBiases(layer, weights, biases);
-        }
-    };
-
     const TensorShape shape{3U};
     INetworkPtr network = CreateNetworkWithFullyConnectedLayer(biasEnabled, shape, shape);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestFullyConnectedQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestFullyConnectedQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestFullyConnectedQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
-    TestFullyConnectedQuantization validatorQSymmS16(Qsymm16Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeFill)
 {
-    class TestFillQuantization : public TestQuantization
-    {
-    public:
-        TestFillQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestFillQuantization(const QuantizerOptions& options,
-                             const TensorShape& inputShape,
-                             const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        virtual void VisitFillLayer(const IConnectableLayer* layer,
-                                    const FillDescriptor& desc,
-                                    const char* name = nullptr)
-        {
-            IgnoreUnused(desc, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
-            const OffsetScalePair qAsymmS8Params { 30.0f / g_AsymmS8QuantizationBase,  0};
-            const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase,  0};
-            const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 };
-
-            TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params);
-        }
-    };
-
     const TensorShape tensorShape{ 1U };
     const TensorInfo tensorInfo(tensorShape, DataType::Float32);
 
@@ -1262,28 +1011,7 @@
     inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
     fillLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
-    // test QAsymmU8 quantization
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestFillQuantization validatorQAsymmU8(tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    // test QAsymmS8 quantization
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestFillQuantization validatorQAsymmS8(qAsymmS8Options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    // test QSymmS8 quantization
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestFillQuantization validatorQSymmS8(qSymmS8Options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    // test QuantisedSymmS16 quantization
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestFillQuantization validatorQSymmS16(qSymmS16options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), tensorShape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeFullyConnected)
@@ -1298,28 +1026,6 @@
 
 void TestQuantizeConvolution2d(bool useBiases)
 {
-    class TestConv2dQuantization : public TestQuantization
-    {
-    public:
-        TestConv2dQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestConv2dQuantization(const QuantizerOptions& options,
-                               const TensorShape& inputShape,
-                               const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitConvolution2dLayer(const IConnectableLayer *layer,
-                                     const Convolution2dDescriptor& convolution2dDescriptor,
-                                     const ConstTensor& weights,
-                                     const Optional<ConstTensor>& biases,
-                                     const char *name = nullptr) override
-        {
-            IgnoreUnused(convolution2dDescriptor, name);
-            TestQuantizationOnLayersWithBiases(layer, weights, biases);
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     TensorShape shape{3U};
@@ -1352,24 +1058,7 @@
     input0->GetOutputSlot(0).SetTensorInfo(info);
     conv2d->GetOutputSlot(0).SetTensorInfo(info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestConv2dQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestConv2dQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestConv2dQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
-    TestConv2dQuantization validatorQSymmS16(Qsymm16Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeConvolution2d)
@@ -1384,28 +1073,6 @@
 
 void TestQuantizeDepthwiseConvolution2d(bool useBiases)
 {
-    class TestDepthwiseConv2dQuantization : public TestQuantization
-    {
-    public:
-        TestDepthwiseConv2dQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestDepthwiseConv2dQuantization(const QuantizerOptions& options,
-                                        const TensorShape& inputShape,
-                                        const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitDepthwiseConvolution2dLayer(const IConnectableLayer *layer,
-                                              const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
-                                              const ConstTensor& weights,
-                                              const Optional<ConstTensor>& biases,
-                                              const char *name = nullptr) override
-        {
-            IgnoreUnused(convolution2dDescriptor, name);
-            TestQuantizationOnLayersWithBiases(layer, weights, biases);
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     TensorShape shape{3U};
@@ -1438,24 +1105,7 @@
     input0->GetOutputSlot(0).SetTensorInfo(info);
     depthwiseConv2d->GetOutputSlot(0).SetTensorInfo(info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestDepthwiseConv2dQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestDepthwiseConv2dQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestDepthwiseConv2dQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
-    TestDepthwiseConv2dQuantization validatorQSymmS16(Qsymm16Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeDepthwiseConvolution2d)
@@ -1470,35 +1120,8 @@
 
 BOOST_AUTO_TEST_CASE(QuantizeInstanceNormalization)
 {
-    class TestInstanceNormalizationQuantization : public TestQuantization
-    {
-    public:
-        TestInstanceNormalizationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-            : TestQuantization(inputShape, outputShape) {}
-
-        TestInstanceNormalizationQuantization(const QuantizerOptions& options,
-                                              const TensorShape& inputShape,
-                                              const TensorShape& outputShape)
-            : TestQuantization(options, inputShape, outputShape) {}
-
-        virtual void VisitInstanceNormalizationLayer(const IConnectableLayer* layer,
-                                                     const InstanceNormalizationDescriptor& descriptor,
-                                                     const char* name = nullptr)
-        {
-            IgnoreUnused(descriptor, name);
-            const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
-            const OffsetScalePair qAsymmS8Params { 30.0f / g_AsymmS8QuantizationBase,  0};
-            const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase,  0};
-            const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 };
-
-            TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params);
-        }
-    };
-
-    const TensorShape tensorShape{ 1, 4, 4, 1 };
-    const TensorInfo tensorInfo(tensorShape, DataType::Float32);
+    const TensorShape shape{ 1, 4, 4, 1 };
+    const TensorInfo tensorInfo(shape, DataType::Float32);
 
     INetworkPtr network = INetwork::Create();
 
@@ -1512,59 +1135,11 @@
     inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
     instanceNormLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
-    // test QAsymmU8 quantization
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestInstanceNormalizationQuantization validatorQAsymmU8(tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    //test QAsymmS8 quantization
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestInstanceNormalizationQuantization validatorQAsymmS8(qAsymmS8Options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    // test QSymmS8 quantization
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestInstanceNormalizationQuantization validatorQSymmS8(qSymmS8Options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    // test QSymmS16 quantization
-    const QuantizerOptions qSymmS16Options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16Options)->ExportNetwork();
-    TestInstanceNormalizationQuantization validatorQSymmS16(qSymmS16Options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeLogSoftmax)
 {
-    class TestLogSoftmaxQuantization : public TestQuantization
-    {
-    public:
-        TestLogSoftmaxQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-            : TestQuantization(inputShape, outputShape) {}
-
-        TestLogSoftmaxQuantization(const QuantizerOptions& options,
-                                   const TensorShape& inputShape,
-                                   const TensorShape& outputShape)
-            : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitLogSoftmaxLayer(const IConnectableLayer* layer,
-                                  const SoftmaxDescriptor& descriptor,
-                                  const char* name = nullptr) override
-        {
-            IgnoreUnused(descriptor, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
-            const OffsetScalePair qAsymmS8Params { 30.0f / g_AsymmS8QuantizationBase,  0};
-            const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase,  0};
-            const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 };
-
-            TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params);
-        }
-    };
-
     const TensorShape tensorShape{ 1U };
     const TensorInfo tensorInfo(tensorShape, DataType::Float32);
 
@@ -1583,28 +1158,7 @@
     inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
     logSoftmaxLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
-    // test QAsymmU8 quantization
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestLogSoftmaxQuantization validatorQAsymmU8(tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    // test QAsymmS8 quantization
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestLogSoftmaxQuantization validatorQAsymmS8(qAsymmS8Options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    // test QSymmS8 quantization
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestLogSoftmaxQuantization validatorQSymmS8(qSymmS8Options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    // test QuantisedSymmS16 quantization
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestLogSoftmaxQuantization validatorQSymmS16(qSymmS16options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), tensorShape);
 }
 
 INetworkPtr CreateNetworkWithSoftmaxLayer(const SoftmaxDescriptor& descriptor, const TensorShape& shape)
@@ -1630,57 +1184,13 @@
 
 BOOST_AUTO_TEST_CASE(QuantizeSoftmax)
 {
-    class TestSoftmaxQuantization : public TestQuantization
-    {
-    public:
-        TestSoftmaxQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestSoftmaxQuantization(const QuantizerOptions& options,
-                                const TensorShape& inputShape,
-                                const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitSoftmaxLayer(const IConnectableLayer* layer,
-                               const SoftmaxDescriptor& descriptor,
-                               const char* name = nullptr) override
-        {
-            IgnoreUnused(descriptor, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            // Based off default static range [0.0f, 1.0f]
-            TestQuantizationParams(info, {1.0f / g_AsymmU8QuantizationBase, 0},
-                                         {1.0f / g_AsymmS8QuantizationBase, -128},
-                                         {1.0f / g_SymmS8QuantizationBase,  0},
-                                         {1.0f / g_SymmS16QuantizationBase, 0});
-        }
-    };
-
     SoftmaxDescriptor descriptor;
     descriptor.m_Beta = 1.0f;
 
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithSoftmaxLayer(descriptor, shape);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestSoftmaxQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestSoftmaxQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    // test QSymmS8 quantization
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestSoftmaxQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestSoftmaxQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeStandIn)
@@ -1763,26 +1273,6 @@
 
 BOOST_AUTO_TEST_CASE(QuantizePermute)
 {
-    class TestPermuteQuantization : public TestLeakyReLuActivationQuantization
-    {
-    public:
-        TestPermuteQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(inputShape, outputShape) {}
-
-        TestPermuteQuantization(const QuantizerOptions& options,
-                                const TensorShape& inputShape,
-                                const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(options, inputShape, outputShape) {}
-
-        void VisitPermuteLayer(const IConnectableLayer* layer,
-                               const PermuteDescriptor& desc,
-                               const char* name = nullptr) override
-        {
-            IgnoreUnused(desc, name);
-            CheckForwardedQuantizationSettings(layer);
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     const TensorShape shape{1U};
@@ -1796,48 +1286,11 @@
 
     CompleteLeakyReluNetwork(network.get(), activation, permute, info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestPermuteQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestPermuteQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestPermuteQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestPermuteQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeSpaceToBatch)
 {
-    class TestSpaceToBatchQuantization : public TestLeakyReLuActivationQuantization
-    {
-    public:
-        TestSpaceToBatchQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(inputShape, outputShape) {}
-
-        TestSpaceToBatchQuantization(const QuantizerOptions& options,
-                                     const TensorShape& inputShape,
-                                     const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(options, inputShape, outputShape) {}
-
-        void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
-                                      const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
-                                      const char* name = nullptr) override
-        {
-            IgnoreUnused(spaceToBatchNdDescriptor, name);
-            CheckForwardedQuantizationSettings(layer);
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     const TensorShape shape{1U};
@@ -1851,54 +1304,11 @@
 
     CompleteLeakyReluNetwork(network.get(), activation, spaceToBatch, info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestSpaceToBatchQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestSpaceToBatchQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestSpaceToBatchQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestSpaceToBatchQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeSpaceToDepth)
 {
-    class TestSpaceToDepthQuantization : public TestLeakyReLuActivationQuantization
-    {
-    public:
-        TestSpaceToDepthQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-            : TestLeakyReLuActivationQuantization(inputShape, outputShape)
-        {}
-
-        TestSpaceToDepthQuantization(const QuantizerOptions& options,
-                                     const TensorShape& inputShape,
-                                     const TensorShape& outputShape)
-            : TestLeakyReLuActivationQuantization(options, inputShape, outputShape)
-        {}
-
-        void VisitSpaceToDepthLayer(const IConnectableLayer* layer,
-                                    const SpaceToDepthDescriptor&,
-                                    const char* = nullptr) override
-        {
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-            TestQuantizationParams(info,
-                                  { 30.0f / g_AsymmU8QuantizationBase, 128 },
-                                  { 30.0f / g_AsymmS8QuantizationBase, 0   },
-                                  { 15.0f / g_SymmS8QuantizationBase,  0   },
-                                  { 15.0f / g_SymmS16QuantizationBase, 0   });
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     const TensorShape shape{ 1u };
@@ -1909,48 +1319,11 @@
 
     CompleteLeakyReluNetwork(network.get(), activation, spaceToDepth, info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestSpaceToDepthQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestSpaceToDepthQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestSpaceToDepthQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestSpaceToDepthQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizePooling2d)
 {
-    class TestPooling2dQuantization : public TestLeakyReLuActivationQuantization
-    {
-    public:
-        TestPooling2dQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(inputShape, outputShape) {}
-
-        TestPooling2dQuantization(const QuantizerOptions& options,
-                                  const TensorShape& inputShape,
-                                  const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(options, inputShape, outputShape) {}
-
-        void VisitPooling2dLayer(const IConnectableLayer* layer,
-                                 const Pooling2dDescriptor& desc,
-                                 const char* name = nullptr) override
-        {
-            IgnoreUnused(desc, name);
-            CheckForwardedQuantizationSettings(layer);
-        }
-    };
-
     auto network = INetwork::Create();
 
     TensorShape shape{1U};
@@ -1978,54 +1351,11 @@
     activation->GetOutputSlot(0).SetTensorInfo(info);
     pooling2d->GetOutputSlot(0).SetTensorInfo(info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestPooling2dQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestPooling2dQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestPooling2dQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestPooling2dQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeConstant)
 {
-    class TestConstantQuantization : public TestAdditionQuantization
-    {
-    public:
-        TestConstantQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestAdditionQuantization(inputShape, outputShape) {}
-
-        TestConstantQuantization(const QuantizerOptions& options,
-                                 const TensorShape& inputShape,
-                                 const TensorShape& outputShape)
-        : TestAdditionQuantization(options, inputShape, outputShape) {}
-
-        void VisitConstantLayer(const IConnectableLayer* layer,
-                                const ConstTensor& input,
-                                const char* name = nullptr) override
-        {
-            IgnoreUnused(input, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            // Based off the range of values in the const tensor used for the test: [-2.0f, 6.0f]
-            TestQuantizationParams(info, {8.0f / g_AsymmU8QuantizationBase, 64},
-                                         {8.0f / g_AsymmS8QuantizationBase, -64},
-                                         {6.0f / g_SymmS8QuantizationBase,  0},
-                                         {6.0f / g_SymmS16QuantizationBase, 0});
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     // Constant layer data
@@ -2050,68 +1380,11 @@
     addition->GetOutputSlot(0).SetTensorInfo(tensorInfo);
     constant->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestConstantQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestConstantQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestConstantQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestConstantQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
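
Taken together, the single-purpose visitor subclasses deleted throughout this file reduce to branches of one strategy: under the unified interface every layer arrives at a single entry point carrying its descriptor and constant tensors, so one class can cover what previously needed an override per layer type. The sketch below follows the IStrategy and BaseDescriptor additions in this change, but the exact parameter list and enum values shown are assumptions.

// Assumed shape of a unified validator; the concrete TestQuantization used by
// TestNetwork is expected to look broadly like this.
class ExampleQuantizationStrategy : public armnn::IStrategy
{
public:
    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                         const armnn::BaseDescriptor& descriptor,
                         const std::vector<armnn::ConstTensor>& constants,
                         const char* name,
                         const armnn::LayerBindingId id = 0) override
    {
        armnn::IgnoreUnused(descriptor, constants, name, id);
        switch (layer->GetType())
        {
            case armnn::LayerType::Activation:
                // One range check here replaces TestBoundedReluActivationQuantization,
                // TestTanHActivationQuantization, TestLeakyReLuActivationQuantization, ...
                break;
            case armnn::LayerType::Constant:
                // ... and likewise for every layer type that had its own Visit override.
                break;
            default:
                break;
        }
    }
};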
 
 BOOST_AUTO_TEST_CASE(QuantizeArgMinMax)
 {
-    class TestArgMinMaxQuantization : public TestQuantization
-    {
-    public:
-        TestArgMinMaxQuantization(const TensorShape& inputShape, const TensorShape& outputShape)  :
-                TestQuantization(inputShape, outputShape) {}
-
-        TestArgMinMaxQuantization(const QuantizerOptions& options,
-                                  const TensorShape& inputShape,
-                                  const TensorShape& outputShape) :
-                TestQuantization(options, inputShape, outputShape)
-        {}
-
-        void VisitInputLayer(const IConnectableLayer* layer,
-                             LayerBindingId id,
-                             const char* name = nullptr) override
-        {
-            IgnoreUnused(layer, id, name);
-        }
-
-        void VisitOutputLayer(const IConnectableLayer* layer,
-                              LayerBindingId id,
-                              const char* name = nullptr) override
-        {
-            IgnoreUnused(layer, id, name);
-        }
-        void VisitArgMinMaxLayer(const IConnectableLayer* layer,
-                                 const ArgMinMaxDescriptor& argMinMaxDescriptor,
-                                 const char* name = nullptr) override
-        {
-                IgnoreUnused(argMinMaxDescriptor, name);
-                TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
-
-                TestQuantizationParams(outputInfo,
-                                       { 30.0f / g_AsymmU8QuantizationBase, 128 },
-                                       { 30.0f / g_AsymmS8QuantizationBase,  0},
-                                       { 15.0f / g_SymmS8QuantizationBase,  0},
-                                       { 15.0f / g_SymmS16QuantizationBase, 0 });
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     const TensorShape inputShape{ 1, 1, 1, 5 };
@@ -2139,55 +1412,11 @@
     input->GetOutputSlot(0).SetTensorInfo(inputInfo);
     argMinMaxLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestArgMinMaxQuantization validatorQAsymmU8(inputShape, outputShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestArgMinMaxQuantization validatorQAsymmS8(qAsymmS8Options, inputShape, outputShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestArgMinMaxQuantization validatorQSymmS8(qSymmS8Options, inputShape, outputShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestArgMinMaxQuantization validatorQSymmS16(qSymmS16options, inputShape, outputShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), inputShape, outputShape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeComparison)
 {
-    class TestComparisonQuantization : public TestQuantization
-    {
-    public:
-        TestComparisonQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-            : TestQuantization(inputShape, outputShape) {}
-
-        TestComparisonQuantization(const QuantizerOptions& options,
-                                   const TensorShape& inputShape,
-                                   const TensorShape& outputShape)
-            : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitComparisonLayer(const IConnectableLayer* layer,
-                                  const ComparisonDescriptor& descriptor,
-                                  const char* name = nullptr) override
-        {
-            IgnoreUnused(descriptor, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
-            const OffsetScalePair qAsymmS8Params { 30.0f / g_AsymmS8QuantizationBase,  0};
-            const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase,  0};
-            const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 };
-
-            TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params);
-        }
-    };
-
     const TensorShape tensorShape{ 1u };
     const TensorInfo tensorInfo(tensorShape, DataType::Float32);
 
@@ -2207,28 +1436,7 @@
     inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
     comparisonLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
-    // test QAsymmU8 quantization
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestComparisonQuantization validatorQAsymmU8(tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    // test QAsymmS8 quantization
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestComparisonQuantization validatorQAsymmS8(qAsymmS8Options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    // test QSymmS8 quantization
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestComparisonQuantization validatorQSymmS8(qSymmS8Options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    // test QuantisedSymmS16 quantization
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestComparisonQuantization validatorQSymmS16(qSymmS16options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), tensorShape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeConcat)
@@ -2244,38 +1452,42 @@
                                const TensorShape& outputShape)
         : TestQuantization(options, inputShape, outputShape) {}
 
-        void VisitInputLayer(const IConnectableLayer* layer,
-                             LayerBindingId id,
-                             const char* name = nullptr) override
+        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                             const BaseDescriptor& descriptor,
+                             const std::vector<armnn::ConstTensor>& constants,
+                             const char* name,
+                             const armnn::LayerBindingId id) override
         {
-            IgnoreUnused(layer, id, name);
-        }
-        void VisitOutputLayer(const IConnectableLayer* layer,
-                              LayerBindingId id,
-                              const char* name = nullptr) override
-        {
-            IgnoreUnused(layer, id, name);
-        }
-        void VisitConcatLayer(const IConnectableLayer* layer,
-                              const OriginsDescriptor& originsDescriptor,
-                              const char* name = nullptr) override
-        {
-            IgnoreUnused(originsDescriptor, name);
-            TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
-            TestQuantizationParams(
-                outputInfo, {60.8f / g_AsymmU8QuantizationBase, 65},
+            IgnoreUnused(name, constants, id, descriptor);
+
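+            // With the unified IStrategy interface a single ExecuteStrategy callback
+            // replaces the per-layer Visit*Layer overrides; dispatch on the layer type.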
+            switch (layer->GetType())
+            {
+                case armnn::LayerType::Input :
+                    break;
+                case armnn::LayerType::Output :
+                    break;
+                case armnn::LayerType::Concat :
+                {
+                    TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
+                    TestQuantizationParams(
+                            outputInfo, {60.8f / g_AsymmU8QuantizationBase, 65},
                             {60.8f / g_SymmS8QuantizationBase,  -63},
                             {45.3f / g_SymmS8QuantizationBase,  0},
                             {45.3f / g_SymmS16QuantizationBase, 0});
 
-            TensorInfo inputInfo0 = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
-            TensorInfo inputInfo1 = layer->GetInputSlot(1).GetConnection()->GetTensorInfo();
-            TensorInfo inputInfo2 = layer->GetInputSlot(2).GetConnection()->GetTensorInfo();
+                    TensorInfo inputInfo0 = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
+                    TensorInfo inputInfo1 = layer->GetInputSlot(1).GetConnection()->GetTensorInfo();
+                    TensorInfo inputInfo2 = layer->GetInputSlot(2).GetConnection()->GetTensorInfo();
 
-            TestDifferentQuantizationScale(inputInfo0, inputInfo1);
-            TestDifferentQuantizationScale(inputInfo0, inputInfo2);
-            TestDifferentQuantizationScale(inputInfo1, inputInfo2);
-            TestDifferentQuantizationScale(inputInfo0, outputInfo);
+                    TestDifferentQuantizationScale(inputInfo0, inputInfo1);
+                    TestDifferentQuantizationScale(inputInfo0, inputInfo2);
+                    TestDifferentQuantizationScale(inputInfo1, inputInfo2);
+                    TestDifferentQuantizationScale(inputInfo0, outputInfo);
+                    break;
+                }
+                default:
+                {}
+            }
         }
     };
 
@@ -2341,26 +1553,6 @@
 
 BOOST_AUTO_TEST_CASE(QuantizeReshape)
 {
-    class TestReshapeQuantization : public TestLeakyReLuActivationQuantization
-    {
-    public:
-        TestReshapeQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(inputShape, outputShape) {}
-
-        TestReshapeQuantization(const QuantizerOptions& options,
-                                const TensorShape& inputShape,
-                                const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(options, inputShape, outputShape) {}
-
-        virtual void VisitReshapeLayer(const IConnectableLayer* layer,
-                                       const ReshapeDescriptor& reshapeDescriptor,
-                                       const char* name = nullptr) override
-        {
-            IgnoreUnused(reshapeDescriptor, name);
-            CheckForwardedQuantizationSettings(layer);
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     const TensorShape shape{1U};
@@ -2374,48 +1566,11 @@
 
     CompleteLeakyReluNetwork(network.get(), activation, reshape, info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestReshapeQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestReshapeQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestReshapeQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestReshapeQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeSplitter)
 {
-    class TestSplitterQuantization : public TestLeakyReLuActivationQuantization
-    {
-    public:
-        TestSplitterQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(inputShape, outputShape) {}
-
-        TestSplitterQuantization(const QuantizerOptions& options,
-                                 const TensorShape& inputShape,
-                                 const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(options, inputShape, outputShape) {}
-
-        virtual void VisitSplitterLayer(const IConnectableLayer* layer,
-                                        const SplitterDescriptor& desc,
-                                        const char* name = nullptr)
-        {
-            IgnoreUnused(desc, name);
-            CheckForwardedQuantizationSettings(layer);
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     const TensorShape shape{3U};
@@ -2428,50 +1583,11 @@
     IConnectableLayer* splitter = network->AddSplitterLayer(splitterDesc);
     CompleteLeakyReluNetwork(network.get(), activation, splitter, info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestSplitterQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestSplitterQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestSplitterQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestSplitterQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeResize)
 {
-    class TestResizeQuantization : public TestLeakyReLuActivationQuantization
-    {
-    public:
-        TestResizeQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-                : TestLeakyReLuActivationQuantization(inputShape, outputShape)
-        {}
-
-        TestResizeQuantization(const QuantizerOptions& options,
-                                       const TensorShape& inputShape,
-                                       const TensorShape& outputShape)
-                : TestLeakyReLuActivationQuantization(options, inputShape, outputShape)
-        {}
-
-        void VisitResizeLayer(const IConnectableLayer* layer,
-                                      const ResizeDescriptor& resizeDescriptor,
-                                      const char* name = nullptr) override
-        {
-            IgnoreUnused(resizeDescriptor, name);
-            CheckForwardedQuantizationSettings(layer);
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     const TensorShape shape{1U};
@@ -2487,48 +1603,11 @@
 
     CompleteLeakyReluNetwork(network.get(), activation, resizeLayer, info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestResizeQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestResizeQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestResizeQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestResizeQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeStridedSlice)
 {
-    class TestStridedSliceQuantization : public TestLeakyReLuActivationQuantization
-    {
-    public:
-        TestStridedSliceQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(inputShape, outputShape) {}
-
-        TestStridedSliceQuantization(const QuantizerOptions& options,
-                                     const TensorShape& inputShape,
-                                     const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(options, inputShape, outputShape) {}
-
-        virtual void VisitStridedSliceLayer(const IConnectableLayer* layer,
-                                            const StridedSliceDescriptor& desc,
-                                            const char* name = nullptr)
-        {
-            IgnoreUnused(desc, name);
-            CheckForwardedQuantizationSettings(layer);
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     const TensorShape shape{3U};
@@ -2542,48 +1621,11 @@
 
     CompleteLeakyReluNetwork(network.get(), activation, stridedSlice, info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestStridedSliceQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestStridedSliceQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestStridedSliceQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestStridedSliceQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeBatchToSpace)
 {
-    class TestBatchToSpaceQuantization : public TestLeakyReLuActivationQuantization
-    {
-    public:
-        TestBatchToSpaceQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(inputShape, outputShape) {}
-
-        TestBatchToSpaceQuantization(const QuantizerOptions& options,
-                                     const TensorShape& inputShape,
-                                     const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(options, inputShape, outputShape) {}
-
-        void VisitBatchToSpaceNdLayer(const IConnectableLayer* layer,
-                                      const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
-                                      const char* name = nullptr) override
-        {
-            IgnoreUnused(batchToSpaceNdDescriptor, name);
-            CheckForwardedQuantizationSettings(layer);
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     const TensorShape shape{1U};
@@ -2597,24 +1639,7 @@
 
     CompleteLeakyReluNetwork(network.get(), activation, batchToSpace, info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestBatchToSpaceQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestBatchToSpaceQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestBatchToSpaceQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestBatchToSpaceQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizePrelu)
@@ -2637,52 +1662,59 @@
             , m_AlphaShape(alphaShape)
         {}
 
-        void VisitInputLayer(const IConnectableLayer* layer,
-                             LayerBindingId id,
-                             const char* name = nullptr) override
+        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                             const BaseDescriptor& descriptor,
+                             const std::vector<armnn::ConstTensor>& constants,
+                             const char* name,
+                             const armnn::LayerBindingId id) override
         {
-            IgnoreUnused(id, name);
-            const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
+            IgnoreUnused(name, constants, id, descriptor);
 
-            switch (id)
+            switch (layer->GetType())
             {
-            case 0: // Input
-                BOOST_TEST(m_InputShape == info.GetShape());
-                break;
-            case 1: // Alpha
-                BOOST_TEST(m_AlphaShape == info.GetShape());
-                break;
-            default:
-                throw InvalidArgumentException("Invalid layer binding id for PReLU layer");
+                case armnn::LayerType::Input :
+                {
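+                    // The binding id identifies which network input this is:
+                    // id 0 is the data input, id 1 is the alpha tensor.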
+                    const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
+
+                    switch (id)
+                    {
+                        case 0: // Input
+                            BOOST_TEST(m_InputShape == info.GetShape());
+                            break;
+                        case 1: // Alpha
+                            BOOST_TEST(m_AlphaShape == info.GetShape());
+                            break;
+                        default:
+                            throw InvalidArgumentException("Invalid layer binding id for PReLU layer");
+                    }
+
+                    // Based off current default [-15.0f, 15.0f]
+                    TestQuantizationParams(info,
+                                           { 30.0f / g_AsymmU8QuantizationBase, 128 }, // QAsymmU8
+                                           { 30.0f / g_AsymmS8QuantizationBase,  0},   // QAsymmS8
+                                           { 15.0f / g_SymmS8QuantizationBase,  0},    // QSymmS8
+                                           { 15.0f / g_SymmS16QuantizationBase, 0 });  // QSymmS16
+                    break;
+                }
+                case armnn::LayerType::Output :
+                {
+                    const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
+                    BOOST_TEST(m_OutputShape == info.GetShape());
+                    break;
+                }
+                case armnn::LayerType::Prelu :
+                {
+                    const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
+                    TestQuantizationParams(info,
+                    TestQuantizationParams(info,
                                           { 30.0f / g_AsymmU8QuantizationBase, 128 }, // QAsymmU8
+                                           { 30.0f / g_AsymmS8QuantizationBase,  0},   // QAsymmS8
+                                           { 15.0f / g_SymmS8QuantizationBase,  0},    // QSymmS8
+                                           { 15.0f / g_SymmS16QuantizationBase, 0 });  // QSymmS16
+                    break;
+                }
+                default:
+                {}
             }
-
-            // Based off current default [-15.0f, 15.0f]
-            TestQuantizationParams(info,
-                                   { 30.0f / g_AsymmU8QuantizationBase, 128 }, // QASymmU8
-                                   { 30.0f / g_AsymmS8QuantizationBase,  0},   // QASymmS8
-                                   { 15.0f / g_SymmS8QuantizationBase,  0},    // QSymmS8
-                                   { 15.0f / g_SymmS16QuantizationBase, 0 });  // QSymmS16
-        }
-
-        void VisitOutputLayer(const IConnectableLayer* layer,
-                              LayerBindingId id,
-                              const char* name = nullptr) override
-        {
-            IgnoreUnused(id, name);
-            const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
-            BOOST_TEST(m_OutputShape == info.GetShape());
-        }
-
-        void VisitPreluLayer(const IConnectableLayer* layer,
-                             const char* name = nullptr) override
-        {
-            IgnoreUnused(name);
-            const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
-            TestQuantizationParams(info,
-                                   { 30.0f / g_AsymmU8QuantizationBase, 128 }, // QASymmU8
-                                   { 30.0f / g_AsymmS8QuantizationBase,  0},   // QAsymmS8
-                                   { 15.0f / g_SymmS8QuantizationBase,  0},    // QSymmS8
-                                   { 15.0f / g_SymmS16QuantizationBase, 0 });  // QSymmS16
         }
 
     private:
@@ -2740,30 +1772,6 @@
 
 void TestQuantizeTransposeConvolution2d(bool useBiases)
 {
-    class TestTransposeConvolution2dQuantization : public TestQuantization
-    {
-    public:
-        TestTransposeConvolution2dQuantization(const TensorShape& inputShape, const TensorShape& outputShape) :
-            TestQuantization(inputShape, outputShape)
-        {}
-
-        TestTransposeConvolution2dQuantization(const QuantizerOptions& options,
-                                               const TensorShape& inputShape,
-                                               const TensorShape& outputShape) :
-            TestQuantization(options, inputShape, outputShape)
-        {}
-
-        void VisitTransposeConvolution2dLayer(const IConnectableLayer *layer,
-                                              const TransposeConvolution2dDescriptor& descriptor,
-                                              const ConstTensor& weights,
-                                              const Optional<ConstTensor>& biases,
-                                              const char *name = nullptr) override
-        {
-            IgnoreUnused(descriptor, name);
-            TestQuantizationOnLayersWithBiases(layer, weights, biases);
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     TensorShape shape{ 3 };
@@ -2794,28 +1802,7 @@
     input->GetOutputSlot(0).SetTensorInfo(info);
     transposeConv2d->GetOutputSlot(0).SetTensorInfo(info);
 
-    // test QAsymmU8 quantization
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestTransposeConvolution2dQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    //test QAsymmS8 quantization
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestTransposeConvolution2dQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    // test QSymmS8 quantization
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestTransposeConvolution2dQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    // test QSymmS16 quantization
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestTransposeConvolution2dQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeTransposeConvolution2d)
@@ -2835,38 +1822,45 @@
     public:
         TestStackQuantization(const TensorShape& inputShape,
                               const TensorShape& outputShape)
-            : TestQuantization(inputShape, outputShape) {}
+                : TestQuantization(inputShape, outputShape) {}
 
         TestStackQuantization(const QuantizerOptions& options,
                               const TensorShape& inputShape,
                               const TensorShape& outputShape)
-            : TestQuantization(options, inputShape, outputShape) {}
+                : TestQuantization(options, inputShape, outputShape) {}
 
-        void VisitInputLayer(const IConnectableLayer* layer,
-                             LayerBindingId id,
-                             const char* name = nullptr) override
+        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                             const BaseDescriptor& descriptor,
+                             const std::vector<armnn::ConstTensor>& constants,
+                             const char* name,
+                             const armnn::LayerBindingId id) override
         {
-            IgnoreUnused(layer, id, name);
-        }
-        void VisitOutputLayer(const IConnectableLayer* layer,
-                              LayerBindingId id,
-                              const char* name = nullptr) override
-        {
-            IgnoreUnused(layer, id, name);
-        }
+            IgnoreUnused(name, constants, id, descriptor);
 
-        void VisitStackLayer(const IConnectableLayer* layer,
-                             const StackDescriptor& descriptor,
-                             const char* name = nullptr) override
-        {
-            IgnoreUnused(descriptor, name);
-            TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
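+            // Only the Stack layer carries assertions here; Input and Output
+            // cases deliberately fall through without checks.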
+            switch (layer->GetType())
+            {
+                case armnn::LayerType::Input :
+                {
+                    break;
+                }
+                case armnn::LayerType::Output :
+                {
+                    break;
+                }
+                case armnn::LayerType::Stack :
+                {
+                    TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
 
-            TestQuantizationParams(outputInfo,
-                { 30.0f / g_AsymmU8QuantizationBase, 128 },
-                { 30.0f / g_AsymmS8QuantizationBase, 0},
-                { 15.0f / g_SymmS8QuantizationBase,  0},
-                { 15.0f / g_SymmS16QuantizationBase, 0 });
+                    TestQuantizationParams(outputInfo,
+                                           { 30.0f / g_AsymmU8QuantizationBase, 128 },
+                                           { 30.0f / g_AsymmS8QuantizationBase, 0},
+                                           { 15.0f / g_SymmS8QuantizationBase,  0},
+                                           { 15.0f / g_SymmS16QuantizationBase, 0 });
+                    break;
+                }
+                default:
+                {}
+            }
         }
     };
 
@@ -2909,35 +1903,6 @@
 
 BOOST_AUTO_TEST_CASE(QuantizeSlice)
 {
-    class TestSliceQuantization : public TestQuantization
-    {
-    public:
-        TestSliceQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-            : TestQuantization(inputShape, outputShape)
-        {}
-
-        TestSliceQuantization(const QuantizerOptions& options,
-                              const TensorShape& inputShape,
-                              const TensorShape& outputShape)
-            : TestQuantization(options, inputShape, outputShape)
-        {}
-
-        virtual void VisitSliceLayer(const IConnectableLayer* layer,
-                                     const SliceDescriptor& desc,
-                                     const char* name = nullptr)
-        {
-            IgnoreUnused(desc, name);
-            const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
-            const OffsetScalePair qAsymmS8Params{ 30.0f / g_AsymmS8QuantizationBase, 0 };
-            const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase,  0 };
-            const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 };
-
-            TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params);
-        }
-    };
-
     TensorShape shape{ 3 };
     TensorInfo info(shape, DataType::Float32);
 
@@ -2953,28 +1918,7 @@
     inputLayer->GetOutputSlot(0).SetTensorInfo(info);
     sliceLayer->GetOutputSlot(0).SetTensorInfo(info);
 
-    // test QAsymmU8 quantization
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestSliceQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    // test QASymmS8 quantization
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestSliceQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    // test QSymmS8 quantization
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestSliceQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    // test QSymmS16 quantization
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestSliceQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 std::vector<uint8_t> SetupQuantize(float value)
@@ -3002,50 +1946,55 @@
     BOOST_CHECK_EQUAL(SetupQuantize(-1 * std::numeric_limits<float>::infinity())[0], 0);
 }
 
-class TestPreserveType : public TestAdditionQuantization
+class TestPreserveType : public TestQuantization
 {
 public:
     TestPreserveType(const QuantizerOptions& options,
                      const DataType& dataType,
                      const TensorShape& inputShape,
                      const TensorShape& outputShape)
-    : TestAdditionQuantization(options, inputShape, outputShape)
+    : TestQuantization(options, inputShape, outputShape)
     , m_DataType(dataType)
     , m_VisitedQuantizeLayer(false)
     , m_VisitedDequantizeLayer(false) {}
 
-    void VisitInputLayer(const IConnectableLayer* layer,
-                         LayerBindingId id,
-                         const char* name = nullptr) override
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id) override
     {
-        IgnoreUnused(id, name);
-        const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
-        BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType));
-        BOOST_TEST(m_InputShape == info.GetShape());
-    }
+        IgnoreUnused(name, constants, id, descriptor);
 
-    void VisitOutputLayer(const IConnectableLayer* layer,
-                          LayerBindingId id,
-                          const char* name = nullptr) override
-    {
-        IgnoreUnused(id, name);
-        const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
-        BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType));
-        BOOST_TEST(m_OutputShape == info.GetShape());
-    }
-
-    void VisitQuantizeLayer(const IConnectableLayer* layer,
-                            const char* name = nullptr) override
-    {
-        IgnoreUnused(layer, name);
-        m_VisitedQuantizeLayer = true;
-    }
-
-    void VisitDequantizeLayer(const IConnectableLayer* layer,
-                              const char* name = nullptr) override
-    {
-        IgnoreUnused(layer, name);
-        m_VisitedDequantizeLayer = true;
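+        // Check that the boundary layers keep the requested data type and record
+        // whether Quantize/Dequantize layers were encountered during traversal.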
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Input :
+            {
+                const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
+                BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType));
+                BOOST_TEST(m_InputShape == info.GetShape());
+                break;
+            }
+            case armnn::LayerType::Output :
+            {
+                const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
+                BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType));
+                BOOST_TEST(m_OutputShape == info.GetShape());
+                break;
+            }
+            case armnn::LayerType::Quantize :
+            {
+                m_VisitedQuantizeLayer = true;
+                break;
+            }
+            case armnn::LayerType::Dequantize :
+            {
+                m_VisitedDequantizeLayer = true;
+                break;
+            }
+            default:
+            {}
+        }
     }
 
     void CheckQuantizeDequantizeLayerVisited(bool expected)
@@ -3119,39 +2068,52 @@
 
 BOOST_AUTO_TEST_CASE(TestConnectionPreservationAfterDynamicQuant)
 {
-    class TestConnectionPreservation : public LayerVisitorBase<VisitorNoThrowPolicy>
+    class TestConnectionPreservation : public IStrategy
     {
     public:
         TestConnectionPreservation(const Graph& graph)
-            : LayerVisitorBase<VisitorNoThrowPolicy>()
-            , m_Graph(graph)
+            : m_Graph(graph)
         {}
 
-        void VisitAdditionLayer(const IConnectableLayer* layer, const char*) override
-        {
-            CheckLayerName(layer->GetInputSlot(0).GetConnection()->GetOwningLayerGuid(), "reLU1");
-            CheckLayerName(layer->GetInputSlot(1).GetConnection()->GetOwningLayerGuid(), "reLU2");
-        }
+        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                             const BaseDescriptor& descriptor,
+                             const std::vector<armnn::ConstTensor>& constants,
+                             const char* name,
+                             const armnn::LayerBindingId id) override
+        {
+            IgnoreUnused(name, constants, id, descriptor);
+
+            switch (layer->GetType())
+            {
+                case armnn::LayerType::Addition :
+                {
+                    CheckLayerName(layer->GetInputSlot(0).GetConnection()->GetOwningLayerGuid(), "reLU1");
+                    CheckLayerName(layer->GetInputSlot(1).GetConnection()->GetOwningLayerGuid(), "reLU2");
+                    break;
+                }
+                default:
+                {}
+            }
+        }
 
         void CheckLayerName(LayerGuid guid, std::string expectedName)
         {
             bool guidFound = false;
             for (Layer* layer : m_Graph)
             {
                 if (layer->GetGuid() == guid)
                 {
                     BOOST_CHECK_EQUAL(layer->GetName(), expectedName.c_str());
                     guidFound = true;
                     break;
                 }
             }
             if (!guidFound)
             {
                 BOOST_FAIL("No layer matching the GUID was found");
             }
         }
 
     private:
         Graph m_Graph;
     };
 
@@ -3177,8 +2139,8 @@
     reLULayer2->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({1, 2, 2, 1}), DataType::Float32));
     addLayer1->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({1, 2, 2, 1}), DataType::Float32));
 
-    TestConnectionPreservation visitor1(PolymorphicDowncast<const Network*>(network.get())->GetGraph());
-    VisitLayersTopologically(network.get(), visitor1);
+    TestConnectionPreservation strategy1(PolymorphicDowncast<const Network*>(network.get())->GetGraph());
+    VisitLayersTopologically(network.get(), strategy1);
 
     armnn::INetworkQuantizerPtr quantizer = armnn::INetworkQuantizer::Create(network.get());
 
@@ -3193,8 +2155,8 @@
 
     INetworkPtr quantNetwork = quantizer->ExportNetwork();
 
-    TestConnectionPreservation visitor2(PolymorphicDowncast<const Network*>(quantNetwork.get())->GetGraph());
-    VisitLayersTopologically(quantNetwork.get(), visitor2);
+    TestConnectionPreservation strategy2(PolymorphicDowncast<const Network*>(quantNetwork.get())->GetGraph());
+    VisitLayersTopologically(quantNetwork.get(), strategy2);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnDeserializer/test/DeserializeReduceSum.cpp b/src/armnnDeserializer/test/DeserializeReduceSum.cpp
index d88613e..326560f 100644
--- a/src/armnnDeserializer/test/DeserializeReduceSum.cpp
+++ b/src/armnnDeserializer/test/DeserializeReduceSum.cpp
@@ -8,7 +8,6 @@
 #include "../Deserializer.hpp"
 
 #include <string>
-#include <iostream>
 
 BOOST_AUTO_TEST_SUITE(Deserializer)
 
diff --git a/src/armnnQuantizer/ArmNNQuantizerMain.cpp b/src/armnnQuantizer/ArmNNQuantizerMain.cpp
index 219363e..49652ef 100644
--- a/src/armnnQuantizer/ArmNNQuantizerMain.cpp
+++ b/src/armnnQuantizer/ArmNNQuantizerMain.cpp
@@ -61,8 +61,8 @@
         if (!dataSet.IsEmpty())
         {
             // Get the Input Tensor Infos
-            armnnQuantizer::InputLayerVisitor inputLayerVisitor;
-            network->Accept(inputLayerVisitor);
+            armnnQuantizer::InputLayerStrategy inputLayerStrategy;
+            network->ExecuteStrategy(inputLayerStrategy);
 
             for (armnnQuantizer::QuantizationInput quantizationInput : dataSet)
             {
@@ -72,7 +72,7 @@
                 unsigned int count = 0;
                 for (armnn::LayerBindingId layerBindingId : quantizationInput.GetLayerBindingIds())
                 {
-                    armnn::TensorInfo tensorInfo = inputLayerVisitor.GetTensorInfo(layerBindingId);
+                    armnn::TensorInfo tensorInfo = inputLayerStrategy.GetTensorInfo(layerBindingId);
                     inputData[count] = quantizationInput.GetDataForEntry(layerBindingId);
                     armnn::ConstTensor inputTensor(tensorInfo, inputData[count].data());
                     inputTensors.push_back(std::make_pair(layerBindingId, inputTensor));
diff --git a/src/armnnQuantizer/QuantizationDataSet.cpp b/src/armnnQuantizer/QuantizationDataSet.cpp
index acd301a..99fc021 100644
--- a/src/armnnQuantizer/QuantizationDataSet.cpp
+++ b/src/armnnQuantizer/QuantizationDataSet.cpp
@@ -47,6 +47,36 @@
 {
 }
 
+
+/// Strategy implementation to gather the TensorInfo for each LayerBindingId for creation of ConstTensor for Refine.
+
+void InputLayerStrategy::ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                                         const armnn::BaseDescriptor& descriptor,
+                                         const std::vector<armnn::ConstTensor>& constants,
+                                         const char* name,
+                                         const armnn::LayerBindingId id)
+{
+    armnn::IgnoreUnused(name, descriptor, constants);
+
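+    // Record each input layer's output TensorInfo against its binding id so it can
+    // be retrieved later via GetTensorInfo when building the input ConstTensors.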
+    m_TensorInfos.emplace(id, layer->GetOutputSlot(0).GetTensorInfo());
+}
+
+armnn::TensorInfo InputLayerStrategy::GetTensorInfo(armnn::LayerBindingId layerBindingId)
+{
+    auto iterator = m_TensorInfos.find(layerBindingId);
+    if (iterator != m_TensorInfos.end())
+    {
+        return iterator->second;
+    }
+    else
+    {
+        throw armnn::Exception("Could not retrieve tensor info for binding ID " + std::to_string(layerBindingId));
+    }
+}
+
 void InputLayerVisitor::VisitInputLayer(const armnn::IConnectableLayer* layer,
                                         armnn::LayerBindingId id,
                                         const char* name)
diff --git a/src/armnnQuantizer/QuantizationDataSet.hpp b/src/armnnQuantizer/QuantizationDataSet.hpp
index 3a97630..47b893a 100644
--- a/src/armnnQuantizer/QuantizationDataSet.hpp
+++ b/src/armnnQuantizer/QuantizationDataSet.hpp
@@ -43,6 +43,22 @@
 };
 
-/// Visitor class implementation to gather the TensorInfo for LayerBindingID for creation of ConstTensor for Refine.
+/// Strategy class implementation to gather the TensorInfo for each LayerBindingId for creation of ConstTensor for Refine.
+class InputLayerStrategy : public armnn::IStrategy
+{
+public:
+    virtual void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                                 const armnn::BaseDescriptor& descriptor,
+                                 const std::vector<armnn::ConstTensor>& constants,
+                                 const char* name,
+                                 const armnn::LayerBindingId id = 0) override;
+
+    armnn::TensorInfo GetTensorInfo(armnn::LayerBindingId layerBindingId);
+private:
+    std::map<armnn::LayerBindingId, armnn::TensorInfo> m_TensorInfos;
+};
+
+/// Visitor class implementation to gather the TensorInfo for LayerBindingID for creation of ConstTensor for Refine.
 class InputLayerVisitor : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
 {
 public:
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 28afac7..bcdaa08 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -3,6 +3,7 @@
 // SPDX-License-Identifier: MIT
 //
 #include "Serializer.hpp"
+#include "SerializerUtils.hpp"
 
 #include <armnn/Descriptors.hpp>
 #include <armnn/LstmParams.hpp>
@@ -10,9 +11,9 @@
 #include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/utility/NumericCast.hpp>
 
+#include <fmt/format.h>
 #include <iostream>
 
-#include "SerializerUtils.hpp"
 
 using namespace armnn;
 namespace fb = flatbuffers;
@@ -95,7 +96,7 @@
     }
 }
 
-uint32_t SerializerVisitor::GetSerializedId(armnn::LayerGuid guid)
+uint32_t SerializerStrategy::GetSerializedId(armnn::LayerGuid guid)
 {
     if (m_guidMap.empty())
     {
@@ -112,7 +113,7 @@
 }
 
 // Build FlatBuffer for Input Layer
-void SerializerVisitor::VisitInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
+void SerializerStrategy::SerializeInputLayer(const armnn::IConnectableLayer* layer,
+                                             LayerBindingId id,
+                                             const char* name)
 {
     IgnoreUnused(name);
 
@@ -134,7 +135,8 @@
 }
 
 // Build FlatBuffer for Output Layer
-void SerializerVisitor::VisitOutputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
+void SerializerStrategy::SerializeOutputLayer(const armnn::IConnectableLayer* layer,
+                                              LayerBindingId id, const char* name)
 {
     IgnoreUnused(name);
 
@@ -154,7 +156,7 @@
     CreateAnyLayer(flatBufferOutputLayer.o, serializer::Layer::Layer_OutputLayer);
 }
 
-void SerializerVisitor::VisitAbsLayer(const armnn::IConnectableLayer* layer, const char* name)
+void SerializerStrategy::SerializeAbsLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
     IgnoreUnused(name);
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Abs);
@@ -164,9 +166,9 @@
 }
 
 // Build FlatBuffer for Activation Layer
-void SerializerVisitor::VisitActivationLayer(const armnn::IConnectableLayer* layer,
-                                             const armnn::ActivationDescriptor& descriptor,
-                                             const char* name)
+void SerializerStrategy::SerializeActivationLayer(const armnn::IConnectableLayer* layer,
+                                                  const armnn::ActivationDescriptor& descriptor,
+                                                  const char* name)
 {
     IgnoreUnused(name);
 
@@ -189,7 +191,7 @@
 }
 
 // Build FlatBuffer for Addition Layer
-void SerializerVisitor::VisitAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
+void SerializerStrategy::SerializeAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
     IgnoreUnused(name);
 
@@ -204,9 +206,9 @@
 }
 
 // Build FlatBuffer for ArgMinMax Layer
-void SerializerVisitor::VisitArgMinMaxLayer(const armnn::IConnectableLayer *layer,
-                                            const armnn::ArgMinMaxDescriptor& descriptor,
-                                            const char *name)
+void SerializerStrategy::SerializeArgMinMaxLayer(const armnn::IConnectableLayer *layer,
+                                                 const armnn::ArgMinMaxDescriptor& descriptor,
+                                                 const char *name)
 {
     IgnoreUnused(name);
 
@@ -227,9 +229,9 @@
 }
 
 // Build FlatBuffer for BatchToSpaceNd Layer
-void SerializerVisitor::VisitBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
-                                                 const armnn::BatchToSpaceNdDescriptor& descriptor,
-                                                 const char* name)
+void SerializerStrategy::SerializeBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
+                                                      const armnn::BatchToSpaceNdDescriptor& descriptor,
+                                                      const char* name)
 {
     IgnoreUnused(name);
 
@@ -257,16 +259,19 @@
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_BatchToSpaceNdLayer);
 }
 
-void SerializerVisitor::VisitBatchNormalizationLayer(const armnn::IConnectableLayer* layer,
-                                                     const armnn::BatchNormalizationDescriptor& batchNormDescriptor,
-                                                     const armnn::ConstTensor& mean,
-                                                     const armnn::ConstTensor& variance,
-                                                     const armnn::ConstTensor& beta,
-                                                     const armnn::ConstTensor& gamma,
-                                                     const char* name)
+void SerializerStrategy::SerializeBatchNormalizationLayer(
+        const armnn::IConnectableLayer* layer,
+        const armnn::BatchNormalizationDescriptor& batchNormDescriptor,
+        const std::vector<armnn::ConstTensor>& constants,
+        const char* name)
 {
     IgnoreUnused(name);
 
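+    // All constant tensors now arrive through a single vector; for batch
+    // normalization the expected order is mean, variance, beta, gamma.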
+    const armnn::ConstTensor& mean     = constants[0];
+    const armnn::ConstTensor& variance = constants[1];
+    const armnn::ConstTensor& beta     = constants[2];
+    const armnn::ConstTensor& gamma    = constants[3];
+
     auto fbBatchNormalizationBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
     auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
                                                   m_flatBufferBuilder,
@@ -288,7 +293,7 @@
     CreateAnyLayer(fbBatchNormalizationLayer.o, serializer::Layer::Layer_BatchNormalizationLayer);
 }
 
-void SerializerVisitor::VisitComparisonLayer(const armnn::IConnectableLayer* layer,
+void SerializerStrategy::SerializeComparisonLayer(const armnn::IConnectableLayer* layer,
-                                             const armnn::ComparisonDescriptor& descriptor,
-                                             const char* name)
+                                                  const armnn::ComparisonDescriptor& descriptor,
+                                                  const char* name)
 {
@@ -304,12 +309,14 @@
 }
 
 // Build FlatBuffer for Constant Layer
-void SerializerVisitor::VisitConstantLayer(const armnn::IConnectableLayer* layer,
-                                           const armnn::ConstTensor& input,
-                                           const char* name)
+void SerializerStrategy::SerializeConstantLayer(const armnn::IConnectableLayer* layer,
+                                                const std::vector<armnn::ConstTensor>& constants,
+                                                const char* name)
 {
     IgnoreUnused(name);
 
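+    // A Constant layer carries exactly one constant tensor.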
+    const armnn::ConstTensor& input = constants[0];
+
     // Create FlatBuffer BaseLayer
     auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);
 
@@ -325,14 +332,15 @@
 }
 
 // Build FlatBuffer for Convolution2dLayer
-void SerializerVisitor::VisitConvolution2dLayer(const armnn::IConnectableLayer* layer,
-                                                const armnn::Convolution2dDescriptor& descriptor,
-                                                const armnn::ConstTensor& weights,
-                                                const armnn::Optional<armnn::ConstTensor>& biases,
-                                                const char* name)
+void SerializerStrategy::SerializeConvolution2dLayer(const armnn::IConnectableLayer* layer,
+                                                     const armnn::Convolution2dDescriptor& descriptor,
+                                                     const std::vector<armnn::ConstTensor>& constants,
+                                                     const char* name)
 {
     IgnoreUnused(name);
 
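+    // constants[0] holds the weights; an optional bias tensor follows at constants[1].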
+    const armnn::ConstTensor& weights = constants[0];
+
     // Create FlatBuffer BaseLayer
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
 
@@ -350,9 +358,10 @@
     auto flatBufferWeightsConstTensorInfo = CreateConstTensorInfo(weights);
     flatbuffers::Offset<serializer::ConstTensor> flatBufferBiasesConstTensorInfo;
 
-    if (biases.has_value())
+    if (constants.size() > 1)
     {
-        flatBufferBiasesConstTensorInfo = CreateConstTensorInfo(biases.value());
+        const armnn::ConstTensor biases = constants[1];
+        flatBufferBiasesConstTensorInfo = CreateConstTensorInfo(biases);
     }
 
     // Create the FlatBuffer Convolution2dLayer
@@ -366,7 +375,7 @@
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution2dLayer);
 }
 
-void SerializerVisitor::VisitDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
+void SerializerStrategy::SerializeDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
-                                               const armnn::DepthToSpaceDescriptor& descriptor,
-                                               const char* name)
+                                                    const armnn::DepthToSpaceDescriptor& descriptor,
+                                                    const char* name)
 {
@@ -382,14 +391,15 @@
     CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_DepthToSpaceLayer);
 }
 
-void SerializerVisitor::VisitDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
-                                                         const armnn::DepthwiseConvolution2dDescriptor& descriptor,
-                                                         const armnn::ConstTensor& weights,
-                                                         const armnn::Optional<armnn::ConstTensor>& biases,
-                                                         const char* name)
+void SerializerStrategy::SerializeDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
+                                                              const armnn::DepthwiseConvolution2dDescriptor& descriptor,
+                                                              const std::vector<armnn::ConstTensor>& constants,
+                                                              const char* name)
 {
     IgnoreUnused(name);
 
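+    // As with Convolution2d: weights at constants[0], optional bias at constants[1].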
+    const armnn::ConstTensor& weights = constants[0];
+
     auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthwiseConvolution2d);
     auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
                                                                descriptor.m_PadLeft,
@@ -405,9 +415,11 @@
 
     flatbuffers::Offset<serializer::ConstTensor> fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
     flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
-    if (biases.has_value())
+
+    if (constants.size() > 1)
     {
-        fbBiasesConstTensorInfo = CreateConstTensorInfo(biases.value());
+        const armnn::ConstTensor& biases = constants[1];
+        fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
     }
 
     auto flatBufferLayer = CreateDepthwiseConvolution2dLayer(m_flatBufferBuilder,
@@ -419,7 +431,7 @@
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DepthwiseConvolution2dLayer);
 }
 
-void SerializerVisitor::VisitDequantizeLayer(const armnn::IConnectableLayer* layer,
+void SerializerStrategy::SerializeDequantizeLayer(const armnn::IConnectableLayer* layer,
-                                             const char* name)
+                                                  const char* name)
 {
     IgnoreUnused(name);
@@ -430,13 +442,15 @@
     CreateAnyLayer(fbDequantizeLayer.o, serializer::Layer::Layer_DequantizeLayer);
 }
 
-void SerializerVisitor::VisitDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
-                                                       const armnn::DetectionPostProcessDescriptor& descriptor,
-                                                       const armnn::ConstTensor& anchors,
-                                                       const char* name)
+void SerializerStrategy::SerializeDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
+                                                            const armnn::DetectionPostProcessDescriptor& descriptor,
+                                                            const std::vector<armnn::ConstTensor>& constants,
+                                                            const char* name)
 {
     IgnoreUnused(name);
 
+    const armnn::ConstTensor& anchors = constants[0];
+
     auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_DetectionPostProcess);
     auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
                                                              descriptor.m_MaxDetections,
@@ -461,7 +475,7 @@
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DetectionPostProcessLayer);
 }
 
-void SerializerVisitor::VisitDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
+void SerializerStrategy::SerializeDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
     IgnoreUnused(name);
 
@@ -471,7 +485,7 @@
     CreateAnyLayer(fbDivisionLayer.o, serializer::Layer::Layer_DivisionLayer);
 }
 
-void SerializerVisitor::VisitElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
-                                                   const armnn::ElementwiseUnaryDescriptor& descriptor,
-                                                   const char* name)
+void SerializerStrategy::SerializeElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
+                                                        const armnn::ElementwiseUnaryDescriptor& descriptor,
+                                                        const char* name)
 {
@@ -486,7 +500,7 @@
     CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseUnaryLayer);
 }
 
-void SerializerVisitor::VisitEqualLayer(const armnn::IConnectableLayer* layer, const char* name)
+void SerializerStrategy::SerializeEqualLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
     IgnoreUnused(name);
 
@@ -496,7 +510,7 @@
     CreateAnyLayer(fbEqualLayer.o, serializer::Layer::Layer_EqualLayer);
 }
 
-void SerializerVisitor::VisitFillLayer(const armnn::IConnectableLayer* layer,
-                                       const armnn::FillDescriptor& fillDescriptor,
-                                       const char* name)
+void SerializerStrategy::SerializeFillLayer(const armnn::IConnectableLayer* layer,
+                                            const armnn::FillDescriptor& fillDescriptor,
+                                            const char* name)
 {
@@ -511,7 +525,7 @@
     CreateAnyLayer(fbFillLayer.o, serializer::Layer::Layer_FillLayer);
 }
 
-void SerializerVisitor::VisitFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
+void SerializerStrategy::SerializeFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
 {
     IgnoreUnused(name);
 
@@ -521,14 +535,7 @@
     CreateAnyLayer(flatBufferFloorLayer.o, serializer::Layer::Layer_FloorLayer);
 }
 
-void SerializerVisitor::VisitGatherLayer(const armnn::IConnectableLayer* layer,
-                                         const char* name)
-{
-    armnn::GatherDescriptor gatherDescriptor{};
-    VisitGatherLayer(layer, gatherDescriptor, name);
-}
-
-void SerializerVisitor::VisitGatherLayer(const armnn::IConnectableLayer* layer,
-                                         const armnn::GatherDescriptor& gatherDescriptor,
-                                         const char* name)
+void SerializerStrategy::SerializeGatherLayer(const armnn::IConnectableLayer* layer,
+                                              const armnn::GatherDescriptor& gatherDescriptor,
+                                              const char* name)
 {
@@ -542,7 +549,8 @@
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherLayer);
 }
 
-void SerializerVisitor::VisitGreaterLayer(const armnn::IConnectableLayer* layer, const char* name)
+void SerializerStrategy::SerializeGreaterLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
     IgnoreUnused(name);
 
@@ -552,7 +560,7 @@
     CreateAnyLayer(fbGreaterLayer.o, serializer::Layer::Layer_GreaterLayer);
 }
 
-void SerializerVisitor::VisitInstanceNormalizationLayer(
+void SerializerStrategy::SerializeInstanceNormalizationLayer(
     const armnn::IConnectableLayer* layer,
     const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
     const char* name)
@@ -572,7 +580,7 @@
     CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_InstanceNormalizationLayer);
 }
 
-void SerializerVisitor::VisitL2NormalizationLayer(const armnn::IConnectableLayer* layer,
-                                                  const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
-                                                  const char* name)
+void SerializerStrategy::SerializeL2NormalizationLayer(const armnn::IConnectableLayer* layer,
+                                                       const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
+                                                       const char* name)
 {
@@ -593,7 +601,7 @@
     CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_L2NormalizationLayer);
 }
 
-void SerializerVisitor::VisitLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
-                                                const armnn::LogicalBinaryDescriptor& descriptor,
-                                                const char* name)
+void SerializerStrategy::SerializeLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
+                                                     const armnn::LogicalBinaryDescriptor& descriptor,
+                                                     const char* name)
 {
@@ -608,7 +616,7 @@
     CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_LogicalBinaryLayer);
 }
 
-void SerializerVisitor::VisitLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
-                                             const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
-                                             const char* name)
+void SerializerStrategy::SerializeLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
+                                                  const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
+                                                  const char* name)
 {
@@ -632,10 +640,10 @@
     CreateAnyLayer(flatBufferLogSoftmaxLayer.o, serializer::Layer::Layer_LogSoftmaxLayer);
 }
 
-void SerializerVisitor::VisitLstmLayer(const armnn::IConnectableLayer* layer,
-                                       const armnn::LstmDescriptor& descriptor,
-                                       const armnn::LstmInputParams& params,
-                                       const char* name)
+void SerializerStrategy::SerializeLstmLayer(const armnn::IConnectableLayer* layer,
+                                            const armnn::LstmDescriptor& descriptor,
+                                            const std::vector<armnn::ConstTensor>& constants,
+                                            const char* name)
 {
     IgnoreUnused(name);
 
@@ -651,16 +659,21 @@
         descriptor.m_ProjectionEnabled,
         descriptor.m_LayerNormEnabled);
 
-    // Get mandatory input parameters
-    auto inputToForgetWeights = CreateConstTensorInfo(*params.m_InputToForgetWeights);
-    auto inputToCellWeights = CreateConstTensorInfo(*params.m_InputToCellWeights);
-    auto inputToOutputWeights = CreateConstTensorInfo(*params.m_InputToOutputWeights);
-    auto recurrentToForgetWeights = CreateConstTensorInfo(*params.m_RecurrentToForgetWeights);
-    auto recurrentToCellWeights = CreateConstTensorInfo(*params.m_RecurrentToCellWeights);
-    auto recurrentToOutputWeights = CreateConstTensorInfo(*params.m_RecurrentToOutputWeights);
-    auto forgetGateBias = CreateConstTensorInfo(*params.m_ForgetGateBias);
-    auto cellBias = CreateConstTensorInfo(*params.m_CellBias);
-    auto outputGateBias = CreateConstTensorInfo(*params.m_OutputGateBias);
+    // Index for constants vector
+    std::size_t i = 0;
+
+    // Get mandatory/basic input parameters
+    auto inputToForgetWeights     = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
+    auto inputToCellWeights       = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
+    auto inputToOutputWeights     = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
+    auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
+    auto recurrentToCellWeights   = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
+    auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
+    auto forgetGateBias           = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
+    auto cellBias                 = CreateConstTensorInfo(constants[i++]); //CellBias
+    auto outputGateBias           = CreateConstTensorInfo(constants[i++]); //OutputGateBias
 
     //Define optional parameters, these will be set depending on configuration in Lstm descriptor
     flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
@@ -678,33 +691,36 @@
 
     if (!descriptor.m_CifgEnabled)
     {
-        inputToInputWeights = CreateConstTensorInfo(*params.m_InputToInputWeights);
-        recurrentToInputWeights = CreateConstTensorInfo(*params.m_RecurrentToInputWeights);
-        cellToInputWeights = CreateConstTensorInfo(*params.m_CellToInputWeights);
-        inputGateBias = CreateConstTensorInfo(*params.m_InputGateBias);
-    }
-
-    if (descriptor.m_ProjectionEnabled)
-    {
-        projectionWeights = CreateConstTensorInfo(*params.m_ProjectionWeights);
-        projectionBias = CreateConstTensorInfo(*params.m_ProjectionBias);
+        inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
+        recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
+        inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
     }
 
     if (descriptor.m_PeepholeEnabled)
     {
-        cellToForgetWeights = CreateConstTensorInfo(*params.m_CellToForgetWeights);
-        cellToOutputWeights = CreateConstTensorInfo(*params.m_CellToOutputWeights);
+        if (!descriptor.m_CifgEnabled)
+        {
+            cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
+        }
+        cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
+        cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
+    }
+
+    if (descriptor.m_ProjectionEnabled)
+    {
+        projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
+        projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
     }
 
     if (descriptor.m_LayerNormEnabled)
     {
         if (!descriptor.m_CifgEnabled)
         {
-            inputLayerNormWeights = CreateConstTensorInfo((*params.m_InputLayerNormWeights));
+            inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
         }
-        forgetLayerNormWeights = CreateConstTensorInfo(*params.m_ForgetLayerNormWeights);
-        cellLayerNormWeights   = CreateConstTensorInfo(*params.m_CellLayerNormWeights);
-        outputLayerNormWeights = CreateConstTensorInfo(*params.m_OutputLayerNormWeights);
+        forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
+        cellLayerNormWeights   = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
+        outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
     }
 
     auto fbLstmParams = serializer::CreateLstmInputParams(
@@ -740,7 +756,7 @@
     CreateAnyLayer(fbLstmLayer.o, serializer::Layer::Layer_LstmLayer);
 }
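
Both SerializeLstmLayer above and SerializeQLstmLayer further down now rely on a purely positional contract with whatever produced the constants vector; nothing here checks names or sizes beyond the descriptor flags. Reconstructed from the sequential constants[i++] reads (the producing side, the layer's own ExecuteStrategy, is not shown in this patch):

    // Packing order implied by the index arithmetic above:
    //   [0..8]  mandatory: InputToForgetWeights, InputToCellWeights,
    //           InputToOutputWeights, RecurrentToForgetWeights,
    //           RecurrentToCellWeights, RecurrentToOutputWeights,
    //           ForgetGateBias, CellBias, OutputGateBias
    //   if !m_CifgEnabled      : InputToInputWeights, RecurrentToInputWeights,
    //                            InputGateBias
    //   if m_PeepholeEnabled   : CellToInputWeights (only when CIFG is off),
    //                            CellToForgetWeights, CellToOutputWeights
    //   if m_ProjectionEnabled : ProjectionWeights, ProjectionBias
    //   if m_LayerNormEnabled  : InputLayerNormWeights (only when CIFG is off),
    //                            ForgetLayerNormWeights, CellLayerNormWeights,
    //                            OutputLayerNormWeights
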
 
-void SerializerVisitor::VisitMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
+void SerializerStrategy::SerializeMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
     IgnoreUnused(name);
 
@@ -750,7 +766,7 @@
     CreateAnyLayer(fbMaximumLayer.o, serializer::Layer::Layer_MaximumLayer);
 }
 
-void SerializerVisitor::VisitMeanLayer(const armnn::IConnectableLayer* layer,
-                                       const armnn::MeanDescriptor& descriptor,
-                                       const char* name)
+void SerializerStrategy::SerializeMeanLayer(const armnn::IConnectableLayer* layer,
+                                            const armnn::MeanDescriptor& descriptor,
+                                            const char* name)
 {
@@ -768,7 +784,7 @@
     CreateAnyLayer(fbMeanLayer.o, serializer::Layer::Layer_MeanLayer);
 }
 
-void SerializerVisitor::VisitMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
+void SerializerStrategy::SerializeMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
     IgnoreUnused(name);
 
@@ -778,7 +794,7 @@
     CreateAnyLayer(fbMinimumLayer.o, serializer::Layer::Layer_MinimumLayer);
 }
 
-void SerializerVisitor::VisitMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
+void SerializerStrategy::SerializeMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
     IgnoreUnused(name);
 
@@ -788,14 +804,14 @@
     CreateAnyLayer(fbMergeLayer.o, serializer::Layer::Layer_MergeLayer);
 }
 
-void SerializerVisitor::VisitMergerLayer(const armnn::IConnectableLayer* layer,
-                                         const armnn::MergerDescriptor& mergerDescriptor,
-                                         const char* name)
+void SerializerStrategy::SerializeMergerLayer(const armnn::IConnectableLayer* layer,
+                                              const armnn::MergerDescriptor& mergerDescriptor,
+                                              const char* name)
 {
-    VisitConcatLayer(layer, mergerDescriptor, name);
+    SerializeConcatLayer(layer, mergerDescriptor, name);
 }
 
-void SerializerVisitor::VisitConcatLayer(const armnn::IConnectableLayer* layer,
-                                         const armnn::ConcatDescriptor& concatDescriptor,
-                                         const char* name)
+void SerializerStrategy::SerializeConcatLayer(const armnn::IConnectableLayer* layer,
+                                              const armnn::ConcatDescriptor& concatDescriptor,
+                                              const char* name)
 {
@@ -830,7 +846,7 @@
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConcatLayer);
 }
 
-void SerializerVisitor::VisitMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
+void SerializerStrategy::SerializeMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
     IgnoreUnused(name);
 
@@ -841,7 +857,7 @@
     CreateAnyLayer(fbMultiplicationLayer.o, serializer::Layer::Layer_MultiplicationLayer);
 }
 
-void SerializerVisitor::VisitPadLayer(const armnn::IConnectableLayer* layer,
-                                      const armnn::PadDescriptor& padDescriptor,
-                                      const char* name)
+void SerializerStrategy::SerializePadLayer(const armnn::IConnectableLayer* layer,
+                                           const armnn::PadDescriptor& padDescriptor,
+                                           const char* name)
 {
@@ -867,7 +883,7 @@
     CreateAnyLayer(flatBufferPadLayer.o, serializer::Layer::Layer_PadLayer);
 }
 
-void SerializerVisitor::VisitPermuteLayer(const armnn::IConnectableLayer* layer,
-                                          const armnn::PermuteDescriptor& permuteDescriptor,
-                                          const char* name)
+void SerializerStrategy::SerializePermuteLayer(const armnn::IConnectableLayer* layer,
+                                               const armnn::PermuteDescriptor& permuteDescriptor,
+                                               const char* name)
 {
@@ -895,7 +911,7 @@
 }
 
 // Build FlatBuffer for Rank Layer
-void SerializerVisitor::VisitRankLayer(const armnn::IConnectableLayer* layer,
-                                       const char* name)
+void SerializerStrategy::SerializeRankLayer(const armnn::IConnectableLayer* layer,
+                                            const char* name)
 {
     IgnoreUnused(name);
@@ -905,9 +921,9 @@
     CreateAnyLayer(flatBufferRankLayer.o, serializer::Layer::Layer_RankLayer);
 }
 
-void SerializerVisitor::VisitReduceLayer(const armnn::IConnectableLayer* layer,
-                                         const armnn::ReduceDescriptor& reduceDescriptor,
-                                         const char*)
+void SerializerStrategy::SerializeReduceLayer(const armnn::IConnectableLayer* layer,
+                                              const armnn::ReduceDescriptor& reduceDescriptor,
+                                              const char*)
 {
     auto fbReduceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reduce);
     auto fbDescriptor = CreateReduceDescriptor(m_flatBufferBuilder,
@@ -922,7 +938,7 @@
 }
 
 // Build FlatBuffer for Reshape Layer
-void SerializerVisitor::VisitReshapeLayer(const armnn::IConnectableLayer* layer,
-                                          const armnn::ReshapeDescriptor& reshapeDescriptor,
-                                          const char* name)
+void SerializerStrategy::SerializeReshapeLayer(const armnn::IConnectableLayer* layer,
+                                               const armnn::ReshapeDescriptor& reshapeDescriptor,
+                                               const char* name)
 {
@@ -948,7 +964,7 @@
     CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer);
 }
 
-void SerializerVisitor::VisitResizeBilinearLayer(const armnn::IConnectableLayer* layer,
-                                                 const armnn::ResizeBilinearDescriptor& resizeDescriptor,
-                                                 const char* name)
+void SerializerStrategy::SerializeResizeBilinearLayer(const armnn::IConnectableLayer* layer,
+                                                      const armnn::ResizeBilinearDescriptor& resizeDescriptor,
+                                                      const char* name)
 {
@@ -971,7 +987,7 @@
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeBilinearLayer);
 }
 
-void SerializerVisitor::VisitResizeLayer(const armnn::IConnectableLayer* layer,
-                                         const armnn::ResizeDescriptor& resizeDescriptor,
-                                         const char* name)
+void SerializerStrategy::SerializeResizeLayer(const armnn::IConnectableLayer* layer,
+                                              const armnn::ResizeDescriptor& resizeDescriptor,
+                                              const char* name)
 {
@@ -995,7 +1011,7 @@
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeLayer);
 }
 
-void SerializerVisitor::VisitRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name)
+void SerializerStrategy::SerializeRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
     IgnoreUnused(name);
 
@@ -1005,7 +1021,7 @@
     CreateAnyLayer(fbRsqrtLayer.o, serializer::Layer::Layer_RsqrtLayer);
 }
 
-void SerializerVisitor::VisitSliceLayer(const armnn::IConnectableLayer* layer,
-                                        const armnn::SliceDescriptor& sliceDescriptor,
-                                        const char* name)
+void SerializerStrategy::SerializeSliceLayer(const armnn::IConnectableLayer* layer,
+                                             const armnn::SliceDescriptor& sliceDescriptor,
+                                             const char* name)
 {
@@ -1022,7 +1038,7 @@
 }
 
 // Build FlatBuffer for Softmax Layer
-void SerializerVisitor::VisitSoftmaxLayer(const armnn::IConnectableLayer* layer,
-                                          const armnn::SoftmaxDescriptor& softmaxDescriptor,
-                                          const char* name)
+void SerializerStrategy::SerializeSoftmaxLayer(const armnn::IConnectableLayer* layer,
+                                               const armnn::SoftmaxDescriptor& softmaxDescriptor,
+                                               const char* name)
 {
@@ -1044,7 +1060,7 @@
     CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer);
 }
 
-void SerializerVisitor::VisitPooling2dLayer(const armnn::IConnectableLayer* layer,
-                                            const armnn::Pooling2dDescriptor& pooling2dDescriptor,
-                                            const char* name)
+void SerializerStrategy::SerializePooling2dLayer(const armnn::IConnectableLayer* layer,
+                                                 const armnn::Pooling2dDescriptor& pooling2dDescriptor,
+                                                 const char* name)
 {
@@ -1073,7 +1089,7 @@
     CreateAnyLayer(fbPooling2dLayer.o, serializer::Layer::Layer_Pooling2dLayer);
 }
 
-void SerializerVisitor::VisitPreluLayer(const armnn::IConnectableLayer* layer,
-                                        const char* name)
+void SerializerStrategy::SerializePreluLayer(const armnn::IConnectableLayer* layer,
+                                             const char* name)
 {
     IgnoreUnused(name);
@@ -1088,7 +1104,7 @@
     CreateAnyLayer(flatBufferPreluLayer.o, serializer::Layer::Layer_PreluLayer);
 }
 
-void SerializerVisitor::VisitQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
+void SerializerStrategy::SerializeQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
 {
     IgnoreUnused(name);
 
@@ -1099,14 +1115,15 @@
 }
 
 // Build FlatBuffer for FullyConnected Layer
-void SerializerVisitor::VisitFullyConnectedLayer(const armnn::IConnectableLayer* layer,
-                                                 const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
-                                                 const armnn::ConstTensor& weights,
-                                                 const armnn::Optional<armnn::ConstTensor>& biases,
-                                                 const char* name)
+void SerializerStrategy::SerializeFullyConnectedLayer(const armnn::IConnectableLayer* layer,
+                                                      const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
+                                                      const std::vector<armnn::ConstTensor>& constants,
+                                                      const char* name)
 {
     IgnoreUnused(name);
 
+    const armnn::ConstTensor& weights = constants.at(0);
+
     // Create FlatBuffer BaseLayer
     auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);
 
@@ -1123,7 +1140,8 @@
     flatbuffers::Offset<serializer::ConstTensor> flatBufferBiases;
     if (fullyConnectedDescriptor.m_BiasEnabled)
     {
-        flatBufferBiases = CreateConstTensorInfo(biases.value());
+        const armnn::ConstTensor& biases = constants.at(1);
+        flatBufferBiases = CreateConstTensorInfo(biases);
     }
 
     // Create FlatBuffer FullyConnectedLayer
@@ -1138,7 +1156,7 @@
 }
 
 // Build FlatBuffer for SpaceToBatchNd Layer
-void SerializerVisitor::VisitSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
-                                                 const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
-                                                 const char* name)
+void SerializerStrategy::SerializeSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
+                                                      const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
+                                                      const char* name)
 {
@@ -1169,7 +1187,7 @@
 }
 
 // Build FlatBuffer for SpaceToDepthLayer
-void SerializerVisitor::VisitSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
-                                               const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
-                                               const char* name)
+void SerializerStrategy::SerializeSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
+                                                    const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
+                                                    const char* name)
 {
@@ -1189,7 +1207,7 @@
 }
 
 // Build FlatBuffer for Splitter Layer
-void SerializerVisitor::VisitSplitterLayer(const armnn::IConnectableLayer* layer,
-                                           const armnn::ViewsDescriptor& viewsDescriptor,
-                                           const char* name)
+void SerializerStrategy::SerializeSplitterLayer(const armnn::IConnectableLayer* layer,
+                                                const armnn::ViewsDescriptor& viewsDescriptor,
+                                                const char* name)
 {
@@ -1255,7 +1273,7 @@
     CreateAnyLayer(flatBufferSplitterLayer.o, serializer::Layer::Layer_SplitterLayer);
 }
 
-void SerializerVisitor::VisitNormalizationLayer(const armnn::IConnectableLayer* layer,
-                                                const armnn::NormalizationDescriptor& descriptor,
-                                                const char* name)
+void SerializerStrategy::SerializeNormalizationLayer(const armnn::IConnectableLayer* layer,
+                                                     const armnn::NormalizationDescriptor& descriptor,
+                                                     const char* name)
 {
@@ -1280,7 +1298,7 @@
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_NormalizationLayer);
 }
 
-void SerializerVisitor::VisitStackLayer(const armnn::IConnectableLayer* layer,
-                                        const armnn::StackDescriptor& stackDescriptor,
-                                        const char* name)
+void SerializerStrategy::SerializeStackLayer(const armnn::IConnectableLayer* layer,
+                                             const armnn::StackDescriptor& stackDescriptor,
+                                             const char* name)
 {
@@ -1303,7 +1321,7 @@
     CreateAnyLayer(stackLayer.o, serializer::Layer::Layer_StackLayer);
 }
 
-void SerializerVisitor::VisitStandInLayer(const armnn::IConnectableLayer *layer,
-                                          const armnn::StandInDescriptor& standInDescriptor,
-                                          const char *name)
+void SerializerStrategy::SerializeStandInLayer(const armnn::IConnectableLayer *layer,
+                                               const armnn::StandInDescriptor& standInDescriptor,
+                                               const char *name)
 {
@@ -1319,7 +1337,7 @@
     CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_StandInLayer);
 }
 
-void SerializerVisitor::VisitStridedSliceLayer(const armnn::IConnectableLayer* layer,
-                                               const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
-                                               const char* name)
+void SerializerStrategy::SerializeStridedSliceLayer(const armnn::IConnectableLayer* layer,
+                                                    const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
+                                                    const char* name)
 {
@@ -1346,7 +1364,7 @@
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_StridedSliceLayer);
 }
 
-void SerializerVisitor::VisitSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
+void SerializerStrategy::SerializeSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
     IgnoreUnused(name);
 
@@ -1356,7 +1374,7 @@
     CreateAnyLayer(fbSubtractionLayer.o, serializer::Layer::Layer_SubtractionLayer);
 }
 
-void SerializerVisitor::VisitSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
+void SerializerStrategy::SerializeSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
     IgnoreUnused(name);
 
@@ -1366,15 +1384,16 @@
     CreateAnyLayer(fbSwitchLayer.o, serializer::Layer::Layer_SwitchLayer);
 }
 
-void SerializerVisitor::VisitTransposeConvolution2dLayer(
+void SerializerStrategy::SerializeTransposeConvolution2dLayer(
     const armnn::IConnectableLayer* layer,
     const armnn::TransposeConvolution2dDescriptor& descriptor,
-    const armnn::ConstTensor& weights,
-    const armnn::Optional<armnn::ConstTensor>& biases,
+    const std::vector<armnn::ConstTensor>& constants,
     const char* name)
 {
     IgnoreUnused(name);
 
+    const armnn::ConstTensor& weights = constants.at(0);
+
-    auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
+    auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_TransposeConvolution2d);
     auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
                                                                descriptor.m_PadLeft,
@@ -1389,9 +1408,10 @@
     // weights & biases
     auto fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
     flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
-    if (biases.has_value())
+    if (constants.size() > 1)
     {
-        fbBiasesConstTensorInfo = CreateConstTensorInfo(biases.value());
+        const armnn::ConstTensor& biases = constants.at(1);
+        fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
     }
 
     auto fbLayer = CreateTransposeConvolution2dLayer(m_flatBufferBuilder,
@@ -1403,7 +1423,7 @@
     CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_TransposeConvolution2dLayer);
 }
 
-void SerializerVisitor::VisitTransposeLayer(const armnn::IConnectableLayer* layer,
-                                            const armnn::TransposeDescriptor& descriptor,
-                                            const char* name)
+void SerializerStrategy::SerializeTransposeLayer(const armnn::IConnectableLayer* layer,
+                                                 const armnn::TransposeDescriptor& descriptor,
+                                                 const char* name)
 {
@@ -1430,10 +1450,10 @@
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_TransposeLayer);
 }
 
-void SerializerVisitor::VisitQLstmLayer(const armnn::IConnectableLayer* layer,
-                                        const armnn::QLstmDescriptor& descriptor,
-                                        const armnn::LstmInputParams& params,
-                                        const char* name)
+void SerializerStrategy::SerializeQLstmLayer(const armnn::IConnectableLayer* layer,
+                                             const armnn::QLstmDescriptor& descriptor,
+                                             const std::vector<armnn::ConstTensor>& constants,
+                                             const char* name)
 {
     IgnoreUnused(name);
 
@@ -1455,16 +1475,19 @@
             descriptor.m_HiddenStateScale
             );
 
+    // Index for constants vector
+    std::size_t i = 0;
+
     // Mandatory params
-    auto inputToForgetWeights = CreateConstTensorInfo(*params.m_InputToForgetWeights);
-    auto inputToCellWeights = CreateConstTensorInfo(*params.m_InputToCellWeights);
-    auto inputToOutputWeights = CreateConstTensorInfo(*params.m_InputToOutputWeights);
-    auto recurrentToForgetWeights = CreateConstTensorInfo(*params.m_RecurrentToForgetWeights);
-    auto recurrentToCellWeights = CreateConstTensorInfo(*params.m_RecurrentToCellWeights);
-    auto recurrentToOutputWeights = CreateConstTensorInfo(*params.m_RecurrentToOutputWeights);
-    auto forgetGateBias = CreateConstTensorInfo(*params.m_ForgetGateBias);
-    auto cellBias = CreateConstTensorInfo(*params.m_CellBias);
-    auto outputGateBias = CreateConstTensorInfo(*params.m_OutputGateBias);
+    auto inputToForgetWeights     = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
+    auto inputToCellWeights       = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
+    auto inputToOutputWeights     = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
+    auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
+    auto recurrentToCellWeights   = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
+    auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
+    auto forgetGateBias           = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
+    auto cellBias                 = CreateConstTensorInfo(constants[i++]); //CellBias
+    auto outputGateBias           = CreateConstTensorInfo(constants[i++]); //OutputGateBias
 
     // CIFG
     flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
@@ -1473,19 +1496,9 @@
 
     if (!descriptor.m_CifgEnabled)
     {
-        inputToInputWeights = CreateConstTensorInfo(*params.m_InputToInputWeights);
-        recurrentToInputWeights = CreateConstTensorInfo(*params.m_RecurrentToInputWeights);
-        inputGateBias = CreateConstTensorInfo(*params.m_InputGateBias);
-    }
-
-    // Projectiom
-    flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
-    flatbuffers::Offset<serializer::ConstTensor> projectionBias;
-
-    if (descriptor.m_ProjectionEnabled)
-    {
-        projectionWeights = CreateConstTensorInfo(*params.m_ProjectionWeights);
-        projectionBias = CreateConstTensorInfo(*params.m_ProjectionBias);
+        inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
+        recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
+        inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
     }
 
     // Peephole
@@ -1497,11 +1510,20 @@
     {
         if (!descriptor.m_CifgEnabled)
         {
-            cellToInputWeights  = CreateConstTensorInfo(*params.m_CellToInputWeights);
+            cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
         }
+        cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
+        cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
+    }
 
-        cellToForgetWeights = CreateConstTensorInfo(*params.m_CellToForgetWeights);
-        cellToOutputWeights = CreateConstTensorInfo(*params.m_CellToOutputWeights);
+    // Projection
+    flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
+    flatbuffers::Offset<serializer::ConstTensor> projectionBias;
+
+    if (descriptor.m_ProjectionEnabled)
+    {
+        projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
+        projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
     }
 
     // Layer norm
@@ -1514,12 +1536,11 @@
     {
         if (!descriptor.m_CifgEnabled)
         {
-            inputLayerNormWeights = CreateConstTensorInfo((*params.m_InputLayerNormWeights));
+            inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
         }
-
-        forgetLayerNormWeights = CreateConstTensorInfo(*params.m_ForgetLayerNormWeights);
-        cellLayerNormWeights   = CreateConstTensorInfo(*params.m_CellLayerNormWeights);
-        outputLayerNormWeights = CreateConstTensorInfo(*params.m_OutputLayerNormWeights);
+        forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
+        cellLayerNormWeights   = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
+        outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
     }
 
     auto fbQLstmParams = serializer::CreateQLstmInputParams(
@@ -1555,29 +1576,32 @@
     CreateAnyLayer(fbQLstmLayer.o, serializer::Layer::Layer_QLstmLayer);
 }
 
-void SerializerVisitor::VisitQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
-                                                const armnn::QuantizedLstmInputParams& params,
-                                                const char* name)
+void SerializerStrategy::SerializeQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
+                                                     const std::vector<armnn::ConstTensor>& constants,
+                                                     const char* name)
 {
     IgnoreUnused(name);
 
     auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);
 
+    // Index for constants vector
+    std::size_t i = 0;
+
     // Get input parameters
-    auto inputToInputWeights = CreateConstTensorInfo(params.GetInputToInputWeights());
-    auto inputToForgetWeights = CreateConstTensorInfo(params.GetInputToForgetWeights());
-    auto inputToCellWeights = CreateConstTensorInfo(params.GetInputToCellWeights());
-    auto inputToOutputWeights = CreateConstTensorInfo(params.GetInputToOutputWeights());
+    auto inputToInputWeights  = CreateConstTensorInfo(constants[i++]);
+    auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]);
+    auto inputToCellWeights   = CreateConstTensorInfo(constants[i++]);
+    auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]);
 
-    auto recurrentToInputWeights = CreateConstTensorInfo(params.GetRecurrentToInputWeights());
-    auto recurrentToForgetWeights = CreateConstTensorInfo(params.GetRecurrentToForgetWeights());
-    auto recurrentToCellWeights = CreateConstTensorInfo(params.GetRecurrentToCellWeights());
-    auto recurrentToOutputWeights = CreateConstTensorInfo(params.GetRecurrentToOutputWeights());
+    auto recurrentToInputWeights  = CreateConstTensorInfo(constants[i++]);
+    auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]);
+    auto recurrentToCellWeights   = CreateConstTensorInfo(constants[i++]);
+    auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]);
 
-    auto inputGateBias = CreateConstTensorInfo(params.GetInputGateBias());
-    auto forgetGateBias = CreateConstTensorInfo(params.GetForgetGateBias());
-    auto cellBias = CreateConstTensorInfo(params.GetCellBias());
-    auto outputGateBias = CreateConstTensorInfo(params.GetOutputGateBias());
+    auto inputGateBias  = CreateConstTensorInfo(constants[i++]);
+    auto forgetGateBias = CreateConstTensorInfo(constants[i++]);
+    auto cellBias       = CreateConstTensorInfo(constants[i++]);
+    auto outputGateBias = CreateConstTensorInfo(constants[i++]);
 
     auto fbQuantizedLstmParams = serializer::CreateQuantizedLstmInputParams(
         m_flatBufferBuilder,
@@ -1602,7 +1626,7 @@
     CreateAnyLayer(fbQuantizedLstmLayer.o, serializer::Layer::Layer_QuantizedLstmLayer);
 }
 
-fb::Offset<serializer::LayerBase> SerializerVisitor::CreateLayerBase(const IConnectableLayer* layer,
-                                                                     const serializer::LayerType layerType)
+fb::Offset<serializer::LayerBase> SerializerStrategy::CreateLayerBase(const IConnectableLayer* layer,
+                                                                      const serializer::LayerType layerType)
 {
 
@@ -1619,7 +1643,7 @@
                                        m_flatBufferBuilder.CreateVector(outputSlots));
 }
 
-void SerializerVisitor::CreateAnyLayer(const flatbuffers::Offset<void>& layer, const serializer::Layer serializerLayer)
+void SerializerStrategy::CreateAnyLayer(const flatbuffers::Offset<void>& layer, const serializer::Layer serializerLayer)
 {
 
     auto anyLayer = armnnSerializer::CreateAnyLayer(m_flatBufferBuilder, serializerLayer, layer);
@@ -1627,7 +1651,7 @@
 }
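
CreateLayerBase and CreateAnyLayer are the two halves of the boilerplate that every Serialize*Layer method in this file follows. A sketch of the pattern for a hypothetical layer; FooLayer, LayerType_Foo and CreateFooLayer are illustrative names, not entries in the real armnnSerializer schema:

    void SerializerStrategy::SerializeFooLayer(const armnn::IConnectableLayer* layer,
                                               const char* name)
    {
        IgnoreUnused(name);
        // 1. Wrap the layer's id, name and input/output slots into a LayerBase table.
        auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Foo);
        // 2. Build the layer-specific table (plus a descriptor table, if the layer has one).
        auto fbFooLayer  = serializer::CreateFooLayer(m_flatBufferBuilder, fbBaseLayer);
        // 3. Record it in the serialized graph through the AnyLayer union.
        CreateAnyLayer(fbFooLayer.o, serializer::Layer::Layer_FooLayer);
    }
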
 
 template <typename T>
-flatbuffers::Offset<flatbuffers::Vector<T>> SerializerVisitor::CreateDataVector(const void* memory, unsigned int size)
+flatbuffers::Offset<flatbuffers::Vector<T>> SerializerStrategy::CreateDataVector(const void* memory, unsigned int size)
 {
     const T* buffer = reinterpret_cast<const T*>(memory);
     std::vector<T> vector(buffer, buffer + (size / sizeof(T)));
@@ -1635,7 +1659,7 @@
     return fbVector;
 }
 
-flatbuffers::Offset<TensorInfo>  SerializerVisitor::CreateTensorInfo(const armnn::TensorInfo& tensorInfo)
+flatbuffers::Offset<TensorInfo> SerializerStrategy::CreateTensorInfo(const armnn::TensorInfo& tensorInfo)
 {
     // Get the dimensions
     std::vector<unsigned int> shape;
@@ -1674,7 +1698,7 @@
 }
 
 flatbuffers::Offset<serializer::ConstTensor>
-    SerializerVisitor::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
+    SerializerStrategy::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
 {
     armnn::TensorInfo tensorInfo = constTensor.GetInfo();
 
@@ -1724,7 +1748,7 @@
     return flatBufferConstTensor;
 }
 
-flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> SerializerVisitor::GetVersionTable()
+flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> SerializerStrategy::GetVersionTable()
 {
     flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> versionsTable =
         serializer::CreateFeatureCompatibilityVersions(
@@ -1735,7 +1759,7 @@
 }
 
 std::vector<fb::Offset<serializer::InputSlot>>
-    SerializerVisitor::CreateInputSlots(const armnn::IConnectableLayer* layer)
+    SerializerStrategy::CreateInputSlots(const armnn::IConnectableLayer* layer)
 {
     std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
 
@@ -1757,7 +1781,7 @@
 }
 
 std::vector<fb::Offset<serializer::OutputSlot>>
-    SerializerVisitor::CreateOutputSlots(const armnn::IConnectableLayer* layer)
+    SerializerStrategy::CreateOutputSlots(const armnn::IConnectableLayer* layer)
 {
     std::vector<fb::Offset<serializer::OutputSlot>> outputSlots;
 
@@ -1775,32 +1799,421 @@
     return outputSlots;
 }
 
+void SerializerStrategy::ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                                         const BaseDescriptor& descriptor,
+                                         const std::vector<armnn::ConstTensor>& constants,
+                                         const char* name,
+                                         const armnn::LayerBindingId id)
+{
+    switch (layer->GetType())
+    {
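+        // Each case below downcasts the generic BaseDescriptor to the concrete
+        // descriptor type implied by the layer's GetType(), then forwards the
+        // layer, the descriptor and (where the layer owns constants) the packed
+        // tensors to the matching Serialize*Layer overload.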
+        case armnn::LayerType::Activation :
+        {
+            const armnn::ActivationDescriptor& layerDescriptor =
+                    static_cast<const armnn::ActivationDescriptor&>(descriptor);
+            SerializeActivationLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Addition :
+        {
+            SerializeAdditionLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::ArgMinMax :
+        {
+            const armnn::ArgMinMaxDescriptor& layerDescriptor =
+                    static_cast<const armnn::ArgMinMaxDescriptor&>(descriptor);
+            SerializeArgMinMaxLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::BatchNormalization :
+        {
+            const armnn::BatchNormalizationDescriptor& layerDescriptor =
+                    static_cast<const armnn::BatchNormalizationDescriptor&>(descriptor);
+            SerializeBatchNormalizationLayer(layer,
+                                             layerDescriptor,
+                                             constants,
+                                             name);
+            break;
+        }
+        case armnn::LayerType::BatchToSpaceNd :
+        {
+            const armnn::BatchToSpaceNdDescriptor& layerDescriptor =
+                    static_cast<const armnn::BatchToSpaceNdDescriptor&>(descriptor);
+            SerializeBatchToSpaceNdLayer(layer,
+                                         layerDescriptor,
+                                         name);
+            break;
+        }
+        case armnn::LayerType::Comparison :
+        {
+            const armnn::ComparisonDescriptor& layerDescriptor =
+                    static_cast<const armnn::ComparisonDescriptor&>(descriptor);
+            SerializeComparisonLayer(layer,
+                                     layerDescriptor,
+                                     name);
+            break;
+        }
+        case armnn::LayerType::Concat :
+        {
+            const armnn::ConcatDescriptor& layerDescriptor =
+                    static_cast<const armnn::ConcatDescriptor&>(descriptor);
+            SerializeConcatLayer(layer,
+                                 layerDescriptor,
+                                 name);
+            break;
+        }
+        case armnn::LayerType::Constant :
+        {
+            SerializeConstantLayer(layer,
+                                   constants,
+                                   name);
+            break;
+        }
+        case armnn::LayerType::Convolution2d :
+        {
+            const armnn::Convolution2dDescriptor& layerDescriptor =
+                    static_cast<const armnn::Convolution2dDescriptor&>(descriptor);
+            SerializeConvolution2dLayer(layer,
+                                        layerDescriptor,
+                                        constants,
+                                        name);
+            break;
+        }
+        case armnn::LayerType::DepthToSpace :
+        {
+            const armnn::DepthToSpaceDescriptor& layerDescriptor =
+                    static_cast<const armnn::DepthToSpaceDescriptor&>(descriptor);
+            SerializeDepthToSpaceLayer(layer,
+                                       layerDescriptor,
+                                       name);
+            break;
+        }
+        case armnn::LayerType::DepthwiseConvolution2d :
+        {
+            const armnn::DepthwiseConvolution2dDescriptor& layerDescriptor =
+                    static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor);
+            SerializeDepthwiseConvolution2dLayer(layer,
+                                                 layerDescriptor,
+                                                 constants,
+                                                 name);
+            break;
+        }
+        case armnn::LayerType::Dequantize :
+        {
+            SerializeDequantizeLayer(layer,
+                                     name);
+            break;
+        }
+        case armnn::LayerType::DetectionPostProcess :
+        {
+            const armnn::DetectionPostProcessDescriptor& layerDescriptor =
+                    static_cast<const armnn::DetectionPostProcessDescriptor&>(descriptor);
+            SerializeDetectionPostProcessLayer(layer, layerDescriptor, constants, name);
+            break;
+        }
+        case armnn::LayerType::Division :
+        {
+            SerializeDivisionLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::ElementwiseUnary :
+        {
+            const armnn::ElementwiseUnaryDescriptor& layerDescriptor =
+                    static_cast<const armnn::ElementwiseUnaryDescriptor&>(descriptor);
+            SerializeElementwiseUnaryLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Fill :
+        {
+            const armnn::FillDescriptor& layerDescriptor =
+                    static_cast<const armnn::FillDescriptor&>(descriptor);
+            SerializeFillLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Floor :
+        {
+            SerializeFloorLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::FullyConnected :
+        {
+            const armnn::FullyConnectedDescriptor& layerDescriptor =
+                    static_cast<const armnn::FullyConnectedDescriptor&>(descriptor);
+            SerializeFullyConnectedLayer(layer, layerDescriptor, constants, name);
+            break;
+        }
+        case armnn::LayerType::Gather :
+        {
+            const armnn::GatherDescriptor& layerDescriptor =
+                    static_cast<const armnn::GatherDescriptor&>(descriptor);
+            SerializeGatherLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Input:
+        {
+            SerializeInputLayer(layer, id, name);
+            break;
+        }
+        case armnn::LayerType::InstanceNormalization :
+        {
+            const armnn::InstanceNormalizationDescriptor& layerDescriptor =
+                    static_cast<const armnn::InstanceNormalizationDescriptor&>(descriptor);
+            SerializeInstanceNormalizationLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::L2Normalization :
+        {
+            const armnn::L2NormalizationDescriptor& layerDescriptor =
+                    static_cast<const armnn::L2NormalizationDescriptor&>(descriptor);
+            SerializeL2NormalizationLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::LogicalBinary :
+        {
+            const armnn::LogicalBinaryDescriptor& layerDescriptor =
+                    static_cast<const armnn::LogicalBinaryDescriptor&>(descriptor);
+            SerializeLogicalBinaryLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::LogSoftmax :
+        {
+            const armnn::LogSoftmaxDescriptor& layerDescriptor =
+                    static_cast<const armnn::LogSoftmaxDescriptor&>(descriptor);
+            SerializeLogSoftmaxLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Lstm :
+        {
+            const armnn::LstmDescriptor& layerDescriptor =
+                    static_cast<const armnn::LstmDescriptor&>(descriptor);
+            SerializeLstmLayer(layer, layerDescriptor, constants, name);
+            break;
+        }
+        case armnn::LayerType::QLstm :
+        {
+            const armnn::QLstmDescriptor& layerDescriptor =
+                    static_cast<const armnn::QLstmDescriptor&>(descriptor);
+            SerializeQLstmLayer(layer, layerDescriptor, constants, name);
+            break;
+        }
+        case armnn::LayerType::Maximum :
+        {
+            SerializeMaximumLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::Mean :
+        {
+            const armnn::MeanDescriptor& layerDescriptor =
+                    static_cast<const armnn::MeanDescriptor&>(descriptor);
+            SerializeMeanLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Merge :
+        {
+            SerializeMergeLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::Minimum :
+        {
+            SerializeMinimumLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::Multiplication :
+        {
+            SerializeMultiplicationLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::Normalization :
+        {
+            const armnn::NormalizationDescriptor& layerDescriptor =
+                    static_cast<const armnn::NormalizationDescriptor&>(descriptor);
+            SerializeNormalizationLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Output:
+        {
+            SerializeOutputLayer(layer, id, name);
+            break;
+        }
+        case armnn::LayerType::Pad :
+        {
+            const armnn::PadDescriptor& layerDescriptor =
+                    static_cast<const armnn::PadDescriptor&>(descriptor);
+            SerializePadLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Permute :
+        {
+            const armnn::PermuteDescriptor& layerDescriptor =
+                    static_cast<const armnn::PermuteDescriptor&>(descriptor);
+            SerializePermuteLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Pooling2d :
+        {
+            const armnn::Pooling2dDescriptor& layerDescriptor =
+                    static_cast<const armnn::Pooling2dDescriptor&>(descriptor);
+            SerializePooling2dLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Prelu :
+        {
+            SerializePreluLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::Quantize :
+        {
+            SerializeQuantizeLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::QuantizedLstm:
+        {
+            SerializeQuantizedLstmLayer(layer, constants, name);
+            break;
+        }
+        case armnn::LayerType::Reshape:
+        {
+            const armnn::ReshapeDescriptor &layerDescriptor =
+                    static_cast<const armnn::ReshapeDescriptor &>(descriptor);
+            SerializeReshapeLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Rank:
+        {
+            SerializeRankLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::Reduce:
+        {
+            const armnn::ReduceDescriptor& layerDescriptor =
+                    static_cast<const armnn::ReduceDescriptor&>(descriptor);
+            SerializeReduceLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Resize:
+        {
+            const armnn::ResizeDescriptor& layerDescriptor =
+                    static_cast<const armnn::ResizeDescriptor&>(descriptor);
+            SerializeResizeLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Slice:
+        {
+            const armnn::SliceDescriptor& layerDescriptor =
+                    static_cast<const armnn::SliceDescriptor&>(descriptor);
+            SerializeSliceLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Softmax:
+        {
+            const armnn::SoftmaxDescriptor& layerDescriptor =
+                    static_cast<const armnn::SoftmaxDescriptor&>(descriptor);
+            SerializeSoftmaxLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::SpaceToBatchNd:
+        {
+            const armnn::SpaceToBatchNdDescriptor& layerDescriptor =
+                    static_cast<const armnn::SpaceToBatchNdDescriptor&>(descriptor);
+            SerializeSpaceToBatchNdLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::SpaceToDepth:
+        {
+            const armnn::SpaceToDepthDescriptor& layerDescriptor =
+                    static_cast<const armnn::SpaceToDepthDescriptor&>(descriptor);
+            SerializeSpaceToDepthLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Splitter:
+        {
+            const armnn::SplitterDescriptor& layerDescriptor =
+                    static_cast<const armnn::SplitterDescriptor&>(descriptor);
+            SerializeSplitterLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Stack:
+        {
+            const armnn::StackDescriptor& layerDescriptor =
+                    static_cast<const armnn::StackDescriptor&>(descriptor);
+            SerializeStackLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::StandIn:
+        {
+            const armnn::StandInDescriptor& layerDescriptor =
+                    static_cast<const armnn::StandInDescriptor&>(descriptor);
+            SerializeStandInLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::StridedSlice:
+        {
+            const armnn::StridedSliceDescriptor& layerDescriptor =
+                    static_cast<const armnn::StridedSliceDescriptor&>(descriptor);
+            SerializeStridedSliceLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Subtraction:
+        {
+            SerializeSubtractionLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::Switch:
+        {
+            SerializeSwitchLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::Transpose:
+        {
+            const armnn::TransposeDescriptor& layerDescriptor =
+                    static_cast<const armnn::TransposeDescriptor&>(descriptor);
+            SerializeTransposeLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::TransposeConvolution2d:
+        {
+            const armnn::TransposeConvolution2dDescriptor& layerDescriptor =
+                    static_cast<const armnn::TransposeConvolution2dDescriptor&>(descriptor);
+            SerializeTransposeConvolution2dLayer(layer, layerDescriptor, constants, name);
+            break;
+        }
+        default:
+        {
+            throw InvalidArgumentException(
+                    fmt::format("A layer of unknown type was given to the serializer. Layer name: {}; Layer Id: {}",
+                                layer->GetName(),
+                                id));
+        }
+    }
+}
+
 void ISerializer::SerializerImpl::Serialize(const INetwork& inNetwork)
 {
-    // Iterate through to network
+    // Iterate through the network
-    inNetwork.Accept(m_SerializerVisitor);
-    flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerVisitor.GetFlatBufferBuilder();
+    inNetwork.ExecuteStrategy(m_SerializerStrategy);
+    flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
 
     // Create FlatBuffer SerializedGraph
     auto serializedGraph = serializer::CreateSerializedGraph(
-        fbBuilder,
-        fbBuilder.CreateVector(m_SerializerVisitor.GetSerializedLayers()),
-        fbBuilder.CreateVector(m_SerializerVisitor.GetInputIds()),
-        fbBuilder.CreateVector(m_SerializerVisitor.GetOutputIds()),
-        m_SerializerVisitor.GetVersionTable());
+        fbBuilder,
+        fbBuilder.CreateVector(m_SerializerStrategy.GetSerializedLayers()),
+        fbBuilder.CreateVector(m_SerializerStrategy.GetInputIds()),
+        fbBuilder.CreateVector(m_SerializerStrategy.GetOutputIds()),
+        m_SerializerStrategy.GetVersionTable());
 
     // Serialize the graph
     fbBuilder.Finish(serializedGraph);
 }
 
 bool ISerializer::SerializerImpl::SaveSerializedToStream(std::ostream& stream)
 {
-    flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerVisitor.GetFlatBufferBuilder();
+    flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
 
     auto bytesToWrite = armnn::numeric_cast<std::streamsize>(fbBuilder.GetSize());
     stream.write(reinterpret_cast<const char*>(fbBuilder.GetBufferPointer()), bytesToWrite);
     return !stream.bad();
 }
 
-
 } // namespace armnnSerializer
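
With the visitor gone, saving a network is a single ExecuteStrategy traversal followed by a FlatBuffer flush. A sketch of end-to-end use, assuming the public ISerializer::Create() factory, which this patch leaves unchanged:

    #include <armnnSerializer/ISerializer.hpp>
    #include <fstream>
    #include <string>

    void SaveNetwork(const armnn::INetwork& network, const std::string& path)
    {
        auto serializer = armnnSerializer::ISerializer::Create();
        serializer->Serialize(network);           // one ExecuteStrategy pass over the graph
        std::ofstream file(path, std::ios::binary);
        serializer->SaveSerializedToStream(file); // write the finished FlatBuffer
    }
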
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 10971fd..7226006 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -5,6 +5,7 @@
 #pragma once
 
 #include <armnn/ILayerVisitor.hpp>
+#include <armnn/IStrategy.hpp>
 #include <armnn/LayerVisitorBase.hpp>
 
 #include <armnnSerializer/ISerializer.hpp>
@@ -18,11 +19,17 @@
 namespace armnnSerializer
 {
 
-class SerializerVisitor : public armnn::ILayerVisitor
+class SerializerStrategy : public armnn::IStrategy
 {
 public:
-    SerializerVisitor() : m_layerId(0) {}
-    ~SerializerVisitor() {}
+    SerializerStrategy() : m_layerId(0) {}
+    ~SerializerStrategy() {}
+
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id) override;
 
     flatbuffers::FlatBufferBuilder& GetFlatBufferBuilder()
     {
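
include/armnn/IStrategy.hpp itself is outside this excerpt; judging from the override declared above, its core is a single pure-virtual entry point. A reconstruction for orientation only — the real header may differ in defaults and protected members:

    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>
    #include <vector>

    namespace armnn
    {
    // Presumed shape of the new unified interface (illustrative sketch).
    class IStrategy
    {
    protected:
        IStrategy() {}
        virtual ~IStrategy() {}

    public:
        virtual void ExecuteStrategy(const IConnectableLayer* layer,
                                     const BaseDescriptor& descriptor,
                                     const std::vector<ConstTensor>& constants,
                                     const char* name,
                                     const LayerBindingId id = 0) = 0;
    };
    } // namespace armnn
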
@@ -46,261 +53,7 @@
 
     flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> GetVersionTable();
 
-
-    ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
-    void VisitAbsLayer(const armnn::IConnectableLayer* layer,
-                       const char* name = nullptr) override;
-
-    void VisitActivationLayer(const armnn::IConnectableLayer* layer,
-                              const armnn::ActivationDescriptor& descriptor,
-                              const char* name = nullptr) override;
-
-    void VisitAdditionLayer(const armnn::IConnectableLayer* layer,
-                            const char* name = nullptr) override;
-
-    void VisitArgMinMaxLayer(const armnn::IConnectableLayer* layer,
-                             const armnn::ArgMinMaxDescriptor& argMinMaxDescriptor,
-                             const char* name = nullptr) override;
-
-    void VisitBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
-                                  const armnn::BatchToSpaceNdDescriptor& descriptor,
-                                  const char* name = nullptr) override;
-
-    void VisitBatchNormalizationLayer(const armnn::IConnectableLayer* layer,
-                                      const armnn::BatchNormalizationDescriptor& BatchNormalizationDescriptor,
-                                      const armnn::ConstTensor& mean,
-                                      const armnn::ConstTensor& variance,
-                                      const armnn::ConstTensor& beta,
-                                      const armnn::ConstTensor& gamma,
-                                      const char* name = nullptr) override;
-
-    void VisitComparisonLayer(const armnn::IConnectableLayer* layer,
-                              const armnn::ComparisonDescriptor& descriptor,
-                              const char* name = nullptr) override;
-
-    void VisitConcatLayer(const armnn::IConnectableLayer* layer,
-                          const armnn::ConcatDescriptor& concatDescriptor,
-                          const char* name = nullptr) override;
-
-    void VisitConstantLayer(const armnn::IConnectableLayer* layer,
-                            const armnn::ConstTensor& input,
-                            const char* = nullptr) override;
-
-    void VisitConvolution2dLayer(const armnn::IConnectableLayer* layer,
-                                 const armnn::Convolution2dDescriptor& descriptor,
-                                 const armnn::ConstTensor& weights,
-                                 const armnn::Optional<armnn::ConstTensor>& biases,
-                                 const char* = nullptr) override;
-
-    void VisitDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
-                                const armnn::DepthToSpaceDescriptor& descriptor,
-                                const char* name = nullptr) override;
-
-    void VisitDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
-                                          const armnn::DepthwiseConvolution2dDescriptor& descriptor,
-                                          const armnn::ConstTensor& weights,
-                                          const armnn::Optional<armnn::ConstTensor>& biases,
-                                          const char* name = nullptr) override;
-
-    void VisitDequantizeLayer(const armnn::IConnectableLayer* layer,
-                              const char* name = nullptr) override;
-
-    void VisitDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
-                                        const armnn::DetectionPostProcessDescriptor& descriptor,
-                                        const armnn::ConstTensor& anchors,
-                                        const char* name = nullptr) override;
-
-    void VisitDivisionLayer(const armnn::IConnectableLayer* layer,
-                            const char* name = nullptr) override;
-
-    void VisitElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
-                                    const armnn::ElementwiseUnaryDescriptor& descriptor,
-                                    const char* name = nullptr) override;
-
-    ARMNN_DEPRECATED_MSG("Use VisitComparisonLayer instead")
-    void VisitEqualLayer(const armnn::IConnectableLayer* layer,
-                         const char* name = nullptr) override;
-
-    void VisitFillLayer(const armnn::IConnectableLayer* layer,
-                        const armnn::FillDescriptor& fillDescriptor,
-                        const char* name = nullptr) override;
-
-    void VisitFloorLayer(const armnn::IConnectableLayer *layer,
-                         const char *name = nullptr) override;
-
-    void VisitFullyConnectedLayer(const armnn::IConnectableLayer* layer,
-                                  const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
-                                  const armnn::ConstTensor& weights,
-                                  const armnn::Optional<armnn::ConstTensor>& biases,
-                                  const char* name = nullptr) override;
-
-    ARMNN_DEPRECATED_MSG("Use VisitGatherLayer with descriptor instead")
-    void VisitGatherLayer(const armnn::IConnectableLayer* layer,
-                          const char* name = nullptr) override;
-
-    void VisitGatherLayer(const armnn::IConnectableLayer* layer,
-                          const armnn::GatherDescriptor& gatherDescriptor,
-                          const char* name = nullptr) override;
-
-    ARMNN_DEPRECATED_MSG("Use VisitComparisonLayer instead")
-    void VisitGreaterLayer(const armnn::IConnectableLayer* layer,
-                           const char* name = nullptr) override;
-
-    void VisitInputLayer(const armnn::IConnectableLayer* layer,
-                         armnn::LayerBindingId id,
-                         const char* name = nullptr) override;
-
-    void VisitInstanceNormalizationLayer(const armnn::IConnectableLayer* layer,
-                                         const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
-                                         const char* name = nullptr) override;
-
-    void VisitL2NormalizationLayer(const armnn::IConnectableLayer* layer,
-                                   const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
-                                   const char* name = nullptr) override;
-
-    void VisitLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
-                                 const armnn::LogicalBinaryDescriptor& descriptor,
-                                 const char* name = nullptr) override;
-
-    void VisitLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
-                              const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
-                              const char* name = nullptr) override;
-
-    void VisitLstmLayer(const armnn::IConnectableLayer* layer,
-                        const armnn::LstmDescriptor& descriptor,
-                        const armnn::LstmInputParams& params,
-                        const char* name = nullptr) override;
-
-    void VisitMeanLayer(const armnn::IConnectableLayer* layer,
-                        const armnn::MeanDescriptor& descriptor,
-                        const char* name) override;
-
-    void VisitMinimumLayer(const armnn::IConnectableLayer* layer,
-                           const char* name = nullptr) override;
-
-    void VisitMaximumLayer(const armnn::IConnectableLayer* layer,
-                           const char* name = nullptr) override;
-
-    void VisitMergeLayer(const armnn::IConnectableLayer* layer,
-                         const char* name = nullptr) override;
-
-    ARMNN_DEPRECATED_MSG("Use VisitConcatLayer instead")
-    void VisitMergerLayer(const armnn::IConnectableLayer* layer,
-                          const armnn::MergerDescriptor& mergerDescriptor,
-                          const char* name = nullptr) override;
-
-    void VisitMultiplicationLayer(const armnn::IConnectableLayer* layer,
-                                  const char* name = nullptr) override;
-
-    void VisitOutputLayer(const armnn::IConnectableLayer* layer,
-                          armnn::LayerBindingId id,
-                          const char* name = nullptr) override;
-
-    void VisitPadLayer(const armnn::IConnectableLayer* layer,
-                       const armnn::PadDescriptor& PadDescriptor,
-                       const char* name = nullptr) override;
-
-    void VisitPermuteLayer(const armnn::IConnectableLayer* layer,
-                           const armnn::PermuteDescriptor& PermuteDescriptor,
-                           const char* name = nullptr) override;
-
-    void VisitPooling2dLayer(const armnn::IConnectableLayer* layer,
-                             const armnn::Pooling2dDescriptor& pooling2dDescriptor,
-                             const char* name = nullptr) override;
-
-    void VisitPreluLayer(const armnn::IConnectableLayer* layer,
-                         const char* name = nullptr) override;
-
-    void VisitQuantizeLayer(const armnn::IConnectableLayer* layer,
-                            const char* name = nullptr) override;
-
-    void VisitQLstmLayer(const armnn::IConnectableLayer* layer,
-                         const armnn::QLstmDescriptor& descriptor,
-                         const armnn::LstmInputParams& params,
-                         const char* name = nullptr) override;
-
-    void VisitQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
-                                 const armnn::QuantizedLstmInputParams& params,
-                                 const char* name = nullptr) override;
-
-    void VisitRankLayer(const armnn::IConnectableLayer* layer,
-                        const char* name = nullptr) override;
-
-   void VisitReduceLayer(const armnn::IConnectableLayer* layer,
-                         const armnn::ReduceDescriptor& reduceDescriptor,
-                         const char* name = nullptr) override;
-
-    void VisitReshapeLayer(const armnn::IConnectableLayer* layer,
-                           const armnn::ReshapeDescriptor& reshapeDescriptor,
-                           const char* name = nullptr) override;
-
-    void VisitResizeLayer(const armnn::IConnectableLayer* layer,
-                          const armnn::ResizeDescriptor& resizeDescriptor,
-                          const char* name = nullptr) override;
-
-    ARMNN_DEPRECATED_MSG("Use VisitResizeLayer instead")
-    void VisitResizeBilinearLayer(const armnn::IConnectableLayer* layer,
-                                  const armnn::ResizeBilinearDescriptor& resizeDescriptor,
-                                  const char* name = nullptr) override;
-
-    ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
-    void VisitRsqrtLayer(const armnn::IConnectableLayer* layer,
-                         const char* name = nullptr) override;
-
-    void VisitSliceLayer(const armnn::IConnectableLayer* layer,
-                         const armnn::SliceDescriptor& sliceDescriptor,
-                         const char* name = nullptr) override;
-
-    void VisitSoftmaxLayer(const armnn::IConnectableLayer* layer,
-                           const armnn::SoftmaxDescriptor& softmaxDescriptor,
-                           const char* name = nullptr) override;
-
-    void VisitSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
-                                  const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
-                                  const char* name = nullptr) override;
-
-    void VisitSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
-                                const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
-                                const char* name = nullptr) override;
-
-    void VisitNormalizationLayer(const armnn::IConnectableLayer* layer,
-                                 const armnn::NormalizationDescriptor& normalizationDescriptor,
-                                 const char* name = nullptr) override;
-
-    void VisitSplitterLayer(const armnn::IConnectableLayer* layer,
-                            const armnn::ViewsDescriptor& viewsDescriptor,
-                            const char* name = nullptr) override;
-
-    void VisitStandInLayer(const armnn::IConnectableLayer* layer,
-                           const armnn::StandInDescriptor& standInDescriptor,
-                           const char* name = nullptr) override;
-
-    void VisitStackLayer(const armnn::IConnectableLayer* layer,
-                         const armnn::StackDescriptor& stackDescriptor,
-                         const char* name = nullptr) override;
-
-    void VisitStridedSliceLayer(const armnn::IConnectableLayer* layer,
-                                const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
-                                const char* name = nullptr) override;
-
-    void VisitSubtractionLayer(const armnn::IConnectableLayer* layer,
-                               const char* name = nullptr) override;
-
-    void VisitSwitchLayer(const armnn::IConnectableLayer* layer,
-                          const char* name = nullptr) override;
-
-    void VisitTransposeConvolution2dLayer(const armnn::IConnectableLayer* layer,
-                                          const armnn::TransposeConvolution2dDescriptor& descriptor,
-                                          const armnn::ConstTensor& weights,
-                                          const armnn::Optional<armnn::ConstTensor>& biases,
-                                          const char* = nullptr) override;
-
-    void VisitTransposeLayer(const armnn::IConnectableLayer* layer,
-                             const armnn::TransposeDescriptor& descriptor,
-                             const char* name = nullptr) override;
-
 private:
-
     /// Creates the Input Slots and Output Slots and LayerBase for the layer.
     flatbuffers::Offset<armnnSerializer::LayerBase> CreateLayerBase(
             const armnn::IConnectableLayer* layer,
@@ -324,11 +77,11 @@
 
     /// Creates the serializer InputSlots for the layer.
     std::vector<flatbuffers::Offset<armnnSerializer::InputSlot>> CreateInputSlots(
             const armnn::IConnectableLayer* layer);
 
     /// Creates the serializer OutputSlots for the layer.
     std::vector<flatbuffers::Offset<armnnSerializer::OutputSlot>> CreateOutputSlots(
             const armnn::IConnectableLayer* layer);
 
     /// FlatBufferBuilder to create our layers' FlatBuffers.
     flatbuffers::FlatBufferBuilder m_flatBufferBuilder;
@@ -347,8 +100,250 @@
 
     /// layer within our FlatBuffer index.
     uint32_t m_layerId;
+
+private:
+    ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
+    void SerializeAbsLayer(const armnn::IConnectableLayer* layer,
+                                  const char* name = nullptr);
+
+    void SerializeActivationLayer(const armnn::IConnectableLayer* layer,
+                                  const armnn::ActivationDescriptor& descriptor,
+                                  const char* name = nullptr);
+
+    void SerializeAdditionLayer(const armnn::IConnectableLayer* layer,
+                                const char* name = nullptr);
+
+    void SerializeArgMinMaxLayer(const armnn::IConnectableLayer* layer,
+                                 const armnn::ArgMinMaxDescriptor& argMinMaxDescriptor,
+                                 const char* name = nullptr);
+
+    void SerializeBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
+                                      const armnn::BatchToSpaceNdDescriptor& descriptor,
+                                      const char* name = nullptr);
+
+    void SerializeBatchNormalizationLayer(const armnn::IConnectableLayer* layer,
+                                          const armnn::BatchNormalizationDescriptor& batchNormalizationDescriptor,
+                                          const std::vector<armnn::ConstTensor>& constants,
+                                          const char* name = nullptr);
+
+    void SerializeComparisonLayer(const armnn::IConnectableLayer* layer,
+                                  const armnn::ComparisonDescriptor& descriptor,
+                                  const char* name = nullptr);
+
+    void SerializeConcatLayer(const armnn::IConnectableLayer* layer,
+                              const armnn::ConcatDescriptor& concatDescriptor,
+                              const char* name = nullptr);
+
+    void SerializeConstantLayer(const armnn::IConnectableLayer* layer,
+                                const std::vector<armnn::ConstTensor>& constants,
+                                const char* name = nullptr);
+
+    void SerializeConvolution2dLayer(const armnn::IConnectableLayer* layer,
+                                     const armnn::Convolution2dDescriptor& descriptor,
+                                     const std::vector<armnn::ConstTensor>& constants,
+                                     const char* name = nullptr);
+
+    void SerializeDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
+                                    const armnn::DepthToSpaceDescriptor& descriptor,
+                                    const char* name = nullptr);
+
+    void SerializeDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
+                                              const armnn::DepthwiseConvolution2dDescriptor& descriptor,
+                                              const std::vector<armnn::ConstTensor>& constants,
+                                              const char* name = nullptr);
+
+    void SerializeDequantizeLayer(const armnn::IConnectableLayer* layer,
+                                  const char* name = nullptr);
+
+    void SerializeDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
+                                            const armnn::DetectionPostProcessDescriptor& descriptor,
+                                            const std::vector<armnn::ConstTensor>& constants,
+                                            const char* name = nullptr);
+
+    void SerializeDivisionLayer(const armnn::IConnectableLayer* layer,
+                                const char* name = nullptr);
+
+    void SerializeElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
+                                        const armnn::ElementwiseUnaryDescriptor& descriptor,
+                                        const char* name = nullptr);
+
+    ARMNN_DEPRECATED_MSG("Use VisitComparisonLayer instead")
+    void SerializeEqualLayer(const armnn::IConnectableLayer* layer, const char* name);
+
+    void SerializeFillLayer(const armnn::IConnectableLayer* layer,
+                            const armnn::FillDescriptor& fillDescriptor,
+                            const char* name = nullptr);
+
+    void SerializeFloorLayer(const armnn::IConnectableLayer *layer,
+                             const char *name = nullptr);
+
+    void SerializeFullyConnectedLayer(const armnn::IConnectableLayer* layer,
+                                      const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
+                                      const std::vector<armnn::ConstTensor>& constants,
+                                      const char* name = nullptr);
+
+    void SerializeGatherLayer(const armnn::IConnectableLayer* layer,
+                              const armnn::GatherDescriptor& gatherDescriptor,
+                              const char* name = nullptr);
+
+    ARMNN_DEPRECATED_MSG("Use VisitComparisonLayer instead")
+    void SerializeGreaterLayer(const armnn::IConnectableLayer* layer, const char* name = nullptr);
+
+    void SerializeInputLayer(const armnn::IConnectableLayer* layer,
+                             armnn::LayerBindingId id,
+                             const char* name = nullptr);
+
+    void SerializeInstanceNormalizationLayer(const armnn::IConnectableLayer* layer,
+                                         const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
+                                         const char* name = nullptr);
+
+    void SerializeL2NormalizationLayer(const armnn::IConnectableLayer* layer,
+                                       const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
+                                       const char* name = nullptr);
+
+    void SerializeLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
+                                     const armnn::LogicalBinaryDescriptor& descriptor,
+                                     const char* name = nullptr);
+
+    void SerializeLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
+                                  const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
+                                  const char* name = nullptr);
+
+    void SerializeLstmLayer(const armnn::IConnectableLayer* layer,
+                            const armnn::LstmDescriptor& descriptor,
+                            const std::vector<armnn::ConstTensor>& constants,
+                            const char* name = nullptr);
+
+    void SerializeMeanLayer(const armnn::IConnectableLayer* layer,
+                            const armnn::MeanDescriptor& descriptor,
+                            const char* name = nullptr);
+
+    void SerializeMinimumLayer(const armnn::IConnectableLayer* layer,
+                               const char* name = nullptr);
+
+    void SerializeMaximumLayer(const armnn::IConnectableLayer* layer,
+                               const char* name = nullptr);
+
+    void SerializeMergeLayer(const armnn::IConnectableLayer* layer,
+                             const char* name = nullptr);
+
+    ARMNN_DEPRECATED_MSG("Use VisitConcatLayer instead")
+    void SerializeMergerLayer(const armnn::IConnectableLayer* layer,
+                              const armnn::MergerDescriptor& mergerDescriptor,
+                              const char* name = nullptr);
+
+    void SerializeMultiplicationLayer(const armnn::IConnectableLayer* layer,
+                                      const char* name = nullptr);
+
+    void SerializeOutputLayer(const armnn::IConnectableLayer* layer,
+                              armnn::LayerBindingId id,
+                              const char* name = nullptr);
+
+    void SerializePadLayer(const armnn::IConnectableLayer* layer,
+                           const armnn::PadDescriptor& padDescriptor,
+                           const char* name = nullptr);
+
+    void SerializePermuteLayer(const armnn::IConnectableLayer* layer,
+                               const armnn::PermuteDescriptor& permuteDescriptor,
+                               const char* name = nullptr);
+
+    void SerializePooling2dLayer(const armnn::IConnectableLayer* layer,
+                                 const armnn::Pooling2dDescriptor& pooling2dDescriptor,
+                                 const char* name = nullptr);
+
+    void SerializePreluLayer(const armnn::IConnectableLayer* layer,
+                             const char* name = nullptr);
+
+    void SerializeQuantizeLayer(const armnn::IConnectableLayer* layer,
+                                const char* name = nullptr);
+
+    void SerializeQLstmLayer(const armnn::IConnectableLayer* layer,
+                             const armnn::QLstmDescriptor& descriptor,
+                             const std::vector<armnn::ConstTensor>& constants,
+                             const char* name = nullptr);
+
+    void SerializeQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
+                                     const std::vector<armnn::ConstTensor>& constants,
+                                     const char* name = nullptr);
+
+    void SerializeRankLayer(const armnn::IConnectableLayer* layer,
+                            const char* name = nullptr);
+
+    void SerializeReduceLayer(const armnn::IConnectableLayer* layer,
+                              const armnn::ReduceDescriptor& reduceDescriptor,
+                              const char* name = nullptr);
+
+    void SerializeReshapeLayer(const armnn::IConnectableLayer* layer,
+                               const armnn::ReshapeDescriptor& reshapeDescriptor,
+                               const char* name = nullptr);
+
+    void SerializeResizeLayer(const armnn::IConnectableLayer* layer,
+                              const armnn::ResizeDescriptor& resizeDescriptor,
+                              const char* name = nullptr);
+
+    ARMNN_DEPRECATED_MSG("Use VisitResizeLayer instead")
+    void SerializeResizeBilinearLayer(const armnn::IConnectableLayer* layer,
+                                      const armnn::ResizeBilinearDescriptor& resizeDescriptor,
+                                      const char* name = nullptr);
+
+    ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
+    void SerializeRsqrtLayer(const armnn::IConnectableLayer* layer,
+                             const char* name = nullptr);
+
+    void SerializeSliceLayer(const armnn::IConnectableLayer* layer,
+                             const armnn::SliceDescriptor& sliceDescriptor,
+                             const char* name = nullptr);
+
+    void SerializeSoftmaxLayer(const armnn::IConnectableLayer* layer,
+                               const armnn::SoftmaxDescriptor& softmaxDescriptor,
+                               const char* name = nullptr);
+
+    void SerializeSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
+                                      const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
+                                      const char* name = nullptr);
+
+    void SerializeSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
+                                    const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
+                                    const char* name = nullptr);
+
+    void SerializeNormalizationLayer(const armnn::IConnectableLayer* layer,
+                                     const armnn::NormalizationDescriptor& normalizationDescriptor,
+                                     const char* name = nullptr);
+
+    void SerializeSplitterLayer(const armnn::IConnectableLayer* layer,
+                                const armnn::ViewsDescriptor& viewsDescriptor,
+                                const char* name = nullptr);
+
+    void SerializeStandInLayer(const armnn::IConnectableLayer* layer,
+                               const armnn::StandInDescriptor& standInDescriptor,
+                               const char* name = nullptr);
+
+    void SerializeStackLayer(const armnn::IConnectableLayer* layer,
+                             const armnn::StackDescriptor& stackDescriptor,
+                             const char* name = nullptr);
+
+    void SerializeStridedSliceLayer(const armnn::IConnectableLayer* layer,
+                                    const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
+                                    const char* name = nullptr);
+
+    void SerializeSubtractionLayer(const armnn::IConnectableLayer* layer,
+                                   const char* name = nullptr);
+
+    void SerializeSwitchLayer(const armnn::IConnectableLayer* layer,
+                              const char* name = nullptr);
+
+    void SerializeTransposeConvolution2dLayer(const armnn::IConnectableLayer* layer,
+                                              const armnn::TransposeConvolution2dDescriptor& descriptor,
+                                              const std::vector<armnn::ConstTensor>& constants,
+                                              const char* name = nullptr);
+
+    void SerializeTransposeLayer(const armnn::IConnectableLayer* layer,
+                                 const armnn::TransposeDescriptor& descriptor,
+                                 const char* name = nullptr);
 };
 
 class ISerializer::SerializerImpl
 {
 public:
@@ -367,7 +362,7 @@
 private:
 
-    /// Visitor to contruct serialized network
-    SerializerVisitor m_SerializerVisitor;
+    /// Strategy to construct the serialized network
+    SerializerStrategy m_SerializerStrategy;
 };
 
 } //namespace armnnSerializer
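
Because every layer now funnels through a single callback, a client-side
analysis pass needs only one override. Below is a minimal sketch assuming just
the IStrategy interface declared above; LayerCounter and its map member are
illustrative names, not Arm NN API.

    #include <armnn/INetwork.hpp>
    #include <armnn/IStrategy.hpp>
    #include <armnn/utility/IgnoreUnused.hpp>
    #include <map>
    #include <vector>

    class LayerCounter : public armnn::IStrategy
    {
    public:
        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                             const armnn::BaseDescriptor& descriptor,
                             const std::vector<armnn::ConstTensor>& constants,
                             const char* name,
                             const armnn::LayerBindingId id) override
        {
            // Called once per layer; a strategy sees every layer type,
            // so filter (or, as here, bucket) on layer->GetType().
            armnn::IgnoreUnused(descriptor, constants, name, id);
            ++m_Counts[layer->GetType()];
        }

        std::map<armnn::LayerType, unsigned int> m_Counts;
    };

    // Usage: LayerCounter counter; network->ExecuteStrategy(counter);
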
diff --git a/src/armnnSerializer/test/ActivationSerializationTests.cpp b/src/armnnSerializer/test/ActivationSerializationTests.cpp
index 1645731..fbe1ae0 100644
--- a/src/armnnSerializer/test/ActivationSerializationTests.cpp
+++ b/src/armnnSerializer/test/ActivationSerializationTests.cpp
@@ -17,15 +17,20 @@
 
 BOOST_AUTO_TEST_SUITE(SerializerTests)
 
-class VerifyActivationName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
+class VerifyActivationName : public armnn::IStrategy
 {
 public:
-    void VisitActivationLayer(const armnn::IConnectableLayer* layer,
-                              const armnn::ActivationDescriptor& activationDescriptor,
-                              const char* name) override
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
     {
-        IgnoreUnused(layer, activationDescriptor);
-        BOOST_TEST(name == "activation");
+        IgnoreUnused(descriptor, constants, id);
+        if (layer->GetType() == armnn::LayerType::Activation)
+        {
+            BOOST_TEST(name == "activation");
+        }
     }
 };
 
@@ -67,7 +72,7 @@
     armnn::INetworkPtr deserializedNetwork = parser->CreateNetworkFromBinary(serializerVector);
 
     VerifyActivationName visitor;
-    deserializedNetwork->Accept(visitor);
+    deserializedNetwork->ExecuteStrategy(visitor);
 
     armnn::IRuntime::CreationOptions options; // default options
     armnn::IRuntimePtr run = armnn::IRuntime::Create(options);
diff --git a/src/armnnSerializer/test/ComparisonSerializationTests.cpp b/src/armnnSerializer/test/ComparisonSerializationTests.cpp
new file mode 100644
index 0000000..3aee9a7
--- /dev/null
+++ b/src/armnnSerializer/test/ComparisonSerializationTests.cpp
@@ -0,0 +1,123 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../Serializer.hpp"
+#include "SerializerTestUtils.hpp"
+
+#include <armnn/Descriptors.hpp>
+#include <armnn/INetwork.hpp>
+#include <armnn/IRuntime.hpp>
+#include <armnnDeserializer/IDeserializer.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+
+BOOST_AUTO_TEST_SUITE(SerializerTests)
+
+struct ComparisonModel
+{
+    ComparisonModel(const std::string& layerName,
+                    const armnn::TensorInfo& inputInfo,
+                    const armnn::TensorInfo& outputInfo,
+                    const armnn::ComparisonDescriptor& descriptor)
+            : m_network(armnn::INetwork::Create())
+    {
+        armnn::IConnectableLayer* const inputLayer0 = m_network->AddInputLayer(0);
+        armnn::IConnectableLayer* const inputLayer1 = m_network->AddInputLayer(1);
+        armnn::IConnectableLayer* const comparisonLayer = m_network->AddComparisonLayer(descriptor, layerName.c_str());
+        armnn::IConnectableLayer* const outputLayer = m_network->AddOutputLayer(0);
+
+        inputLayer0->GetOutputSlot(0).Connect(comparisonLayer->GetInputSlot(0));
+        inputLayer1->GetOutputSlot(0).Connect(comparisonLayer->GetInputSlot(1));
+        comparisonLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+        inputLayer0->GetOutputSlot(0).SetTensorInfo(inputInfo);
+        inputLayer1->GetOutputSlot(0).SetTensorInfo(inputInfo);
+        comparisonLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+    }
+
+    armnn::INetworkPtr m_network;
+};
+
+class ComparisonLayerVerifier : public LayerVerifierBase
+{
+public:
+    ComparisonLayerVerifier(const std::string& layerName,
+                            const std::vector<armnn::TensorInfo>& inputInfos,
+                            const std::vector<armnn::TensorInfo>& outputInfos,
+                            const armnn::ComparisonDescriptor& descriptor)
+            : LayerVerifierBase(layerName, inputInfos, outputInfos)
+            , m_Descriptor (descriptor) {}
+
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
+    {
+        armnn::IgnoreUnused(constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Input: break;
+            case armnn::LayerType::Output: break;
+            case armnn::LayerType::Comparison:
+            {
+                VerifyNameAndConnections(layer, name);
+                const armnn::ComparisonDescriptor& layerDescriptor =
+                        static_cast<const armnn::ComparisonDescriptor&>(descriptor);
+                BOOST_CHECK(layerDescriptor.m_Operation == m_Descriptor.m_Operation);
+                break;
+            }
+            default:
+            {
+                throw armnn::Exception("Unexpected layer type in Comparison test model");
+            }
+        }
+    }
+
+private:
+    armnn::ComparisonDescriptor m_Descriptor;
+};
+
+BOOST_AUTO_TEST_CASE(SerializeEqual)
+{
+    const std::string layerName("equal");
+
+    const armnn::TensorShape shape{2, 1, 2, 4};
+    const armnn::TensorInfo inputInfo  = armnn::TensorInfo(shape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputInfo = armnn::TensorInfo(shape, armnn::DataType::Boolean);
+
+    armnn::ComparisonDescriptor descriptor (armnn::ComparisonOperation::Equal);
+
+    ComparisonModel model(layerName, inputInfo, outputInfo, descriptor);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*model.m_network));
+    BOOST_CHECK(deserializedNetwork);
+
+    ComparisonLayerVerifier verifier(layerName, { inputInfo, inputInfo }, { outputInfo }, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
+}
+
+BOOST_AUTO_TEST_CASE(SerializeGreater)
+{
+    const std::string layerName("greater");
+
+    const armnn::TensorShape shape{2, 1, 2, 4};
+    const armnn::TensorInfo inputInfo  = armnn::TensorInfo(shape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputInfo = armnn::TensorInfo(shape, armnn::DataType::Boolean);
+
+    armnn::ComparisonDescriptor descriptor (armnn::ComparisonOperation::Greater);
+
+    ComparisonModel model(layerName, inputInfo, outputInfo, descriptor);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*model.m_network));
+    BOOST_CHECK(deserializedNetwork);
+
+    ComparisonLayerVerifier verifier(layerName, { inputInfo, inputInfo }, { outputInfo }, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
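
The tests above rely on two round-trip helpers whose definitions live in
SerializerTestUtils (added by this change). A plausible sketch of their shape
is shown here only so the tests read in isolation; this is not the actual
implementation.

    #include <armnn/INetwork.hpp>
    #include <armnnDeserializer/IDeserializer.hpp>
    #include <armnnSerializer/ISerializer.hpp>
    #include <cstdint>
    #include <sstream>
    #include <string>
    #include <vector>

    std::string SerializeNetwork(const armnn::INetwork& network)
    {
        // Serialize to an in-memory stream rather than a file.
        auto serializer = armnnSerializer::ISerializer::Create();
        serializer->Serialize(network);

        std::stringstream stream;
        serializer->SaveSerializedToStream(stream);
        return stream.str();
    }

    armnn::INetworkPtr DeserializeNetwork(const std::string& serializerString)
    {
        // Feed the raw bytes straight back into the deserializer.
        std::vector<std::uint8_t> data(serializerString.begin(), serializerString.end());
        return armnnDeserializer::IDeserializer::Create()->CreateNetworkFromBinary(data);
    }
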
diff --git a/src/armnnSerializer/test/LstmSerializationTests.cpp b/src/armnnSerializer/test/LstmSerializationTests.cpp
new file mode 100644
index 0000000..4705c0b
--- /dev/null
+++ b/src/armnnSerializer/test/LstmSerializationTests.cpp
@@ -0,0 +1,2199 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../Serializer.hpp"
+#include "SerializerTestUtils.hpp"
+
+#include <armnn/Descriptors.hpp>
+#include <armnn/INetwork.hpp>
+#include <armnn/IRuntime.hpp>
+#include <armnnDeserializer/IDeserializer.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/LstmParams.hpp>
+#include <armnn/QuantizedLstmParams.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+#include <fmt/format.h>
+
+
+BOOST_AUTO_TEST_SUITE(SerializerTests)
+
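+// ConstantVector2LstmInputParams rebuilds an LstmInputParams from the flat
+// constants vector handed to ExecuteStrategy. It relies on the constants
+// arriving in a fixed order: the nine mandatory weights and biases first,
+// then the optional CIFG, peephole, projection and layer-norm tensors
+// whenever the corresponding descriptor flag is enabled.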
+template<typename Descriptor>
+armnn::LstmInputParams ConstantVector2LstmInputParams(const std::vector<armnn::ConstTensor>& constants,
+                                                      Descriptor& descriptor)
+{
+    armnn::LstmInputParams lstmInputParams;
+    size_t i = 0;
+
+    // Insert the basic (mandatory) parameters
+    lstmInputParams.m_InputToForgetWeights     = &constants[i++];
+    lstmInputParams.m_InputToCellWeights       = &constants[i++];
+    lstmInputParams.m_InputToOutputWeights     = &constants[i++];
+    lstmInputParams.m_RecurrentToForgetWeights = &constants[i++];
+    lstmInputParams.m_RecurrentToCellWeights   = &constants[i++];
+    lstmInputParams.m_RecurrentToOutputWeights = &constants[i++];
+    lstmInputParams.m_ForgetGateBias           = &constants[i++];
+    lstmInputParams.m_CellBias                 = &constants[i++];
+    lstmInputParams.m_OutputGateBias           = &constants[i++];
+    if (!descriptor.m_CifgEnabled)
+    {
+        lstmInputParams.m_InputToInputWeights     = &constants[i++];
+        lstmInputParams.m_RecurrentToInputWeights = &constants[i++];
+        lstmInputParams.m_InputGateBias           = &constants[i++];
+    }
+
+    if (descriptor.m_PeepholeEnabled)
+    {
+        if (!descriptor.m_CifgEnabled)
+        {
+            lstmInputParams.m_CellToInputWeights = &constants[i++];
+        }
+        lstmInputParams.m_CellToForgetWeights = &constants[i++];
+        lstmInputParams.m_CellToOutputWeights = &constants[i++];
+    }
+
+    if (descriptor.m_ProjectionEnabled)
+    {
+        lstmInputParams.m_ProjectionWeights = &constants[i++];
+        lstmInputParams.m_ProjectionBias    = &constants[i++];
+    }
+
+    if (descriptor.m_LayerNormEnabled)
+    {
+        if (!descriptor.m_CifgEnabled)
+        {
+            lstmInputParams.m_InputLayerNormWeights = &constants[i++];
+        }
+        lstmInputParams.m_ForgetLayerNormWeights = &constants[i++];
+        lstmInputParams.m_CellLayerNormWeights   = &constants[i++];
+        lstmInputParams.m_OutputLayerNormWeights = &constants[i++];
+    }
+
+    return lstmInputParams;
+}
+
+// Works for Lstm and QLstm (QuantizedLstm uses different parameters)
+template<typename Descriptor>
+class VerifyLstmLayer : public LayerVerifierBaseWithDescriptor<Descriptor>
+{
+public:
+    VerifyLstmLayer(const std::string& layerName,
+                    const std::vector<armnn::TensorInfo>& inputInfos,
+                    const std::vector<armnn::TensorInfo>& outputInfos,
+                    const Descriptor& descriptor,
+                    const armnn::LstmInputParams& inputParams)
+        : LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor)
+        , m_InputParams(inputParams) {}
+
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
+    {
+        armnn::IgnoreUnused(id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Input: break;
+            case armnn::LayerType::Output: break;
+            case armnn::LayerType::Lstm:
+            {
+                this->VerifyNameAndConnections(layer, name);
+                const Descriptor& internalDescriptor = static_cast<const Descriptor&>(descriptor);
+                this->VerifyDescriptor(internalDescriptor);
+                armnn::LstmInputParams lstmParams = ConstantVector2LstmInputParams(constants, internalDescriptor);
+                VerifyInputParameters(lstmParams);
+                break;
+            }
+            case armnn::LayerType::QLstm:
+            {
+                this->VerifyNameAndConnections(layer, name);
+                const Descriptor& internalDescriptor = static_cast<const Descriptor&>(descriptor);
+                this->VerifyDescriptor(internalDescriptor);
+                armnn::LstmInputParams lstmParams = ConstantVector2LstmInputParams(constants, internalDescriptor);
+                VerifyInputParameters(lstmParams);
+                break;
+            }
+            default:
+            {
+                throw armnn::Exception("Unexpected layer type in Lstm test model");
+            }
+        }
+    }
+
+protected:
+    void VerifyInputParameters(const armnn::LstmInputParams& params)
+    {
+        this->VerifyConstTensors(
+            "m_InputToInputWeights", m_InputParams.m_InputToInputWeights, params.m_InputToInputWeights);
+        this->VerifyConstTensors(
+            "m_InputToForgetWeights", m_InputParams.m_InputToForgetWeights, params.m_InputToForgetWeights);
+        this->VerifyConstTensors(
+            "m_InputToCellWeights", m_InputParams.m_InputToCellWeights, params.m_InputToCellWeights);
+        this->VerifyConstTensors(
+            "m_InputToOutputWeights", m_InputParams.m_InputToOutputWeights, params.m_InputToOutputWeights);
+        this->VerifyConstTensors(
+            "m_RecurrentToInputWeights", m_InputParams.m_RecurrentToInputWeights, params.m_RecurrentToInputWeights);
+        this->VerifyConstTensors(
+            "m_RecurrentToForgetWeights", m_InputParams.m_RecurrentToForgetWeights, params.m_RecurrentToForgetWeights);
+        this->VerifyConstTensors(
+            "m_RecurrentToCellWeights", m_InputParams.m_RecurrentToCellWeights, params.m_RecurrentToCellWeights);
+        this->VerifyConstTensors(
+            "m_RecurrentToOutputWeights", m_InputParams.m_RecurrentToOutputWeights, params.m_RecurrentToOutputWeights);
+        this->VerifyConstTensors(
+            "m_CellToInputWeights", m_InputParams.m_CellToInputWeights, params.m_CellToInputWeights);
+        this->VerifyConstTensors(
+            "m_CellToForgetWeights", m_InputParams.m_CellToForgetWeights, params.m_CellToForgetWeights);
+        this->VerifyConstTensors(
+            "m_CellToOutputWeights", m_InputParams.m_CellToOutputWeights, params.m_CellToOutputWeights);
+        this->VerifyConstTensors(
+            "m_InputGateBias", m_InputParams.m_InputGateBias, params.m_InputGateBias);
+        this->VerifyConstTensors(
+            "m_ForgetGateBias", m_InputParams.m_ForgetGateBias, params.m_ForgetGateBias);
+        this->VerifyConstTensors(
+            "m_CellBias", m_InputParams.m_CellBias, params.m_CellBias);
+        this->VerifyConstTensors(
+            "m_OutputGateBias", m_InputParams.m_OutputGateBias, params.m_OutputGateBias);
+        this->VerifyConstTensors(
+            "m_ProjectionWeights", m_InputParams.m_ProjectionWeights, params.m_ProjectionWeights);
+        this->VerifyConstTensors(
+            "m_ProjectionBias", m_InputParams.m_ProjectionBias, params.m_ProjectionBias);
+        this->VerifyConstTensors(
+            "m_InputLayerNormWeights", m_InputParams.m_InputLayerNormWeights, params.m_InputLayerNormWeights);
+        this->VerifyConstTensors(
+            "m_ForgetLayerNormWeights", m_InputParams.m_ForgetLayerNormWeights, params.m_ForgetLayerNormWeights);
+        this->VerifyConstTensors(
+            "m_CellLayerNormWeights", m_InputParams.m_CellLayerNormWeights, params.m_CellLayerNormWeights);
+        this->VerifyConstTensors(
+            "m_OutputLayerNormWeights", m_InputParams.m_OutputLayerNormWeights, params.m_OutputLayerNormWeights);
+    }
+
+private:
+    armnn::LstmInputParams m_InputParams;
+};
+
+BOOST_AUTO_TEST_CASE(SerializeDeserializeLstmCifgPeepholeNoProjection)
+{
+    armnn::LstmDescriptor descriptor;
+    descriptor.m_ActivationFunc = 4;
+    descriptor.m_ClippingThresProj = 0.0f;
+    descriptor.m_ClippingThresCell = 0.0f;
+    descriptor.m_CifgEnabled = true; // CIFG is enabled, so the optional input gate (CIFG) parameters are omitted
+    descriptor.m_ProjectionEnabled = false;
+    descriptor.m_PeepholeEnabled = true;
+
+    const uint32_t batchSize = 1;
+    const uint32_t inputSize = 2;
+    const uint32_t numUnits = 4;
+    const uint32_t outputSize = numUnits;
+
+    armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32);
+    std::vector<float> inputToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
+    armnn::ConstTensor inputToForgetWeights(inputWeightsInfo1, inputToForgetWeightsData);
+
+    std::vector<float> inputToCellWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
+    armnn::ConstTensor inputToCellWeights(inputWeightsInfo1, inputToCellWeightsData);
+
+    std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
+    armnn::ConstTensor inputToOutputWeights(inputWeightsInfo1, inputToOutputWeightsData);
+
+    armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32);
+    std::vector<float> recurrentToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
+    armnn::ConstTensor recurrentToForgetWeights(inputWeightsInfo2, recurrentToForgetWeightsData);
+
+    std::vector<float> recurrentToCellWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
+    armnn::ConstTensor recurrentToCellWeights(inputWeightsInfo2, recurrentToCellWeightsData);
+
+    std::vector<float> recurrentToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
+    armnn::ConstTensor recurrentToOutputWeights(inputWeightsInfo2, recurrentToOutputWeightsData);
+
+    armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32);
+    std::vector<float> cellToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo3.GetNumElements());
+    armnn::ConstTensor cellToForgetWeights(inputWeightsInfo3, cellToForgetWeightsData);
+
+    std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo3.GetNumElements());
+    armnn::ConstTensor cellToOutputWeights(inputWeightsInfo3, cellToOutputWeightsData);
+
+    std::vector<float> forgetGateBiasData(numUnits, 1.0f);
+    armnn::ConstTensor forgetGateBias(inputWeightsInfo3, forgetGateBiasData);
+
+    std::vector<float> cellBiasData(numUnits, 0.0f);
+    armnn::ConstTensor cellBias(inputWeightsInfo3, cellBiasData);
+
+    std::vector<float> outputGateBiasData(numUnits, 0.0f);
+    armnn::ConstTensor outputGateBias(inputWeightsInfo3, outputGateBiasData);
+
+    armnn::LstmInputParams params;
+    params.m_InputToForgetWeights = &inputToForgetWeights;
+    params.m_InputToCellWeights = &inputToCellWeights;
+    params.m_InputToOutputWeights = &inputToOutputWeights;
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+    params.m_ForgetGateBias = &forgetGateBias;
+    params.m_CellBias = &cellBias;
+    params.m_OutputGateBias = &outputGateBias;
+    params.m_CellToForgetWeights = &cellToForgetWeights;
+    params.m_CellToOutputWeights = &cellToOutputWeights;
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer   = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(2);
+    const std::string layerName("lstm");
+    armnn::IConnectableLayer* const lstmLayer = network->AddLstmLayer(descriptor, params, layerName.c_str());
+    armnn::IConnectableLayer* const scratchBuffer  = network->AddOutputLayer(0);
+    armnn::IConnectableLayer* const outputStateOut  = network->AddOutputLayer(1);
+    armnn::IConnectableLayer* const cellStateOut  = network->AddOutputLayer(2);
+    armnn::IConnectableLayer* const outputLayer  = network->AddOutputLayer(3);
+
+    // connect up
+    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32);
+    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits}, armnn::DataType::Float32);
+    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32);
+    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 3 }, armnn::DataType::Float32);
+
+    inputLayer->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(0));
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+
+    outputStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(1));
+    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateTensorInfo);
+
+    cellStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(2));
+    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
+
+    lstmLayer->GetOutputSlot(0).Connect(scratchBuffer->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(0).SetTensorInfo(lstmTensorInfoScratchBuff);
+
+    lstmLayer->GetOutputSlot(1).Connect(outputStateOut->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(1).SetTensorInfo(outputStateTensorInfo);
+
+    lstmLayer->GetOutputSlot(2).Connect(cellStateOut->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(2).SetTensorInfo(cellStateTensorInfo);
+
+    lstmLayer->GetOutputSlot(3).Connect(outputLayer->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(3).SetTensorInfo(outputStateTensorInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    VerifyLstmLayer<armnn::LstmDescriptor> checker(
+        layerName,
+        {inputTensorInfo, outputStateTensorInfo, cellStateTensorInfo},
+        {lstmTensorInfoScratchBuff, outputStateTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
+        descriptor,
+        params);
+    deserializedNetwork->ExecuteStrategy(checker);
+}
+
+BOOST_AUTO_TEST_CASE(SerializeDeserializeLstmNoCifgWithPeepholeAndProjection)
+{
+    armnn::LstmDescriptor descriptor;
+    descriptor.m_ActivationFunc = 4;
+    descriptor.m_ClippingThresProj = 0.0f;
+    descriptor.m_ClippingThresCell = 0.0f;
+    descriptor.m_CifgEnabled = false; // CIFG is disabled, so the input gate (CIFG) parameters must be provided
+    descriptor.m_ProjectionEnabled = true;
+    descriptor.m_PeepholeEnabled = true;
+
+    const uint32_t batchSize = 2;
+    const uint32_t inputSize = 5;
+    const uint32_t numUnits = 20;
+    const uint32_t outputSize = 16;
+
+    armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
+    std::vector<float> inputToInputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
+    armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
+
+    std::vector<float> inputToForgetWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
+    armnn::ConstTensor inputToForgetWeights(tensorInfo20x5, inputToForgetWeightsData);
+
+    std::vector<float> inputToCellWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
+    armnn::ConstTensor inputToCellWeights(tensorInfo20x5, inputToCellWeightsData);
+
+    std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
+    armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
+
+    armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
+    std::vector<float> inputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
+
+    std::vector<float> forgetGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor forgetGateBias(tensorInfo20, forgetGateBiasData);
+
+    std::vector<float> cellBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor cellBias(tensorInfo20, cellBiasData);
+
+    std::vector<float> outputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
+
+    armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
+    std::vector<float> recurrentToInputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
+    armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
+
+    std::vector<float> recurrentToForgetWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
+    armnn::ConstTensor recurrentToForgetWeights(tensorInfo20x16, recurrentToForgetWeightsData);
+
+    std::vector<float> recurrentToCellWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
+    armnn::ConstTensor recurrentToCellWeights(tensorInfo20x16, recurrentToCellWeightsData);
+
+    std::vector<float> recurrentToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
+    armnn::ConstTensor recurrentToOutputWeights(tensorInfo20x16, recurrentToOutputWeightsData);
+
+    std::vector<float> cellToInputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor cellToInputWeights(tensorInfo20, cellToInputWeightsData);
+
+    std::vector<float> cellToForgetWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor cellToForgetWeights(tensorInfo20, cellToForgetWeightsData);
+
+    std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor cellToOutputWeights(tensorInfo20,  cellToOutputWeightsData);
+
+    armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
+    std::vector<float> projectionWeightsData = GenerateRandomData<float>(tensorInfo16x20.GetNumElements());
+    armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
+
+    armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
+    std::vector<float> projectionBiasData(outputSize, 0.f);
+    armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
+
+    armnn::LstmInputParams params;
+    params.m_InputToForgetWeights = &inputToForgetWeights;
+    params.m_InputToCellWeights = &inputToCellWeights;
+    params.m_InputToOutputWeights = &inputToOutputWeights;
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+    params.m_ForgetGateBias = &forgetGateBias;
+    params.m_CellBias = &cellBias;
+    params.m_OutputGateBias = &outputGateBias;
+
+    // additional params because: descriptor.m_CifgEnabled = false
+    params.m_InputToInputWeights = &inputToInputWeights;
+    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
+    params.m_CellToInputWeights = &cellToInputWeights;
+    params.m_InputGateBias = &inputGateBias;
+
+    // additional params because: descriptor.m_ProjectionEnabled = true
+    params.m_ProjectionWeights = &projectionWeights;
+    params.m_ProjectionBias = &projectionBias;
+
+    // additional params because: descriptor.m_PeepholeEnabled = true
+    params.m_CellToForgetWeights = &cellToForgetWeights;
+    params.m_CellToOutputWeights = &cellToOutputWeights;
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer   = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(2);
+    const std::string layerName("lstm");
+    armnn::IConnectableLayer* const lstmLayer = network->AddLstmLayer(descriptor, params, layerName.c_str());
+    armnn::IConnectableLayer* const scratchBuffer  = network->AddOutputLayer(0);
+    armnn::IConnectableLayer* const outputStateOut  = network->AddOutputLayer(1);
+    armnn::IConnectableLayer* const cellStateOut  = network->AddOutputLayer(2);
+    armnn::IConnectableLayer* const outputLayer  = network->AddOutputLayer(3);
+
+    // connect up
+    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32);
+    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits}, armnn::DataType::Float32);
+    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32);
+    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 4 }, armnn::DataType::Float32);
+
+    inputLayer->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(0));
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+
+    outputStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(1));
+    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateTensorInfo);
+
+    cellStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(2));
+    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
+
+    lstmLayer->GetOutputSlot(0).Connect(scratchBuffer->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(0).SetTensorInfo(lstmTensorInfoScratchBuff);
+
+    lstmLayer->GetOutputSlot(1).Connect(outputStateOut->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(1).SetTensorInfo(outputStateTensorInfo);
+
+    lstmLayer->GetOutputSlot(2).Connect(cellStateOut->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(2).SetTensorInfo(cellStateTensorInfo);
+
+    lstmLayer->GetOutputSlot(3).Connect(outputLayer->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(3).SetTensorInfo(outputStateTensorInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
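+    // VerifyLstmLayer is an IStrategy implementation; ExecuteStrategy visits each layer of the deserialized
+    // network and the checker compares the LSTM layer's name, input/output tensor infos, descriptor and
+    // constant input parameters against the expected values set up above.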
+    VerifyLstmLayer<armnn::LstmDescriptor> checker(
+        layerName,
+        {inputTensorInfo, outputStateTensorInfo, cellStateTensorInfo},
+        {lstmTensorInfoScratchBuff, outputStateTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
+        descriptor,
+        params);
+    deserializedNetwork->ExecuteStrategy(checker);
+}
+
+BOOST_AUTO_TEST_CASE(SerializeDeserializeLstmNoCifgWithPeepholeWithProjectionWithLayerNorm)
+{
+    armnn::LstmDescriptor descriptor;
+    descriptor.m_ActivationFunc = 4;
+    descriptor.m_ClippingThresProj = 0.0f;
+    descriptor.m_ClippingThresCell = 0.0f;
+    descriptor.m_CifgEnabled = false; // when true, the optional CIFG parameters do not need to be set
+    descriptor.m_ProjectionEnabled = true;
+    descriptor.m_PeepholeEnabled = true;
+    descriptor.m_LayerNormEnabled = true;
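+    // enabling layer normalization requires four additional per-gate normalization weight tensors,
+    // set on LstmInputParams below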
+
+    const uint32_t batchSize = 2;
+    const uint32_t inputSize = 5;
+    const uint32_t numUnits = 20;
+    const uint32_t outputSize = 16;
+
+    armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
+    std::vector<float> inputToInputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
+    armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
+
+    std::vector<float> inputToForgetWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
+    armnn::ConstTensor inputToForgetWeights(tensorInfo20x5, inputToForgetWeightsData);
+
+    std::vector<float> inputToCellWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
+    armnn::ConstTensor inputToCellWeights(tensorInfo20x5, inputToCellWeightsData);
+
+    std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
+    armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
+
+    armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
+    std::vector<float> inputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
+
+    std::vector<float> forgetGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor forgetGateBias(tensorInfo20, forgetGateBiasData);
+
+    std::vector<float> cellBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor cellBias(tensorInfo20, cellBiasData);
+
+    std::vector<float> outputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
+
+    armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
+    std::vector<float> recurrentToInputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
+    armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
+
+    std::vector<float> recurrentToForgetWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
+    armnn::ConstTensor recurrentToForgetWeights(tensorInfo20x16, recurrentToForgetWeightsData);
+
+    std::vector<float> recurrentToCellWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
+    armnn::ConstTensor recurrentToCellWeights(tensorInfo20x16, recurrentToCellWeightsData);
+
+    std::vector<float> recurrentToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
+    armnn::ConstTensor recurrentToOutputWeights(tensorInfo20x16, recurrentToOutputWeightsData);
+
+    std::vector<float> cellToInputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor cellToInputWeights(tensorInfo20, cellToInputWeightsData);
+
+    std::vector<float> cellToForgetWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor cellToForgetWeights(tensorInfo20, cellToForgetWeightsData);
+
+    std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
+
+    armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
+    std::vector<float> projectionWeightsData = GenerateRandomData<float>(tensorInfo16x20.GetNumElements());
+    armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
+
+    armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
+    std::vector<float> projectionBiasData(outputSize, 0.f);
+    armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
+
+    std::vector<float> inputLayerNormWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor inputLayerNormWeights(tensorInfo20, inputLayerNormWeightsData);
+
+    std::vector<float> forgetLayerNormWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor forgetLayerNormWeights(tensorInfo20, forgetLayerNormWeightsData);
+
+    std::vector<float> cellLayerNormWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor cellLayerNormWeights(tensorInfo20, cellLayerNormWeightsData);
+
+    std::vector<float> outLayerNormWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor outLayerNormWeights(tensorInfo20, outLayerNormWeightsData);
+
+    armnn::LstmInputParams params;
+    params.m_InputToForgetWeights = &inputToForgetWeights;
+    params.m_InputToCellWeights = &inputToCellWeights;
+    params.m_InputToOutputWeights = &inputToOutputWeights;
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+    params.m_ForgetGateBias = &forgetGateBias;
+    params.m_CellBias = &cellBias;
+    params.m_OutputGateBias = &outputGateBias;
+
+    // additional params because: descriptor.m_CifgEnabled = false
+    params.m_InputToInputWeights = &inputToInputWeights;
+    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
+    params.m_CellToInputWeights = &cellToInputWeights;
+    params.m_InputGateBias = &inputGateBias;
+
+    // additional params because: descriptor.m_ProjectionEnabled = true
+    params.m_ProjectionWeights = &projectionWeights;
+    params.m_ProjectionBias = &projectionBias;
+
+    // additional params because: descriptor.m_PeepholeEnabled = true
+    params.m_CellToForgetWeights = &cellToForgetWeights;
+    params.m_CellToOutputWeights = &cellToOutputWeights;
+
+    // additional params because: descriptor.m_LayerNormEnabled = true
+    params.m_InputLayerNormWeights = &inputLayerNormWeights;
+    params.m_ForgetLayerNormWeights = &forgetLayerNormWeights;
+    params.m_CellLayerNormWeights = &cellLayerNormWeights;
+    params.m_OutputLayerNormWeights = &outLayerNormWeights;
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(2);
+    const std::string layerName("lstm");
+    armnn::IConnectableLayer* const lstmLayer = network->AddLstmLayer(descriptor, params, layerName.c_str());
+    armnn::IConnectableLayer* const scratchBuffer = network->AddOutputLayer(0);
+    armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(1);
+    armnn::IConnectableLayer* const cellStateOut = network->AddOutputLayer(2);
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(3);
+
+    // connect up
+    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32);
+    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits }, armnn::DataType::Float32);
+    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32);
+    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 4 }, armnn::DataType::Float32);
+
+    inputLayer->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(0));
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+
+    outputStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(1));
+    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateTensorInfo);
+
+    cellStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(2));
+    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
+
+    lstmLayer->GetOutputSlot(0).Connect(scratchBuffer->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(0).SetTensorInfo(lstmTensorInfoScratchBuff);
+
+    lstmLayer->GetOutputSlot(1).Connect(outputStateOut->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(1).SetTensorInfo(outputStateTensorInfo);
+
+    lstmLayer->GetOutputSlot(2).Connect(cellStateOut->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(2).SetTensorInfo(cellStateTensorInfo);
+
+    lstmLayer->GetOutputSlot(3).Connect(outputLayer->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(3).SetTensorInfo(outputStateTensorInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    VerifyLstmLayer<armnn::LstmDescriptor> checker(
+            layerName,
+            {inputTensorInfo, outputStateTensorInfo, cellStateTensorInfo},
+            {lstmTensorInfoScratchBuff, outputStateTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
+            descriptor,
+            params);
+    deserializedNetwork->ExecuteStrategy(checker);
+}
+
+BOOST_AUTO_TEST_CASE(EnsureLstmLayersBackwardCompatibility)
+{
+    // The hex data below is a FlatBuffer containing an LSTM layer with CIFG disabled and with peephole and
+    // projection enabled. The data was captured before the additional layer normalization parameters were
+    // added to the LSTM serializer, so it can be used to test that an LSTM model with the old parameter
+    // configuration can still be loaded.
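+    // Deserializing this buffer therefore exercises the path where the optional layer normalization inputs
+    // are absent from the serialized model.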
+    const std::vector<uint8_t> lstmNoCifgWithPeepholeAndProjectionModel =
+    {
+        0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x10, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x0A, 0x00,
+        0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+        0xDC, 0x29, 0x00, 0x00, 0x38, 0x29, 0x00, 0x00, 0xB4, 0x28, 0x00, 0x00, 0x94, 0x01, 0x00, 0x00, 0x3C, 0x01,
+        0x00, 0x00, 0xE0, 0x00, 0x00, 0x00, 0x84, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00,
+        0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x70, 0xD6, 0xFF, 0xFF,
+        0x00, 0x00, 0x00, 0x0B, 0x04, 0x00, 0x00, 0x00, 0x06, 0xD7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x88, 0xD7,
+        0xFF, 0xFF, 0x08, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF6, 0xD6, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x00,
+        0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0xE8, 0xD7, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xC8, 0xD6, 0xFF, 0xFF, 0x00, 0x00,
+        0x00, 0x0B, 0x04, 0x00, 0x00, 0x00, 0x5E, 0xD7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0xE0, 0xD7, 0xFF, 0xFF,
+        0x08, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x4E, 0xD7, 0xFF, 0xFF, 0x06, 0x00, 0x00, 0x00, 0x10, 0x00,
+        0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0xD8,
+        0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0xD7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x0B,
+        0x04, 0x00, 0x00, 0x00, 0xB6, 0xD7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x38, 0xD8, 0xFF, 0xFF, 0x08, 0x00,
+        0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xA6, 0xD7, 0xFF, 0xFF, 0x05, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+        0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0xD8, 0xFF, 0xFF,
+        0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x78, 0xD7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x0B, 0x04, 0x00,
+        0x00, 0x00, 0x0E, 0xD8, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x16, 0xD8, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00,
+        0xFA, 0xD7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00,
+        0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+        0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xD8, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x6C, 0xD8, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x23, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00,
+        0x12, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x0A, 0x00, 0x00, 0x00, 0xE0, 0x25, 0x00, 0x00, 0xD0, 0x25,
+        0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x00, 0x48, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00,
+        0x10, 0x00, 0x14, 0x00, 0x18, 0x00, 0x1C, 0x00, 0x20, 0x00, 0x24, 0x00, 0x28, 0x00, 0x2C, 0x00, 0x30, 0x00,
+        0x34, 0x00, 0x38, 0x00, 0x3C, 0x00, 0x40, 0x00, 0x44, 0x00, 0x26, 0x00, 0x00, 0x00, 0xC4, 0x23, 0x00, 0x00,
+        0xF8, 0x21, 0x00, 0x00, 0x2C, 0x20, 0x00, 0x00, 0xF0, 0x1A, 0x00, 0x00, 0xB4, 0x15, 0x00, 0x00, 0x78, 0x10,
+        0x00, 0x00, 0xF0, 0x0F, 0x00, 0x00, 0x68, 0x0F, 0x00, 0x00, 0xE0, 0x0E, 0x00, 0x00, 0x14, 0x0D, 0x00, 0x00,
+        0xD8, 0x07, 0x00, 0x00, 0x50, 0x07, 0x00, 0x00, 0xC8, 0x06, 0x00, 0x00, 0x8C, 0x01, 0x00, 0x00, 0x14, 0x01,
+        0x00, 0x00, 0x8C, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xEE, 0xD7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03,
+        0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xFE, 0xD8, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5A, 0xD8, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01,
+        0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x72, 0xD8,
+        0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x82, 0xD9, 0xFF, 0xFF,
+        0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xD8,
+        0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+        0x14, 0x00, 0x00, 0x00, 0xF6, 0xD8, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x54, 0x00, 0x00, 0x00, 0x04, 0x00,
+        0x00, 0x00, 0x06, 0xDA, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0xD9, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x6A, 0xD9, 0xFF, 0xFF, 0x00, 0x00,
+        0x00, 0x03, 0x14, 0x05, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7A, 0xDA, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00,
+        0x40, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xDE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0xA2, 0xDE,
+        0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xB2, 0xDF, 0xFF, 0xFF,
+        0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0xDF,
+        0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+        0x14, 0x00, 0x00, 0x00, 0x26, 0xDF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00,
+        0x00, 0x00, 0x36, 0xE0, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x92, 0xDF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0xAA, 0xDF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03,
+        0x14, 0x05, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xBA, 0xE0, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x40, 0x01,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0xC6, 0xE4, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xE2, 0xE4, 0xFF, 0xFF,
+        0x00, 0x00, 0x00, 0x03, 0xA4, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xF2, 0xE5, 0xFF, 0xFF, 0x04, 0x00,
+        0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0xE6, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01,
+        0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x05, 0x00,
+        0x00, 0x00, 0xAA, 0xE6, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+        0xBA, 0xE7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x16, 0xE7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x2E, 0xE7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00,
+        0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x3E, 0xE8, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9A, 0xE7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0xB2, 0xE7, 0xFF, 0xFF,
+        0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xC2, 0xE8, 0xFF, 0xFF, 0x04, 0x00,
+        0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E, 0xE8, 0xFF, 0xFF,
+        0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00,
+        0x00, 0x00, 0x36, 0xE8, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x14, 0x05, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+        0x46, 0xE9, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x40, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0xED, 0xFF, 0xFF,
+        0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00,
+        0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x6E, 0xED, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x14, 0x05, 0x00, 0x00,
+        0x04, 0x00, 0x00, 0x00, 0x7E, 0xEE, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x40, 0x01, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x8A, 0xF2, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+        0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xA6, 0xF2, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03,
+        0x14, 0x05, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xB6, 0xF3, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x40, 0x01,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0xC2, 0xF7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xDE, 0xF7, 0xFF, 0xFF,
+        0x00, 0x00, 0x00, 0x03, 0xA4, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xEE, 0xF8, 0xFF, 0xFF, 0x04, 0x00,
+        0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xF9, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01,
+        0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x05, 0x00,
+        0x00, 0x00, 0xA6, 0xF9, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0xA4, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+        0xB6, 0xFA, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0xFB,
+        0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+        0x14, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x6E, 0xFB, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0xA4, 0x01,
+        0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7E, 0xFC, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x1A, 0xFD, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x10, 0x00, 0x0C, 0x00,
+        0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x07, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x01, 0x01, 0x04, 0x00, 0x00, 0x00, 0x2E, 0xFE, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+        0x22, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6C, 0x73,
+        0x74, 0x6D, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xEC, 0x00, 0x00, 0x00, 0xD0, 0x00, 0x00, 0x00,
+        0xB4, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, 0x00, 0x30, 0x00,
+        0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x14, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+        0xA6, 0xFD, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+        0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x3C, 0xFF, 0xFF, 0xFF, 0x02, 0x00, 0x00, 0x00,
+        0x04, 0x00, 0x00, 0x00, 0xCE, 0xFD, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x64, 0xFF, 0xFF, 0xFF,
+        0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xF6, 0xFD, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+        0xB4, 0xFE, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x1A, 0xFE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00,
+        0xF0, 0xFF, 0xFF, 0xFF, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+        0x10, 0x00, 0x04, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xFE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x09, 0x04, 0x00, 0x00, 0x00,
+        0x7E, 0xFF, 0xFF, 0xFF, 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x04, 0x00, 0x08, 0x00, 0x08, 0x00,
+        0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x76, 0xFF, 0xFF, 0xFF, 0x02, 0x00, 0x00, 0x00,
+        0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+        0x68, 0xFF, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0xCE, 0xFE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+        0x08, 0x00, 0x0E, 0x00, 0x07, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x0C, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+        0x08, 0x00, 0x0E, 0x00, 0x04, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x01, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x18, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x10, 0x00, 0x14, 0x00,
+        0x0E, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00,
+        0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x01, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00,
+        0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6E, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x08, 0x00,
+        0x0C, 0x00, 0x07, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x04, 0x00, 0x00, 0x00,
+        0xF6, 0xFF, 0xFF, 0xFF, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x0A, 0x00, 0x04, 0x00, 0x06, 0x00,
+        0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00,
+        0x0C, 0x00, 0x10, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00,
+        0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x01, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00,
+        0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x10, 0x00, 0x08, 0x00, 0x07, 0x00, 0x0C, 0x00,
+        0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+        0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00
+    };
+
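+    // Deserialize the raw FlatBuffer blob captured above via the DeserializeNetwork test helper.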
+    armnn::INetworkPtr deserializedNetwork =
+        DeserializeNetwork(std::string(lstmNoCifgWithPeepholeAndProjectionModel.begin(),
+                                       lstmNoCifgWithPeepholeAndProjectionModel.end()));
+
+    BOOST_CHECK(deserializedNetwork);
+
+    // Generate the same model parameters that were used to serialize the model (layer norm is not specified)
+    armnn::LstmDescriptor descriptor;
+    descriptor.m_ActivationFunc    = 4; // 4 selects TanH in the LstmDescriptor activation encoding
+    descriptor.m_ClippingThresProj = 0.0f;
+    descriptor.m_ClippingThresCell = 0.0f;
+    descriptor.m_CifgEnabled       = false;
+    descriptor.m_ProjectionEnabled = true;
+    descriptor.m_PeepholeEnabled   = true;
+
+    const uint32_t batchSize  = 2u;
+    const uint32_t inputSize  = 5u;
+    const uint32_t numUnits   = 20u;
+    const uint32_t outputSize = 16u;
+
+    armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
+    std::vector<float> inputToInputWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
+    armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
+
+    std::vector<float> inputToForgetWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
+    armnn::ConstTensor inputToForgetWeights(tensorInfo20x5, inputToForgetWeightsData);
+
+    std::vector<float> inputToCellWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
+    armnn::ConstTensor inputToCellWeights(tensorInfo20x5, inputToCellWeightsData);
+
+    std::vector<float> inputToOutputWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
+    armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
+
+    armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
+    std::vector<float> inputGateBiasData(tensorInfo20.GetNumElements(), 0.0f);
+    armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
+
+    std::vector<float> forgetGateBiasData(tensorInfo20.GetNumElements(), 0.0f);
+    armnn::ConstTensor forgetGateBias(tensorInfo20, forgetGateBiasData);
+
+    std::vector<float> cellBiasData(tensorInfo20.GetNumElements(), 0.0f);
+    armnn::ConstTensor cellBias(tensorInfo20, cellBiasData);
+
+    std::vector<float> outputGateBiasData(tensorInfo20.GetNumElements(), 0.0f);
+    armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
+
+    armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
+    std::vector<float> recurrentToInputWeightsData(tensorInfo20x16.GetNumElements(), 0.0f);
+    armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
+
+    std::vector<float> recurrentToForgetWeightsData(tensorInfo20x16.GetNumElements(), 0.0f);
+    armnn::ConstTensor recurrentToForgetWeights(tensorInfo20x16, recurrentToForgetWeightsData);
+
+    std::vector<float> recurrentToCellWeightsData(tensorInfo20x16.GetNumElements(), 0.0f);
+    armnn::ConstTensor recurrentToCellWeights(tensorInfo20x16, recurrentToCellWeightsData);
+
+    std::vector<float> recurrentToOutputWeightsData(tensorInfo20x16.GetNumElements(), 0.0f);
+    armnn::ConstTensor recurrentToOutputWeights(tensorInfo20x16, recurrentToOutputWeightsData);
+
+    std::vector<float> cellToInputWeightsData(tensorInfo20.GetNumElements(), 0.0f);
+    armnn::ConstTensor cellToInputWeights(tensorInfo20, cellToInputWeightsData);
+
+    std::vector<float> cellToForgetWeightsData(tensorInfo20.GetNumElements(), 0.0f);
+    armnn::ConstTensor cellToForgetWeights(tensorInfo20, cellToForgetWeightsData);
+
+    std::vector<float> cellToOutputWeightsData(tensorInfo20.GetNumElements(), 0.0f);
+    armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
+
+    armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
+    std::vector<float> projectionWeightsData(tensorInfo16x20.GetNumElements(), 0.0f);
+    armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
+
+    armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
+    std::vector<float> projectionBiasData(tensorInfo16.GetNumElements(), 0.0f);
+    armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
+
+    armnn::LstmInputParams params;
+    params.m_InputToForgetWeights     = &inputToForgetWeights;
+    params.m_InputToCellWeights       = &inputToCellWeights;
+    params.m_InputToOutputWeights     = &inputToOutputWeights;
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+    params.m_ForgetGateBias           = &forgetGateBias;
+    params.m_CellBias                 = &cellBias;
+    params.m_OutputGateBias           = &outputGateBias;
+
+    // additional params because: descriptor.m_CifgEnabled = false
+    params.m_InputToInputWeights      = &inputToInputWeights;
+    params.m_RecurrentToInputWeights  = &recurrentToInputWeights;
+    params.m_CellToInputWeights       = &cellToInputWeights;
+    params.m_InputGateBias            = &inputGateBias;
+
+    // additional params because: descriptor.m_ProjectionEnabled = true
+    params.m_ProjectionWeights        = &projectionWeights;
+    params.m_ProjectionBias           = &projectionBias;
+
+    // additional params because: descriptor.m_PeepholeEnabled = true
+    params.m_CellToForgetWeights      = &cellToForgetWeights;
+    params.m_CellToOutputWeights      = &cellToOutputWeights;
+
+    const std::string layerName("lstm");
+    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32);
+    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits}, armnn::DataType::Float32);
+    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32);
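+    // With CIFG disabled the scratch buffer holds all four gate results, hence numUnits * 4
+    // (it would be numUnits * 3 with CIFG enabled)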
+    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 4 }, armnn::DataType::Float32);
+
+    VerifyLstmLayer<armnn::LstmDescriptor> checker(
+            layerName,
+            {inputTensorInfo, outputStateTensorInfo, cellStateTensorInfo},
+            {lstmTensorInfoScratchBuff, outputStateTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
+            descriptor,
+            params);
+    deserializedNetwork->ExecuteStrategy(checker);
+}
+
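+// Rebuilds QuantizedLstmInputParams from the flat constants vector that ExecuteStrategy
+// receives under the unified IStrategy interface. The sequential indexing below relies on
+// the constants arriving in exactly this order: input-to-* weights, recurrent-to-* weights,
+// then the four gate biases.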
+armnn::QuantizedLstmInputParams ConstantsVector2QuantizedLstmInputParams(
+        const std::vector<armnn::ConstTensor>& constants)
+{
+    armnn::QuantizedLstmInputParams params;
+
+    // index for constants vector
+    size_t i = 0;
+
+    // Get input parameters
+    params.m_InputToInputWeights  = &constants[i++];
+    params.m_InputToForgetWeights = &constants[i++];
+    params.m_InputToCellWeights   = &constants[i++];
+    params.m_InputToOutputWeights = &constants[i++];
+
+    params.m_RecurrentToInputWeights  = &constants[i++];
+    params.m_RecurrentToForgetWeights = &constants[i++];
+    params.m_RecurrentToCellWeights   = &constants[i++];
+    params.m_RecurrentToOutputWeights = &constants[i++];
+
+    params.m_InputGateBias  = &constants[i++];
+    params.m_ForgetGateBias = &constants[i++];
+    params.m_CellBias       = &constants[i++];
+    params.m_OutputGateBias = &constants[i++];
+
+    return params;
+}
+
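+// Strategy-based verifier for QuantizedLstm layers: under the unified IStrategy interface
+// the layer's constants arrive as a flat ConstTensor vector rather than as typed visitor
+// arguments, so ExecuteStrategy maps them back with the helper above before verifying.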
+class VerifyQuantizedLstmLayer : public LayerVerifierBase
+{
+public:
+    VerifyQuantizedLstmLayer(const std::string& layerName,
+                             const std::vector<armnn::TensorInfo>& inputInfos,
+                             const std::vector<armnn::TensorInfo>& outputInfos,
+                             const armnn::QuantizedLstmInputParams& inputParams)
+        : LayerVerifierBase(layerName, inputInfos, outputInfos), m_InputParams(inputParams) {}
+
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
+    {
+        armnn::IgnoreUnused(descriptor, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Input: break;
+            case armnn::LayerType::Output: break;
+            case armnn::LayerType::QuantizedLstm:
+            {
+                VerifyNameAndConnections(layer, name);
+                armnn::QuantizedLstmInputParams params = ConstantsVector2QuantizedLstmInputParams(constants);
+                VerifyInputParameters(params);
+                break;
+            }
+            default:
+            {
+                throw armnn::Exception(fmt::format("Unexpected layer type in QuantizedLstm test model: {}",
+                                                   layer->GetName()));
+            }
+        }
+    }
+
+protected:
+    void VerifyInputParameters(const armnn::QuantizedLstmInputParams& params)
+    {
+        VerifyConstTensors("m_InputToInputWeights",
+                           m_InputParams.m_InputToInputWeights, params.m_InputToInputWeights);
+        VerifyConstTensors("m_InputToForgetWeights",
+                           m_InputParams.m_InputToForgetWeights, params.m_InputToForgetWeights);
+        VerifyConstTensors("m_InputToCellWeights",
+                           m_InputParams.m_InputToCellWeights, params.m_InputToCellWeights);
+        VerifyConstTensors("m_InputToOutputWeights",
+                           m_InputParams.m_InputToOutputWeights, params.m_InputToOutputWeights);
+        VerifyConstTensors("m_RecurrentToInputWeights",
+                           m_InputParams.m_RecurrentToInputWeights, params.m_RecurrentToInputWeights);
+        VerifyConstTensors("m_RecurrentToForgetWeights",
+                           m_InputParams.m_RecurrentToForgetWeights, params.m_RecurrentToForgetWeights);
+        VerifyConstTensors("m_RecurrentToCellWeights",
+                           m_InputParams.m_RecurrentToCellWeights, params.m_RecurrentToCellWeights);
+        VerifyConstTensors("m_RecurrentToOutputWeights",
+                           m_InputParams.m_RecurrentToOutputWeights, params.m_RecurrentToOutputWeights);
+        VerifyConstTensors("m_InputGateBias",
+                           m_InputParams.m_InputGateBias, params.m_InputGateBias);
+        VerifyConstTensors("m_ForgetGateBias",
+                           m_InputParams.m_ForgetGateBias, params.m_ForgetGateBias);
+        VerifyConstTensors("m_CellBias",
+                           m_InputParams.m_CellBias, params.m_CellBias);
+        VerifyConstTensors("m_OutputGateBias",
+                           m_InputParams.m_OutputGateBias, params.m_OutputGateBias);
+    }
+
+private:
+    armnn::QuantizedLstmInputParams m_InputParams;
+};
+
+BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
+{
+    const uint32_t batchSize = 1;
+    const uint32_t inputSize = 2;
+    const uint32_t numUnits = 4;
+    const uint32_t outputSize = numUnits;
+
+    // Scale/Offset for input/output, cellState In/Out, weights, bias
+    float inputOutputScale = 0.0078125f;
+    int32_t inputOutputOffset = 128;
+
+    float cellStateScale = 0.00048828125f;
+    int32_t cellStateOffset = 0;
+
+    float weightsScale = 0.00408021f;
+    int32_t weightsOffset = 100;
+
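+    // biasScale == inputOutputScale * weightsScale, the usual convention for quantized bias tensors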
+    float biasScale = 3.1876640625e-05f;
+    int32_t biasOffset = 0;
+
+    // The shape of weight data is {outputSize, inputSize} = {4, 2}
+    armnn::TensorShape inputToInputWeightsShape = {4, 2};
+    std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
+    armnn::TensorInfo inputToInputWeightsInfo(inputToInputWeightsShape,
+                                              armnn::DataType::QAsymmU8,
+                                              weightsScale,
+                                              weightsOffset);
+    armnn::ConstTensor inputToInputWeights(inputToInputWeightsInfo, inputToInputWeightsData);
+
+    armnn::TensorShape inputToForgetWeightsShape = {4, 2};
+    std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
+    armnn::TensorInfo inputToForgetWeightsInfo(inputToForgetWeightsShape,
+                                               armnn::DataType::QAsymmU8,
+                                               weightsScale,
+                                               weightsOffset);
+    armnn::ConstTensor inputToForgetWeights(inputToForgetWeightsInfo, inputToForgetWeightsData);
+
+    armnn::TensorShape inputToCellWeightsShape = {4, 2};
+    std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
+    armnn::TensorInfo inputToCellWeightsInfo(inputToCellWeightsShape,
+                                             armnn::DataType::QAsymmU8,
+                                             weightsScale,
+                                             weightsOffset);
+    armnn::ConstTensor inputToCellWeights(inputToCellWeightsInfo, inputToCellWeightsData);
+
+    armnn::TensorShape inputToOutputWeightsShape = {4, 2};
+    std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
+    armnn::TensorInfo inputToOutputWeightsInfo(inputToOutputWeightsShape,
+                                               armnn::DataType::QAsymmU8,
+                                               weightsScale,
+                                               weightsOffset);
+    armnn::ConstTensor inputToOutputWeights(inputToOutputWeightsInfo, inputToOutputWeightsData);
+
+    // The shape of recurrent weight data is {outputSize, outputSize} = {4, 4}
+    armnn::TensorShape recurrentToInputWeightsShape = {4, 4};
+    std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+    armnn::TensorInfo recurrentToInputWeightsInfo(recurrentToInputWeightsShape,
+                                                  armnn::DataType::QAsymmU8,
+                                                  weightsScale,
+                                                  weightsOffset);
+    armnn::ConstTensor recurrentToInputWeights(recurrentToInputWeightsInfo, recurrentToInputWeightsData);
+
+    armnn::TensorShape recurrentToForgetWeightsShape = {4, 4};
+    std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+    armnn::TensorInfo recurrentToForgetWeightsInfo(recurrentToForgetWeightsShape,
+                                                   armnn::DataType::QAsymmU8,
+                                                   weightsScale,
+                                                   weightsOffset);
+    armnn::ConstTensor recurrentToForgetWeights(recurrentToForgetWeightsInfo, recurrentToForgetWeightsData);
+
+    armnn::TensorShape recurrentToCellWeightsShape = {4, 4};
+    std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+    armnn::TensorInfo recurrentToCellWeightsInfo(recurrentToCellWeightsShape,
+                                                 armnn::DataType::QAsymmU8,
+                                                 weightsScale,
+                                                 weightsOffset);
+    armnn::ConstTensor recurrentToCellWeights(recurrentToCellWeightsInfo, recurrentToCellWeightsData);
+
+    armnn::TensorShape recurrentToOutputWeightsShape = {4, 4};
+    std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+    armnn::TensorInfo recurrentToOutputWeightsInfo(recurrentToOutputWeightsShape,
+                                                   armnn::DataType::QAsymmU8,
+                                                   weightsScale,
+                                                   weightsOffset);
+    armnn::ConstTensor recurrentToOutputWeights(recurrentToOutputWeightsInfo, recurrentToOutputWeightsData);
+
+    // The shape of bias data is {outputSize} = {4}
+    armnn::TensorShape inputGateBiasShape = {4};
+    std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4};
+    armnn::TensorInfo inputGateBiasInfo(inputGateBiasShape,
+                                        armnn::DataType::Signed32,
+                                        biasScale,
+                                        biasOffset);
+    armnn::ConstTensor inputGateBias(inputGateBiasInfo, inputGateBiasData);
+
+    armnn::TensorShape forgetGateBiasShape = {4};
+    std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4};
+    armnn::TensorInfo forgetGateBiasInfo(forgetGateBiasShape,
+                                         armnn::DataType::Signed32,
+                                         biasScale,
+                                         biasOffset);
+    armnn::ConstTensor forgetGateBias(forgetGateBiasInfo, forgetGateBiasData);
+
+    armnn::TensorShape cellBiasShape = {4};
+    std::vector<int32_t> cellBiasData = {1, 2, 3, 4};
+    armnn::TensorInfo cellBiasInfo(cellBiasShape,
+                                   armnn::DataType::Signed32,
+                                   biasScale,
+                                   biasOffset);
+    armnn::ConstTensor cellBias(cellBiasInfo, cellBiasData);
+
+    armnn::TensorShape outputGateBiasShape = {4};
+    std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4};
+    armnn::TensorInfo outputGateBiasInfo(outputGateBiasShape,
+                                         armnn::DataType::Signed32,
+                                         biasScale,
+                                         biasOffset);
+    armnn::ConstTensor outputGateBias(outputGateBiasInfo, outputGateBiasData);
+
+    armnn::QuantizedLstmInputParams params;
+    params.m_InputToInputWeights = &inputToInputWeights;
+    params.m_InputToForgetWeights = &inputToForgetWeights;
+    params.m_InputToCellWeights = &inputToCellWeights;
+    params.m_InputToOutputWeights = &inputToOutputWeights;
+    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+    params.m_InputGateBias = &inputGateBias;
+    params.m_ForgetGateBias = &forgetGateBias;
+    params.m_CellBias = &cellBias;
+    params.m_OutputGateBias = &outputGateBias;
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(2);
+    const std::string layerName("QuantizedLstm");
+    armnn::IConnectableLayer* const quantizedLstmLayer = network->AddQuantizedLstmLayer(params, layerName.c_str());
+    armnn::IConnectableLayer* const cellStateOut = network->AddOutputLayer(0);
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(1);
+
+    // Connect up
+    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize },
+                                      armnn::DataType::QAsymmU8,
+                                      inputOutputScale,
+                                      inputOutputOffset);
+    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits },
+                                          armnn::DataType::QSymmS16,
+                                          cellStateScale,
+                                          cellStateOffset);
+    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize },
+                                            armnn::DataType::QAsymmU8,
+                                            inputOutputScale,
+                                            inputOutputOffset);
+
+    inputLayer->GetOutputSlot(0).Connect(quantizedLstmLayer->GetInputSlot(0));
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+
+    cellStateIn->GetOutputSlot(0).Connect(quantizedLstmLayer->GetInputSlot(1));
+    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
+
+    outputStateIn->GetOutputSlot(0).Connect(quantizedLstmLayer->GetInputSlot(2));
+    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateTensorInfo);
+
+    quantizedLstmLayer->GetOutputSlot(0).Connect(cellStateOut->GetInputSlot(0));
+    quantizedLstmLayer->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
+
+    quantizedLstmLayer->GetOutputSlot(1).Connect(outputLayer->GetInputSlot(0));
+    quantizedLstmLayer->GetOutputSlot(1).SetTensorInfo(outputStateTensorInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    VerifyQuantizedLstmLayer checker(layerName,
+                                     {inputTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
+                                     {cellStateTensorInfo, outputStateTensorInfo},
+                                     params);
+
+    deserializedNetwork->ExecuteStrategy(checker);
+}
+
+BOOST_AUTO_TEST_CASE(SerializeDeserializeQLstmBasic)
+{
+    armnn::QLstmDescriptor descriptor;
+
+    descriptor.m_CifgEnabled       = true;
+    descriptor.m_ProjectionEnabled = false;
+    descriptor.m_PeepholeEnabled   = false;
+    descriptor.m_LayerNormEnabled  = false;
+
+    descriptor.m_CellClip       = 0.0f;
+    descriptor.m_ProjectionClip = 0.0f;
+
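+    // The intermediate scales quantize each gate's intermediate result; the hidden state
+    // scale/zero point quantize the (optionally projected) hidden state output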
+    descriptor.m_InputIntermediateScale  = 0.00001f;
+    descriptor.m_ForgetIntermediateScale = 0.00001f;
+    descriptor.m_CellIntermediateScale   = 0.00001f;
+    descriptor.m_OutputIntermediateScale = 0.00001f;
+
+    descriptor.m_HiddenStateScale     = 0.07f;
+    descriptor.m_HiddenStateZeroPoint = 0;
+
+    const unsigned int numBatches = 2;
+    const unsigned int inputSize  = 5;
+    const unsigned int outputSize = 4;
+    const unsigned int numUnits   = 4;
+
+    // Scale/Offset quantization info
+    float inputScale    = 0.0078f;
+    int32_t inputOffset = 0;
+
+    float outputScale    = 0.0078f;
+    int32_t outputOffset = 0;
+
+    float cellStateScale    = 3.5002e-05f;
+    int32_t cellStateOffset = 0;
+
+    float weightsScale    = 0.007f;
+    int32_t weightsOffset = 0;
+
+    float biasScale    = 3.5002e-05f / 1024;
+    int32_t biasOffset = 0;
+
+    // Weights and bias tensor and quantization info
+    armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
+                                       armnn::DataType::QSymmS8,
+                                       weightsScale,
+                                       weightsOffset);
+
+    armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
+                                           armnn::DataType::QSymmS8,
+                                           weightsScale,
+                                           weightsOffset);
+
+    armnn::TensorInfo biasInfo({numUnits}, armnn::DataType::Signed32, biasScale, biasOffset);
+
+    std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+    std::vector<int8_t> inputToCellWeightsData   = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+    std::vector<int8_t> inputToOutputWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+
+    armnn::ConstTensor inputToForgetWeights(inputWeightsInfo, inputToForgetWeightsData);
+    armnn::ConstTensor inputToCellWeights(inputWeightsInfo, inputToCellWeightsData);
+    armnn::ConstTensor inputToOutputWeights(inputWeightsInfo, inputToOutputWeightsData);
+
+    std::vector<int8_t> recurrentToForgetWeightsData =
+            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+    std::vector<int8_t> recurrentToCellWeightsData   =
+            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+    std::vector<int8_t> recurrentToOutputWeightsData =
+            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+
+    armnn::ConstTensor recurrentToForgetWeights(recurrentWeightsInfo, recurrentToForgetWeightsData);
+    armnn::ConstTensor recurrentToCellWeights(recurrentWeightsInfo, recurrentToCellWeightsData);
+    armnn::ConstTensor recurrentToOutputWeights(recurrentWeightsInfo, recurrentToOutputWeightsData);
+
+    std::vector<int32_t> forgetGateBiasData(numUnits, 1);
+    std::vector<int32_t> cellBiasData(numUnits, 0);
+    std::vector<int32_t> outputGateBiasData(numUnits, 0);
+
+    armnn::ConstTensor forgetGateBias(biasInfo, forgetGateBiasData);
+    armnn::ConstTensor cellBias(biasInfo, cellBiasData);
+    armnn::ConstTensor outputGateBias(biasInfo, outputGateBiasData);
+
+    // Set up params
+    armnn::LstmInputParams params;
+    params.m_InputToForgetWeights = &inputToForgetWeights;
+    params.m_InputToCellWeights   = &inputToCellWeights;
+    params.m_InputToOutputWeights = &inputToOutputWeights;
+
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+
+    params.m_ForgetGateBias = &forgetGateBias;
+    params.m_CellBias       = &cellBias;
+    params.m_OutputGateBias = &outputGateBias;
+
+    // Create network
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    const std::string layerName("qLstm");
+
+    armnn::IConnectableLayer* const input         = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const cellStateIn   = network->AddInputLayer(2);
+
+    armnn::IConnectableLayer* const qLstmLayer = network->AddQLstmLayer(descriptor, params, layerName.c_str());
+
+    armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(0);
+    armnn::IConnectableLayer* const cellStateOut   = network->AddOutputLayer(1);
+    armnn::IConnectableLayer* const outputLayer    = network->AddOutputLayer(2);
+
+    // Input/Output tensor info
+    armnn::TensorInfo inputInfo({numBatches, inputSize},
+                                armnn::DataType::QAsymmS8,
+                                inputScale,
+                                inputOffset);
+
+    armnn::TensorInfo cellStateInfo({numBatches, numUnits},
+                                    armnn::DataType::QSymmS16,
+                                    cellStateScale,
+                                    cellStateOffset);
+
+    armnn::TensorInfo outputStateInfo({numBatches, outputSize},
+                                      armnn::DataType::QAsymmS8,
+                                      outputScale,
+                                      outputOffset);
+
+    // Connect input/output slots
+    input->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(0));
+    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
+
+    outputStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(1));
+    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
+
+    cellStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(2));
+    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateInfo);
+
+    qLstmLayer->GetOutputSlot(0).Connect(outputStateOut->GetInputSlot(0));
+    qLstmLayer->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
+
+    qLstmLayer->GetOutputSlot(1).Connect(cellStateOut->GetInputSlot(0));
+    qLstmLayer->GetOutputSlot(1).SetTensorInfo(cellStateInfo);
+
+    qLstmLayer->GetOutputSlot(2).Connect(outputLayer->GetInputSlot(0));
+    qLstmLayer->GetOutputSlot(2).SetTensorInfo(outputStateInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    VerifyLstmLayer<armnn::QLstmDescriptor> checker(
+            layerName,
+            {inputInfo, outputStateInfo, cellStateInfo},
+            {outputStateInfo, cellStateInfo, outputStateInfo},
+            descriptor,
+            params);
+
+    deserializedNetwork->ExecuteStrategy(checker);
+}
+
+BOOST_AUTO_TEST_CASE(SerializeDeserializeQLstmCifgLayerNorm)
+{
+    armnn::QLstmDescriptor descriptor;
+
+    // CIFG is enabled here, so the input-gate (CIFG) parameters are omitted; they are only required when CIFG is disabled
+    descriptor.m_CifgEnabled       = true;
+    descriptor.m_ProjectionEnabled = false;
+    descriptor.m_PeepholeEnabled   = false;
+    descriptor.m_LayerNormEnabled  = true;
+
+    descriptor.m_CellClip       = 0.0f;
+    descriptor.m_ProjectionClip = 0.0f;
+
+    descriptor.m_InputIntermediateScale  = 0.00001f;
+    descriptor.m_ForgetIntermediateScale = 0.00001f;
+    descriptor.m_CellIntermediateScale   = 0.00001f;
+    descriptor.m_OutputIntermediateScale = 0.00001f;
+
+    descriptor.m_HiddenStateScale     = 0.07f;
+    descriptor.m_HiddenStateZeroPoint = 0;
+
+    const unsigned int numBatches = 2;
+    const unsigned int inputSize  = 5;
+    const unsigned int outputSize = 4;
+    const unsigned int numUnits   = 4;
+
+    // Scale/Offset quantization info
+    float inputScale    = 0.0078f;
+    int32_t inputOffset = 0;
+
+    float outputScale    = 0.0078f;
+    int32_t outputOffset = 0;
+
+    float cellStateScale    = 3.5002e-05f;
+    int32_t cellStateOffset = 0;
+
+    float weightsScale    = 0.007f;
+    int32_t weightsOffset = 0;
+
+    float layerNormScale    = 3.5002e-05f;
+    int32_t layerNormOffset = 0;
+
+    float biasScale    = layerNormScale / 1024;
+    int32_t biasOffset = 0;
+
+    // Weights and bias tensor and quantization info
+    armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
+                                       armnn::DataType::QSymmS8,
+                                       weightsScale,
+                                       weightsOffset);
+
+    armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
+                                           armnn::DataType::QSymmS8,
+                                           weightsScale,
+                                           weightsOffset);
+
+    armnn::TensorInfo biasInfo({numUnits},
+                               armnn::DataType::Signed32,
+                               biasScale,
+                               biasOffset);
+
+    armnn::TensorInfo layerNormWeightsInfo({numUnits},
+                                           armnn::DataType::QSymmS16,
+                                           layerNormScale,
+                                           layerNormOffset);
+
+    // Mandatory params
+    std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+    std::vector<int8_t> inputToCellWeightsData   = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+    std::vector<int8_t> inputToOutputWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+
+    armnn::ConstTensor inputToForgetWeights(inputWeightsInfo, inputToForgetWeightsData);
+    armnn::ConstTensor inputToCellWeights(inputWeightsInfo, inputToCellWeightsData);
+    armnn::ConstTensor inputToOutputWeights(inputWeightsInfo, inputToOutputWeightsData);
+
+    std::vector<int8_t> recurrentToForgetWeightsData =
+            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+    std::vector<int8_t> recurrentToCellWeightsData   =
+            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+    std::vector<int8_t> recurrentToOutputWeightsData =
+            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+
+    armnn::ConstTensor recurrentToForgetWeights(recurrentWeightsInfo, recurrentToForgetWeightsData);
+    armnn::ConstTensor recurrentToCellWeights(recurrentWeightsInfo, recurrentToCellWeightsData);
+    armnn::ConstTensor recurrentToOutputWeights(recurrentWeightsInfo, recurrentToOutputWeightsData);
+
+    std::vector<int32_t> forgetGateBiasData(numUnits, 1);
+    std::vector<int32_t> cellBiasData(numUnits, 0);
+    std::vector<int32_t> outputGateBiasData(numUnits, 0);
+
+    armnn::ConstTensor forgetGateBias(biasInfo, forgetGateBiasData);
+    armnn::ConstTensor cellBias(biasInfo, cellBiasData);
+    armnn::ConstTensor outputGateBias(biasInfo, outputGateBiasData);
+
+    // Layer Norm
+    std::vector<int16_t> forgetLayerNormWeightsData =
+            GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
+    std::vector<int16_t> cellLayerNormWeightsData =
+            GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
+    std::vector<int16_t> outputLayerNormWeightsData =
+            GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
+
+    armnn::ConstTensor forgetLayerNormWeights(layerNormWeightsInfo, forgetLayerNormWeightsData);
+    armnn::ConstTensor cellLayerNormWeights(layerNormWeightsInfo, cellLayerNormWeightsData);
+    armnn::ConstTensor outputLayerNormWeights(layerNormWeightsInfo, outputLayerNormWeightsData);
+
+    // Set up params
+    armnn::LstmInputParams params;
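+    // Note: LstmInputParams only holds raw pointers, so the ConstTensors above
+    // must stay alive until AddQLstmLayer below, where the network takes its
+    // own copy of the tensor data.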
+
+    // Mandatory params
+    params.m_InputToForgetWeights = &inputToForgetWeights;
+    params.m_InputToCellWeights   = &inputToCellWeights;
+    params.m_InputToOutputWeights = &inputToOutputWeights;
+
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+
+    params.m_ForgetGateBias = &forgetGateBias;
+    params.m_CellBias       = &cellBias;
+    params.m_OutputGateBias = &outputGateBias;
+
+    // Layer Norm
+    params.m_ForgetLayerNormWeights = &forgetLayerNormWeights;
+    params.m_CellLayerNormWeights   = &cellLayerNormWeights;
+    params.m_OutputLayerNormWeights = &outputLayerNormWeights;
+
+    // Create network
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    const std::string layerName("qLstm");
+
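+    // QLstm takes three inputs (input, outputStateIn, cellStateIn) and
+    // produces three outputs (outputStateOut, cellStateOut, output).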
+    armnn::IConnectableLayer* const input         = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const cellStateIn   = network->AddInputLayer(2);
+
+    armnn::IConnectableLayer* const qLstmLayer = network->AddQLstmLayer(descriptor, params, layerName.c_str());
+
+    armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(0);
+    armnn::IConnectableLayer* const cellStateOut   = network->AddOutputLayer(1);
+    armnn::IConnectableLayer* const outputLayer    = network->AddOutputLayer(2);
+
+    // Input/Output tensor info
+    armnn::TensorInfo inputInfo({numBatches, inputSize},
+                                armnn::DataType::QAsymmS8,
+                                inputScale,
+                                inputOffset);
+
+    armnn::TensorInfo cellStateInfo({numBatches, numUnits},
+                                    armnn::DataType::QSymmS16,
+                                    cellStateScale,
+                                    cellStateOffset);
+
+    armnn::TensorInfo outputStateInfo({numBatches, outputSize},
+                                      armnn::DataType::QAsymmS8,
+                                      outputScale,
+                                      outputOffset);
+
+    // Connect input/output slots
+    input->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(0));
+    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
+
+    outputStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(1));
+    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
+
+    cellStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(2));
+    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateInfo);
+
+    qLstmLayer->GetOutputSlot(0).Connect(outputStateOut->GetInputSlot(0));
+    qLstmLayer->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
+
+    qLstmLayer->GetOutputSlot(1).Connect(cellStateOut->GetInputSlot(0));
+    qLstmLayer->GetOutputSlot(1).SetTensorInfo(cellStateInfo);
+
+    qLstmLayer->GetOutputSlot(2).Connect(outputLayer->GetInputSlot(0));
+    qLstmLayer->GetOutputSlot(2).SetTensorInfo(outputStateInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    VerifyLstmLayer<armnn::QLstmDescriptor> checker(layerName,
+                                                    {inputInfo, outputStateInfo, cellStateInfo},
+                                                    {outputStateInfo, cellStateInfo, outputStateInfo},
+                                                    descriptor,
+                                                    params);
+
+    deserializedNetwork->ExecuteStrategy(checker);
+}
+
+BOOST_AUTO_TEST_CASE(SerializeDeserializeQLstmAdvanced)
+{
+    armnn::QLstmDescriptor descriptor;
+
+    descriptor.m_CifgEnabled       = false;
+    descriptor.m_ProjectionEnabled = true;
+    descriptor.m_PeepholeEnabled   = true;
+    descriptor.m_LayerNormEnabled  = true;
+
+    descriptor.m_CellClip       = 0.1f;
+    descriptor.m_ProjectionClip = 0.1f;
+
+    descriptor.m_InputIntermediateScale  = 0.00001f;
+    descriptor.m_ForgetIntermediateScale = 0.00001f;
+    descriptor.m_CellIntermediateScale   = 0.00001f;
+    descriptor.m_OutputIntermediateScale = 0.00001f;
+
+    descriptor.m_HiddenStateScale     = 0.07f;
+    descriptor.m_HiddenStateZeroPoint = 0;
+
+    const unsigned int numBatches = 2;
+    const unsigned int inputSize  = 5;
+    const unsigned int outputSize = 4;
+    const unsigned int numUnits   = 4;
+
+    // Scale/Offset quantization info
+    float inputScale    = 0.0078f;
+    int32_t inputOffset = 0;
+
+    float outputScale    = 0.0078f;
+    int32_t outputOffset = 0;
+
+    float cellStateScale    = 3.5002e-05f;
+    int32_t cellStateOffset = 0;
+
+    float weightsScale    = 0.007f;
+    int32_t weightsOffset = 0;
+
+    float layerNormScale    = 3.5002e-05f;
+    int32_t layerNormOffset = 0;
+
+    float biasScale    = layerNormScale / 1024;
+    int32_t biasOffset = 0;
+
+    // Weight and bias tensor info, including quantization parameters
+    armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
+                                       armnn::DataType::QSymmS8,
+                                       weightsScale,
+                                       weightsOffset);
+
+    armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
+                                           armnn::DataType::QSymmS8,
+                                           weightsScale,
+                                           weightsOffset);
+
+    armnn::TensorInfo biasInfo({numUnits},
+                               armnn::DataType::Signed32,
+                               biasScale,
+                               biasOffset);
+
+    armnn::TensorInfo peepholeWeightsInfo({numUnits},
+                                          armnn::DataType::QSymmS16,
+                                          weightsScale,
+                                          weightsOffset);
+
+    armnn::TensorInfo layerNormWeightsInfo({numUnits},
+                                           armnn::DataType::QSymmS16,
+                                           layerNormScale,
+                                           layerNormOffset);
+
+    armnn::TensorInfo projectionWeightsInfo({outputSize, numUnits},
+                                            armnn::DataType::QSymmS8,
+                                            weightsScale,
+                                            weightsOffset);
+
+    // Mandatory params
+    std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+    std::vector<int8_t> inputToCellWeightsData   = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+    std::vector<int8_t> inputToOutputWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+
+    armnn::ConstTensor inputToForgetWeights(inputWeightsInfo, inputToForgetWeightsData);
+    armnn::ConstTensor inputToCellWeights(inputWeightsInfo, inputToCellWeightsData);
+    armnn::ConstTensor inputToOutputWeights(inputWeightsInfo, inputToOutputWeightsData);
+
+    std::vector<int8_t> recurrentToForgetWeightsData =
+            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+    std::vector<int8_t> recurrentToCellWeightsData   =
+            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+    std::vector<int8_t> recurrentToOutputWeightsData =
+            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+
+    armnn::ConstTensor recurrentToForgetWeights(recurrentWeightsInfo, recurrentToForgetWeightsData);
+    armnn::ConstTensor recurrentToCellWeights(recurrentWeightsInfo, recurrentToCellWeightsData);
+    armnn::ConstTensor recurrentToOutputWeights(recurrentWeightsInfo, recurrentToOutputWeightsData);
+
+    std::vector<int32_t> forgetGateBiasData(numUnits, 1);
+    std::vector<int32_t> cellBiasData(numUnits, 0);
+    std::vector<int32_t> outputGateBiasData(numUnits, 0);
+
+    armnn::ConstTensor forgetGateBias(biasInfo, forgetGateBiasData);
+    armnn::ConstTensor cellBias(biasInfo, cellBiasData);
+    armnn::ConstTensor outputGateBias(biasInfo, outputGateBiasData);
+
+    // Input gate params (provided because CIFG is disabled)
+    std::vector<int8_t> inputToInputWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+    std::vector<int8_t> recurrentToInputWeightsData =
+            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+    std::vector<int32_t> inputGateBiasData(numUnits, 1);
+
+    armnn::ConstTensor inputToInputWeights(inputWeightsInfo, inputToInputWeightsData);
+    armnn::ConstTensor recurrentToInputWeights(recurrentWeightsInfo, recurrentToInputWeightsData);
+    armnn::ConstTensor inputGateBias(biasInfo, inputGateBiasData);
+
+    // Peephole
+    std::vector<int16_t> cellToInputWeightsData  = GenerateRandomData<int16_t>(peepholeWeightsInfo.GetNumElements());
+    std::vector<int16_t> cellToForgetWeightsData = GenerateRandomData<int16_t>(peepholeWeightsInfo.GetNumElements());
+    std::vector<int16_t> cellToOutputWeightsData = GenerateRandomData<int16_t>(peepholeWeightsInfo.GetNumElements());
+
+    armnn::ConstTensor cellToInputWeights(peepholeWeightsInfo, cellToInputWeightsData);
+    armnn::ConstTensor cellToForgetWeights(peepholeWeightsInfo, cellToForgetWeightsData);
+    armnn::ConstTensor cellToOutputWeights(peepholeWeightsInfo, cellToOutputWeightsData);
+
+    // Projection
+    std::vector<int8_t> projectionWeightsData = GenerateRandomData<int8_t>(projectionWeightsInfo.GetNumElements());
+    std::vector<int32_t> projectionBiasData(outputSize, 1);
+
+    armnn::ConstTensor projectionWeights(projectionWeightsInfo, projectionWeightsData);
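+    // biasInfo has shape {numUnits}; reusing it for the projection bias only
+    // works here because numUnits == outputSize in this test.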
+    armnn::ConstTensor projectionBias(biasInfo, projectionBiasData);
+
+    // Layer Norm
+    std::vector<int16_t> inputLayerNormWeightsData =
+            GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
+    std::vector<int16_t> forgetLayerNormWeightsData =
+            GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
+    std::vector<int16_t> cellLayerNormWeightsData =
+            GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
+    std::vector<int16_t> outputLayerNormWeightsData =
+            GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
+
+    armnn::ConstTensor inputLayerNormWeights(layerNormWeightsInfo, inputLayerNormWeightsData);
+    armnn::ConstTensor forgetLayerNormWeights(layerNormWeightsInfo, forgetLayerNormWeightsData);
+    armnn::ConstTensor cellLayerNormWeights(layerNormWeightsInfo, cellLayerNormWeightsData);
+    armnn::ConstTensor outputLayerNormWeights(layerNormWeightsInfo, outputLayerNormWeightsData);
+
+    // Set up params
+    armnn::LstmInputParams params;
+
+    // Mandatory params
+    params.m_InputToForgetWeights = &inputToForgetWeights;
+    params.m_InputToCellWeights   = &inputToCellWeights;
+    params.m_InputToOutputWeights = &inputToOutputWeights;
+
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+
+    params.m_ForgetGateBias = &forgetGateBias;
+    params.m_CellBias       = &cellBias;
+    params.m_OutputGateBias = &outputGateBias;
+
+    // Input gate params (provided because CIFG is disabled)
+    params.m_InputToInputWeights     = &inputToInputWeights;
+    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
+    params.m_InputGateBias           = &inputGateBias;
+
+    // Peephole
+    params.m_CellToInputWeights  = &cellToInputWeights;
+    params.m_CellToForgetWeights = &cellToForgetWeights;
+    params.m_CellToOutputWeights = &cellToOutputWeights;
+
+    // Projection
+    params.m_ProjectionWeights = &projectionWeights;
+    params.m_ProjectionBias    = &projectionBias;
+
+    // Layer Norm
+    params.m_InputLayerNormWeights  = &inputLayerNormWeights;
+    params.m_ForgetLayerNormWeights = &forgetLayerNormWeights;
+    params.m_CellLayerNormWeights   = &cellLayerNormWeights;
+    params.m_OutputLayerNormWeights = &outputLayerNormWeights;
+
+    // Create network
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    const std::string layerName("qLstm");
+
+    armnn::IConnectableLayer* const input         = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const cellStateIn   = network->AddInputLayer(2);
+
+    armnn::IConnectableLayer* const qLstmLayer = network->AddQLstmLayer(descriptor, params, layerName.c_str());
+
+    armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(0);
+    armnn::IConnectableLayer* const cellStateOut   = network->AddOutputLayer(1);
+    armnn::IConnectableLayer* const outputLayer    = network->AddOutputLayer(2);
+
+    // Input/Output tensor info
+    armnn::TensorInfo inputInfo({numBatches, inputSize},
+                                armnn::DataType::QAsymmS8,
+                                inputScale,
+                                inputOffset);
+
+    armnn::TensorInfo cellStateInfo({numBatches, numUnits},
+                                    armnn::DataType::QSymmS16,
+                                    cellStateScale,
+                                    cellStateOffset);
+
+    armnn::TensorInfo outputStateInfo({numBatches, outputSize},
+                                      armnn::DataType::QAsymmS8,
+                                      outputScale,
+                                      outputOffset);
+
+    // Connect input/output slots
+    input->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(0));
+    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
+
+    outputStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(1));
+    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
+
+    cellStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(2));
+    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateInfo);
+
+    qLstmLayer->GetOutputSlot(0).Connect(outputStateOut->GetInputSlot(0));
+    qLstmLayer->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
+
+    qLstmLayer->GetOutputSlot(1).Connect(cellStateOut->GetInputSlot(0));
+    qLstmLayer->GetOutputSlot(1).SetTensorInfo(cellStateInfo);
+
+    qLstmLayer->GetOutputSlot(2).Connect(outputLayer->GetInputSlot(0));
+    qLstmLayer->GetOutputSlot(2).SetTensorInfo(outputStateInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    VerifyLstmLayer<armnn::QLstmDescriptor> checker(layerName,
+                                                    {inputInfo, outputStateInfo, cellStateInfo},
+                                                    {outputStateInfo, cellStateInfo, outputStateInfo},
+                                                    descriptor,
+                                                    params);
+
+    deserializedNetwork->ExecuteStrategy(checker);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnSerializer/test/SerializerTestUtils.cpp b/src/armnnSerializer/test/SerializerTestUtils.cpp
new file mode 100644
index 0000000..586d2a0
--- /dev/null
+++ b/src/armnnSerializer/test/SerializerTestUtils.cpp
@@ -0,0 +1,163 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SerializerTestUtils.hpp"
+#include "../Serializer.hpp"
+
+using armnnDeserializer::IDeserializer;
+
+LayerVerifierBase::LayerVerifierBase(const std::string& layerName,
+                                     const std::vector<armnn::TensorInfo>& inputInfos,
+                                     const std::vector<armnn::TensorInfo>& outputInfos)
+    : m_LayerName(layerName)
+    , m_InputTensorInfos(inputInfos)
+    , m_OutputTensorInfos(outputInfos)
+{}
+
+void LayerVerifierBase::ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                                        const armnn::BaseDescriptor& descriptor,
+                                        const std::vector<armnn::ConstTensor>& constants,
+                                        const char* name,
+                                        const armnn::LayerBindingId id)
+{
+    armnn::IgnoreUnused(descriptor, constants, id);
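+    // Input and Output layers are auxiliary to the layer under test and are
+    // skipped; every other layer has its name and connections verified.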
+    switch (layer->GetType())
+    {
+        case armnn::LayerType::Input: break;
+        case armnn::LayerType::Output: break;
+        default:
+        {
+            VerifyNameAndConnections(layer, name);
+        }
+    }
+}
+
+void LayerVerifierBase::VerifyNameAndConnections(const armnn::IConnectableLayer* layer, const char* name)
+{
+    BOOST_TEST(name == m_LayerName.c_str());
+
+    BOOST_TEST(layer->GetNumInputSlots() == m_InputTensorInfos.size());
+    BOOST_TEST(layer->GetNumOutputSlots() == m_OutputTensorInfos.size());
+
+    for (unsigned int i = 0; i < m_InputTensorInfos.size(); i++)
+    {
+        const armnn::IOutputSlot* connectedOutput = layer->GetInputSlot(i).GetConnection();
+        BOOST_CHECK(connectedOutput);
+
+        const armnn::TensorInfo& connectedInfo = connectedOutput->GetTensorInfo();
+        BOOST_TEST(connectedInfo.GetShape() == m_InputTensorInfos[i].GetShape());
+        BOOST_TEST(
+            GetDataTypeName(connectedInfo.GetDataType()) == GetDataTypeName(m_InputTensorInfos[i].GetDataType()));
+
+        BOOST_TEST(connectedInfo.GetQuantizationScale() == m_InputTensorInfos[i].GetQuantizationScale());
+        BOOST_TEST(connectedInfo.GetQuantizationOffset() == m_InputTensorInfos[i].GetQuantizationOffset());
+    }
+
+    for (unsigned int i = 0; i < m_OutputTensorInfos.size(); i++)
+    {
+        const armnn::TensorInfo& outputInfo = layer->GetOutputSlot(i).GetTensorInfo();
+        BOOST_TEST(outputInfo.GetShape() == m_OutputTensorInfos[i].GetShape());
+        BOOST_TEST(
+            GetDataTypeName(outputInfo.GetDataType()) == GetDataTypeName(m_OutputTensorInfos[i].GetDataType()));
+
+        BOOST_TEST(outputInfo.GetQuantizationScale() == m_OutputTensorInfos[i].GetQuantizationScale());
+        BOOST_TEST(outputInfo.GetQuantizationOffset() == m_OutputTensorInfos[i].GetQuantizationOffset());
+    }
+}
+
+void LayerVerifierBase::VerifyConstTensors(const std::string& tensorName,
+                                           const armnn::ConstTensor* expectedPtr,
+                                           const armnn::ConstTensor* actualPtr)
+{
+    if (expectedPtr == nullptr)
+    {
+        BOOST_CHECK_MESSAGE(actualPtr == nullptr, tensorName + " should not exist");
+    }
+    else
+    {
+        BOOST_CHECK_MESSAGE(actualPtr != nullptr, tensorName + " should have been set");
+        if (actualPtr != nullptr)
+        {
+            const armnn::TensorInfo& expectedInfo = expectedPtr->GetInfo();
+            const armnn::TensorInfo& actualInfo = actualPtr->GetInfo();
+
+            BOOST_CHECK_MESSAGE(expectedInfo.GetShape() == actualInfo.GetShape(),
+                                tensorName + " shapes don't match");
+            BOOST_CHECK_MESSAGE(
+                    GetDataTypeName(expectedInfo.GetDataType()) == GetDataTypeName(actualInfo.GetDataType()),
+                    tensorName + " data types don't match");
+
+            BOOST_CHECK_MESSAGE(expectedPtr->GetNumBytes() == actualPtr->GetNumBytes(),
+                                tensorName + " (GetNumBytes) data sizes do not match");
+            if (expectedPtr->GetNumBytes() == actualPtr->GetNumBytes())
+            {
+                // Check that the data is identical
+                const char* expectedData = static_cast<const char*>(expectedPtr->GetMemoryArea());
+                const char* actualData = static_cast<const char*>(actualPtr->GetMemoryArea());
+                bool same = true;
+                for (unsigned int i = 0; i < expectedPtr->GetNumBytes(); ++i)
+                {
+                    same = expectedData[i] == actualData[i];
+                    if (!same)
+                    {
+                        break;
+                    }
+                }
+                BOOST_CHECK_MESSAGE(same, tensorName + " data does not match");
+            }
+        }
+    }
+}
+
+void CompareConstTensor(const armnn::ConstTensor& tensor1, const armnn::ConstTensor& tensor2)
+{
+    BOOST_TEST(tensor1.GetShape() == tensor2.GetShape());
+    BOOST_TEST(GetDataTypeName(tensor1.GetDataType()) == GetDataTypeName(tensor2.GetDataType()));
+
+    switch (tensor1.GetDataType())
+    {
+        case armnn::DataType::Float32:
+            CompareConstTensorData<const float*>(
+                tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
+            break;
+        case armnn::DataType::QAsymmU8:
+        case armnn::DataType::Boolean:
+            CompareConstTensorData<const uint8_t*>(
+                tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
+            break;
+        case armnn::DataType::QSymmS8:
+            CompareConstTensorData<const int8_t*>(
+                tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
+            break;
+        case armnn::DataType::Signed32:
+            CompareConstTensorData<const int32_t*>(
+                tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
+            break;
+        default:
+            // Note that Float16 is not yet implemented
+            BOOST_TEST_MESSAGE("Unexpected datatype");
+            BOOST_TEST(false);
+    }
+}
+
+armnn::INetworkPtr DeserializeNetwork(const std::string& serializerString)
+{
+    std::vector<std::uint8_t> const serializerVector{serializerString.begin(), serializerString.end()};
+    return IDeserializer::Create()->CreateNetworkFromBinary(serializerVector);
+}
+
+std::string SerializeNetwork(const armnn::INetwork& network)
+{
+    armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
+
+    serializer->Serialize(network);
+
+    std::stringstream stream;
+    serializer->SaveSerializedToStream(stream);
+
+    std::string serializerString{stream.str()};
+    return serializerString;
+}
diff --git a/src/armnnSerializer/test/SerializerTestUtils.hpp b/src/armnnSerializer/test/SerializerTestUtils.hpp
new file mode 100644
index 0000000..e085d2e
--- /dev/null
+++ b/src/armnnSerializer/test/SerializerTestUtils.hpp
@@ -0,0 +1,167 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Descriptors.hpp>
+#include <armnn/INetwork.hpp>
+#include <armnn/IStrategy.hpp>
+#include <armnn/TypesUtils.hpp>
+#include <armnnDeserializer/IDeserializer.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <algorithm>
+#include <limits>
+#include <random>
+#include <type_traits>
+#include <vector>
+
+#include <boost/test/unit_test.hpp>
+
+armnn::INetworkPtr DeserializeNetwork(const std::string& serializerString);
+
+std::string SerializeNetwork(const armnn::INetwork& network);
+
+void CompareConstTensor(const armnn::ConstTensor& tensor1, const armnn::ConstTensor& tensor2);
+
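+// Base verifier implementing armnn::IStrategy: INetwork::ExecuteStrategy()
+// calls ExecuteStrategy() once per layer of the network, letting a test
+// validate each deserialized layer. Typical usage (sketch):
+//
+//     LayerVerifierBase verifier(layerName, {tensorInfo}, {tensorInfo});
+//     deserializedNetwork->ExecuteStrategy(verifier);
+//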
+class LayerVerifierBase : public armnn::IStrategy
+{
+public:
+    LayerVerifierBase(const std::string& layerName,
+                      const std::vector<armnn::TensorInfo>& inputInfos,
+                      const std::vector<armnn::TensorInfo>& outputInfos);
+
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override;
+
+protected:
+    void VerifyNameAndConnections(const armnn::IConnectableLayer* layer, const char* name);
+
+    void VerifyConstTensors(const std::string& tensorName,
+                            const armnn::ConstTensor* expectedPtr,
+                            const armnn::ConstTensor* actualPtr);
+
+private:
+    std::string m_LayerName;
+    std::vector<armnn::TensorInfo> m_InputTensorInfos;
+    std::vector<armnn::TensorInfo> m_OutputTensorInfos;
+};
+
+template<typename Descriptor>
+class LayerVerifierBaseWithDescriptor : public LayerVerifierBase
+{
+public:
+    LayerVerifierBaseWithDescriptor(const std::string& layerName,
+                                    const std::vector<armnn::TensorInfo>& inputInfos,
+                                    const std::vector<armnn::TensorInfo>& outputInfos,
+                                    const Descriptor& descriptor)
+        : LayerVerifierBase(layerName, inputInfos, outputInfos)
+        , m_Descriptor(descriptor) {}
+
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
+    {
+        armnn::IgnoreUnused(constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Input: break;
+            case armnn::LayerType::Output: break;
+            default:
+            {
+                VerifyNameAndConnections(layer, name);
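+                // ExecuteStrategy passes the layer's own concrete descriptor,
+                // so the downcast to the expected Descriptor type is safe here.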
+                const Descriptor& internalDescriptor = static_cast<const Descriptor&>(descriptor);
+                VerifyDescriptor(internalDescriptor);
+                break;
+            }
+        }
+    }
+
+protected:
+    void VerifyDescriptor(const Descriptor& descriptor)
+    {
+        BOOST_CHECK(descriptor == m_Descriptor);
+    }
+
+    Descriptor m_Descriptor;
+};
+
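+// Compares two raw buffers element-wise, reinterpreting them as T
+// (a pointer type such as const float*).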
+template<typename T>
+void CompareConstTensorData(const void* data1, const void* data2, unsigned int numElements)
+{
+    T typedData1 = static_cast<T>(data1);
+    T typedData2 = static_cast<T>(data2);
+    BOOST_CHECK(typedData1);
+    BOOST_CHECK(typedData2);
+
+    for (unsigned int i = 0; i < numElements; i++)
+    {
+        BOOST_TEST(typedData1[i] == typedData2[i]);
+    }
+}
+
+template <typename Descriptor>
+class LayerVerifierBaseWithDescriptorAndConstants : public LayerVerifierBaseWithDescriptor<Descriptor>
+{
+public:
+    LayerVerifierBaseWithDescriptorAndConstants(const std::string& layerName,
+                                                const std::vector<armnn::TensorInfo>& inputInfos,
+                                                const std::vector<armnn::TensorInfo>& outputInfos,
+                                                const Descriptor& descriptor,
+                                                const std::vector<armnn::ConstTensor>& constants)
+            : LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor)
+            , m_Constants(constants) {}
+
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
+    {
+        armnn::IgnoreUnused(id);
+
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Input: break;
+            case armnn::LayerType::Output: break;
+            default:
+            {
+                this->VerifyNameAndConnections(layer, name);
+                const Descriptor& internalDescriptor = static_cast<const Descriptor&>(descriptor);
+                this->VerifyDescriptor(internalDescriptor);
+
+                for (std::size_t i = 0; i < constants.size(); i++)
+                {
+                    CompareConstTensor(constants[i], m_Constants[i]);
+                }
+            }
+        }
+    }
+
+private:
+    std::vector<armnn::ConstTensor> m_Constants;
+};
+
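+// Generates pseudo-random data covering the full numeric range of DataType.
+// The engine and distribution are static, so each instantiation yields a
+// deterministic sequence that is shared across calls.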
+template<typename DataType>
+static std::vector<DataType> GenerateRandomData(size_t size)
+{
+    constexpr bool isIntegerType = std::is_integral<DataType>::value;
+    using Distribution =
+        typename std::conditional<isIntegerType,
+                                  std::uniform_int_distribution<DataType>,
+                                  std::uniform_real_distribution<DataType>>::type;
+
+    static constexpr DataType lowerLimit = std::numeric_limits<DataType>::min();
+    static constexpr DataType upperLimit = std::numeric_limits<DataType>::max();
+
+    static Distribution distribution(lowerLimit, upperLimit);
+    static std::default_random_engine generator;
+
+    std::vector<DataType> randomData(size);
+    std::generate(randomData.begin(), randomData.end(), []() { return distribution(generator); });
+
+    return randomData;
+}
\ No newline at end of file
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 44e8a38..f261731 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -4,6 +4,7 @@
 //
 
 #include "../Serializer.hpp"
+#include "SerializerTestUtils.hpp"
 
 #include <armnn/Descriptors.hpp>
 #include <armnn/INetwork.hpp>
@@ -11,6 +12,7 @@
 #include <armnn/LstmParams.hpp>
 #include <armnn/QuantizedLstmParams.hpp>
 #include <armnnDeserializer/IDeserializer.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <random>
 #include <vector>
@@ -19,264 +21,36 @@
 
 using armnnDeserializer::IDeserializer;
 
-namespace
-{
-
-#define DECLARE_LAYER_VERIFIER_CLASS(name) \
-class name##LayerVerifier : public LayerVerifierBase \
-{ \
-public: \
-    name##LayerVerifier(const std::string& layerName, \
-                        const std::vector<armnn::TensorInfo>& inputInfos, \
-                        const std::vector<armnn::TensorInfo>& outputInfos) \
-        : LayerVerifierBase(layerName, inputInfos, outputInfos) {} \
-\
-    void Visit##name##Layer(const armnn::IConnectableLayer* layer, const char* name) override \
-    { \
-        VerifyNameAndConnections(layer, name); \
-    } \
-};
-
-#define DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(name) \
-class name##LayerVerifier : public LayerVerifierBaseWithDescriptor<armnn::name##Descriptor> \
-{ \
-public: \
-    name##LayerVerifier(const std::string& layerName, \
-                        const std::vector<armnn::TensorInfo>& inputInfos, \
-                        const std::vector<armnn::TensorInfo>& outputInfos, \
-                        const armnn::name##Descriptor& descriptor) \
-        : LayerVerifierBaseWithDescriptor<armnn::name##Descriptor>( \
-            layerName, inputInfos, outputInfos, descriptor) {} \
-\
-    void Visit##name##Layer(const armnn::IConnectableLayer* layer, \
-                            const armnn::name##Descriptor& descriptor, \
-                            const char* name) override \
-    { \
-        VerifyNameAndConnections(layer, name); \
-        VerifyDescriptor(descriptor); \
-    } \
-};
-
-struct DefaultLayerVerifierPolicy
-{
-    static void Apply(const std::string)
-    {
-        BOOST_TEST_MESSAGE("Unexpected layer found in network");
-        BOOST_TEST(false);
-    }
-};
-
-class LayerVerifierBase : public armnn::LayerVisitorBase<DefaultLayerVerifierPolicy>
-{
-public:
-    LayerVerifierBase(const std::string& layerName,
-                      const std::vector<armnn::TensorInfo>& inputInfos,
-                      const std::vector<armnn::TensorInfo>& outputInfos)
-    : m_LayerName(layerName)
-    , m_InputTensorInfos(inputInfos)
-    , m_OutputTensorInfos(outputInfos) {}
-
-    void VisitInputLayer(const armnn::IConnectableLayer*, armnn::LayerBindingId, const char*) override {}
-
-    void VisitOutputLayer(const armnn::IConnectableLayer*, armnn::LayerBindingId, const char*) override {}
-
-protected:
-    void VerifyNameAndConnections(const armnn::IConnectableLayer* layer, const char* name)
-    {
-        BOOST_TEST(name == m_LayerName.c_str());
-
-        BOOST_TEST(layer->GetNumInputSlots() == m_InputTensorInfos.size());
-        BOOST_TEST(layer->GetNumOutputSlots() == m_OutputTensorInfos.size());
-
-        for (unsigned int i = 0; i < m_InputTensorInfos.size(); i++)
-        {
-            const armnn::IOutputSlot* connectedOutput = layer->GetInputSlot(i).GetConnection();
-            BOOST_CHECK(connectedOutput);
-
-            const armnn::TensorInfo& connectedInfo = connectedOutput->GetTensorInfo();
-            BOOST_TEST(connectedInfo.GetShape() == m_InputTensorInfos[i].GetShape());
-            BOOST_TEST(
-                GetDataTypeName(connectedInfo.GetDataType()) == GetDataTypeName(m_InputTensorInfos[i].GetDataType()));
-
-            BOOST_TEST(connectedInfo.GetQuantizationScale() == m_InputTensorInfos[i].GetQuantizationScale());
-            BOOST_TEST(connectedInfo.GetQuantizationOffset() == m_InputTensorInfos[i].GetQuantizationOffset());
-        }
-
-        for (unsigned int i = 0; i < m_OutputTensorInfos.size(); i++)
-        {
-            const armnn::TensorInfo& outputInfo = layer->GetOutputSlot(i).GetTensorInfo();
-            BOOST_TEST(outputInfo.GetShape() == m_OutputTensorInfos[i].GetShape());
-            BOOST_TEST(
-                GetDataTypeName(outputInfo.GetDataType()) == GetDataTypeName(m_OutputTensorInfos[i].GetDataType()));
-
-            BOOST_TEST(outputInfo.GetQuantizationScale() == m_OutputTensorInfos[i].GetQuantizationScale());
-            BOOST_TEST(outputInfo.GetQuantizationOffset() == m_OutputTensorInfos[i].GetQuantizationOffset());
-        }
-    }
-
-    void VerifyConstTensors(const std::string& tensorName,
-                            const armnn::ConstTensor* expectedPtr,
-                            const armnn::ConstTensor* actualPtr)
-    {
-        if (expectedPtr == nullptr)
-        {
-            BOOST_CHECK_MESSAGE(actualPtr == nullptr, tensorName + " should not exist");
-        }
-        else
-        {
-            BOOST_CHECK_MESSAGE(actualPtr != nullptr, tensorName + " should have been set");
-            if (actualPtr != nullptr)
-            {
-                const armnn::TensorInfo& expectedInfo = expectedPtr->GetInfo();
-                const armnn::TensorInfo& actualInfo = actualPtr->GetInfo();
-
-                BOOST_CHECK_MESSAGE(expectedInfo.GetShape() == actualInfo.GetShape(),
-                                    tensorName + " shapes don't match");
-                BOOST_CHECK_MESSAGE(
-                        GetDataTypeName(expectedInfo.GetDataType()) == GetDataTypeName(actualInfo.GetDataType()),
-                        tensorName + " data types don't match");
-
-                BOOST_CHECK_MESSAGE(expectedPtr->GetNumBytes() == actualPtr->GetNumBytes(),
-                                    tensorName + " (GetNumBytes) data sizes do not match");
-                if (expectedPtr->GetNumBytes() == actualPtr->GetNumBytes())
-                {
-                    //check the data is identical
-                    const char* expectedData = static_cast<const char*>(expectedPtr->GetMemoryArea());
-                    const char* actualData = static_cast<const char*>(actualPtr->GetMemoryArea());
-                    bool same = true;
-                    for (unsigned int i = 0; i < expectedPtr->GetNumBytes(); ++i)
-                    {
-                        same = expectedData[i] == actualData[i];
-                        if (!same)
-                        {
-                            break;
-                        }
-                    }
-                    BOOST_CHECK_MESSAGE(same, tensorName + " data does not match");
-                }
-            }
-        }
-    }
-
-private:
-    std::string m_LayerName;
-    std::vector<armnn::TensorInfo> m_InputTensorInfos;
-    std::vector<armnn::TensorInfo> m_OutputTensorInfos;
-};
-
-template<typename Descriptor>
-class LayerVerifierBaseWithDescriptor : public LayerVerifierBase
-{
-public:
-    LayerVerifierBaseWithDescriptor(const std::string& layerName,
-                                    const std::vector<armnn::TensorInfo>& inputInfos,
-                                    const std::vector<armnn::TensorInfo>& outputInfos,
-                                    const Descriptor& descriptor)
-        : LayerVerifierBase(layerName, inputInfos, outputInfos)
-        , m_Descriptor(descriptor) {}
-
-protected:
-    void VerifyDescriptor(const Descriptor& descriptor)
-    {
-        BOOST_CHECK(descriptor == m_Descriptor);
-    }
-
-    Descriptor m_Descriptor;
-};
-
-template<typename T>
-void CompareConstTensorData(const void* data1, const void* data2, unsigned int numElements)
-{
-    T typedData1 = static_cast<T>(data1);
-    T typedData2 = static_cast<T>(data2);
-    BOOST_CHECK(typedData1);
-    BOOST_CHECK(typedData2);
-
-    for (unsigned int i = 0; i < numElements; i++)
-    {
-        BOOST_TEST(typedData1[i] == typedData2[i]);
-    }
-}
-
-void CompareConstTensor(const armnn::ConstTensor& tensor1, const armnn::ConstTensor& tensor2)
-{
-    BOOST_TEST(tensor1.GetShape() == tensor2.GetShape());
-    BOOST_TEST(GetDataTypeName(tensor1.GetDataType()) == GetDataTypeName(tensor2.GetDataType()));
-
-    switch (tensor1.GetDataType())
-    {
-        case armnn::DataType::Float32:
-            CompareConstTensorData<const float*>(
-                tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
-            break;
-        case armnn::DataType::QAsymmU8:
-        case armnn::DataType::Boolean:
-            CompareConstTensorData<const uint8_t*>(
-                tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
-            break;
-        case armnn::DataType::QSymmS8:
-            CompareConstTensorData<const int8_t*>(
-                tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
-            break;
-        case armnn::DataType::Signed32:
-            CompareConstTensorData<const int32_t*>(
-                tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
-            break;
-        default:
-            // Note that Float16 is not yet implemented
-            BOOST_TEST_MESSAGE("Unexpected datatype");
-            BOOST_TEST(false);
-    }
-}
-
-armnn::INetworkPtr DeserializeNetwork(const std::string& serializerString)
-{
-    std::vector<std::uint8_t> const serializerVector{serializerString.begin(), serializerString.end()};
-    return IDeserializer::Create()->CreateNetworkFromBinary(serializerVector);
-}
-
-std::string SerializeNetwork(const armnn::INetwork& network)
-{
-    armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
-
-    serializer->Serialize(network);
-
-    std::stringstream stream;
-    serializer->SaveSerializedToStream(stream);
-
-    std::string serializerString{stream.str()};
-    return serializerString;
-}
-
-template<typename DataType>
-static std::vector<DataType> GenerateRandomData(size_t size)
-{
-    constexpr bool isIntegerType = std::is_integral<DataType>::value;
-    using Distribution =
-        typename std::conditional<isIntegerType,
-                                  std::uniform_int_distribution<DataType>,
-                                  std::uniform_real_distribution<DataType>>::type;
-
-    static constexpr DataType lowerLimit = std::numeric_limits<DataType>::min();
-    static constexpr DataType upperLimit = std::numeric_limits<DataType>::max();
-
-    static Distribution distribution(lowerLimit, upperLimit);
-    static std::default_random_engine generator;
-
-    std::vector<DataType> randomData(size);
-    std::generate(randomData.begin(), randomData.end(), []() { return distribution(generator); });
-
-    return randomData;
-}
-
-} // anonymous namespace
-
 BOOST_AUTO_TEST_SUITE(SerializerTests)
 
+BOOST_AUTO_TEST_CASE(SerializeAbs)
+{
+    const std::string layerName("abs");
+    const armnn::TensorInfo tensorInfo({1, 2, 3}, armnn::DataType::Float32);
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+
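+    // AddAbsLayer is deprecated, hence the suppressed deprecation warning
+    // around the call.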
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
+    armnn::IConnectableLayer* const absLayer = network->AddAbsLayer(layerName.c_str());
+    ARMNN_NO_DEPRECATE_WARN_END
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+    inputLayer->GetOutputSlot(0).Connect(absLayer->GetInputSlot(0));
+    absLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+    absLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    LayerVerifierBase verifier(layerName, {tensorInfo}, {tensorInfo});
+    deserializedNetwork->ExecuteStrategy(verifier);
+}
+
 BOOST_AUTO_TEST_CASE(SerializeAddition)
 {
-    DECLARE_LAYER_VERIFIER_CLASS(Addition)
-
     const std::string layerName("addition");
     const armnn::TensorInfo tensorInfo({1, 2, 3}, armnn::DataType::Float32);
 
@@ -294,17 +68,16 @@
     inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
     additionLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    std::string serializedNetwork = SerializeNetwork(*network);
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(serializedNetwork);
     BOOST_CHECK(deserializedNetwork);
 
-    AdditionLayerVerifier verifier(layerName, {tensorInfo, tensorInfo}, {tensorInfo});
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {tensorInfo, tensorInfo}, {tensorInfo});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeArgMinMax)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(ArgMinMax)
-
     const std::string layerName("argminmax");
     const armnn::TensorInfo inputInfo({1, 2, 3}, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({1, 3}, armnn::DataType::Signed32);
@@ -327,54 +100,15 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    ArgMinMaxLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::ArgMinMaxDescriptor> verifier(layerName,
+                                                                         {inputInfo},
+                                                                         {outputInfo},
+                                                                         descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeBatchNormalization)
 {
-    using Descriptor = armnn::BatchNormalizationDescriptor;
-    class BatchNormalizationLayerVerifier : public LayerVerifierBaseWithDescriptor<Descriptor>
-    {
-    public:
-        BatchNormalizationLayerVerifier(const std::string& layerName,
-                                        const std::vector<armnn::TensorInfo>& inputInfos,
-                                        const std::vector<armnn::TensorInfo>& outputInfos,
-                                        const Descriptor& descriptor,
-                                        const armnn::ConstTensor& mean,
-                                        const armnn::ConstTensor& variance,
-                                        const armnn::ConstTensor& beta,
-                                        const armnn::ConstTensor& gamma)
-            : LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor)
-            , m_Mean(mean)
-            , m_Variance(variance)
-            , m_Beta(beta)
-            , m_Gamma(gamma) {}
-
-        void VisitBatchNormalizationLayer(const armnn::IConnectableLayer* layer,
-                                          const Descriptor& descriptor,
-                                          const armnn::ConstTensor& mean,
-                                          const armnn::ConstTensor& variance,
-                                          const armnn::ConstTensor& beta,
-                                          const armnn::ConstTensor& gamma,
-                                          const char* name) override
-        {
-            VerifyNameAndConnections(layer, name);
-            VerifyDescriptor(descriptor);
-
-            CompareConstTensor(mean, m_Mean);
-            CompareConstTensor(variance, m_Variance);
-            CompareConstTensor(beta, m_Beta);
-            CompareConstTensor(gamma, m_Gamma);
-        }
-
-    private:
-        armnn::ConstTensor m_Mean;
-        armnn::ConstTensor m_Variance;
-        armnn::ConstTensor m_Beta;
-        armnn::ConstTensor m_Gamma;
-    };
-
     const std::string layerName("batchNormalization");
     const armnn::TensorInfo inputInfo ({ 1, 3, 3, 1 }, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
@@ -393,15 +127,21 @@
     std::vector<float> betaData({1.0});
     std::vector<float> gammaData({0.0});
 
-    armnn::ConstTensor mean(meanInfo, meanData);
-    armnn::ConstTensor variance(varianceInfo, varianceData);
-    armnn::ConstTensor beta(betaInfo, betaData);
-    armnn::ConstTensor gamma(gammaInfo, gammaData);
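+    // The strategy-based verifier compares constants by position, so this
+    // order must match the order the layer exposes them in:
+    // mean, variance, beta, gamma.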
+    std::vector<armnn::ConstTensor> constants;
+    constants.emplace_back(meanInfo, meanData);
+    constants.emplace_back(varianceInfo, varianceData);
+    constants.emplace_back(betaInfo, betaData);
+    constants.emplace_back(gammaInfo, gammaData);
 
     armnn::INetworkPtr network = armnn::INetwork::Create();
     armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
     armnn::IConnectableLayer* const batchNormalizationLayer =
-        network->AddBatchNormalizationLayer(descriptor, mean, variance, beta, gamma, layerName.c_str());
+        network->AddBatchNormalizationLayer(descriptor,
+                                            constants[0],
+                                            constants[1],
+                                            constants[2],
+                                            constants[3],
+                                            layerName.c_str());
     armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
 
     inputLayer->GetOutputSlot(0).Connect(batchNormalizationLayer->GetInputSlot(0));
@@ -413,15 +153,13 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    BatchNormalizationLayerVerifier verifier(
-        layerName, {inputInfo}, {outputInfo}, descriptor, mean, variance, beta, gamma);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptorAndConstants<armnn::BatchNormalizationDescriptor> verifier(
+        layerName, {inputInfo}, {outputInfo}, descriptor, constants);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeBatchToSpaceNd)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(BatchToSpaceNd)
-
     const std::string layerName("spaceToBatchNd");
     const armnn::TensorInfo inputInfo({4, 1, 2, 2}, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({1, 1, 4, 4}, armnn::DataType::Float32);
@@ -445,14 +183,15 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    BatchToSpaceNdLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::BatchToSpaceNdDescriptor> verifier(layerName,
+                                                                              {inputInfo},
+                                                                              {outputInfo},
+                                                                              desc);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeComparison)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Comparison)
-
     const std::string layerName("comparison");
 
     const armnn::TensorShape shape{2, 1, 2, 4};
@@ -479,8 +218,11 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    ComparisonLayerVerifier verifier(layerName, { inputInfo, inputInfo }, { outputInfo }, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::ComparisonDescriptor> verifier(layerName,
+                                                                          { inputInfo, inputInfo },
+                                                                          { outputInfo },
+                                                                          descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeConstant)
@@ -491,22 +233,37 @@
         ConstantLayerVerifier(const std::string& layerName,
                               const std::vector<armnn::TensorInfo>& inputInfos,
                               const std::vector<armnn::TensorInfo>& outputInfos,
-                              const armnn::ConstTensor& layerInput)
+                              const std::vector<armnn::ConstTensor>& constants)
             : LayerVerifierBase(layerName, inputInfos, outputInfos)
-            , m_LayerInput(layerInput) {}
+            , m_Constants(constants) {}
 
-        void VisitConstantLayer(const armnn::IConnectableLayer* layer,
-                                const armnn::ConstTensor& input,
-                                const char* name) override
+        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                             const armnn::BaseDescriptor& descriptor,
+                             const std::vector<armnn::ConstTensor>& constants,
+                             const char* name,
+                             const armnn::LayerBindingId id = 0) override
         {
-            VerifyNameAndConnections(layer, name);
-            CompareConstTensor(input, m_LayerInput);
+            armnn::IgnoreUnused(descriptor, id);
+
+            switch (layer->GetType())
+            {
+                case armnn::LayerType::Input: break;
+                case armnn::LayerType::Output: break;
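+                // The test network also contains an Addition layer; it is not
+                // the layer under test, so skip it as well.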
+                case armnn::LayerType::Addition: break;
+                default:
+                {
+                    this->VerifyNameAndConnections(layer, name);
+
+                    for (std::size_t i = 0; i < constants.size(); i++)
+                    {
+                        CompareConstTensor(constants[i], m_Constants[i]);
+                    }
+                }
+            }
         }
 
-        void VisitAdditionLayer(const armnn::IConnectableLayer*, const char*) override {}
-
     private:
-        armnn::ConstTensor m_LayerInput;
+        const std::vector<armnn::ConstTensor> m_Constants;
     };
 
     const std::string layerName("constant");
@@ -532,53 +289,12 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    ConstantLayerVerifier verifier(layerName, {}, {info}, constTensor);
-    deserializedNetwork->Accept(verifier);
+    ConstantLayerVerifier verifier(layerName, {}, {info}, {constTensor});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeConvolution2d)
 {
-    using Descriptor = armnn::Convolution2dDescriptor;
-    class Convolution2dLayerVerifier : public LayerVerifierBaseWithDescriptor<Descriptor>
-    {
-    public:
-        Convolution2dLayerVerifier(const std::string& layerName,
-                                   const std::vector<armnn::TensorInfo>& inputInfos,
-                                   const std::vector<armnn::TensorInfo>& outputInfos,
-                                   const Descriptor& descriptor,
-                                   const armnn::ConstTensor& weights,
-                                   const armnn::Optional<armnn::ConstTensor>& biases)
-            : LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor)
-            , m_Weights(weights)
-            , m_Biases(biases) {}
-
-        void VisitConvolution2dLayer(const armnn::IConnectableLayer* layer,
-                                     const Descriptor& descriptor,
-                                     const armnn::ConstTensor& weights,
-                                     const armnn::Optional<armnn::ConstTensor>& biases,
-                                     const char* name) override
-        {
-            VerifyNameAndConnections(layer, name);
-            VerifyDescriptor(descriptor);
-
-            // check weights
-            CompareConstTensor(weights, m_Weights);
-
-            // check biases
-            BOOST_CHECK(biases.has_value() == descriptor.m_BiasEnabled);
-            BOOST_CHECK(biases.has_value() == m_Biases.has_value());
-
-            if (biases.has_value() && m_Biases.has_value())
-            {
-                CompareConstTensor(biases.value(), m_Biases.value());
-            }
-        }
-
-    private:
-        armnn::ConstTensor                  m_Weights;
-        armnn::Optional<armnn::ConstTensor> m_Biases;
-    };
-
     const std::string layerName("convolution2d");
     const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
@@ -622,53 +338,14 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    Convolution2dLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor, weights, biases);
-    deserializedNetwork->Accept(verifier);
+    const std::vector<armnn::ConstTensor> constants {weights, biases};
+    LayerVerifierBaseWithDescriptorAndConstants<armnn::Convolution2dDescriptor> verifier(
+            layerName, {inputInfo}, {outputInfo}, descriptor, constants);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeConvolution2dWithPerAxisParams)
 {
-    using Descriptor = armnn::Convolution2dDescriptor;
-    class Convolution2dLayerVerifier : public LayerVerifierBaseWithDescriptor<Descriptor>
-    {
-    public:
-        Convolution2dLayerVerifier(const std::string& layerName,
-                                   const std::vector<armnn::TensorInfo>& inputInfos,
-                                   const std::vector<armnn::TensorInfo>& outputInfos,
-                                   const Descriptor& descriptor,
-                                   const armnn::ConstTensor& weights,
-                                   const armnn::Optional<armnn::ConstTensor>& biases)
-            : LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor)
-            , m_Weights(weights)
-            , m_Biases(biases) {}
-
-        void VisitConvolution2dLayer(const armnn::IConnectableLayer* layer,
-                                     const Descriptor& descriptor,
-                                     const armnn::ConstTensor& weights,
-                                     const armnn::Optional<armnn::ConstTensor>& biases,
-                                     const char* name) override
-        {
-            VerifyNameAndConnections(layer, name);
-            VerifyDescriptor(descriptor);
-
-            // check weights
-            CompareConstTensor(weights, m_Weights);
-
-            // check biases
-            BOOST_CHECK(biases.has_value() == descriptor.m_BiasEnabled);
-            BOOST_CHECK(biases.has_value() == m_Biases.has_value());
-
-            if (biases.has_value() && m_Biases.has_value())
-            {
-                CompareConstTensor(biases.value(), m_Biases.value());
-            }
-        }
-
-    private:
-        armnn::ConstTensor                  m_Weights;
-        armnn::Optional<armnn::ConstTensor> m_Biases;
-    };
-
     using namespace armnn;
 
     const std::string layerName("convolution2dWithPerAxis");
@@ -716,14 +393,14 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    Convolution2dLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor, weights, biases);
-    deserializedNetwork->Accept(verifier);
+    const std::vector<armnn::ConstTensor> constants {weights, biases};
+    LayerVerifierBaseWithDescriptorAndConstants<Convolution2dDescriptor> verifier(
+            layerName, {inputInfo}, {outputInfo}, descriptor, constants);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeDepthToSpace)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(DepthToSpace)
-
     const std::string layerName("depthToSpace");
 
     const armnn::TensorInfo inputInfo ({ 1,  8, 4, 12 }, armnn::DataType::Float32);
@@ -747,53 +424,12 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    DepthToSpaceLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::DepthToSpaceDescriptor> verifier(layerName, {inputInfo}, {outputInfo}, desc);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeDepthwiseConvolution2d)
 {
-    using Descriptor = armnn::DepthwiseConvolution2dDescriptor;
-    class DepthwiseConvolution2dLayerVerifier : public LayerVerifierBaseWithDescriptor<Descriptor>
-    {
-    public:
-        DepthwiseConvolution2dLayerVerifier(const std::string& layerName,
-                                            const std::vector<armnn::TensorInfo>& inputInfos,
-                                            const std::vector<armnn::TensorInfo>& outputInfos,
-                                            const Descriptor& descriptor,
-                                            const armnn::ConstTensor& weights,
-                                            const armnn::Optional<armnn::ConstTensor>& biases) :
-            LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor),
-            m_Weights(weights),
-            m_Biases(biases) {}
-
-        void VisitDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
-                                              const Descriptor& descriptor,
-                                              const armnn::ConstTensor& weights,
-                                              const armnn::Optional<armnn::ConstTensor>& biases,
-                                              const char* name) override
-        {
-            VerifyNameAndConnections(layer, name);
-            VerifyDescriptor(descriptor);
-
-            // check weights
-            CompareConstTensor(weights, m_Weights);
-
-            // check biases
-            BOOST_CHECK(biases.has_value() == descriptor.m_BiasEnabled);
-            BOOST_CHECK(biases.has_value() == m_Biases.has_value());
-
-            if (biases.has_value() && m_Biases.has_value())
-            {
-                CompareConstTensor(biases.value(), m_Biases.value());
-            }
-        }
-
-    private:
-        armnn::ConstTensor                      m_Weights;
-        armnn::Optional<armnn::ConstTensor>     m_Biases;
-    };
-
     const std::string layerName("depwiseConvolution2d");
     const armnn::TensorInfo inputInfo ({ 1, 5, 5, 3 }, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({ 1, 3, 3, 3 }, armnn::DataType::Float32);
@@ -837,53 +473,14 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    DepthwiseConvolution2dLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor, weights, biases);
-    deserializedNetwork->Accept(verifier);
+    const std::vector<armnn::ConstTensor> constants {weights, biases};
+    LayerVerifierBaseWithDescriptorAndConstants<armnn::DepthwiseConvolution2dDescriptor> verifier(
+            layerName, {inputInfo}, {outputInfo}, descriptor, constants);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeDepthwiseConvolution2dWithPerAxisParams)
 {
-    using Descriptor = armnn::DepthwiseConvolution2dDescriptor;
-    class DepthwiseConvolution2dLayerVerifier : public LayerVerifierBaseWithDescriptor<Descriptor>
-    {
-    public:
-        DepthwiseConvolution2dLayerVerifier(const std::string& layerName,
-                                            const std::vector<armnn::TensorInfo>& inputInfos,
-                                            const std::vector<armnn::TensorInfo>& outputInfos,
-                                            const Descriptor& descriptor,
-                                            const armnn::ConstTensor& weights,
-                                            const armnn::Optional<armnn::ConstTensor>& biases) :
-            LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor),
-            m_Weights(weights),
-            m_Biases(biases) {}
-
-        void VisitDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
-                                              const Descriptor& descriptor,
-                                              const armnn::ConstTensor& weights,
-                                              const armnn::Optional<armnn::ConstTensor>& biases,
-                                              const char* name) override
-        {
-            VerifyNameAndConnections(layer, name);
-            VerifyDescriptor(descriptor);
-
-            // check weights
-            CompareConstTensor(weights, m_Weights);
-
-            // check biases
-            BOOST_CHECK(biases.has_value() == descriptor.m_BiasEnabled);
-            BOOST_CHECK(biases.has_value() == m_Biases.has_value());
-
-            if (biases.has_value() && m_Biases.has_value())
-            {
-                CompareConstTensor(biases.value(), m_Biases.value());
-            }
-        }
-
-    private:
-        armnn::ConstTensor                      m_Weights;
-        armnn::Optional<armnn::ConstTensor>     m_Biases;
-    };
-
     using namespace armnn;
 
     const std::string layerName("depwiseConvolution2dWithPerAxis");
@@ -933,14 +530,14 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    DepthwiseConvolution2dLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor, weights, biases);
-    deserializedNetwork->Accept(verifier);
+    const std::vector<armnn::ConstTensor> constants {weights, biases};
+    LayerVerifierBaseWithDescriptorAndConstants<armnn::DepthwiseConvolution2dDescriptor> verifier(
+            layerName, {inputInfo}, {outputInfo}, descriptor, constants);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeDequantize)
 {
-    DECLARE_LAYER_VERIFIER_CLASS(Dequantize)
-
     const std::string layerName("dequantize");
     const armnn::TensorInfo inputInfo({ 1, 5, 2, 3 }, armnn::DataType::QAsymmU8, 0.5f, 1);
     const armnn::TensorInfo outputInfo({ 1, 5, 2, 3 }, armnn::DataType::Float32);
@@ -959,39 +556,12 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    DequantizeLayerVerifier verifier(layerName, {inputInfo}, {outputInfo});
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {inputInfo}, {outputInfo});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeDeserializeDetectionPostProcess)
 {
-    using Descriptor = armnn::DetectionPostProcessDescriptor;
-    class DetectionPostProcessLayerVerifier : public LayerVerifierBaseWithDescriptor<Descriptor>
-    {
-    public:
-        DetectionPostProcessLayerVerifier(const std::string& layerName,
-                                          const std::vector<armnn::TensorInfo>& inputInfos,
-                                          const std::vector<armnn::TensorInfo>& outputInfos,
-                                          const Descriptor& descriptor,
-                                          const armnn::ConstTensor& anchors)
-            : LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor)
-            , m_Anchors(anchors) {}
-
-        void VisitDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
-                                            const Descriptor& descriptor,
-                                            const armnn::ConstTensor& anchors,
-                                            const char* name) override
-        {
-            VerifyNameAndConnections(layer, name);
-            VerifyDescriptor(descriptor);
-
-            CompareConstTensor(anchors, m_Anchors);
-        }
-
-    private:
-        armnn::ConstTensor m_Anchors;
-    };
-
     const std::string layerName("detectionPostProcess");
 
     const std::vector<armnn::TensorInfo> inputInfos({
@@ -1051,14 +621,14 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    DetectionPostProcessLayerVerifier verifier(layerName, inputInfos, outputInfos, descriptor, anchors);
-    deserializedNetwork->Accept(verifier);
+    const std::vector<armnn::ConstTensor> constants {anchors};
+    LayerVerifierBaseWithDescriptorAndConstants<armnn::DetectionPostProcessDescriptor> verifier(
+            layerName, inputInfos, outputInfos, descriptor, constants);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeDivision)
 {
-    DECLARE_LAYER_VERIFIER_CLASS(Division)
-
     const std::string layerName("division");
     const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);
 
@@ -1079,131 +649,41 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    DivisionLayerVerifier verifier(layerName, {info, info}, {info});
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {info, info}, {info});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-class EqualLayerVerifier : public LayerVerifierBase
+BOOST_AUTO_TEST_CASE(SerializeDeserializeEqual)
 {
-public:
-    EqualLayerVerifier(const std::string& layerName,
-                       const std::vector<armnn::TensorInfo>& inputInfos,
-                       const std::vector<armnn::TensorInfo>& outputInfos)
-        : LayerVerifierBase(layerName, inputInfos, outputInfos) {}
-
-    void VisitComparisonLayer(const armnn::IConnectableLayer* layer,
-                              const armnn::ComparisonDescriptor& descriptor,
-                              const char* name) override
-    {
-        VerifyNameAndConnections(layer, name);
-        BOOST_CHECK(descriptor.m_Operation == armnn::ComparisonOperation::Equal);
-    }
-
-    void VisitEqualLayer(const armnn::IConnectableLayer*, const char*) override
-    {
-        throw armnn::Exception("EqualLayer should have translated to ComparisonLayer");
-    }
-};
-
-// NOTE: Until the deprecated AddEqualLayer disappears this test checks that calling
-//       AddEqualLayer places a ComparisonLayer into the serialized format and that
-//       when this deserialises we have a ComparisonLayer
-BOOST_AUTO_TEST_CASE(SerializeEqual)
-{
-    const std::string layerName("equal");
-
-    const armnn::TensorShape shape{2, 1, 2, 4};
-
-    const armnn::TensorInfo inputInfo  = armnn::TensorInfo(shape, armnn::DataType::Float32);
-    const armnn::TensorInfo outputInfo = armnn::TensorInfo(shape, armnn::DataType::Boolean);
+    const std::string layerName("EqualLayer");
+    const armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({2, 1, 2, 4}, armnn::DataType::Float32);
+    const armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({2, 1, 2, 4}, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({2, 1, 2, 4}, armnn::DataType::Boolean);
 
     armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const inputLayer2 = network->AddInputLayer(1);
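+    // AddEqualLayer is deprecated; it is expected to serialize, and deserialize back,
+    // as a ComparisonLayer with ComparisonOperation::Equal.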
     ARMNN_NO_DEPRECATE_WARN_BEGIN
     armnn::IConnectableLayer* const equalLayer = network->AddEqualLayer(layerName.c_str());
     ARMNN_NO_DEPRECATE_WARN_END
     armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
 
-    inputLayer0->GetOutputSlot(0).Connect(equalLayer->GetInputSlot(0));
-    inputLayer1->GetOutputSlot(0).Connect(equalLayer->GetInputSlot(1));
+    inputLayer1->GetOutputSlot(0).Connect(equalLayer->GetInputSlot(0));
+    inputLayer1->GetOutputSlot(0).SetTensorInfo(inputTensorInfo1);
+    inputLayer2->GetOutputSlot(0).Connect(equalLayer->GetInputSlot(1));
+    inputLayer2->GetOutputSlot(0).SetTensorInfo(inputTensorInfo2);
     equalLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-
-    inputLayer0->GetOutputSlot(0).SetTensorInfo(inputInfo);
-    inputLayer1->GetOutputSlot(0).SetTensorInfo(inputInfo);
-    equalLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+    equalLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    EqualLayerVerifier verifier(layerName, { inputInfo, inputInfo }, { outputInfo });
-    deserializedNetwork->Accept(verifier);
-}
-
-BOOST_AUTO_TEST_CASE(EnsureEqualBackwardCompatibility)
-{
-    // The hex data below is a flat buffer containing a simple network with two inputs,
-    // an EqualLayer (now deprecated) and an output
-    //
-    // This test verifies that we can still deserialize this old-style model by replacing
-    // the EqualLayer with an equivalent ComparisonLayer
-    const std::vector<uint8_t> equalModel =
-    {
-        0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x10, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x0A, 0x00,
-        0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
-        0xCC, 0x01, 0x00, 0x00, 0x20, 0x01, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x02, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
-        0x60, 0xFE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x0B, 0x04, 0x00, 0x00, 0x00, 0xFE, 0xFE, 0xFF, 0xFF, 0x04, 0x00,
-        0x00, 0x00, 0x06, 0xFF, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0xEA, 0xFE, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00,
-        0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x64, 0xFF, 0xFF, 0xFF, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB4, 0xFE, 0xFF, 0xFF, 0x00, 0x00,
-        0x00, 0x13, 0x04, 0x00, 0x00, 0x00, 0x52, 0xFF, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x36, 0xFF, 0xFF, 0xFF,
-        0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x1C, 0x00,
-        0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x65, 0x71, 0x75, 0x61, 0x6C, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
-        0x5C, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x34, 0xFF,
-        0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x92, 0xFE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x04, 0x08, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00,
-        0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x08, 0x00, 0x10, 0x00, 0x04, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00,
-        0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x00, 0x00,
-        0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0E, 0x00,
-        0x07, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x06, 0x00, 0x08, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0E, 0x00,
-        0x04, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x0E, 0x00, 0x18, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x10, 0x00, 0x14, 0x00, 0x0E, 0x00, 0x00, 0x00,
-        0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-        0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00,
-        0x00, 0x00, 0x66, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x04, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00,
-        0x00, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x07, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09,
-        0x04, 0x00, 0x00, 0x00, 0xF6, 0xFF, 0xFF, 0xFF, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x0A, 0x00,
-        0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x14, 0x00, 0x00, 0x00,
-        0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x10, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00,
-        0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0A, 0x00, 0x00, 0x00,
-        0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x10, 0x00, 0x08, 0x00,
-        0x07, 0x00, 0x0C, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
-        0x04, 0x00, 0x00, 0x00
-    };
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(std::string(equalModel.begin(), equalModel.end()));
-    BOOST_CHECK(deserializedNetwork);
-
-    const armnn::TensorShape shape{ 2, 1, 2, 4 };
-
-    const armnn::TensorInfo inputInfo  = armnn::TensorInfo(shape, armnn::DataType::Float32);
-    const armnn::TensorInfo outputInfo = armnn::TensorInfo(shape, armnn::DataType::Boolean);
-
-    EqualLayerVerifier verifier("equal", { inputInfo, inputInfo }, { outputInfo });
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {inputTensorInfo1, inputTensorInfo2}, {outputTensorInfo});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeFill)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Fill)
-
     const std::string layerName("fill");
     const armnn::TensorInfo inputInfo({4}, armnn::DataType::Signed32);
     const armnn::TensorInfo outputInfo({1, 3, 3, 1}, armnn::DataType::Float32);
@@ -1224,15 +704,13 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    FillLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
+    LayerVerifierBaseWithDescriptor<armnn::FillDescriptor> verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
 
-    deserializedNetwork->Accept(verifier);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeFloor)
 {
-    DECLARE_LAYER_VERIFIER_CLASS(Floor)
-
     const std::string layerName("floor");
     const armnn::TensorInfo info({4,4}, armnn::DataType::Float32);
 
@@ -1250,51 +728,12 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    FloorLayerVerifier verifier(layerName, {info}, {info});
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {info}, {info});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeFullyConnected)
 {
-    using Descriptor = armnn::FullyConnectedDescriptor;
-    class FullyConnectedLayerVerifier : public LayerVerifierBaseWithDescriptor<Descriptor>
-    {
-    public:
-        FullyConnectedLayerVerifier(const std::string& layerName,
-                                    const std::vector<armnn::TensorInfo>& inputInfos,
-                                    const std::vector<armnn::TensorInfo>& outputInfos,
-                                    const Descriptor& descriptor,
-                                    const armnn::ConstTensor& weight,
-                                    const armnn::Optional<armnn::ConstTensor>& bias)
-            : LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor)
-            , m_Weight(weight)
-            , m_Bias(bias) {}
-
-        void VisitFullyConnectedLayer(const armnn::IConnectableLayer* layer,
-                                      const Descriptor& descriptor,
-                                      const armnn::ConstTensor& weight,
-                                      const armnn::Optional<armnn::ConstTensor>& bias,
-                                      const char* name) override
-        {
-            VerifyNameAndConnections(layer, name);
-            VerifyDescriptor(descriptor);
-
-            CompareConstTensor(weight, m_Weight);
-
-            BOOST_TEST(bias.has_value() == descriptor.m_BiasEnabled);
-            BOOST_TEST(bias.has_value() == m_Bias.has_value());
-
-            if (bias.has_value() && m_Bias.has_value())
-            {
-                CompareConstTensor(bias.value(), m_Bias.value());
-            }
-        }
-
-    private:
-        armnn::ConstTensor m_Weight;
-        armnn::Optional<armnn::ConstTensor> m_Bias;
-    };
-
     const std::string layerName("fullyConnected");
     const armnn::TensorInfo inputInfo ({ 2, 5, 1, 1 }, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({ 2, 3 }, armnn::DataType::Float32);
@@ -1328,8 +767,10 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    FullyConnectedLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor, weights, biases);
-    deserializedNetwork->Accept(verifier);
+    const std::vector<armnn::ConstTensor> constants {weights, biases};
+    LayerVerifierBaseWithDescriptorAndConstants<armnn::FullyConnectedDescriptor> verifier(
+            layerName, {inputInfo}, {outputInfo}, descriptor, constants);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeGather)
@@ -1344,17 +785,26 @@
                             const GatherDescriptor& descriptor)
             : LayerVerifierBaseWithDescriptor<GatherDescriptor>(layerName, inputInfos, outputInfos, descriptor) {}
 
-        void VisitGatherLayer(const armnn::IConnectableLayer* layer,
-                              const GatherDescriptor& descriptor,
-                              const char *name) override
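+        // With the unified IStrategy interface a single ExecuteStrategy override replaces
+        // the per-layer Visit functions: Input, Output and Constant layers are skipped,
+        // and the generic BaseDescriptor is downcast to the expected GatherDescriptor.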
+        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                             const armnn::BaseDescriptor& descriptor,
+                             const std::vector<armnn::ConstTensor>& constants,
+                             const char* name,
+                             const armnn::LayerBindingId id = 0) override
         {
-            VerifyNameAndConnections(layer, name);
-            BOOST_CHECK(descriptor.m_Axis == m_Descriptor.m_Axis);
+            armnn::IgnoreUnused(constants, id);
+            switch (layer->GetType())
+            {
+                case armnn::LayerType::Input: break;
+                case armnn::LayerType::Output: break;
+                case armnn::LayerType::Constant: break;
+                default:
+                {
+                    VerifyNameAndConnections(layer, name);
+                    const GatherDescriptor& layerDescriptor = static_cast<const GatherDescriptor&>(descriptor);
+                    BOOST_CHECK(layerDescriptor.m_Axis == m_Descriptor.m_Axis);
+                }
+            }
         }
-
-        void VisitConstantLayer(const armnn::IConnectableLayer*,
-                                const armnn::ConstTensor&,
-                                const char*) override {}
     };
 
     const std::string layerName("gather");
@@ -1390,35 +840,14 @@
     BOOST_CHECK(deserializedNetwork);
 
     GatherLayerVerifier verifier(layerName, {paramsInfo, indicesInfo}, {outputInfo}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-class GreaterLayerVerifier : public LayerVerifierBase
-{
-public:
-    GreaterLayerVerifier(const std::string& layerName,
-                         const std::vector<armnn::TensorInfo>& inputInfos,
-                         const std::vector<armnn::TensorInfo>& outputInfos)
-        : LayerVerifierBase(layerName, inputInfos, outputInfos) {}
-
-    void VisitComparisonLayer(const armnn::IConnectableLayer* layer,
-                              const armnn::ComparisonDescriptor& descriptor,
-                              const char* name) override
-    {
-        VerifyNameAndConnections(layer, name);
-        BOOST_CHECK(descriptor.m_Operation == armnn::ComparisonOperation::Greater);
-    }
-
-    void VisitGreaterLayer(const armnn::IConnectableLayer*, const char*) override
-    {
-        throw armnn::Exception("GreaterLayer should have translated to ComparisonLayer");
-    }
-};
 
 // NOTE: Until the deprecated AddGreaterLayer disappears, this test checks that calling
 //       AddGreaterLayer places a ComparisonLayer into the serialized format and that,
 //       when this deserializes, we get a ComparisonLayer
-BOOST_AUTO_TEST_CASE(SerializeGreater)
+BOOST_AUTO_TEST_CASE(SerializeGreaterDeprecated)
 {
     const std::string layerName("greater");
 
@@ -1446,74 +875,13 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    GreaterLayerVerifier verifier(layerName, { inputInfo, inputInfo }, { outputInfo });
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, { inputInfo, inputInfo }, { outputInfo });
+    deserializedNetwork->ExecuteStrategy(verifier);
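+
+    // For reference, the non-deprecated route builds the ComparisonLayer directly.
+    // A minimal sketch using the existing armnn API:
+    //     armnn::ComparisonDescriptor comparisonDescriptor(armnn::ComparisonOperation::Greater);
+    //     network->AddComparisonLayer(comparisonDescriptor, layerName.c_str());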
 }
 
-BOOST_AUTO_TEST_CASE(EnsureGreaterBackwardCompatibility)
-{
-    // The hex data below is a flat buffer containing a simple network with two inputs,
-    // an GreaterLayer (now deprecated) and an output
-    //
-    // This test verifies that we can still deserialize this old-style model by replacing
-    // the GreaterLayer with an equivalent ComparisonLayer
-    const std::vector<uint8_t> greaterModel =
-    {
-        0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x10, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x0A, 0x00,
-        0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
-        0xCC, 0x01, 0x00, 0x00, 0x20, 0x01, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x02, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
-        0x60, 0xFE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x0B, 0x04, 0x00, 0x00, 0x00, 0xFE, 0xFE, 0xFF, 0xFF, 0x04, 0x00,
-        0x00, 0x00, 0x06, 0xFF, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0xEA, 0xFE, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00,
-        0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x64, 0xFF, 0xFF, 0xFF, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB4, 0xFE, 0xFF, 0xFF, 0x00, 0x00,
-        0x00, 0x19, 0x04, 0x00, 0x00, 0x00, 0x52, 0xFF, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x36, 0xFF, 0xFF, 0xFF,
-        0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x1C, 0x00,
-        0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x67, 0x72, 0x65, 0x61, 0x74, 0x65, 0x72, 0x00, 0x02, 0x00, 0x00, 0x00,
-        0x5C, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x34, 0xFF,
-        0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x92, 0xFE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x04, 0x08, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00,
-        0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x08, 0x00, 0x10, 0x00, 0x04, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00,
-        0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x00, 0x00,
-        0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0E, 0x00,
-        0x07, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x06, 0x00, 0x08, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0E, 0x00,
-        0x04, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x0E, 0x00, 0x18, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x10, 0x00, 0x14, 0x00, 0x0E, 0x00, 0x00, 0x00,
-        0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-        0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00,
-        0x00, 0x00, 0x66, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00,
-        0x00, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x07, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09,
-        0x04, 0x00, 0x00, 0x00, 0xF6, 0xFF, 0xFF, 0xFF, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x0A, 0x00,
-        0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x14, 0x00, 0x00, 0x00,
-        0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x10, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00,
-        0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0A, 0x00, 0x00, 0x00,
-        0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x10, 0x00, 0x08, 0x00,
-        0x07, 0x00, 0x0C, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
-        0x02, 0x00, 0x00, 0x00
-    };
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(std::string(greaterModel.begin(), greaterModel.end()));
-    BOOST_CHECK(deserializedNetwork);
-
-    const armnn::TensorShape shape{ 1, 2, 2, 2 };
-
-    const armnn::TensorInfo inputInfo  = armnn::TensorInfo(shape, armnn::DataType::Float32);
-    const armnn::TensorInfo outputInfo = armnn::TensorInfo(shape, armnn::DataType::Boolean);
-
-    GreaterLayerVerifier verifier("greater", { inputInfo, inputInfo }, { outputInfo });
-    deserializedNetwork->Accept(verifier);
-}
 
 BOOST_AUTO_TEST_CASE(SerializeInstanceNormalization)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(InstanceNormalization)
-
     const std::string layerName("instanceNormalization");
     const armnn::TensorInfo info({ 1, 2, 1, 5 }, armnn::DataType::Float32);
 
@@ -1538,12 +906,11 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    InstanceNormalizationLayerVerifier verifier(layerName, {info}, {info}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::InstanceNormalizationDescriptor> verifier(
+            layerName, {info}, {info}, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(L2Normalization)
-
 BOOST_AUTO_TEST_CASE(SerializeL2Normalization)
 {
     const std::string l2NormLayerName("l2Normalization");
@@ -1567,8 +934,9 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    L2NormalizationLayerVerifier verifier(l2NormLayerName, {info}, {info}, desc);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::L2NormalizationDescriptor> verifier(
+            l2NormLayerName, {info}, {info}, desc);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(EnsureL2NormalizationBackwardCompatibility)
@@ -1623,14 +991,13 @@
     // Since this variable does not exist in the l2NormalizationModel dump, the default value will be loaded
     desc.m_Eps = 1e-12f;
 
-    L2NormalizationLayerVerifier verifier(layerName, {inputInfo}, {inputInfo}, desc);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::L2NormalizationDescriptor> verifier(
+            layerName, {inputInfo}, {inputInfo}, desc);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeLogicalBinary)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(LogicalBinary)
-
     const std::string layerName("logicalBinaryAnd");
 
     const armnn::TensorShape shape{2, 1, 2, 2};
@@ -1657,14 +1024,13 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    LogicalBinaryLayerVerifier verifier(layerName, { inputInfo, inputInfo }, { outputInfo }, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::LogicalBinaryDescriptor> verifier(
+            layerName, { inputInfo, inputInfo }, { outputInfo }, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeLogicalUnary)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(ElementwiseUnary)
-
     const std::string layerName("elementwiseUnaryLogicalNot");
 
     const armnn::TensorShape shape{2, 1, 2, 2};
@@ -1690,15 +1056,14 @@
 
     BOOST_CHECK(deserializedNetwork);
 
-    ElementwiseUnaryLayerVerifier verifier(layerName, { inputInfo }, { outputInfo }, descriptor);
+    LayerVerifierBaseWithDescriptor<armnn::ElementwiseUnaryDescriptor> verifier(
+            layerName, { inputInfo }, { outputInfo }, descriptor);
 
-    deserializedNetwork->Accept(verifier);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeLogSoftmax)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(LogSoftmax)
-
     const std::string layerName("log_softmax");
     const armnn::TensorInfo info({1, 10}, armnn::DataType::Float32);
 
@@ -1720,14 +1085,12 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    LogSoftmaxLayerVerifier verifier(layerName, {info}, {info}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::LogSoftmaxDescriptor> verifier(layerName, {info}, {info}, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeMaximum)
 {
-    DECLARE_LAYER_VERIFIER_CLASS(Maximum)
-
     const std::string layerName("maximum");
     const armnn::TensorInfo info({ 1, 2, 2, 3 }, armnn::DataType::Float32);
 
@@ -1748,14 +1111,12 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    MaximumLayerVerifier verifier(layerName, {info, info}, {info});
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {info, info}, {info});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeMean)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Mean)
-
     const std::string layerName("mean");
     const armnn::TensorInfo inputInfo({1, 1, 3, 2}, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({1, 1, 1, 2}, armnn::DataType::Float32);
@@ -1778,14 +1139,12 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    MeanLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::MeanDescriptor> verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeMerge)
 {
-    DECLARE_LAYER_VERIFIER_CLASS(Merge)
-
     const std::string layerName("merge");
     const armnn::TensorInfo info({ 1, 2, 2, 3 }, armnn::DataType::Float32);
 
@@ -1806,8 +1165,8 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    MergeLayerVerifier verifier(layerName, {info, info}, {info});
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {info, info}, {info});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 class MergerLayerVerifier : public LayerVerifierBaseWithDescriptor<armnn::OriginsDescriptor>
@@ -1819,19 +1178,35 @@
                         const armnn::OriginsDescriptor& descriptor)
         : LayerVerifierBaseWithDescriptor<armnn::OriginsDescriptor>(layerName, inputInfos, outputInfos, descriptor) {}
 
-    void VisitMergerLayer(const armnn::IConnectableLayer*,
-                          const armnn::OriginsDescriptor&,
-                          const char*) override
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
     {
-        throw armnn::Exception("MergerLayer should have translated to ConcatLayer");
-    }
-
-    void VisitConcatLayer(const armnn::IConnectableLayer* layer,
-                          const armnn::OriginsDescriptor& descriptor,
-                          const char* name) override
-    {
-        VerifyNameAndConnections(layer, name);
-        VerifyDescriptor(descriptor);
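+        // MergerLayer is deprecated: a serialized Merger must come back as a Concat layer,
+        // so the strategy dispatches on the runtime layer type and fails on Merge.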
+        armnn::IgnoreUnused(constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Input: break;
+            case armnn::LayerType::Output: break;
+            case armnn::LayerType::Merge:
+            {
+                throw armnn::Exception("MergerLayer should have translated to ConcatLayer");
+            }
+            case armnn::LayerType::Concat:
+            {
+                VerifyNameAndConnections(layer, name);
+                const armnn::OriginsDescriptor& layerDescriptor =
+                        static_cast<const armnn::OriginsDescriptor&>(descriptor);
+                VerifyDescriptor(layerDescriptor);
+                break;
+            }
+            default:
+            {
+                throw armnn::Exception("Unexpected layer type in Merge test model");
+            }
+        }
     }
 };
 
@@ -1870,7 +1245,7 @@
     BOOST_CHECK(deserializedNetwork);
 
     MergerLayerVerifier verifier(layerName, {inputInfo, inputInfo}, {outputInfo}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(EnsureMergerLayerBackwardCompatibility)
@@ -1939,7 +1314,7 @@
             armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 0);
 
     MergerLayerVerifier verifier("merger", { inputInfo, inputInfo }, { outputInfo }, descriptor);
-    deserializedNetwork->Accept(verifier);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeConcat)
@@ -1974,13 +1349,11 @@
     // NOTE: using the MergerLayerVerifier to ensure that it is a concat layer and not a
     //       merger layer that gets placed into the graph.
     MergerLayerVerifier verifier(layerName, {inputInfo, inputInfo}, {outputInfo}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeMinimum)
 {
-    DECLARE_LAYER_VERIFIER_CLASS(Minimum)
-
     const std::string layerName("minimum");
     const armnn::TensorInfo info({ 1, 2, 2, 3 }, armnn::DataType::Float32);
 
@@ -2001,14 +1374,12 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    MinimumLayerVerifier verifier(layerName, {info, info}, {info});
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {info, info}, {info});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeMultiplication)
 {
-    DECLARE_LAYER_VERIFIER_CLASS(Multiplication)
-
     const std::string layerName("multiplication");
     const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);
 
@@ -2029,14 +1400,12 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    MultiplicationLayerVerifier verifier(layerName, {info, info}, {info});
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {info, info}, {info});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializePrelu)
 {
-    DECLARE_LAYER_VERIFIER_CLASS(Prelu)
-
     const std::string layerName("prelu");
 
     armnn::TensorInfo inputTensorInfo ({ 4, 1, 2 }, armnn::DataType::Float32);
@@ -2060,14 +1429,12 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    PreluLayerVerifier verifier(layerName, {inputTensorInfo, alphaTensorInfo}, {outputTensorInfo});
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {inputTensorInfo, alphaTensorInfo}, {outputTensorInfo});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeNormalization)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Normalization)
-
     const std::string layerName("normalization");
     const armnn::TensorInfo info({2, 1, 2, 2}, armnn::DataType::Float32);
 
@@ -2092,12 +1459,10 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    NormalizationLayerVerifier verifier(layerName, {info}, {info}, desc);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::NormalizationDescriptor> verifier(layerName, {info}, {info}, desc);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Pad)
-
 BOOST_AUTO_TEST_CASE(SerializePad)
 {
     const std::string layerName("pad");
@@ -2120,8 +1485,11 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    PadLayerVerifier verifier(layerName, {inputTensorInfo}, {outputTensorInfo}, desc);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::PadDescriptor> verifier(layerName,
+                                                                   {inputTensorInfo},
+                                                                   {outputTensorInfo},
+                                                                   desc);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(EnsurePadBackwardCompatibility)
@@ -2174,14 +1542,12 @@
 
     armnn::PadDescriptor descriptor({{ 0, 0 }, { 1, 0 }, { 1, 1 }, { 1, 2 }});
 
-    PadLayerVerifier verifier("pad", { inputInfo }, { outputInfo }, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::PadDescriptor> verifier("pad", { inputInfo }, { outputInfo }, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializePermute)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Permute)
-
     const std::string layerName("permute");
     const armnn::TensorInfo inputTensorInfo({4, 3, 2, 1}, armnn::DataType::Float32);
     const armnn::TensorInfo outputTensorInfo({1, 2, 3, 4}, armnn::DataType::Float32);
@@ -2202,14 +1568,13 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    PermuteLayerVerifier verifier(layerName, {inputTensorInfo}, {outputTensorInfo}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::PermuteDescriptor> verifier(
+            layerName, {inputTensorInfo}, {outputTensorInfo}, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializePooling2d)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Pooling2d)
-
     const std::string layerName("pooling2d");
     const armnn::TensorInfo inputInfo({1, 2, 2, 1}, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({1, 1, 1, 1}, armnn::DataType::Float32);
@@ -2242,14 +1607,13 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    Pooling2dLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::Pooling2dDescriptor> verifier(
+            layerName, {inputInfo}, {outputInfo}, desc);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeQuantize)
 {
-    DECLARE_LAYER_VERIFIER_CLASS(Quantize)
-
     const std::string layerName("quantize");
     const armnn::TensorInfo info({ 1, 2, 2, 3 }, armnn::DataType::Float32);
 
@@ -2267,14 +1631,12 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    QuantizeLayerVerifier verifier(layerName, {info}, {info});
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {info}, {info});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeRank)
 {
-    DECLARE_LAYER_VERIFIER_CLASS(Rank)
-
     const std::string layerName("rank");
     const armnn::TensorInfo inputInfo({1, 9}, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({1}, armnn::DataType::Signed32);
@@ -2293,14 +1655,12 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    RankLayerVerifier verifier(layerName, {inputInfo}, {outputInfo});
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {inputInfo}, {outputInfo});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeReduceSum)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Reduce)
-
     const std::string layerName("Reduce_Sum");
     const armnn::TensorInfo inputInfo({1, 1, 3, 2}, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({1, 1, 1, 2}, armnn::DataType::Float32);
@@ -2323,14 +1683,12 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    ReduceLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::ReduceDescriptor> verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeReshape)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Reshape)
-
     const std::string layerName("reshape");
     const armnn::TensorInfo inputInfo({1, 9}, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({3, 3}, armnn::DataType::Float32);
@@ -2351,14 +1709,13 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    ReshapeLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::ReshapeDescriptor> verifier(
+            layerName, {inputInfo}, {outputInfo}, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeResize)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Resize)
-
     const std::string layerName("resize");
     const armnn::TensorInfo inputInfo  = armnn::TensorInfo({1, 3, 5, 5}, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo = armnn::TensorInfo({1, 3, 2, 4}, armnn::DataType::Float32);
@@ -2384,8 +1741,8 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    ResizeLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::ResizeDescriptor> verifier(layerName, {inputInfo}, {outputInfo}, desc);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 class ResizeBilinearLayerVerifier : public LayerVerifierBaseWithDescriptor<armnn::ResizeBilinearDescriptor>
@@ -2398,25 +1755,36 @@
         : LayerVerifierBaseWithDescriptor<armnn::ResizeBilinearDescriptor>(
             layerName, inputInfos, outputInfos, descriptor) {}
 
-    void VisitResizeLayer(const armnn::IConnectableLayer* layer,
-                          const armnn::ResizeDescriptor& descriptor,
-                          const char* name) override
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
     {
-        VerifyNameAndConnections(layer, name);
-
-        BOOST_CHECK(descriptor.m_Method             == armnn::ResizeMethod::Bilinear);
-        BOOST_CHECK(descriptor.m_TargetWidth        == m_Descriptor.m_TargetWidth);
-        BOOST_CHECK(descriptor.m_TargetHeight       == m_Descriptor.m_TargetHeight);
-        BOOST_CHECK(descriptor.m_DataLayout         == m_Descriptor.m_DataLayout);
-        BOOST_CHECK(descriptor.m_AlignCorners       == m_Descriptor.m_AlignCorners);
-        BOOST_CHECK(descriptor.m_HalfPixelCenters   == m_Descriptor.m_HalfPixelCenters);
-    }
-
-    void VisitResizeBilinearLayer(const armnn::IConnectableLayer*,
-                                  const armnn::ResizeBilinearDescriptor&,
-                                  const char*) override
-    {
-        throw armnn::Exception("ResizeBilinearLayer should have translated to ResizeLayer");
+        armnn::IgnoreUnused(constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Input: break;
+            case armnn::LayerType::Output: break;
+            case armnn::LayerType::Resize:
+            {
+                VerifyNameAndConnections(layer, name);
+                const armnn::ResizeDescriptor& layerDescriptor =
+                        static_cast<const armnn::ResizeDescriptor&>(descriptor);
+                BOOST_CHECK(layerDescriptor.m_Method             == armnn::ResizeMethod::Bilinear);
+                BOOST_CHECK(layerDescriptor.m_TargetWidth        == m_Descriptor.m_TargetWidth);
+                BOOST_CHECK(layerDescriptor.m_TargetHeight       == m_Descriptor.m_TargetHeight);
+                BOOST_CHECK(layerDescriptor.m_DataLayout         == m_Descriptor.m_DataLayout);
+                BOOST_CHECK(layerDescriptor.m_AlignCorners       == m_Descriptor.m_AlignCorners);
+                BOOST_CHECK(layerDescriptor.m_HalfPixelCenters   == m_Descriptor.m_HalfPixelCenters);
+                break;
+            }
+            default:
+            {
+                throw armnn::Exception("Unexpected layer type in test model. ResizeBiliniar "
+                                       "should have translated to Resize");
+            }
+        }
     }
 };
 
@@ -2452,7 +1820,7 @@
     BOOST_CHECK(deserializedNetwork);
 
     ResizeBilinearLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc);
-    deserializedNetwork->Accept(verifier);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(EnsureResizeBilinearBackwardCompatibility)
@@ -2508,13 +1876,11 @@
     descriptor.m_TargetHeight = 2u;
 
     ResizeBilinearLayerVerifier verifier("resizeBilinear", { inputInfo }, { outputInfo }, descriptor);
-    deserializedNetwork->Accept(verifier);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeSlice)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Slice)
-
     const std::string layerName{"slice"};
 
     const armnn::TensorInfo inputInfo  = armnn::TensorInfo({3, 2, 3, 1}, armnn::DataType::Float32);
@@ -2537,14 +1903,12 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    SliceLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::SliceDescriptor> verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeSoftmax)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Softmax)
-
     const std::string layerName("softmax");
     const armnn::TensorInfo info({1, 10}, armnn::DataType::Float32);
 
@@ -2565,14 +1929,12 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    SoftmaxLayerVerifier verifier(layerName, {info}, {info}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::SoftmaxDescriptor> verifier(layerName, {info}, {info}, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeSpaceToBatchNd)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(SpaceToBatchNd)
-
     const std::string layerName("spaceToBatchNd");
     const armnn::TensorInfo inputInfo({2, 1, 2, 4}, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({8, 1, 1, 3}, armnn::DataType::Float32);
@@ -2596,14 +1958,13 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    SpaceToBatchNdLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::SpaceToBatchNdDescriptor> verifier(
+            layerName, {inputInfo}, {outputInfo}, desc);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeSpaceToDepth)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(SpaceToDepth)
-
     const std::string layerName("spaceToDepth");
 
     const armnn::TensorInfo inputInfo ({ 1, 16, 8,  3 }, armnn::DataType::Float32);
@@ -2627,14 +1988,13 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    SpaceToDepthLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::SpaceToDepthDescriptor> verifier(
+            layerName, {inputInfo}, {outputInfo}, desc);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeSplitter)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Splitter)
-
     const unsigned int numViews = 3;
     const unsigned int numDimensions = 4;
     const unsigned int inputShape[] = {1, 18, 4, 4};
@@ -2682,14 +2042,13 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    SplitterLayerVerifier verifier(layerName, {inputInfo}, {outputInfo, outputInfo, outputInfo}, desc);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::ViewsDescriptor> verifier(
+            layerName, {inputInfo}, {outputInfo, outputInfo, outputInfo}, desc);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeStack)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Stack)
-
     const std::string layerName("stack");
 
     armnn::TensorInfo inputTensorInfo ({4, 3, 5}, armnn::DataType::Float32);
@@ -2714,14 +2073,13 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    StackLayerVerifier verifier(layerName, {inputTensorInfo, inputTensorInfo}, {outputTensorInfo}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::StackDescriptor> verifier(
+            layerName, {inputTensorInfo, inputTensorInfo}, {outputTensorInfo}, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeStandIn)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(StandIn)
-
     const std::string layerName("standIn");
 
     armnn::TensorInfo tensorInfo({ 1u }, armnn::DataType::Float32);
@@ -2749,14 +2107,13 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    StandInLayerVerifier verifier(layerName, { tensorInfo, tensorInfo }, { tensorInfo, tensorInfo }, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::StandInDescriptor> verifier(
+            layerName, { tensorInfo, tensorInfo }, { tensorInfo, tensorInfo }, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeStridedSlice)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(StridedSlice)
-
     const std::string layerName("stridedSlice");
     const armnn::TensorInfo inputInfo = armnn::TensorInfo({3, 2, 3, 1}, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo = armnn::TensorInfo({3, 1}, armnn::DataType::Float32);
@@ -2780,14 +2137,13 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    StridedSliceLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::StridedSliceDescriptor> verifier(
+            layerName, {inputInfo}, {outputInfo}, desc);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeSubtraction)
 {
-    DECLARE_LAYER_VERIFIER_CLASS(Subtraction)
-
     const std::string layerName("subtraction");
     const armnn::TensorInfo info({ 1, 4 }, armnn::DataType::Float32);
 
@@ -2808,8 +2164,8 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    SubtractionLayerVerifier verifier(layerName, {info, info}, {info});
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {info, info}, {info});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeSwitch)
@@ -2820,16 +2176,31 @@
         SwitchLayerVerifier(const std::string& layerName,
                             const std::vector<armnn::TensorInfo>& inputInfos,
                             const std::vector<armnn::TensorInfo>& outputInfos)
-            : LayerVerifierBase(layerName, inputInfos, outputInfos) {}
+                : LayerVerifierBase(layerName, inputInfos, outputInfos) {}
 
-        void VisitSwitchLayer(const armnn::IConnectableLayer* layer, const char* name) override
+        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                             const armnn::BaseDescriptor& descriptor,
+                             const std::vector<armnn::ConstTensor>& constants,
+                             const char* name,
+                             const armnn::LayerBindingId id = 0) override
         {
-            VerifyNameAndConnections(layer, name);
+            armnn::IgnoreUnused(descriptor, constants, id);
+            switch (layer->GetType())
+            {
+                case armnn::LayerType::Input: break;
+                case armnn::LayerType::Output: break;
+                case armnn::LayerType::Constant: break;
+                case armnn::LayerType::Switch:
+                {
+                    VerifyNameAndConnections(layer, name);
+                    break;
+                }
+                default:
+                {
+                    throw armnn::Exception("Unexpected layer type in Switch test model");
+                }
+            }
         }
-
-        void VisitConstantLayer(const armnn::IConnectableLayer*,
-                                const armnn::ConstTensor&,
-                                const char*) override {}
     };
 
     const std::string layerName("switch");
@@ -2859,13 +2230,11 @@
     BOOST_CHECK(deserializedNetwork);
 
     SwitchLayerVerifier verifier(layerName, {info, info}, {info, info});
-    deserializedNetwork->Accept(verifier);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeTranspose)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Transpose)
-
     const std::string layerName("transpose");
     const armnn::TensorInfo inputTensorInfo({4, 3, 2, 1}, armnn::DataType::Float32);
     const armnn::TensorInfo outputTensorInfo({1, 2, 3, 4}, armnn::DataType::Float32);
@@ -2886,54 +2255,13 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    TransposeLayerVerifier verifier(layerName, {inputTensorInfo}, {outputTensorInfo}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::TransposeDescriptor> verifier(
+            layerName, {inputTensorInfo}, {outputTensorInfo}, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_CASE(SerializeTransposeConvolution2d)
 {
-    using Descriptor = armnn::TransposeConvolution2dDescriptor;
-    class TransposeConvolution2dLayerVerifier : public LayerVerifierBaseWithDescriptor<Descriptor>
-    {
-    public:
-        TransposeConvolution2dLayerVerifier(const std::string& layerName,
-                                            const std::vector<armnn::TensorInfo>& inputInfos,
-                                            const std::vector<armnn::TensorInfo>& outputInfos,
-                                            const Descriptor& descriptor,
-                                            const armnn::ConstTensor& weights,
-                                            const armnn::Optional<armnn::ConstTensor>& biases)
-            : LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor)
-            , m_Weights(weights)
-            , m_Biases(biases)
-        {}
-
-        void VisitTransposeConvolution2dLayer(const armnn::IConnectableLayer* layer,
-                                              const Descriptor& descriptor,
-                                              const armnn::ConstTensor& weights,
-                                              const armnn::Optional<armnn::ConstTensor>& biases,
-                                              const char* name) override
-        {
-            VerifyNameAndConnections(layer, name);
-            VerifyDescriptor(descriptor);
-
-            // check weights
-            CompareConstTensor(weights, m_Weights);
-
-            // check biases
-            BOOST_CHECK(biases.has_value() == descriptor.m_BiasEnabled);
-            BOOST_CHECK(biases.has_value() == m_Biases.has_value());
-
-            if (biases.has_value() && m_Biases.has_value())
-            {
-                CompareConstTensor(biases.value(), m_Biases.value());
-            }
-        }
-
-    private:
-        armnn::ConstTensor                      m_Weights;
-        armnn::Optional<armnn::ConstTensor>     m_Biases;
-    };
-
     const std::string layerName("transposeConvolution2d");
     const armnn::TensorInfo inputInfo ({ 1, 7, 7, 1 }, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({ 1, 9, 9, 1 }, armnn::DataType::Float32);
@@ -2975,8 +2303,10 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    TransposeConvolution2dLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor, weights, biases);
-    deserializedNetwork->Accept(verifier);
+    const std::vector<armnn::ConstTensor> constants {weights, biases};
+    LayerVerifierBaseWithDescriptorAndConstants<armnn::TransposeConvolution2dDescriptor> verifier(
+            layerName, {inputInfo}, {outputInfo}, descriptor, constants);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
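
Layers that own weights need no bespoke verifier either: the tensors now arrive through the constants vector of ExecuteStrategy, in the order they are listed at construction (weights first, then biases, as here). A sketch of the presumed LayerVerifierBaseWithDescriptorAndConstants from SerializerTestUtils.hpp:

    template <typename Descriptor>
    class LayerVerifierBaseWithDescriptorAndConstants :
        public LayerVerifierBaseWithDescriptor<Descriptor>
    {
    public:
        LayerVerifierBaseWithDescriptorAndConstants(const std::string& layerName,
                                                    const std::vector<armnn::TensorInfo>& inputInfos,
                                                    const std::vector<armnn::TensorInfo>& outputInfos,
                                                    const Descriptor& descriptor,
                                                    const std::vector<armnn::ConstTensor>& constants)
            : LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor)
            , m_Constants(constants) {}

        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                             const armnn::BaseDescriptor& descriptor,
                             const std::vector<armnn::ConstTensor>& constants,
                             const char* name,
                             const armnn::LayerBindingId id = 0) override
        {
            armnn::IgnoreUnused(id);
            switch (layer->GetType())
            {
                case armnn::LayerType::Input: break;
                case armnn::LayerType::Output: break;
                default:
                {
                    this->VerifyNameAndConnections(layer, name);
                    this->VerifyDescriptor(static_cast<const Descriptor&>(descriptor));
                    // Deserialized constants must match the expected set pairwise.
                    BOOST_CHECK(constants.size() == m_Constants.size());
                    for (std::size_t i = 0; i < constants.size() && i < m_Constants.size(); ++i)
                    {
                        CompareConstTensor(constants[i], m_Constants[i]);
                    }
                }
            }
        }

    private:
        std::vector<armnn::ConstTensor> m_Constants;
    };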
 
 BOOST_AUTO_TEST_CASE(SerializeDeserializeNonLinearNetwork)
@@ -2991,16 +2321,31 @@
             : LayerVerifierBase(layerName, inputInfos, outputInfos)
             , m_LayerInput(layerInput) {}
 
-        void VisitConstantLayer(const armnn::IConnectableLayer* layer,
-                                const armnn::ConstTensor& input,
-                                const char* name) override
+        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                             const armnn::BaseDescriptor& descriptor,
+                             const std::vector<armnn::ConstTensor>& constants,
+                             const char* name,
+                             const armnn::LayerBindingId id = 0) override
         {
-            VerifyNameAndConnections(layer, name);
-            CompareConstTensor(input, m_LayerInput);
+            armnn::IgnoreUnused(descriptor, constants, id);
+            switch (layer->GetType())
+            {
+                case armnn::LayerType::Input: break;
+                case armnn::LayerType::Output: break;
+                case armnn::LayerType::Addition: break;
+                case armnn::LayerType::Constant:
+                {
+                    VerifyNameAndConnections(layer, name);
+                    CompareConstTensor(constants.at(0), m_LayerInput);
+                    break;
+                }
+                default:
+                {
+                    throw armnn::Exception("Unexpected layer type in test model");
+                }
+            }
         }
 
-        void VisitAdditionLayer(const armnn::IConnectableLayer*, const char*) override {}
-
     private:
         armnn::ConstTensor m_LayerInput;
     };
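
With the verifiers unified, every round trip in this file reduces to the same three steps: serialize, deserialize, ExecuteStrategy. If that remainder were factored out as well, a hypothetical helper (the name and signature are illustrative, not part of this change) would look like:

    template <typename Descriptor>
    void VerifyRoundTrip(armnn::INetwork& network,
                         const std::string& layerName,
                         const std::vector<armnn::TensorInfo>& inputInfos,
                         const std::vector<armnn::TensorInfo>& outputInfos,
                         const Descriptor& descriptor)
    {
        // Serialize to a flat buffer and parse it straight back.
        armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(network));
        BOOST_CHECK(deserializedNetwork);

        // The network drives the strategy over every layer it contains.
        LayerVerifierBaseWithDescriptor<Descriptor> verifier(layerName, inputInfos, outputInfos, descriptor);
        deserializedNetwork->ExecuteStrategy(verifier);
    }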
@@ -3029,2125 +2374,7 @@
     BOOST_CHECK(deserializedNetwork);
 
     ConstantLayerVerifier verifier(layerName, {}, {info}, constTensor);
-    deserializedNetwork->Accept(verifier);
-}
-
-class VerifyLstmLayer : public LayerVerifierBaseWithDescriptor<armnn::LstmDescriptor>
-{
-public:
-    VerifyLstmLayer(const std::string& layerName,
-                    const std::vector<armnn::TensorInfo>& inputInfos,
-                    const std::vector<armnn::TensorInfo>& outputInfos,
-                    const armnn::LstmDescriptor& descriptor,
-                    const armnn::LstmInputParams& inputParams)
-        : LayerVerifierBaseWithDescriptor<armnn::LstmDescriptor>(layerName, inputInfos, outputInfos, descriptor)
-        , m_InputParams(inputParams) {}
-
-    void VisitLstmLayer(const armnn::IConnectableLayer* layer,
-                        const armnn::LstmDescriptor& descriptor,
-                        const armnn::LstmInputParams& params,
-                        const char* name)
-    {
-        VerifyNameAndConnections(layer, name);
-        VerifyDescriptor(descriptor);
-        VerifyInputParameters(params);
-    }
-
-protected:
-    void VerifyInputParameters(const armnn::LstmInputParams& params)
-    {
-        VerifyConstTensors(
-            "m_InputToInputWeights", m_InputParams.m_InputToInputWeights, params.m_InputToInputWeights);
-        VerifyConstTensors(
-            "m_InputToForgetWeights", m_InputParams.m_InputToForgetWeights, params.m_InputToForgetWeights);
-        VerifyConstTensors(
-            "m_InputToCellWeights", m_InputParams.m_InputToCellWeights, params.m_InputToCellWeights);
-        VerifyConstTensors(
-            "m_InputToOutputWeights", m_InputParams.m_InputToOutputWeights, params.m_InputToOutputWeights);
-        VerifyConstTensors(
-            "m_RecurrentToInputWeights", m_InputParams.m_RecurrentToInputWeights, params.m_RecurrentToInputWeights);
-        VerifyConstTensors(
-            "m_RecurrentToForgetWeights", m_InputParams.m_RecurrentToForgetWeights, params.m_RecurrentToForgetWeights);
-        VerifyConstTensors(
-            "m_RecurrentToCellWeights", m_InputParams.m_RecurrentToCellWeights, params.m_RecurrentToCellWeights);
-        VerifyConstTensors(
-            "m_RecurrentToOutputWeights", m_InputParams.m_RecurrentToOutputWeights, params.m_RecurrentToOutputWeights);
-        VerifyConstTensors(
-            "m_CellToInputWeights", m_InputParams.m_CellToInputWeights, params.m_CellToInputWeights);
-        VerifyConstTensors(
-            "m_CellToForgetWeights", m_InputParams.m_CellToForgetWeights, params.m_CellToForgetWeights);
-        VerifyConstTensors(
-            "m_CellToOutputWeights", m_InputParams.m_CellToOutputWeights, params.m_CellToOutputWeights);
-        VerifyConstTensors(
-            "m_InputGateBias", m_InputParams.m_InputGateBias, params.m_InputGateBias);
-        VerifyConstTensors(
-            "m_ForgetGateBias", m_InputParams.m_ForgetGateBias, params.m_ForgetGateBias);
-        VerifyConstTensors(
-            "m_CellBias", m_InputParams.m_CellBias, params.m_CellBias);
-        VerifyConstTensors(
-            "m_OutputGateBias", m_InputParams.m_OutputGateBias, params.m_OutputGateBias);
-        VerifyConstTensors(
-            "m_ProjectionWeights", m_InputParams.m_ProjectionWeights, params.m_ProjectionWeights);
-        VerifyConstTensors(
-            "m_ProjectionBias", m_InputParams.m_ProjectionBias, params.m_ProjectionBias);
-        VerifyConstTensors(
-            "m_InputLayerNormWeights", m_InputParams.m_InputLayerNormWeights, params.m_InputLayerNormWeights);
-        VerifyConstTensors(
-            "m_ForgetLayerNormWeights", m_InputParams.m_ForgetLayerNormWeights, params.m_ForgetLayerNormWeights);
-        VerifyConstTensors(
-            "m_CellLayerNormWeights", m_InputParams.m_CellLayerNormWeights, params.m_CellLayerNormWeights);
-        VerifyConstTensors(
-            "m_OutputLayerNormWeights", m_InputParams.m_OutputLayerNormWeights, params.m_OutputLayerNormWeights);
-    }
-
-private:
-    armnn::LstmInputParams m_InputParams;
-};
-
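
The VisitLstmLayer checker above, the three LSTM round-trip tests and the backward-compatibility blob that follow are deleted here rather than migrated in place: per the build-file hunks at the top of this patch they move into the new LstmSerializationTests.cpp. Under the strategy interface the LSTM weights arrive flattened into the constants vector, so the moved checker presumably rebuilds an LstmInputParams before running the same VerifyConstTensors comparisons. A hedged sketch of that dispatch (ConstantsToLstmInputParams is an illustrative helper name; the real mapping must follow the order implied by the Cifg/peephole/projection/layer-norm flags):

    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                         const armnn::BaseDescriptor& descriptor,
                         const std::vector<armnn::ConstTensor>& constants,
                         const char* name,
                         const armnn::LayerBindingId id = 0) override
    {
        armnn::IgnoreUnused(id);
        switch (layer->GetType())
        {
            case armnn::LayerType::Input: break;
            case armnn::LayerType::Output: break;
            case armnn::LayerType::Lstm:
            {
                VerifyNameAndConnections(layer, name);
                const armnn::LstmDescriptor& layerDescriptor =
                        static_cast<const armnn::LstmDescriptor&>(descriptor);
                VerifyDescriptor(layerDescriptor);
                // Map the flat constants back onto named LstmInputParams fields,
                // then compare them tensor by tensor as before.
                armnn::LstmInputParams params = ConstantsToLstmInputParams(layerDescriptor, constants);
                VerifyInputParameters(params);
                break;
            }
            default:
            {
                throw armnn::Exception("Unexpected layer type in LSTM test model");
            }
        }
    }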
-BOOST_AUTO_TEST_CASE(SerializeDeserializeLstmCifgPeepholeNoProjection)
-{
-    armnn::LstmDescriptor descriptor;
-    descriptor.m_ActivationFunc = 4;
-    descriptor.m_ClippingThresProj = 0.0f;
-    descriptor.m_ClippingThresCell = 0.0f;
-    descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams
-    descriptor.m_ProjectionEnabled = false;
-    descriptor.m_PeepholeEnabled = true;
-
-    const uint32_t batchSize = 1;
-    const uint32_t inputSize = 2;
-    const uint32_t numUnits = 4;
-    const uint32_t outputSize = numUnits;
-
-    armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32);
-    std::vector<float> inputToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
-    armnn::ConstTensor inputToForgetWeights(inputWeightsInfo1, inputToForgetWeightsData);
-
-    std::vector<float> inputToCellWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
-    armnn::ConstTensor inputToCellWeights(inputWeightsInfo1, inputToCellWeightsData);
-
-    std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
-    armnn::ConstTensor inputToOutputWeights(inputWeightsInfo1, inputToOutputWeightsData);
-
-    armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32);
-    std::vector<float> recurrentToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
-    armnn::ConstTensor recurrentToForgetWeights(inputWeightsInfo2, recurrentToForgetWeightsData);
-
-    std::vector<float> recurrentToCellWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
-    armnn::ConstTensor recurrentToCellWeights(inputWeightsInfo2, recurrentToCellWeightsData);
-
-    std::vector<float> recurrentToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
-    armnn::ConstTensor recurrentToOutputWeights(inputWeightsInfo2, recurrentToOutputWeightsData);
-
-    armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32);
-    std::vector<float> cellToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo3.GetNumElements());
-    armnn::ConstTensor cellToForgetWeights(inputWeightsInfo3, cellToForgetWeightsData);
-
-    std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo3.GetNumElements());
-    armnn::ConstTensor cellToOutputWeights(inputWeightsInfo3, cellToOutputWeightsData);
-
-    std::vector<float> forgetGateBiasData(numUnits, 1.0f);
-    armnn::ConstTensor forgetGateBias(inputWeightsInfo3, forgetGateBiasData);
-
-    std::vector<float> cellBiasData(numUnits, 0.0f);
-    armnn::ConstTensor cellBias(inputWeightsInfo3, cellBiasData);
-
-    std::vector<float> outputGateBiasData(numUnits, 0.0f);
-    armnn::ConstTensor outputGateBias(inputWeightsInfo3, outputGateBiasData);
-
-    armnn::LstmInputParams params;
-    params.m_InputToForgetWeights = &inputToForgetWeights;
-    params.m_InputToCellWeights = &inputToCellWeights;
-    params.m_InputToOutputWeights = &inputToOutputWeights;
-    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
-    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
-    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
-    params.m_ForgetGateBias = &forgetGateBias;
-    params.m_CellBias = &cellBias;
-    params.m_OutputGateBias = &outputGateBias;
-    params.m_CellToForgetWeights = &cellToForgetWeights;
-    params.m_CellToOutputWeights = &cellToOutputWeights;
-
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer* const inputLayer   = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(1);
-    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(2);
-    const std::string layerName("lstm");
-    armnn::IConnectableLayer* const lstmLayer = network->AddLstmLayer(descriptor, params, layerName.c_str());
-    armnn::IConnectableLayer* const scratchBuffer  = network->AddOutputLayer(0);
-    armnn::IConnectableLayer* const outputStateOut  = network->AddOutputLayer(1);
-    armnn::IConnectableLayer* const cellStateOut  = network->AddOutputLayer(2);
-    armnn::IConnectableLayer* const outputLayer  = network->AddOutputLayer(3);
-
-    // connect up
-    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32);
-    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits}, armnn::DataType::Float32);
-    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32);
-    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 3 }, armnn::DataType::Float32);
-
-    inputLayer->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(0));
-    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
-
-    outputStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(1));
-    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateTensorInfo);
-
-    cellStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(2));
-    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
-
-    lstmLayer->GetOutputSlot(0).Connect(scratchBuffer->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(0).SetTensorInfo(lstmTensorInfoScratchBuff);
-
-    lstmLayer->GetOutputSlot(1).Connect(outputStateOut->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(1).SetTensorInfo(outputStateTensorInfo);
-
-    lstmLayer->GetOutputSlot(2).Connect(cellStateOut->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(2).SetTensorInfo(cellStateTensorInfo);
-
-    lstmLayer->GetOutputSlot(3).Connect(outputLayer->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(3).SetTensorInfo(outputStateTensorInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
-
-    VerifyLstmLayer checker(
-        layerName,
-        {inputTensorInfo, outputStateTensorInfo, cellStateTensorInfo},
-        {lstmTensorInfoScratchBuff, outputStateTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
-        descriptor,
-        params);
-    deserializedNetwork->Accept(checker);
-}
-
-BOOST_AUTO_TEST_CASE(SerializeDeserializeLstmNoCifgWithPeepholeAndProjection)
-{
-    armnn::LstmDescriptor descriptor;
-    descriptor.m_ActivationFunc = 4;
-    descriptor.m_ClippingThresProj = 0.0f;
-    descriptor.m_ClippingThresCell = 0.0f;
-    descriptor.m_CifgEnabled = false; // if this is true then we DON'T need to set the OptCifgParams
-    descriptor.m_ProjectionEnabled = true;
-    descriptor.m_PeepholeEnabled = true;
-
-    const uint32_t batchSize = 2;
-    const uint32_t inputSize = 5;
-    const uint32_t numUnits = 20;
-    const uint32_t outputSize = 16;
-
-    armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
-    std::vector<float> inputToInputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
-    armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
-
-    std::vector<float> inputToForgetWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
-    armnn::ConstTensor inputToForgetWeights(tensorInfo20x5, inputToForgetWeightsData);
-
-    std::vector<float> inputToCellWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
-    armnn::ConstTensor inputToCellWeights(tensorInfo20x5, inputToCellWeightsData);
-
-    std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
-    armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
-
-    armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
-    std::vector<float> inputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
-
-    std::vector<float> forgetGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor forgetGateBias(tensorInfo20, forgetGateBiasData);
-
-    std::vector<float> cellBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor cellBias(tensorInfo20, cellBiasData);
-
-    std::vector<float> outputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
-
-    armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
-    std::vector<float> recurrentToInputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
-    armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
-
-    std::vector<float> recurrentToForgetWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
-    armnn::ConstTensor recurrentToForgetWeights(tensorInfo20x16, recurrentToForgetWeightsData);
-
-    std::vector<float> recurrentToCellWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
-    armnn::ConstTensor recurrentToCellWeights(tensorInfo20x16, recurrentToCellWeightsData);
-
-    std::vector<float> recurrentToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
-    armnn::ConstTensor recurrentToOutputWeights(tensorInfo20x16, recurrentToOutputWeightsData);
-
-    std::vector<float> cellToInputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor cellToInputWeights(tensorInfo20, cellToInputWeightsData);
-
-    std::vector<float> cellToForgetWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor cellToForgetWeights(tensorInfo20, cellToForgetWeightsData);
-
-    std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor cellToOutputWeights(tensorInfo20,  cellToOutputWeightsData);
-
-    armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
-    std::vector<float> projectionWeightsData = GenerateRandomData<float>(tensorInfo16x20.GetNumElements());
-    armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
-
-    armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
-    std::vector<float> projectionBiasData(outputSize, 0.f);
-    armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
-
-    armnn::LstmInputParams params;
-    params.m_InputToForgetWeights = &inputToForgetWeights;
-    params.m_InputToCellWeights = &inputToCellWeights;
-    params.m_InputToOutputWeights = &inputToOutputWeights;
-    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
-    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
-    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
-    params.m_ForgetGateBias = &forgetGateBias;
-    params.m_CellBias = &cellBias;
-    params.m_OutputGateBias = &outputGateBias;
-
-    // additional params because: descriptor.m_CifgEnabled = false
-    params.m_InputToInputWeights = &inputToInputWeights;
-    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
-    params.m_CellToInputWeights = &cellToInputWeights;
-    params.m_InputGateBias = &inputGateBias;
-
-    // additional params because: descriptor.m_ProjectionEnabled = true
-    params.m_ProjectionWeights = &projectionWeights;
-    params.m_ProjectionBias = &projectionBias;
-
-    // additional params because: descriptor.m_PeepholeEnabled = true
-    params.m_CellToForgetWeights = &cellToForgetWeights;
-    params.m_CellToOutputWeights = &cellToOutputWeights;
-
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer* const inputLayer   = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(1);
-    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(2);
-    const std::string layerName("lstm");
-    armnn::IConnectableLayer* const lstmLayer = network->AddLstmLayer(descriptor, params, layerName.c_str());
-    armnn::IConnectableLayer* const scratchBuffer  = network->AddOutputLayer(0);
-    armnn::IConnectableLayer* const outputStateOut  = network->AddOutputLayer(1);
-    armnn::IConnectableLayer* const cellStateOut  = network->AddOutputLayer(2);
-    armnn::IConnectableLayer* const outputLayer  = network->AddOutputLayer(3);
-
-    // connect up
-    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32);
-    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits}, armnn::DataType::Float32);
-    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32);
-    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 4 }, armnn::DataType::Float32);
-
-    inputLayer->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(0));
-    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
-
-    outputStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(1));
-    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateTensorInfo);
-
-    cellStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(2));
-    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
-
-    lstmLayer->GetOutputSlot(0).Connect(scratchBuffer->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(0).SetTensorInfo(lstmTensorInfoScratchBuff);
-
-    lstmLayer->GetOutputSlot(1).Connect(outputStateOut->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(1).SetTensorInfo(outputStateTensorInfo);
-
-    lstmLayer->GetOutputSlot(2).Connect(cellStateOut->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(2).SetTensorInfo(cellStateTensorInfo);
-
-    lstmLayer->GetOutputSlot(3).Connect(outputLayer->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(3).SetTensorInfo(outputStateTensorInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
-
-    VerifyLstmLayer checker(
-        layerName,
-        {inputTensorInfo, outputStateTensorInfo, cellStateTensorInfo},
-        {lstmTensorInfoScratchBuff, outputStateTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
-        descriptor,
-        params);
-    deserializedNetwork->Accept(checker);
-}
-
-BOOST_AUTO_TEST_CASE(SerializeDeserializeLstmNoCifgWithPeepholeWithProjectionWithLayerNorm)
-{
-    armnn::LstmDescriptor descriptor;
-    descriptor.m_ActivationFunc = 4;
-    descriptor.m_ClippingThresProj = 0.0f;
-    descriptor.m_ClippingThresCell = 0.0f;
-    descriptor.m_CifgEnabled = false; // if this is true then we DON'T need to set the OptCifgParams
-    descriptor.m_ProjectionEnabled = true;
-    descriptor.m_PeepholeEnabled = true;
-    descriptor.m_LayerNormEnabled = true;
-
-    const uint32_t batchSize = 2;
-    const uint32_t inputSize = 5;
-    const uint32_t numUnits = 20;
-    const uint32_t outputSize = 16;
-
-    armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
-    std::vector<float> inputToInputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
-    armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
-
-    std::vector<float> inputToForgetWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
-    armnn::ConstTensor inputToForgetWeights(tensorInfo20x5, inputToForgetWeightsData);
-
-    std::vector<float> inputToCellWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
-    armnn::ConstTensor inputToCellWeights(tensorInfo20x5, inputToCellWeightsData);
-
-    std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
-    armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
-
-    armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
-    std::vector<float> inputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
-
-    std::vector<float> forgetGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor forgetGateBias(tensorInfo20, forgetGateBiasData);
-
-    std::vector<float> cellBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor cellBias(tensorInfo20, cellBiasData);
-
-    std::vector<float> outputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
-
-    armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
-    std::vector<float> recurrentToInputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
-    armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
-
-    std::vector<float> recurrentToForgetWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
-    armnn::ConstTensor recurrentToForgetWeights(tensorInfo20x16, recurrentToForgetWeightsData);
-
-    std::vector<float> recurrentToCellWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
-    armnn::ConstTensor recurrentToCellWeights(tensorInfo20x16, recurrentToCellWeightsData);
-
-    std::vector<float> recurrentToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
-    armnn::ConstTensor recurrentToOutputWeights(tensorInfo20x16, recurrentToOutputWeightsData);
-
-    std::vector<float> cellToInputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor cellToInputWeights(tensorInfo20, cellToInputWeightsData);
-
-    std::vector<float> cellToForgetWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor cellToForgetWeights(tensorInfo20, cellToForgetWeightsData);
-
-    std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor cellToOutputWeights(tensorInfo20,  cellToOutputWeightsData);
-
-    armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
-    std::vector<float> projectionWeightsData = GenerateRandomData<float>(tensorInfo16x20.GetNumElements());
-    armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
-
-    armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
-    std::vector<float> projectionBiasData(outputSize, 0.f);
-    armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
-
-    std::vector<float> inputLayerNormWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor inputLayerNormWeights(tensorInfo20, inputLayerNormWeightsData);
-
-    std::vector<float> forgetLayerNormWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor forgetLayerNormWeights(tensorInfo20, forgetLayerNormWeightsData);
-
-    std::vector<float> cellLayerNormWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor cellLayerNormWeights(tensorInfo20, cellLayerNormWeightsData);
-
-    std::vector<float> outLayerNormWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor outLayerNormWeights(tensorInfo20, outLayerNormWeightsData);
-
-    armnn::LstmInputParams params;
-    params.m_InputToForgetWeights = &inputToForgetWeights;
-    params.m_InputToCellWeights = &inputToCellWeights;
-    params.m_InputToOutputWeights = &inputToOutputWeights;
-    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
-    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
-    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
-    params.m_ForgetGateBias = &forgetGateBias;
-    params.m_CellBias = &cellBias;
-    params.m_OutputGateBias = &outputGateBias;
-
-    // additional params because: descriptor.m_CifgEnabled = false
-    params.m_InputToInputWeights = &inputToInputWeights;
-    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
-    params.m_CellToInputWeights = &cellToInputWeights;
-    params.m_InputGateBias = &inputGateBias;
-
-    // additional params because: descriptor.m_ProjectionEnabled = true
-    params.m_ProjectionWeights = &projectionWeights;
-    params.m_ProjectionBias = &projectionBias;
-
-    // additional params because: descriptor.m_PeepholeEnabled = true
-    params.m_CellToForgetWeights = &cellToForgetWeights;
-    params.m_CellToOutputWeights = &cellToOutputWeights;
-
-    // additional params because: descriptor.m_LayerNormEnabled = true
-    params.m_InputLayerNormWeights = &inputLayerNormWeights;
-    params.m_ForgetLayerNormWeights = &forgetLayerNormWeights;
-    params.m_CellLayerNormWeights = &cellLayerNormWeights;
-    params.m_OutputLayerNormWeights = &outLayerNormWeights;
-
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer* const inputLayer   = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(1);
-    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(2);
-    const std::string layerName("lstm");
-    armnn::IConnectableLayer* const lstmLayer = network->AddLstmLayer(descriptor, params, layerName.c_str());
-    armnn::IConnectableLayer* const scratchBuffer  = network->AddOutputLayer(0);
-    armnn::IConnectableLayer* const outputStateOut  = network->AddOutputLayer(1);
-    armnn::IConnectableLayer* const cellStateOut  = network->AddOutputLayer(2);
-    armnn::IConnectableLayer* const outputLayer  = network->AddOutputLayer(3);
-
-    // connect up
-    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32);
-    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits}, armnn::DataType::Float32);
-    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32);
-    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 4 }, armnn::DataType::Float32);
-
-    inputLayer->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(0));
-    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
-
-    outputStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(1));
-    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateTensorInfo);
-
-    cellStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(2));
-    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
-
-    lstmLayer->GetOutputSlot(0).Connect(scratchBuffer->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(0).SetTensorInfo(lstmTensorInfoScratchBuff);
-
-    lstmLayer->GetOutputSlot(1).Connect(outputStateOut->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(1).SetTensorInfo(outputStateTensorInfo);
-
-    lstmLayer->GetOutputSlot(2).Connect(cellStateOut->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(2).SetTensorInfo(cellStateTensorInfo);
-
-    lstmLayer->GetOutputSlot(3).Connect(outputLayer->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(3).SetTensorInfo(outputStateTensorInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
-
-    VerifyLstmLayer checker(
-            layerName,
-            {inputTensorInfo, outputStateTensorInfo, cellStateTensorInfo},
-            {lstmTensorInfoScratchBuff, outputStateTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
-            descriptor,
-            params);
-    deserializedNetwork->Accept(checker);
-}
-
-BOOST_AUTO_TEST_CASE(EnsureLstmLayersBackwardCompatibility)
-{
-    // The hex data below is a flat buffer containing an LSTM layer with no Cifg, and with peephole and
-    // projection enabled. The data was captured before the additional layer normalization parameters were
-    // added to the LSTM serializer, so this test checks that a model with the old parameter configuration
-    // can still be loaded.
-    const std::vector<uint8_t> lstmNoCifgWithPeepholeAndProjectionModel =
-    {
-        0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x10, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x0A, 0x00,
-        0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
-        0xDC, 0x29, 0x00, 0x00, 0x38, 0x29, 0x00, 0x00, 0xB4, 0x28, 0x00, 0x00, 0x94, 0x01, 0x00, 0x00, 0x3C, 0x01,
-        0x00, 0x00, 0xE0, 0x00, 0x00, 0x00, 0x84, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00,
-        0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x70, 0xD6, 0xFF, 0xFF,
-        0x00, 0x00, 0x00, 0x0B, 0x04, 0x00, 0x00, 0x00, 0x06, 0xD7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x88, 0xD7,
-        0xFF, 0xFF, 0x08, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF6, 0xD6, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x00,
-        0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0xE8, 0xD7, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xC8, 0xD6, 0xFF, 0xFF, 0x00, 0x00,
-        0x00, 0x0B, 0x04, 0x00, 0x00, 0x00, 0x5E, 0xD7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0xE0, 0xD7, 0xFF, 0xFF,
-        0x08, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x4E, 0xD7, 0xFF, 0xFF, 0x06, 0x00, 0x00, 0x00, 0x10, 0x00,
-        0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0xD8,
-        0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0xD7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x0B,
-        0x04, 0x00, 0x00, 0x00, 0xB6, 0xD7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x38, 0xD8, 0xFF, 0xFF, 0x08, 0x00,
-        0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xA6, 0xD7, 0xFF, 0xFF, 0x05, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
-        0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0xD8, 0xFF, 0xFF,
-        0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x78, 0xD7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x0B, 0x04, 0x00,
-        0x00, 0x00, 0x0E, 0xD8, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x16, 0xD8, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00,
-        0xFA, 0xD7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00,
-        0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-        0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xD8, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x6C, 0xD8, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x23, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00,
-        0x12, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x0A, 0x00, 0x00, 0x00, 0xE0, 0x25, 0x00, 0x00, 0xD0, 0x25,
-        0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x00, 0x48, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00,
-        0x10, 0x00, 0x14, 0x00, 0x18, 0x00, 0x1C, 0x00, 0x20, 0x00, 0x24, 0x00, 0x28, 0x00, 0x2C, 0x00, 0x30, 0x00,
-        0x34, 0x00, 0x38, 0x00, 0x3C, 0x00, 0x40, 0x00, 0x44, 0x00, 0x26, 0x00, 0x00, 0x00, 0xC4, 0x23, 0x00, 0x00,
-        0xF8, 0x21, 0x00, 0x00, 0x2C, 0x20, 0x00, 0x00, 0xF0, 0x1A, 0x00, 0x00, 0xB4, 0x15, 0x00, 0x00, 0x78, 0x10,
-        0x00, 0x00, 0xF0, 0x0F, 0x00, 0x00, 0x68, 0x0F, 0x00, 0x00, 0xE0, 0x0E, 0x00, 0x00, 0x14, 0x0D, 0x00, 0x00,
-        0xD8, 0x07, 0x00, 0x00, 0x50, 0x07, 0x00, 0x00, 0xC8, 0x06, 0x00, 0x00, 0x8C, 0x01, 0x00, 0x00, 0x14, 0x01,
-        0x00, 0x00, 0x8C, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xEE, 0xD7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03,
-        0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xFE, 0xD8, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5A, 0xD8, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01,
-        0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x72, 0xD8,
-        0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x82, 0xD9, 0xFF, 0xFF,
-        0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xD8,
-        0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-        0x14, 0x00, 0x00, 0x00, 0xF6, 0xD8, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x54, 0x00, 0x00, 0x00, 0x04, 0x00,
-        0x00, 0x00, 0x06, 0xDA, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0xD9, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x6A, 0xD9, 0xFF, 0xFF, 0x00, 0x00,
-        0x00, 0x03, 0x14, 0x05, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7A, 0xDA, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00,
-        0x40, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xDE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0xA2, 0xDE,
-        0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xB2, 0xDF, 0xFF, 0xFF,
-        0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0xDF,
-        0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-        0x14, 0x00, 0x00, 0x00, 0x26, 0xDF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00,
-        0x00, 0x00, 0x36, 0xE0, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x92, 0xDF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0xAA, 0xDF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03,
-        0x14, 0x05, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xBA, 0xE0, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x40, 0x01,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0xC6, 0xE4, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xE2, 0xE4, 0xFF, 0xFF,
-        0x00, 0x00, 0x00, 0x03, 0xA4, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xF2, 0xE5, 0xFF, 0xFF, 0x04, 0x00,
-        0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0xE6, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01,
-        0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x05, 0x00,
-        0x00, 0x00, 0xAA, 0xE6, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
-        0xBA, 0xE7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x16, 0xE7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x2E, 0xE7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00,
-        0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x3E, 0xE8, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9A, 0xE7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0xB2, 0xE7, 0xFF, 0xFF,
-        0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xC2, 0xE8, 0xFF, 0xFF, 0x04, 0x00,
-        0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E, 0xE8, 0xFF, 0xFF,
-        0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00,
-        0x00, 0x00, 0x36, 0xE8, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x14, 0x05, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
-        0x46, 0xE9, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x40, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0xED, 0xFF, 0xFF,
-        0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00,
-        0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x6E, 0xED, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x14, 0x05, 0x00, 0x00,
-        0x04, 0x00, 0x00, 0x00, 0x7E, 0xEE, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x40, 0x01, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x8A, 0xF2, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
-        0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xA6, 0xF2, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03,
-        0x14, 0x05, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xB6, 0xF3, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x40, 0x01,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0xC2, 0xF7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xDE, 0xF7, 0xFF, 0xFF,
-        0x00, 0x00, 0x00, 0x03, 0xA4, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xEE, 0xF8, 0xFF, 0xFF, 0x04, 0x00,
-        0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xF9, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01,
-        0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x05, 0x00,
-        0x00, 0x00, 0xA6, 0xF9, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0xA4, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
-        0xB6, 0xFA, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0xFB,
-        0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
-        0x14, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x6E, 0xFB, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0xA4, 0x01,
-        0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7E, 0xFC, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x1A, 0xFD, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x10, 0x00, 0x0C, 0x00,
-        0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x07, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x01, 0x01, 0x04, 0x00, 0x00, 0x00, 0x2E, 0xFE, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
-        0x22, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6C, 0x73,
-        0x74, 0x6D, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xEC, 0x00, 0x00, 0x00, 0xD0, 0x00, 0x00, 0x00,
-        0xB4, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, 0x00, 0x30, 0x00,
-        0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x14, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
-        0xA6, 0xFD, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
-        0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x3C, 0xFF, 0xFF, 0xFF, 0x02, 0x00, 0x00, 0x00,
-        0x04, 0x00, 0x00, 0x00, 0xCE, 0xFD, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x64, 0xFF, 0xFF, 0xFF,
-        0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xF6, 0xFD, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
-        0xB4, 0xFE, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x1A, 0xFE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00,
-        0xF0, 0xFF, 0xFF, 0xFF, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
-        0x10, 0x00, 0x04, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xFE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x09, 0x04, 0x00, 0x00, 0x00,
-        0x7E, 0xFF, 0xFF, 0xFF, 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x04, 0x00, 0x08, 0x00, 0x08, 0x00,
-        0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x76, 0xFF, 0xFF, 0xFF, 0x02, 0x00, 0x00, 0x00,
-        0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
-        0x68, 0xFF, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0xCE, 0xFE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
-        0x08, 0x00, 0x0E, 0x00, 0x07, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x0C, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
-        0x08, 0x00, 0x0E, 0x00, 0x04, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x01, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x18, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x10, 0x00, 0x14, 0x00,
-        0x0E, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00,
-        0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x01, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00,
-        0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6E, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x08, 0x00,
-        0x0C, 0x00, 0x07, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x04, 0x00, 0x00, 0x00,
-        0xF6, 0xFF, 0xFF, 0xFF, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x0A, 0x00, 0x04, 0x00, 0x06, 0x00,
-        0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00,
-        0x0C, 0x00, 0x10, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00,
-        0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x01, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00,
-        0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x10, 0x00, 0x08, 0x00, 0x07, 0x00, 0x0C, 0x00,
-        0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
-        0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00
-    };
-
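(Aside on the fixture above: the byte array is a network serialized by armnnSerializer and captured as raw bytes. A minimal sketch of how such a fixture can be regenerated, assuming the armnnSerializer::ISerializer API (Create, Serialize, SaveSerializedToStream) that these tests already depend on; the helper name is illustrative only, not part of this patch:

#include <armnnSerializer/ISerializer.hpp>
#include <sstream>
#include <string>

// Serialize a network and return its raw bytes; each byte of the returned
// string corresponds to one 0x.. literal in a fixture like the one above.
std::string SerializeNetworkToString(const armnn::INetwork& network)
{
    armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
    serializer->Serialize(network);
    std::stringstream stream;
    serializer->SaveSerializedToStream(stream);
    return stream.str();
}
)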
-    armnn::INetworkPtr deserializedNetwork =
-        DeserializeNetwork(std::string(lstmNoCifgWithPeepholeAndProjectionModel.begin(),
-                                       lstmNoCifgWithPeepholeAndProjectionModel.end()));
-
-    BOOST_CHECK(deserializedNetwork);
-
-    // Generate the same model parameters that were used to serialize the model (layer norm is not specified)
-    armnn::LstmDescriptor descriptor;
-    descriptor.m_ActivationFunc    = 4;
-    descriptor.m_ClippingThresProj = 0.0f;
-    descriptor.m_ClippingThresCell = 0.0f;
-    descriptor.m_CifgEnabled       = false;
-    descriptor.m_ProjectionEnabled = true;
-    descriptor.m_PeepholeEnabled   = true;
-
-    const uint32_t batchSize  = 2u;
-    const uint32_t inputSize  = 5u;
-    const uint32_t numUnits   = 20u;
-    const uint32_t outputSize = 16u;
-
-    armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
-    std::vector<float> inputToInputWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
-    armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
-
-    std::vector<float> inputToForgetWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
-    armnn::ConstTensor inputToForgetWeights(tensorInfo20x5, inputToForgetWeightsData);
-
-    std::vector<float> inputToCellWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
-    armnn::ConstTensor inputToCellWeights(tensorInfo20x5, inputToCellWeightsData);
-
-    std::vector<float> inputToOutputWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
-    armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
-
-    armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
-    std::vector<float> inputGateBiasData(tensorInfo20.GetNumElements(), 0.0f);
-    armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
-
-    std::vector<float> forgetGateBiasData(tensorInfo20.GetNumElements(), 0.0f);
-    armnn::ConstTensor forgetGateBias(tensorInfo20, forgetGateBiasData);
-
-    std::vector<float> cellBiasData(tensorInfo20.GetNumElements(), 0.0f);
-    armnn::ConstTensor cellBias(tensorInfo20, cellBiasData);
-
-    std::vector<float> outputGateBiasData(tensorInfo20.GetNumElements(), 0.0f);
-    armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
-
-    armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
-    std::vector<float> recurrentToInputWeightsData(tensorInfo20x16.GetNumElements(), 0.0f);
-    armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
-
-    std::vector<float> recurrentToForgetWeightsData(tensorInfo20x16.GetNumElements(), 0.0f);
-    armnn::ConstTensor recurrentToForgetWeights(tensorInfo20x16, recurrentToForgetWeightsData);
-
-    std::vector<float> recurrentToCellWeightsData(tensorInfo20x16.GetNumElements(), 0.0f);
-    armnn::ConstTensor recurrentToCellWeights(tensorInfo20x16, recurrentToCellWeightsData);
-
-    std::vector<float> recurrentToOutputWeightsData(tensorInfo20x16.GetNumElements(), 0.0f);
-    armnn::ConstTensor recurrentToOutputWeights(tensorInfo20x16, recurrentToOutputWeightsData);
-
-    std::vector<float> cellToInputWeightsData(tensorInfo20.GetNumElements(), 0.0f);
-    armnn::ConstTensor cellToInputWeights(tensorInfo20, cellToInputWeightsData);
-
-    std::vector<float> cellToForgetWeightsData(tensorInfo20.GetNumElements(), 0.0f);
-    armnn::ConstTensor cellToForgetWeights(tensorInfo20, cellToForgetWeightsData);
-
-    std::vector<float> cellToOutputWeightsData(tensorInfo20.GetNumElements(), 0.0f);
-    armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
-
-    armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
-    std::vector<float> projectionWeightsData(tensorInfo16x20.GetNumElements(), 0.0f);
-    armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
-
-    armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
-    std::vector<float> projectionBiasData(tensorInfo16.GetNumElements(), 0.0f);
-    armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
-
-    armnn::LstmInputParams params;
-    params.m_InputToForgetWeights     = &inputToForgetWeights;
-    params.m_InputToCellWeights       = &inputToCellWeights;
-    params.m_InputToOutputWeights     = &inputToOutputWeights;
-    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
-    params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
-    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
-    params.m_ForgetGateBias           = &forgetGateBias;
-    params.m_CellBias                 = &cellBias;
-    params.m_OutputGateBias           = &outputGateBias;
-
-    // additional params because: descriptor.m_CifgEnabled = false
-    params.m_InputToInputWeights      = &inputToInputWeights;
-    params.m_RecurrentToInputWeights  = &recurrentToInputWeights;
-    params.m_CellToInputWeights       = &cellToInputWeights;
-    params.m_InputGateBias            = &inputGateBias;
-
-    // additional params because: descriptor.m_ProjectionEnabled = true
-    params.m_ProjectionWeights        = &projectionWeights;
-    params.m_ProjectionBias           = &projectionBias;
-
-    // additional params because: descriptor.m_PeepholeEnabled = true
-    params.m_CellToForgetWeights      = &cellToForgetWeights;
-    params.m_CellToOutputWeights      = &cellToOutputWeights;
-
-    const std::string layerName("lstm");
-    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32);
-    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits }, armnn::DataType::Float32);
-    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32);
-    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 4 }, armnn::DataType::Float32);
-
-    VerifyLstmLayer checker(
-            layerName,
-            {inputTensorInfo, outputStateTensorInfo, cellStateTensorInfo},
-            {lstmTensorInfoScratchBuff, outputStateTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
-            descriptor,
-            params);
-    deserializedNetwork->Accept(checker);
-}
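For context, most tests in this file build a network, push it through the serializer and deserializer, and verify the result. A minimal sketch of the SerializeNetwork/DeserializeNetwork helpers these tests call (assumed to live in SerializerTestUtils after this refactor; the exact signatures are an assumption, not taken from this patch):

    // Sketch only: round-trip helpers assumed by the tests in this file.
    #include <armnn/INetwork.hpp>
    #include <armnnDeserializer/IDeserializer.hpp>
    #include <armnnSerializer/ISerializer.hpp>

    #include <cstdint>
    #include <sstream>
    #include <string>
    #include <vector>

    std::string SerializeNetwork(const armnn::INetwork& network)
    {
        armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
        serializer->Serialize(network);

        std::stringstream stream;
        serializer->SaveSerializedToStream(stream); // flatbuffer bytes as a string
        return stream.str();
    }

    armnn::INetworkPtr DeserializeNetwork(const std::string& serializerString)
    {
        std::vector<std::uint8_t> content(serializerString.begin(), serializerString.end());
        return armnnDeserializer::IDeserializer::Create()->CreateNetworkFromBinary(content);
    }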
-class VerifyQuantizedLstmLayer : public LayerVerifierBase
-{
-public:
-    VerifyQuantizedLstmLayer(const std::string& layerName,
-                             const std::vector<armnn::TensorInfo>& inputInfos,
-                             const std::vector<armnn::TensorInfo>& outputInfos,
-                             const armnn::QuantizedLstmInputParams& inputParams)
-        : LayerVerifierBase(layerName, inputInfos, outputInfos), m_InputParams(inputParams) {}
-
-    void VisitQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
-                                 const armnn::QuantizedLstmInputParams& params,
-                                 const char* name)
-    {
-        VerifyNameAndConnections(layer, name);
-        VerifyInputParameters(params);
-    }
-
-protected:
-    void VerifyInputParameters(const armnn::QuantizedLstmInputParams& params)
-    {
-        VerifyConstTensors("m_InputToInputWeights",
-                           m_InputParams.m_InputToInputWeights, params.m_InputToInputWeights);
-        VerifyConstTensors("m_InputToForgetWeights",
-                           m_InputParams.m_InputToForgetWeights, params.m_InputToForgetWeights);
-        VerifyConstTensors("m_InputToCellWeights",
-                           m_InputParams.m_InputToCellWeights, params.m_InputToCellWeights);
-        VerifyConstTensors("m_InputToOutputWeights",
-                           m_InputParams.m_InputToOutputWeights, params.m_InputToOutputWeights);
-        VerifyConstTensors("m_RecurrentToInputWeights",
-                           m_InputParams.m_RecurrentToInputWeights, params.m_RecurrentToInputWeights);
-        VerifyConstTensors("m_RecurrentToForgetWeights",
-                           m_InputParams.m_RecurrentToForgetWeights, params.m_RecurrentToForgetWeights);
-        VerifyConstTensors("m_RecurrentToCellWeights",
-                           m_InputParams.m_RecurrentToCellWeights, params.m_RecurrentToCellWeights);
-        VerifyConstTensors("m_RecurrentToOutputWeights",
-                           m_InputParams.m_RecurrentToOutputWeights, params.m_RecurrentToOutputWeights);
-        VerifyConstTensors("m_InputGateBias",
-                           m_InputParams.m_InputGateBias, params.m_InputGateBias);
-        VerifyConstTensors("m_ForgetGateBias",
-                           m_InputParams.m_ForgetGateBias, params.m_ForgetGateBias);
-        VerifyConstTensors("m_CellBias",
-                           m_InputParams.m_CellBias, params.m_CellBias);
-        VerifyConstTensors("m_OutputGateBias",
-                           m_InputParams.m_OutputGateBias, params.m_OutputGateBias);
-    }
-
-private:
-    armnn::QuantizedLstmInputParams m_InputParams;
-};
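VerifyConstTensors carries most of the weight in these verifiers. A plausible sketch of its contract, assuming a both-null pair counts as a match and that byte-wise payload comparison is acceptable for the fixed tensors used here (an assumed helper, not lifted from this patch):

    #include <cstring>

    void VerifyConstTensors(const std::string& tensorName,
                            const armnn::ConstTensor* expectedPtr,
                            const armnn::ConstTensor* actualPtr)
    {
        if (expectedPtr == nullptr && actualPtr == nullptr)
        {
            return; // optional tensor absent on both sides: a match
        }
        BOOST_CHECK_MESSAGE(expectedPtr != nullptr && actualPtr != nullptr,
                            tensorName + " presence mismatch");
        if (expectedPtr != nullptr && actualPtr != nullptr)
        {
            BOOST_CHECK(expectedPtr->GetInfo() == actualPtr->GetInfo());
            BOOST_CHECK(expectedPtr->GetNumBytes() == actualPtr->GetNumBytes());
            BOOST_CHECK(std::memcmp(expectedPtr->GetMemoryArea(),
                                    actualPtr->GetMemoryArea(),
                                    expectedPtr->GetNumBytes()) == 0);
        }
    }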
-
-BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
-{
-    const uint32_t batchSize = 1;
-    const uint32_t inputSize = 2;
-    const uint32_t numUnits = 4;
-    const uint32_t outputSize = numUnits;
-
-    // Scale/Offset for input/output, cellState In/Out, weights, bias
-    float inputOutputScale = 0.0078125f;
-    int32_t inputOutputOffset = 128;
-
-    float cellStateScale = 0.00048828125f;
-    int32_t cellStateOffset = 0;
-
-    float weightsScale = 0.00408021f;
-    int32_t weightsOffset = 100;
-
-    float biasScale = 3.1876640625e-05f;
-    int32_t biasOffset = 0;
-
-    // The shape of weight data is {outputSize, inputSize} = {4, 2}
-    armnn::TensorShape inputToInputWeightsShape = {4, 2};
-    std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
-    armnn::TensorInfo inputToInputWeightsInfo(inputToInputWeightsShape,
-                                              armnn::DataType::QAsymmU8,
-                                              weightsScale,
-                                              weightsOffset);
-    armnn::ConstTensor inputToInputWeights(inputToInputWeightsInfo, inputToInputWeightsData);
-
-    armnn::TensorShape inputToForgetWeightsShape = {4, 2};
-    std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
-    armnn::TensorInfo inputToForgetWeightsInfo(inputToForgetWeightsShape,
-                                               armnn::DataType::QAsymmU8,
-                                               weightsScale,
-                                               weightsOffset);
-    armnn::ConstTensor inputToForgetWeights(inputToForgetWeightsInfo, inputToForgetWeightsData);
-
-    armnn::TensorShape inputToCellWeightsShape = {4, 2};
-    std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
-    armnn::TensorInfo inputToCellWeightsInfo(inputToCellWeightsShape,
-                                             armnn::DataType::QAsymmU8,
-                                             weightsScale,
-                                             weightsOffset);
-    armnn::ConstTensor inputToCellWeights(inputToCellWeightsInfo, inputToCellWeightsData);
-
-    armnn::TensorShape inputToOutputWeightsShape = {4, 2};
-    std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
-    armnn::TensorInfo inputToOutputWeightsInfo(inputToOutputWeightsShape,
-                                               armnn::DataType::QAsymmU8,
-                                               weightsScale,
-                                               weightsOffset);
-    armnn::ConstTensor inputToOutputWeights(inputToOutputWeightsInfo, inputToOutputWeightsData);
-
-    // The shape of recurrent weight data is {outputSize, outputSize} = {4, 4}
-    armnn::TensorShape recurrentToInputWeightsShape = {4, 4};
-    std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
-    armnn::TensorInfo recurrentToInputWeightsInfo(recurrentToInputWeightsShape,
-                                                  armnn::DataType::QAsymmU8,
-                                                  weightsScale,
-                                                  weightsOffset);
-    armnn::ConstTensor recurrentToInputWeights(recurrentToInputWeightsInfo, recurrentToInputWeightsData);
-
-    armnn::TensorShape recurrentToForgetWeightsShape = {4, 4};
-    std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
-    armnn::TensorInfo recurrentToForgetWeightsInfo(recurrentToForgetWeightsShape,
-                                                   armnn::DataType::QAsymmU8,
-                                                   weightsScale,
-                                                   weightsOffset);
-    armnn::ConstTensor recurrentToForgetWeights(recurrentToForgetWeightsInfo, recurrentToForgetWeightsData);
-
-    armnn::TensorShape recurrentToCellWeightsShape = {4, 4};
-    std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
-    armnn::TensorInfo recurrentToCellWeightsInfo(recurrentToCellWeightsShape,
-                                                 armnn::DataType::QAsymmU8,
-                                                 weightsScale,
-                                                 weightsOffset);
-    armnn::ConstTensor recurrentToCellWeights(recurrentToCellWeightsInfo, recurrentToCellWeightsData);
-
-    armnn::TensorShape recurrentToOutputWeightsShape = {4, 4};
-    std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
-    armnn::TensorInfo recurrentToOutputWeightsInfo(recurrentToOutputWeightsShape,
-                                                   armnn::DataType::QAsymmU8,
-                                                   weightsScale,
-                                                   weightsOffset);
-    armnn::ConstTensor recurrentToOutputWeights(recurrentToOutputWeightsInfo, recurrentToOutputWeightsData);
-
-    // The shape of bias data is {outputSize} = {4}
-    armnn::TensorShape inputGateBiasShape = {4};
-    std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4};
-    armnn::TensorInfo inputGateBiasInfo(inputGateBiasShape,
-                                        armnn::DataType::Signed32,
-                                        biasScale,
-                                        biasOffset);
-    armnn::ConstTensor inputGateBias(inputGateBiasInfo, inputGateBiasData);
-
-    armnn::TensorShape forgetGateBiasShape = {4};
-    std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4};
-    armnn::TensorInfo forgetGateBiasInfo(forgetGateBiasShape,
-                                         armnn::DataType::Signed32,
-                                         biasScale,
-                                         biasOffset);
-    armnn::ConstTensor forgetGateBias(forgetGateBiasInfo, forgetGateBiasData);
-
-    armnn::TensorShape cellBiasShape = {4};
-    std::vector<int32_t> cellBiasData = {1, 2, 3, 4};
-    armnn::TensorInfo cellBiasInfo(cellBiasShape,
-                                   armnn::DataType::Signed32,
-                                   biasScale,
-                                   biasOffset);
-    armnn::ConstTensor cellBias(cellBiasInfo, cellBiasData);
-
-    armnn::TensorShape outputGateBiasShape = {4};
-    std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4};
-    armnn::TensorInfo outputGateBiasInfo(outputGateBiasShape,
-                                         armnn::DataType::Signed32,
-                                         biasScale,
-                                         biasOffset);
-    armnn::ConstTensor outputGateBias(outputGateBiasInfo, outputGateBiasData);
-
-    armnn::QuantizedLstmInputParams params;
-    params.m_InputToInputWeights = &inputToInputWeights;
-    params.m_InputToForgetWeights = &inputToForgetWeights;
-    params.m_InputToCellWeights = &inputToCellWeights;
-    params.m_InputToOutputWeights = &inputToOutputWeights;
-    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
-    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
-    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
-    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
-    params.m_InputGateBias = &inputGateBias;
-    params.m_ForgetGateBias = &forgetGateBias;
-    params.m_CellBias = &cellBias;
-    params.m_OutputGateBias = &outputGateBias;
-
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(1);
-    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(2);
-    const std::string layerName("QuantizedLstm");
-    armnn::IConnectableLayer* const quantizedLstmLayer = network->AddQuantizedLstmLayer(params, layerName.c_str());
-    armnn::IConnectableLayer* const cellStateOut = network->AddOutputLayer(0);
-    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(1);
-
-    // Connect up
-    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize },
-                                      armnn::DataType::QAsymmU8,
-                                      inputOutputScale,
-                                      inputOutputOffset);
-    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits },
-                                          armnn::DataType::QSymmS16,
-                                          cellStateScale,
-                                          cellStateOffset);
-    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize },
-                                            armnn::DataType::QAsymmU8,
-                                            inputOutputScale,
-                                            inputOutputOffset);
-
-    inputLayer->GetOutputSlot(0).Connect(quantizedLstmLayer->GetInputSlot(0));
-    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
-
-    cellStateIn->GetOutputSlot(0).Connect(quantizedLstmLayer->GetInputSlot(1));
-    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
-
-    outputStateIn->GetOutputSlot(0).Connect(quantizedLstmLayer->GetInputSlot(2));
-    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateTensorInfo);
-
-    quantizedLstmLayer->GetOutputSlot(0).Connect(cellStateOut->GetInputSlot(0));
-    quantizedLstmLayer->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
-
-    quantizedLstmLayer->GetOutputSlot(1).Connect(outputLayer->GetInputSlot(0));
-    quantizedLstmLayer->GetOutputSlot(1).SetTensorInfo(outputStateTensorInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
-
-    VerifyQuantizedLstmLayer checker(layerName,
-                                     {inputTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
-                                     {cellStateTensorInfo, outputStateTensorInfo},
-                                     params);
-
-    deserializedNetwork->Accept(checker);
-}
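One detail worth preserving when this test migrates: the quantization parameters above are related, not arbitrary. For quantized LSTM arithmetic the Signed32 bias conventionally carries a scale equal to the product of the input scale and the weights scale, which is exactly where the literal comes from:

    // bias_scale = input_scale * weights_scale (standard for Signed32 biases)
    constexpr float inputOutputScale = 0.0078125f;
    constexpr float weightsScale     = 0.00408021f;
    constexpr float derivedBiasScale = inputOutputScale * weightsScale;
    // 0.0078125 * 0.00408021 = 3.1876640625e-05, the biasScale used above.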
-
-class VerifyQLstmLayer : public LayerVerifierBaseWithDescriptor<armnn::QLstmDescriptor>
-{
-public:
-    VerifyQLstmLayer(const std::string& layerName,
-                     const std::vector<armnn::TensorInfo>& inputInfos,
-                     const std::vector<armnn::TensorInfo>& outputInfos,
-                     const armnn::QLstmDescriptor& descriptor,
-                     const armnn::LstmInputParams& inputParams)
-        : LayerVerifierBaseWithDescriptor<armnn::QLstmDescriptor>(layerName, inputInfos, outputInfos, descriptor)
-        , m_InputParams(inputParams) {}
-
-    void VisitQLstmLayer(const armnn::IConnectableLayer* layer,
-                         const armnn::QLstmDescriptor& descriptor,
-                         const armnn::LstmInputParams& params,
-                         const char* name)
-    {
-        VerifyNameAndConnections(layer, name);
-        VerifyDescriptor(descriptor);
-        VerifyInputParameters(params);
-    }
-
-protected:
-    void VerifyInputParameters(const armnn::LstmInputParams& params)
-    {
-        VerifyConstTensors(
-            "m_InputToInputWeights", m_InputParams.m_InputToInputWeights, params.m_InputToInputWeights);
-        VerifyConstTensors(
-            "m_InputToForgetWeights", m_InputParams.m_InputToForgetWeights, params.m_InputToForgetWeights);
-        VerifyConstTensors(
-            "m_InputToCellWeights", m_InputParams.m_InputToCellWeights, params.m_InputToCellWeights);
-        VerifyConstTensors(
-            "m_InputToOutputWeights", m_InputParams.m_InputToOutputWeights, params.m_InputToOutputWeights);
-        VerifyConstTensors(
-            "m_RecurrentToInputWeights", m_InputParams.m_RecurrentToInputWeights, params.m_RecurrentToInputWeights);
-        VerifyConstTensors(
-            "m_RecurrentToForgetWeights", m_InputParams.m_RecurrentToForgetWeights, params.m_RecurrentToForgetWeights);
-        VerifyConstTensors(
-            "m_RecurrentToCellWeights", m_InputParams.m_RecurrentToCellWeights, params.m_RecurrentToCellWeights);
-        VerifyConstTensors(
-            "m_RecurrentToOutputWeights", m_InputParams.m_RecurrentToOutputWeights, params.m_RecurrentToOutputWeights);
-        VerifyConstTensors(
-            "m_CellToInputWeights", m_InputParams.m_CellToInputWeights, params.m_CellToInputWeights);
-        VerifyConstTensors(
-            "m_CellToForgetWeights", m_InputParams.m_CellToForgetWeights, params.m_CellToForgetWeights);
-        VerifyConstTensors(
-            "m_CellToOutputWeights", m_InputParams.m_CellToOutputWeights, params.m_CellToOutputWeights);
-        VerifyConstTensors(
-            "m_InputGateBias", m_InputParams.m_InputGateBias, params.m_InputGateBias);
-        VerifyConstTensors(
-            "m_ForgetGateBias", m_InputParams.m_ForgetGateBias, params.m_ForgetGateBias);
-        VerifyConstTensors(
-            "m_CellBias", m_InputParams.m_CellBias, params.m_CellBias);
-        VerifyConstTensors(
-            "m_OutputGateBias", m_InputParams.m_OutputGateBias, params.m_OutputGateBias);
-        VerifyConstTensors(
-            "m_ProjectionWeights", m_InputParams.m_ProjectionWeights, params.m_ProjectionWeights);
-        VerifyConstTensors(
-            "m_ProjectionBias", m_InputParams.m_ProjectionBias, params.m_ProjectionBias);
-        VerifyConstTensors(
-            "m_InputLayerNormWeights", m_InputParams.m_InputLayerNormWeights, params.m_InputLayerNormWeights);
-        VerifyConstTensors(
-            "m_ForgetLayerNormWeights", m_InputParams.m_ForgetLayerNormWeights, params.m_ForgetLayerNormWeights);
-        VerifyConstTensors(
-            "m_CellLayerNormWeights", m_InputParams.m_CellLayerNormWeights, params.m_CellLayerNormWeights);
-        VerifyConstTensors(
-            "m_OutputLayerNormWeights", m_InputParams.m_OutputLayerNormWeights, params.m_OutputLayerNormWeights);
-    }
-
-private:
-    armnn::LstmInputParams m_InputParams;
-};
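For reference, the descriptor-aware base these verifiers extend most likely stores the expected descriptor and compares it wholesale; a minimal sketch, assuming the descriptor types provide operator== (the shape of the class is inferred from its use above, not taken verbatim from the test utilities):

    template <typename Descriptor>
    class LayerVerifierBaseWithDescriptor : public LayerVerifierBase
    {
    public:
        LayerVerifierBaseWithDescriptor(const std::string& layerName,
                                        const std::vector<armnn::TensorInfo>& inputInfos,
                                        const std::vector<armnn::TensorInfo>& outputInfos,
                                        const Descriptor& descriptor)
            : LayerVerifierBase(layerName, inputInfos, outputInfos)
            , m_Descriptor(descriptor) {}

    protected:
        // Compare the deserialized descriptor against the one used to build the network.
        void VerifyDescriptor(const Descriptor& descriptor)
        {
            BOOST_CHECK(descriptor == m_Descriptor);
        }

        Descriptor m_Descriptor;
    };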
-
-BOOST_AUTO_TEST_CASE(SerializeDeserializeQLstmBasic)
-{
-    armnn::QLstmDescriptor descriptor;
-
-    descriptor.m_CifgEnabled       = true;
-    descriptor.m_ProjectionEnabled = false;
-    descriptor.m_PeepholeEnabled   = false;
-    descriptor.m_LayerNormEnabled  = false;
-
-    descriptor.m_CellClip       = 0.0f;
-    descriptor.m_ProjectionClip = 0.0f;
-
-    descriptor.m_InputIntermediateScale  = 0.00001f;
-    descriptor.m_ForgetIntermediateScale = 0.00001f;
-    descriptor.m_CellIntermediateScale   = 0.00001f;
-    descriptor.m_OutputIntermediateScale = 0.00001f;
-
-    descriptor.m_HiddenStateScale     = 0.07f;
-    descriptor.m_HiddenStateZeroPoint = 0;
-
-    const unsigned int numBatches = 2;
-    const unsigned int inputSize  = 5;
-    const unsigned int outputSize = 4;
-    const unsigned int numUnits   = 4;
-
-    // Scale/Offset quantization info
-    float inputScale    = 0.0078f;
-    int32_t inputOffset = 0;
-
-    float outputScale    = 0.0078f;
-    int32_t outputOffset = 0;
-
-    float cellStateScale    = 3.5002e-05f;
-    int32_t cellStateOffset = 0;
-
-    float weightsScale    = 0.007f;
-    int32_t weightsOffset = 0;
-
-    float biasScale    = 3.5002e-05f / 1024;
-    int32_t biasOffset = 0;
-
-    // Weights and bias tensor and quantization info
-    armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
-                                       armnn::DataType::QSymmS8,
-                                       weightsScale,
-                                       weightsOffset);
-
-    armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
-                                           armnn::DataType::QSymmS8,
-                                           weightsScale,
-                                           weightsOffset);
-
-    armnn::TensorInfo biasInfo({numUnits}, armnn::DataType::Signed32, biasScale, biasOffset);
-
-    std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-    std::vector<int8_t> inputToCellWeightsData   = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-    std::vector<int8_t> inputToOutputWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-
-    armnn::ConstTensor inputToForgetWeights(inputWeightsInfo, inputToForgetWeightsData);
-    armnn::ConstTensor inputToCellWeights(inputWeightsInfo, inputToCellWeightsData);
-    armnn::ConstTensor inputToOutputWeights(inputWeightsInfo, inputToOutputWeightsData);
-
-    std::vector<int8_t> recurrentToForgetWeightsData =
-            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-    std::vector<int8_t> recurrentToCellWeightsData   =
-            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-    std::vector<int8_t> recurrentToOutputWeightsData =
-            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-
-    armnn::ConstTensor recurrentToForgetWeights(recurrentWeightsInfo, recurrentToForgetWeightsData);
-    armnn::ConstTensor recurrentToCellWeights(recurrentWeightsInfo, recurrentToCellWeightsData);
-    armnn::ConstTensor recurrentToOutputWeights(recurrentWeightsInfo, recurrentToOutputWeightsData);
-
-    std::vector<int32_t> forgetGateBiasData(numUnits, 1);
-    std::vector<int32_t> cellBiasData(numUnits, 0);
-    std::vector<int32_t> outputGateBiasData(numUnits, 0);
-
-    armnn::ConstTensor forgetGateBias(biasInfo, forgetGateBiasData);
-    armnn::ConstTensor cellBias(biasInfo, cellBiasData);
-    armnn::ConstTensor outputGateBias(biasInfo, outputGateBiasData);
-
-    // Set up params
-    armnn::LstmInputParams params;
-    params.m_InputToForgetWeights = &inputToForgetWeights;
-    params.m_InputToCellWeights   = &inputToCellWeights;
-    params.m_InputToOutputWeights = &inputToOutputWeights;
-
-    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
-    params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
-    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
-
-    params.m_ForgetGateBias = &forgetGateBias;
-    params.m_CellBias       = &cellBias;
-    params.m_OutputGateBias = &outputGateBias;
-
-    // Create network
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    const std::string layerName("qLstm");
-
-    armnn::IConnectableLayer* const input         = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(1);
-    armnn::IConnectableLayer* const cellStateIn   = network->AddInputLayer(2);
-
-    armnn::IConnectableLayer* const qLstmLayer = network->AddQLstmLayer(descriptor, params, layerName.c_str());
-
-    armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(0);
-    armnn::IConnectableLayer* const cellStateOut   = network->AddOutputLayer(1);
-    armnn::IConnectableLayer* const outputLayer    = network->AddOutputLayer(2);
-
-    // Input/Output tensor info
-    armnn::TensorInfo inputInfo({numBatches, inputSize},
-                                armnn::DataType::QAsymmS8,
-                                inputScale,
-                                inputOffset);
-
-    armnn::TensorInfo cellStateInfo({numBatches, numUnits},
-                                    armnn::DataType::QSymmS16,
-                                    cellStateScale,
-                                    cellStateOffset);
-
-    armnn::TensorInfo outputStateInfo({numBatches, outputSize},
-                                      armnn::DataType::QAsymmS8,
-                                      outputScale,
-                                      outputOffset);
-
-    // Connect input/output slots
-    input->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(0));
-    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
-
-    outputStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(1));
-    outputStateIn->GetOutputSlot(0).SetTensorInfo(cellStateInfo);
-
-    cellStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(2));
-    cellStateIn->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
-
-    qLstmLayer->GetOutputSlot(0).Connect(outputStateOut->GetInputSlot(0));
-    qLstmLayer->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
-
-    qLstmLayer->GetOutputSlot(1).Connect(cellStateOut->GetInputSlot(0));
-    qLstmLayer->GetOutputSlot(1).SetTensorInfo(cellStateInfo);
-
-    qLstmLayer->GetOutputSlot(2).Connect(outputLayer->GetInputSlot(0));
-    qLstmLayer->GetOutputSlot(2).SetTensorInfo(outputStateInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
-
-    VerifyQLstmLayer checker(layerName,
-                             {inputInfo, cellStateInfo, outputStateInfo},
-                             {outputStateInfo, cellStateInfo, outputStateInfo},
-                             descriptor,
-                             params);
-
-    deserializedNetwork->Accept(checker);
-}
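GenerateRandomData<T> is used throughout but not defined in this hunk; below is a minimal sketch for the integral element types used here (a fixed seed keeps the test data reproducible; this is an assumed utility, not the canonical one). Note also that outputStateIn is assigned cellStateInfo and cellStateIn is assigned outputStateInfo above; the verifier's expected input list repeats that order, so the check stays self-consistent even though the naming reads transposed.

    #include <algorithm>
    #include <cstddef>
    #include <limits>
    #include <random>
    #include <type_traits>
    #include <vector>

    template <typename T>
    std::vector<T> GenerateRandomData(size_t size)
    {
        static_assert(std::is_integral<T>::value, "sketch covers the integral case used here");

        static std::mt19937 generator(42); // fixed seed for reproducible tests
        std::uniform_int_distribution<int> distribution(std::numeric_limits<T>::lowest(),
                                                        std::numeric_limits<T>::max());

        std::vector<T> randomData(size);
        std::generate(randomData.begin(), randomData.end(),
                      [&]() { return static_cast<T>(distribution(generator)); });
        return randomData;
    }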
-
-BOOST_AUTO_TEST_CASE(SerializeDeserializeQLstmCifgLayerNorm)
-{
-    armnn::QLstmDescriptor descriptor;
-
-    // CIFG (input gate) params are only required when CIFG is disabled; CIFG is enabled here, so they are omitted
-    descriptor.m_CifgEnabled       = true;
-    descriptor.m_ProjectionEnabled = false;
-    descriptor.m_PeepholeEnabled   = false;
-    descriptor.m_LayerNormEnabled  = true;
-
-    descriptor.m_CellClip       = 0.0f;
-    descriptor.m_ProjectionClip = 0.0f;
-
-    descriptor.m_InputIntermediateScale  = 0.00001f;
-    descriptor.m_ForgetIntermediateScale = 0.00001f;
-    descriptor.m_CellIntermediateScale   = 0.00001f;
-    descriptor.m_OutputIntermediateScale = 0.00001f;
-
-    descriptor.m_HiddenStateScale     = 0.07f;
-    descriptor.m_HiddenStateZeroPoint = 0;
-
-    const unsigned int numBatches = 2;
-    const unsigned int inputSize  = 5;
-    const unsigned int outputSize = 4;
-    const unsigned int numUnits   = 4;
-
-    // Scale/Offset quantization info
-    float inputScale    = 0.0078f;
-    int32_t inputOffset = 0;
-
-    float outputScale    = 0.0078f;
-    int32_t outputOffset = 0;
-
-    float cellStateScale    = 3.5002e-05f;
-    int32_t cellStateOffset = 0;
-
-    float weightsScale    = 0.007f;
-    int32_t weightsOffset = 0;
-
-    float layerNormScale    = 3.5002e-05f;
-    int32_t layerNormOffset = 0;
-
-    float biasScale    = layerNormScale / 1024;
-    int32_t biasOffset = 0;
-
-    // Weights and bias tensor and quantization info
-    armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
-                                       armnn::DataType::QSymmS8,
-                                       weightsScale,
-                                       weightsOffset);
-
-    armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
-                                           armnn::DataType::QSymmS8,
-                                           weightsScale,
-                                           weightsOffset);
-
-    armnn::TensorInfo biasInfo({numUnits},
-                               armnn::DataType::Signed32,
-                               biasScale,
-                               biasOffset);
-
-    armnn::TensorInfo layerNormWeightsInfo({numUnits},
-                                           armnn::DataType::QSymmS16,
-                                           layerNormScale,
-                                           layerNormOffset);
-
-    // Mandatory params
-    std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-    std::vector<int8_t> inputToCellWeightsData   = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-    std::vector<int8_t> inputToOutputWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-
-    armnn::ConstTensor inputToForgetWeights(inputWeightsInfo, inputToForgetWeightsData);
-    armnn::ConstTensor inputToCellWeights(inputWeightsInfo, inputToCellWeightsData);
-    armnn::ConstTensor inputToOutputWeights(inputWeightsInfo, inputToOutputWeightsData);
-
-    std::vector<int8_t> recurrentToForgetWeightsData =
-            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-    std::vector<int8_t> recurrentToCellWeightsData   =
-            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-    std::vector<int8_t> recurrentToOutputWeightsData =
-            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-
-    armnn::ConstTensor recurrentToForgetWeights(recurrentWeightsInfo, recurrentToForgetWeightsData);
-    armnn::ConstTensor recurrentToCellWeights(recurrentWeightsInfo, recurrentToCellWeightsData);
-    armnn::ConstTensor recurrentToOutputWeights(recurrentWeightsInfo, recurrentToOutputWeightsData);
-
-    std::vector<int32_t> forgetGateBiasData(numUnits, 1);
-    std::vector<int32_t> cellBiasData(numUnits, 0);
-    std::vector<int32_t> outputGateBiasData(numUnits, 0);
-
-    armnn::ConstTensor forgetGateBias(biasInfo, forgetGateBiasData);
-    armnn::ConstTensor cellBias(biasInfo, cellBiasData);
-    armnn::ConstTensor outputGateBias(biasInfo, outputGateBiasData);
-
-    // Layer Norm
-    std::vector<int16_t> forgetLayerNormWeightsData =
-            GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
-    std::vector<int16_t> cellLayerNormWeightsData =
-            GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
-    std::vector<int16_t> outputLayerNormWeightsData =
-            GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
-
-    armnn::ConstTensor forgetLayerNormWeights(layerNormWeightsInfo, forgetLayerNormWeightsData);
-    armnn::ConstTensor cellLayerNormWeights(layerNormWeightsInfo, cellLayerNormWeightsData);
-    armnn::ConstTensor outputLayerNormWeights(layerNormWeightsInfo, outputLayerNormWeightsData);
-
-    // Set up params
-    armnn::LstmInputParams params;
-
-    // Mandatory params
-    params.m_InputToForgetWeights = &inputToForgetWeights;
-    params.m_InputToCellWeights   = &inputToCellWeights;
-    params.m_InputToOutputWeights = &inputToOutputWeights;
-
-    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
-    params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
-    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
-
-    params.m_ForgetGateBias = &forgetGateBias;
-    params.m_CellBias       = &cellBias;
-    params.m_OutputGateBias = &outputGateBias;
-
-    // Layer Norm
-    params.m_ForgetLayerNormWeights = &forgetLayerNormWeights;
-    params.m_CellLayerNormWeights   = &cellLayerNormWeights;
-    params.m_OutputLayerNormWeights = &outputLayerNormWeights;
-
-    // Create network
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    const std::string layerName("qLstm");
-
-    armnn::IConnectableLayer* const input         = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(1);
-    armnn::IConnectableLayer* const cellStateIn   = network->AddInputLayer(2);
-
-    armnn::IConnectableLayer* const qLstmLayer = network->AddQLstmLayer(descriptor, params, layerName.c_str());
-
-    armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(0);
-    armnn::IConnectableLayer* const cellStateOut   = network->AddOutputLayer(1);
-    armnn::IConnectableLayer* const outputLayer    = network->AddOutputLayer(2);
-
-    // Input/Output tensor info
-    armnn::TensorInfo inputInfo({numBatches, inputSize},
-                                armnn::DataType::QAsymmS8,
-                                inputScale,
-                                inputOffset);
-
-    armnn::TensorInfo cellStateInfo({numBatches, numUnits},
-                                    armnn::DataType::QSymmS16,
-                                    cellStateScale,
-                                    cellStateOffset);
-
-    armnn::TensorInfo outputStateInfo({numBatches, outputSize},
-                                      armnn::DataType::QAsymmS8,
-                                      outputScale,
-                                      outputOffset);
-
-    // Connect input/output slots
-    input->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(0));
-    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
-
-    outputStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(1));
-    outputStateIn->GetOutputSlot(0).SetTensorInfo(cellStateInfo);
-
-    cellStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(2));
-    cellStateIn->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
-
-    qLstmLayer->GetOutputSlot(0).Connect(outputStateOut->GetInputSlot(0));
-    qLstmLayer->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
-
-    qLstmLayer->GetOutputSlot(1).Connect(cellStateOut->GetInputSlot(0));
-    qLstmLayer->GetOutputSlot(1).SetTensorInfo(cellStateInfo);
-
-    qLstmLayer->GetOutputSlot(2).Connect(outputLayer->GetInputSlot(0));
-    qLstmLayer->GetOutputSlot(2).SetTensorInfo(outputStateInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
-
-    VerifyQLstmLayer checker(layerName,
-                             {inputInfo, cellStateInfo, outputStateInfo},
-                             {outputStateInfo, cellStateInfo, outputStateInfo},
-                             descriptor,
-                             params);
-
-    deserializedNetwork->Accept(checker);
-}
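The test above exercises the one subtle interaction in QLstm parameter validation: with CIFG enabled there is no input gate, so m_InputLayerNormWeights stays null while the forget, cell and output layer-norm weights are supplied. A hypothetical consistency check capturing that rule (illustrative only, not part of this patch):

    bool LayerNormParamsConsistent(const armnn::QLstmDescriptor& descriptor,
                                   const armnn::LstmInputParams& params)
    {
        if (!descriptor.m_LayerNormEnabled)
        {
            return true; // nothing to check when layer norm is off
        }
        const bool gateNormsPresent = params.m_ForgetLayerNormWeights != nullptr
                                   && params.m_CellLayerNormWeights   != nullptr
                                   && params.m_OutputLayerNormWeights != nullptr;
        // Input layer-norm weights exist only when the input gate exists (CIFG disabled).
        const bool inputNormConsistent = descriptor.m_CifgEnabled
                                       ? params.m_InputLayerNormWeights == nullptr
                                       : params.m_InputLayerNormWeights != nullptr;
        return gateNormsPresent && inputNormConsistent;
    }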
-
-BOOST_AUTO_TEST_CASE(SerializeDeserializeQLstmAdvanced)
-{
-    armnn::QLstmDescriptor descriptor;
-
-    descriptor.m_CifgEnabled       = false;
-    descriptor.m_ProjectionEnabled = true;
-    descriptor.m_PeepholeEnabled   = true;
-    descriptor.m_LayerNormEnabled  = true;
-
-    descriptor.m_CellClip       = 0.1f;
-    descriptor.m_ProjectionClip = 0.1f;
-
-    descriptor.m_InputIntermediateScale  = 0.00001f;
-    descriptor.m_ForgetIntermediateScale = 0.00001f;
-    descriptor.m_CellIntermediateScale   = 0.00001f;
-    descriptor.m_OutputIntermediateScale = 0.00001f;
-
-    descriptor.m_HiddenStateScale     = 0.07f;
-    descriptor.m_HiddenStateZeroPoint = 0;
-
-    const unsigned int numBatches = 2;
-    const unsigned int inputSize  = 5;
-    const unsigned int outputSize = 4;
-    const unsigned int numUnits   = 4;
-
-    // Scale/Offset quantization info
-    float inputScale    = 0.0078f;
-    int32_t inputOffset = 0;
-
-    float outputScale    = 0.0078f;
-    int32_t outputOffset = 0;
-
-    float cellStateScale    = 3.5002e-05f;
-    int32_t cellStateOffset = 0;
-
-    float weightsScale    = 0.007f;
-    int32_t weightsOffset = 0;
-
-    float layerNormScale    = 3.5002e-05f;
-    int32_t layerNormOffset = 0;
-
-    float biasScale    = layerNormScale / 1024;
-    int32_t biasOffset = 0;
-
-    // Weights and bias tensor and quantization info
-    armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
-                                       armnn::DataType::QSymmS8,
-                                       weightsScale,
-                                       weightsOffset);
-
-    armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
-                                           armnn::DataType::QSymmS8,
-                                           weightsScale,
-                                           weightsOffset);
-
-    armnn::TensorInfo biasInfo({numUnits},
-                               armnn::DataType::Signed32,
-                               biasScale,
-                               biasOffset);
-
-    armnn::TensorInfo peepholeWeightsInfo({numUnits},
-                                          armnn::DataType::QSymmS16,
-                                          weightsScale,
-                                          weightsOffset);
-
-    armnn::TensorInfo layerNormWeightsInfo({numUnits},
-                                           armnn::DataType::QSymmS16,
-                                           layerNormScale,
-                                           layerNormOffset);
-
-    armnn::TensorInfo projectionWeightsInfo({outputSize, numUnits},
-                                             armnn::DataType::QSymmS8,
-                                             weightsScale,
-                                             weightsOffset);
-
-    // Mandatory params
-    std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-    std::vector<int8_t> inputToCellWeightsData   = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-    std::vector<int8_t> inputToOutputWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-
-    armnn::ConstTensor inputToForgetWeights(inputWeightsInfo, inputToForgetWeightsData);
-    armnn::ConstTensor inputToCellWeights(inputWeightsInfo, inputToCellWeightsData);
-    armnn::ConstTensor inputToOutputWeights(inputWeightsInfo, inputToOutputWeightsData);
-
-    std::vector<int8_t> recurrentToForgetWeightsData =
-            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-    std::vector<int8_t> recurrentToCellWeightsData   =
-            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-    std::vector<int8_t> recurrentToOutputWeightsData =
-            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-
-    armnn::ConstTensor recurrentToForgetWeights(recurrentWeightsInfo, recurrentToForgetWeightsData);
-    armnn::ConstTensor recurrentToCellWeights(recurrentWeightsInfo, recurrentToCellWeightsData);
-    armnn::ConstTensor recurrentToOutputWeights(recurrentWeightsInfo, recurrentToOutputWeightsData);
-
-    std::vector<int32_t> forgetGateBiasData(numUnits, 1);
-    std::vector<int32_t> cellBiasData(numUnits, 0);
-    std::vector<int32_t> outputGateBiasData(numUnits, 0);
-
-    armnn::ConstTensor forgetGateBias(biasInfo, forgetGateBiasData);
-    armnn::ConstTensor cellBias(biasInfo, cellBiasData);
-    armnn::ConstTensor outputGateBias(biasInfo, outputGateBiasData);
-
-    // CIFG
-    std::vector<int8_t> inputToInputWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-    std::vector<int8_t> recurrentToInputWeightsData =
-            GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-    std::vector<int32_t> inputGateBiasData(numUnits, 1);
-
-    armnn::ConstTensor inputToInputWeights(inputWeightsInfo, inputToInputWeightsData);
-    armnn::ConstTensor recurrentToInputWeights(recurrentWeightsInfo, recurrentToInputWeightsData);
-    armnn::ConstTensor inputGateBias(biasInfo, inputGateBiasData);
-
-    // Peephole
-    std::vector<int16_t> cellToInputWeightsData  = GenerateRandomData<int16_t>(peepholeWeightsInfo.GetNumElements());
-    std::vector<int16_t> cellToForgetWeightsData = GenerateRandomData<int16_t>(peepholeWeightsInfo.GetNumElements());
-    std::vector<int16_t> cellToOutputWeightsData = GenerateRandomData<int16_t>(peepholeWeightsInfo.GetNumElements());
-
-    armnn::ConstTensor cellToInputWeights(peepholeWeightsInfo, cellToInputWeightsData);
-    armnn::ConstTensor cellToForgetWeights(peepholeWeightsInfo, cellToForgetWeightsData);
-    armnn::ConstTensor cellToOutputWeights(peepholeWeightsInfo, cellToOutputWeightsData);
-
-    // Projection
-    std::vector<int8_t> projectionWeightsData = GenerateRandomData<int8_t>(projectionWeightsInfo.GetNumElements());
-    std::vector<int32_t> projectionBiasData(outputSize, 1);
-
-    armnn::ConstTensor projectionWeights(projectionWeightsInfo, projectionWeightsData);
-    armnn::ConstTensor projectionBias(biasInfo, projectionBiasData);
-
-    // Layer Norm
-    std::vector<int16_t> inputLayerNormWeightsData =
-            GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
-    std::vector<int16_t> forgetLayerNormWeightsData =
-            GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
-    std::vector<int16_t> cellLayerNormWeightsData =
-            GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
-    std::vector<int16_t> outputLayerNormWeightsData =
-            GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
-
-    armnn::ConstTensor inputLayerNormWeights(layerNormWeightsInfo, inputLayerNormWeightsData);
-    armnn::ConstTensor forgetLayerNormWeights(layerNormWeightsInfo, forgetLayerNormWeightsData);
-    armnn::ConstTensor cellLayerNormWeights(layerNormWeightsInfo, cellLayerNormWeightsData);
-    armnn::ConstTensor outputLayerNormWeights(layerNormWeightsInfo, outputLayerNormWeightsData);
-
-    // Set up params
-    armnn::LstmInputParams params;
-
-    // Mandatory params
-    params.m_InputToForgetWeights = &inputToForgetWeights;
-    params.m_InputToCellWeights   = &inputToCellWeights;
-    params.m_InputToOutputWeights = &inputToOutputWeights;
-
-    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
-    params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
-    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
-
-    params.m_ForgetGateBias = &forgetGateBias;
-    params.m_CellBias       = &cellBias;
-    params.m_OutputGateBias = &outputGateBias;
-
-    // CIFG
-    params.m_InputToInputWeights     = &inputToInputWeights;
-    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
-    params.m_InputGateBias           = &inputGateBias;
-
-    // Peephole
-    params.m_CellToInputWeights  = &cellToInputWeights;
-    params.m_CellToForgetWeights = &cellToForgetWeights;
-    params.m_CellToOutputWeights = &cellToOutputWeights;
-
-    // Projection
-    params.m_ProjectionWeights = &projectionWeights;
-    params.m_ProjectionBias    = &projectionBias;
-
-    // Layer Norm
-    params.m_InputLayerNormWeights  = &inputLayerNormWeights;
-    params.m_ForgetLayerNormWeights = &forgetLayerNormWeights;
-    params.m_CellLayerNormWeights   = &cellLayerNormWeights;
-    params.m_OutputLayerNormWeights = &outputLayerNormWeights;
-
-    // Create network
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    const std::string layerName("qLstm");
-
-    armnn::IConnectableLayer* const input         = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(1);
-    armnn::IConnectableLayer* const cellStateIn   = network->AddInputLayer(2);
-
-    armnn::IConnectableLayer* const qLstmLayer = network->AddQLstmLayer(descriptor, params, layerName.c_str());
-
-    armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(0);
-    armnn::IConnectableLayer* const cellStateOut   = network->AddOutputLayer(1);
-    armnn::IConnectableLayer* const outputLayer    = network->AddOutputLayer(2);
-
-    // Input/Output tensor info
-    armnn::TensorInfo inputInfo({numBatches, inputSize},
-                                armnn::DataType::QAsymmS8,
-                                inputScale,
-                                inputOffset);
-
-    armnn::TensorInfo cellStateInfo({numBatches, numUnits},
-                                    armnn::DataType::QSymmS16,
-                                    cellStateScale,
-                                    cellStateOffset);
-
-    armnn::TensorInfo outputStateInfo({numBatches, outputSize},
-                                      armnn::DataType::QAsymmS8,
-                                      outputScale,
-                                      outputOffset);
-
-    // Connect input/output slots
-    input->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(0));
-    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
-
-    outputStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(1));
-    outputStateIn->GetOutputSlot(0).SetTensorInfo(cellStateInfo);
-
-    cellStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(2));
-    cellStateIn->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
-
-    qLstmLayer->GetOutputSlot(0).Connect(outputStateOut->GetInputSlot(0));
-    qLstmLayer->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
-
-    qLstmLayer->GetOutputSlot(1).Connect(cellStateOut->GetInputSlot(0));
-    qLstmLayer->GetOutputSlot(1).SetTensorInfo(cellStateInfo);
-
-    qLstmLayer->GetOutputSlot(2).Connect(outputLayer->GetInputSlot(0));
-    qLstmLayer->GetOutputSlot(2).SetTensorInfo(outputStateInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
-
-    VerifyQLstmLayer checker(layerName,
-                             {inputInfo, cellStateInfo, outputStateInfo},
-                             {outputStateInfo, cellStateInfo, outputStateInfo},
-                             descriptor,
-                             params);
-
-    deserializedNetwork->Accept(checker);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
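The one added line in this hunk is the point of the whole patch: the visitor-based deserializedNetwork->Accept(checker) calls above become deserializedNetwork->ExecuteStrategy(verifier). A minimal sketch of a verifier written against the unified interface (the signature follows the IStrategy.hpp introduced by this change; treat the details as an assumption):

    #include <armnn/IStrategy.hpp>
    #include <armnn/utility/IgnoreUnused.hpp>

    // One callback replaces the per-layer Visit* methods: the layer kind is
    // dispatched on layer->GetType(), and the descriptor arrives as a
    // BaseDescriptor reference that is downcast where details are needed.
    class QLstmVerifierStrategy : public armnn::IStrategy
    {
    public:
        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                             const armnn::BaseDescriptor& descriptor,
                             const std::vector<armnn::ConstTensor>& constants,
                             const char* name,
                             const armnn::LayerBindingId id = 0) override
        {
            armnn::IgnoreUnused(constants, id);
            if (layer->GetType() == armnn::LayerType::QLstm)
            {
                const auto& qLstmDescriptor =
                    static_cast<const armnn::QLstmDescriptor&>(descriptor);
                // ... verify name, connections and descriptor fields here ...
                armnn::IgnoreUnused(qLstmDescriptor, name);
            }
        }
    };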