IVGCVSW-3633 Refactor HalPolicy to fully support V1.2 models

 * Templated the V1.0 and V1.1 Convert methods and moved them into ConversionUtils.hpp so that they also work with later versions of models, operations and operands (a minimal sketch of the pattern follows this list).
 * The V1.2 HalPolicy no longer converts V1.2 models, operations and operands to earlier versions.
 * The V1.2 HalPolicy no longer passes operations to the V1.1 or V1.0 HalPolicies for conversion.
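
The shared conversions now follow the pattern sketched below: a free function
template in ConversionUtils.hpp, parameterised on the HalPolicy, that each
HalPolicy's member function forwards to. This snippet is a minimal,
self-contained illustration of that pattern only; the stand-in Operation and
Model types are hypothetical, and the real code uses the NNAPI HAL types,
ConversionData and the full conversion logic.

    #include <iostream>
    #include <string>

    namespace hal_1_0
    {
    struct Operation { std::string type = "V1.0 ADD"; };
    struct Model {};
    struct HalPolicy { using Operation = hal_1_0::Operation; using Model = hal_1_0::Model; };
    }

    namespace hal_1_2
    {
    struct Operation { std::string type = "V1.2 ADD"; };
    struct Model {};
    struct HalPolicy { using Operation = hal_1_2::Operation; using Model = hal_1_2::Model; };
    }

    // One copy of the conversion logic, written against the HalPolicy's nested types.
    template<typename HalPolicy,
             typename Operation = typename HalPolicy::Operation,
             typename Model     = typename HalPolicy::Model>
    bool ConvertAdd(const Operation& operation, const Model& /*model*/)
    {
        std::cout << "converting " << operation.type << std::endl;
        return true;
    }

    int main()
    {
        // Each HalPolicy instantiates the same template with its own types, so
        // V1.2 operations are converted directly rather than being downgraded
        // to V1.0/V1.1 first.
        ConvertAdd<hal_1_0::HalPolicy>(hal_1_0::Operation{}, hal_1_0::Model{});
        ConvertAdd<hal_1_2::HalPolicy>(hal_1_2::Operation{}, hal_1_2::Model{});
    }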

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I5de59d43a3abb1f8ac0253dc637ad68318960c76
diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp
index 6c8dcb5..cff678a 100644
--- a/1.0/HalPolicy.cpp
+++ b/1.0/HalPolicy.cpp
@@ -74,63 +74,7 @@
 bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_0::HalPolicy::ConvertAdd()");
-
-    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
-    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);
-
-    if (!input0.IsValid() || !input1.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    // The FuseActivation parameter is always the input index 2
-    // and it should be optional
-    ActivationFn activationFunction;
-    if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
-    if (!outputOperand)
-    {
-        return false;
-    }
-
-    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
-    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
-    if (IsDynamicTensor(outputInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsAdditionSupported,
-                               data.m_Backends,
-                               isSupported,
-                               inputInfo0,
-                               inputInfo1,
-                               outputInfo);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
-    armnn::IConnectableLayer* const endLayer   = ProcessActivation(outputInfo, activationFunction, startLayer, data);
-
-    if (endLayer != nullptr)
-    {
-        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
-        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
-    }
-    else
-    {
-        return Fail("%s: ProcessActivation failed", __func__);
-    }
+    return ::ConvertAdd<hal_1_0::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
@@ -160,187 +104,19 @@
 bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_0::HalPolicy::ConvertDequantize()");
-
-    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid input", __func__);
-    }
-
-    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
-    if (!outputOperand)
-    {
-        return Fail("%s: Operation has invalid outputs", __func__);
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
-    if (IsDynamicTensor(outputInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsDequantizeSupported,
-                               data.m_Backends,
-                               isSupported,
-                               input.GetTensorInfo(),
-                               GetTensorInfoForOperand(*outputOperand));
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
-    assert(layer != nullptr);
-    input.Connect(layer->GetInputSlot(0));
-
-    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
+    return ::ConvertDequantize<hal_1_0::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_0::HalPolicy::ConvertFloor()");
-
-    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
-    if (!outputOperand)
-    {
-        return Fail("%s: Operation has invalid outputs", __func__);
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
-    if (IsDynamicTensor(outputInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsFloorSupported,
-                               data.m_Backends,
-                               isSupported,
-                               input.GetTensorInfo(),
-                               outputInfo);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
-    assert(layer != nullptr);
-    input.Connect(layer->GetInputSlot(0));
-
-    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
+    return ::ConvertFloor<hal_1_0::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_0::HalPolicy::ConvertFullyConnected()");
-
-    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
-    if (IsDynamicTensor(outputInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
-
-    // ArmNN does not currently support non-fixed weights or bias
-    ConstTensorPin weightsPin =
-        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 1, model, data); // 2D
-    ConstTensorPin biasPin =
-        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data); // 1D
-
-    if (!weightsPin.IsValid() || !biasPin.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    armnn::ConstTensor weights = weightsPin.GetConstTensor();
-    armnn::ConstTensor bias    = biasPin.GetConstTensor();
-    armnn::TensorInfo reshapedInfo = inputInfo;
-
-    try
-    {
-        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
-    } catch (const std::exception &e) {
-        return Fail("%s: %s", __func__, e.what());
-    }
-
-    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
-    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);
-
-    ActivationFn activationFunction;
-    if (!GetInputActivationFunction<hal_1_0::HalPolicy>(operation, 3, activationFunction, model, data))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    armnn::FullyConnectedDescriptor desc;
-    desc.m_TransposeWeightMatrix = true;
-    desc.m_BiasEnabled           = true;
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsFullyConnectedSupported,
-                               data.m_Backends,
-                               isSupported,
-                               reshapedInfo,
-                               outputInfo,
-                               weights.GetInfo(),
-                               bias.GetInfo(),
-                               desc);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* startLayer =
-            data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
-    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
-
-    if (endLayer != nullptr)
-    {
-        if (inputInfo.GetNumDimensions() > 2U)
-        {
-            armnn::ReshapeDescriptor reshapeDescriptor;
-            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
-
-            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
-            assert(reshapeLayer != nullptr);
-            input.Connect(reshapeLayer->GetInputSlot(0));
-            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
-            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
-        }
-        else
-        {
-            input.Connect(startLayer->GetInputSlot(0));
-        }
-
-        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
-    }
-    else
-    {
-        return Fail("%s: ProcessActivation failed", __func__);
-    }
+    return ::ConvertFullyConnected<hal_1_0::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
@@ -348,74 +124,13 @@
                                                   ConversionData& data)
 {
     ALOGV("hal_1_0::HalPolicy::ConvertLocalResponseNormalization()");
-
-    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
-    if (IsDynamicTensor(outputInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
-
-    armnn::NormalizationDescriptor descriptor;
-    descriptor.m_DataLayout      = armnn::DataLayout::NHWC;
-    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
-    descriptor.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;
-
-    if (!input.IsValid() ||
-        !GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
-        !GetInputFloat32<hal_1_0::HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
-        !GetInputFloat32<hal_1_0::HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
-        !GetInputFloat32<hal_1_0::HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    // ArmNN expects normSize to be the full size of the normalization
-    // window rather than the radius as in AndroidNN.
-    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsNormalizationSupported,
-                               data.m_Backends,
-                               isSupported,
-                               inputInfo,
-                               outputInfo,
-                               descriptor);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-
-    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
-    assert(layer != nullptr);
-    input.Connect(layer->GetInputSlot(0));
-
-    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
+    return ::ConvertLocalResponseNormalization<hal_1_0::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_0::HalPolicy::ConvertLogistic()");
-
-    armnn::ActivationDescriptor desc;
-    desc.m_Function = armnn::ActivationFunction::Sigmoid;
-
-    return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
+    return ::ConvertLogistic<hal_1_0::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
@@ -775,48 +490,7 @@
 bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_0::HalPolicy::ConvertL2Normalization()");
-
-    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
-    if (IsDynamicTensor(outputInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
-
-    armnn::L2NormalizationDescriptor desc;
-    desc.m_DataLayout = armnn::DataLayout::NHWC;
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsL2NormalizationSupported,
-                               data.m_Backends,
-                               isSupported,
-                               inputInfo,
-                               outputInfo,
-                               desc);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
-    assert(layer != nullptr);
-    input.Connect(layer->GetInputSlot(0));
-
-    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
+    return ::ConvertL2Normalization<hal_1_0::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
@@ -834,64 +508,7 @@
 bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_0::HalPolicy::ConvertMul()");
-
-    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
-    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);
-
-    if (!input0.IsValid() || !input1.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    // The FuseActivation parameter is always the input index 2
-    // and it should be optional
-    ActivationFn activationFunction;
-    if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
-
-    if (outputOperand == nullptr)
-    {
-        return false;
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
-    if (IsDynamicTensor(outputInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsMultiplicationSupported,
-                               data.m_Backends,
-                               isSupported,
-                               input0.GetTensorInfo(),
-                               input1.GetTensorInfo(),
-                               outputInfo);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
-    armnn::IConnectableLayer* const endLayer   = ProcessActivation(outputInfo, activationFunction, startLayer, data);
-
-    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
-    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
-
-    if (endLayer != nullptr)
-    {
-        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
-        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
-    }
-    else
-    {
-        return Fail("%s: ProcessActivation failed", __func__);
-    }
+    return ::ConvertMul<hal_1_0::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
@@ -1029,74 +646,7 @@
 bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_0::HalPolicy::ConvertReshape()");
-
-    const Operand* inputOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 0, model);
-    const Operand* requestedShapeOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 1, model);
-    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
-
-    if (inputOperand == nullptr
-        || requestedShapeOperand == nullptr
-        || outputOperand == nullptr)
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-
-    if (requestedShapeOperand->dimensions.size() != 1)
-    {
-        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
-            __func__, requestedShapeOperand->dimensions.size());
-    }
-
-    std::vector<int32_t> targetDimensions;
-    if (!GetTensorInt32Values<hal_1_0::HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
-    {
-        return Fail("%s: Could not read values of input 1", __func__);
-    }
-
-    const Shape inputOperandShape = GetOperandShape(*inputOperand);
-
-    Shape requestedShape;
-    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
-    // function that resolves these values into a fully specified tensor shape.
-    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
-    {
-        return Fail("%s: Failed to resolve the requested shape", __func__);
-    }
-
-    const Shape outputOperandShape = GetOperandShape(*outputOperand);
-    if (!SameShape(requestedShape, outputOperandShape))
-    {
-        return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
-    }
-
-    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Could not read input 0", __func__);
-    }
-
-    armnn::ReshapeDescriptor reshapeDescriptor;
-    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
-                                                         requestedShape.dimensions.data());
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsReshapeSupported,
-                               data.m_Backends,
-                               isSupported,
-                               input.GetTensorInfo(),
-                               reshapeDescriptor);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
-    assert(layer != nullptr);
-    input.Connect(layer->GetInputSlot(0));
-
-    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
+    return ::ConvertReshape<hal_1_0::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index e75b5c2..aa650e9 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -106,61 +106,7 @@
 bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_1::HalPolicy::ConvertDiv()");
-
-    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
-    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 1, model, data);
-
-    if (!input0.IsValid() || !input1.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    // The FuseActivation parameter is always the input index 2
-    // and it should be optional
-    ActivationFn activationFunction;
-    if (!GetOptionalInputActivation<hal_1_1::HalPolicy>(operation, 2, activationFunction, model, data))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicTensor(outputInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsDivisionSupported,
-                               data.m_Backends,
-                               isSupported,
-                               input0.GetTensorInfo(),
-                               input1.GetTensorInfo(),
-                               outputInfo);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
-    armnn::IConnectableLayer* const endLayer   = ProcessActivation(outputInfo, activationFunction, startLayer, data);
-
-    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
-    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
-
-    if (endLayer)
-    {
-        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
-        return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *endLayer, model, data);
-    }
-
-    return Fail("%s: ProcessActivation failed", __func__);
+    return ::ConvertDiv<hal_1_1::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
@@ -172,75 +118,7 @@
 bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_1::HalPolicy::ConvertMean()");
-
-    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicTensor(outputInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
-
-    const Operand* axisOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
-    if (!axisOperand)
-    {
-        return Fail("%s: Could not read input 1", __func__);
-    }
-
-    std::vector<int32_t> axis;
-    if (!GetTensorInt32Values<hal_1_1::HalPolicy>(*axisOperand, axis, model, data))
-    {
-        return Fail("%s: Input 1 has invalid values", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
-
-    // Convert the axis to unsigned int and remove duplicates.
-    unsigned int rank = inputInfo.GetNumDimensions();
-    std::set<unsigned int> uniqueAxis;
-    std::transform(axis.begin(), axis.end(),
-                   std::inserter(uniqueAxis, uniqueAxis.begin()),
-                   [rank](int i) -> unsigned int { return (i + rank) % rank; });
-
-    // Get the "keep dims" flag.
-    int32_t keepDims = 0;
-    if (!GetInputInt32<hal_1_1::HalPolicy>(operation, 2, keepDims, model, data))
-    {
-        return Fail("%s: Could not read input 2", __func__);
-    }
-
-    armnn::MeanDescriptor descriptor;
-    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
-    descriptor.m_KeepDims = keepDims > 0;
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsMeanSupported,
-                               data.m_Backends,
-                               isSupported,
-                               inputInfo,
-                               outputInfo,
-                               descriptor);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
-    assert(layer != nullptr);
-    input.Connect(layer->GetInputSlot(0));
-
-    return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
+    return ::ConvertMean<hal_1_1::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
@@ -258,261 +136,19 @@
 bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_1::HalPolicy::ConvertSqueeze()");
-
-    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
-    unsigned int rank = inputInfo.GetNumDimensions();
-    if (rank > 4)
-    {
-        Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
-    }
-
-    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
-
-    // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
-    // if the operand index is out of bounds.
-    const Operand* axisOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model, false);
-
-    const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
-
-    std::vector<int32_t> axis;
-    if (!axisOperand)
-    {
-        axis.assign(dimensionSequence,
-                    dimensionSequence + rank);
-    }
-    else
-    {
-        GetTensorInt32Values<hal_1_1::HalPolicy>(*axisOperand, axis, model, data);
-    }
-
-
-    std::vector<uint32_t> outputDims;
-    for (unsigned int i = 0; i < rank; i++)
-    {
-        bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
-        auto currentDimension = inputInfo.GetShape()[i];
-        if (skipSqueeze || currentDimension != 1)
-        {
-            outputDims.push_back(currentDimension);
-        }
-    }
-
-    armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
-
-    armnn::TensorInfo outputInfo = inputInfo;
-    outputInfo.SetShape(outShape);
-
-    armnn::ReshapeDescriptor reshapeDesc;
-    reshapeDesc.m_TargetShape = outputInfo.GetShape();
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsReshapeSupported,
-                               data.m_Backends,
-                               isSupported,
-                               inputInfo,
-                               reshapeDesc);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
-    assert(layer != nullptr);
-    input.Connect(layer->GetInputSlot(0));
-
-    return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
+    return ::ConvertSqueeze<hal_1_1::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_1::HalPolicy::ConvertStridedSlice()");
-
-    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
-    unsigned int rank = inputInfo.GetNumDimensions();
-    if (rank > 4)
-    {
-        Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
-    }
-
-    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicTensor(outputInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
-
-    const Operand* beginOperand   = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
-    const Operand* endOperand     = GetInputOperand<hal_1_1::HalPolicy>(operation, 2, model);
-    const Operand* stridesOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 3, model);
-
-    std::vector<int32_t> beginValues;
-    std::vector<int32_t> endValues;
-    std::vector<int32_t> stridesValues;
-
-    // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
-    auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
-    {
-        if (!GetTensorInt32Values<hal_1_1::HalPolicy>(operand, operandValues, model, data))
-        {
-            return false;
-        }
-
-        if (operandValues.size() != rank)
-        {
-            return false;
-        }
-
-        return true;
-    };
-
-    if (!ValidateInputOperands(*beginOperand, beginValues)
-        || !ValidateInputOperands(*endOperand, endValues)
-        || !ValidateInputOperands(*stridesOperand, stridesValues))
-    {
-        return Fail("%s: Operation has invalid input operand", __func__);
-    }
-
-    // Stride cannot have value '0'
-    if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
-    {
-        return Fail("%s: Stride must be non-zero value.", __func__);
-    }
-
-    armnn::StridedSliceDescriptor descriptor;
-    descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
-    descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
-    descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
-    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
-
-    // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
-    if (!GetInputInt32<hal_1_1::HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
-        !GetInputInt32<hal_1_1::HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
-        !GetInputInt32<hal_1_1::HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsStridedSliceSupported,
-                               data.m_Backends,
-                               isSupported,
-                               inputInfo,
-                               outputInfo,
-                               descriptor);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
-    assert(layer != nullptr);
-    input.Connect(layer->GetInputSlot(0));
-
-    return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
+    return ::ConvertStridedSlice<hal_1_1::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_1::HalPolicy::ConvertTranspose()");
-
-    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
-    unsigned int rank = inputInfo.GetNumDimensions();
-    if (rank > 4)
-    {
-        Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
-    }
-
-    // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
-    // if the operand index is out of bounds.
-    const Operand* permOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model, false);
-
-    std::vector<int32_t> perm(rank);
-    if (!permOperand)
-    {
-        // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
-        for (unsigned int i = rank; i > 0; i--)
-        {
-            perm[rank - i] = boost::numeric_cast<int> (i - 1);
-        }
-    }
-    else
-    {
-        GetTensorInt32Values<hal_1_1::HalPolicy>(*permOperand, perm, model, data);
-    }
-
-    std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
-
-    auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
-    if (!permutationVector.IsEqual(NHWCToArmNN)
-        && !permutationVector.IsEqual(ArmNNToNHWC)
-        && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
-    {
-       return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
-    }
-
-    armnn::PermuteDescriptor permuteDesc;
-    permuteDesc.m_DimMappings = permutationVector;
-
-    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsPermuteSupported,
-                               data.m_Backends,
-                               isSupported,
-                               inputInfo,
-                               outputInfo,
-                               permuteDesc);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
-    assert(layer != nullptr);
-    input.Connect(layer->GetInputSlot(0));
-
-    return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
+    return ::ConvertTranspose<hal_1_1::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index c8e242e..7fe5f88 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -7,9 +7,6 @@
 
 #include "Utils.hpp"
 
-#include "../1.0/HalPolicy.hpp"
-#include "../1.1/HalPolicy.hpp"
-
 #include <DataLayoutIndexed.hpp>
 #include <Half.hpp>
 
@@ -20,109 +17,12 @@
 namespace hal_1_2
 {
 
-bool HandledByV1_0(V1_2::OperationType operationType)
-{
-    switch (static_cast<V1_0::OperationType>(operationType))
-    {
-        case V1_0::OperationType::ADD:
-        case V1_0::OperationType::DEPTH_TO_SPACE:
-        case V1_0::OperationType::DEQUANTIZE:
-        case V1_0::OperationType::EMBEDDING_LOOKUP:
-        case V1_0::OperationType::FLOOR:
-        case V1_0::OperationType::FULLY_CONNECTED:
-        case V1_0::OperationType::HASHTABLE_LOOKUP:
-        case V1_0::OperationType::L2_NORMALIZATION:
-        case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
-        case V1_0::OperationType::LOGISTIC:
-        case V1_0::OperationType::LSH_PROJECTION:
-        case V1_0::OperationType::MUL:
-        case V1_0::OperationType::RESHAPE:
-        case V1_0::OperationType::RNN:
-        case V1_0::OperationType::SVDF:
-        case V1_0::OperationType::OEM_OPERATION:
-            return true;
-        default:
-            return false;
-    }
-}
-
-bool HandledByV1_1(V1_2::OperationType operationType)
-{
-    if (HandledByV1_0(operationType))
-    {
-        return true;
-    }
-    switch (static_cast<V1_1::OperationType>(operationType))
-    {
-        case V1_1::OperationType::DIV:
-        case V1_1::OperationType::MEAN:
-        case V1_1::OperationType::SQUEEZE:
-        case V1_1::OperationType::STRIDED_SLICE:
-        case V1_1::OperationType::TRANSPOSE:
-            return true;
-        default:
-            return false;
-    }
-}
-
-bool HandledByV1_0(const V1_2::Operation& operation)
-{
-    return HandledByV1_0(operation.type);
-}
-
-bool HandledByV1_1(const V1_2::Operation& operation)
-{
-    return HandledByV1_1(operation.type);
-}
-
-V1_0::OperationType CastToV1_0(V1_2::OperationType type)
-{
-    return static_cast<V1_0::OperationType>(type);
-}
-
-V1_1::OperationType CastToV1_1(V1_2::OperationType type)
-{
-    return static_cast<V1_1::OperationType>(type);
-}
-
-V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
-{
-    V1_0::Operation op;
-    op.type = CastToV1_0(operation.type);
-    op.inputs = operation.inputs;
-    op.outputs = operation.outputs;
-    return op;
-}
-
-V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
-{
-    V1_1::Operation op;
-    op.type = CastToV1_1(operation.type);
-    op.inputs = operation.inputs;
-    op.outputs = operation.outputs;
-    return op;
-}
-
 bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
 {
-    if (HandledByV1_0(operation) && compliantWithV1_0(model))
-    {
-        hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
-        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
-
-        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
-    }
-
-    if (HandledByV1_1(operation) && compliantWithV1_1(model))
-    {
-        hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
-        hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);
-
-        return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
-    }
-
     switch (operation.type)
     {
+        case V1_2::OperationType::ADD:
+            return ConvertAdd(operation, model, data);
         case V1_2::OperationType::AVERAGE_POOL_2D:
             return ConvertAveragePool2d(operation, model, data);
         case V1_2::OperationType::BATCH_TO_SPACE_ND:
@@ -133,14 +33,34 @@
             return ConvertConv2d(operation, model, data);
         case V1_2::OperationType::DEPTHWISE_CONV_2D:
             return ConvertDepthwiseConv2d(operation, model, data);
+        case V1_2::OperationType::DEQUANTIZE:
+            return ConvertDequantize(operation, model, data);
+        case V1_2::OperationType::DIV:
+            return ConvertDiv(operation, model, data);
+        case V1_2::OperationType::FLOOR:
+            return ConvertFloor(operation, model, data);
+        case V1_2::OperationType::FULLY_CONNECTED:
+            return ConvertFullyConnected(operation, model, data);
+        case V1_2::OperationType::L2_NORMALIZATION:
+            return ConvertL2Normalization(operation, model, data);
         case V1_2::OperationType::L2_POOL_2D:
             return ConvertL2Pool2d(operation, model, data);
+        case V1_2::OperationType::LOCAL_RESPONSE_NORMALIZATION:
+            return ConvertLocalResponseNormalization(operation, model, data);
+        case V1_2::OperationType::LOGISTIC:
+            return ConvertLogistic(operation, model, data);
+        case V1_2::OperationType::LSTM:
+            return ConvertLstm(operation, model, data);
         case V1_2::OperationType::MAX_POOL_2D:
             return ConvertMaxPool2d(operation, model, data);
         case V1_2::OperationType::MAXIMUM:
             return ConvertMaximum(operation, model, data);
+        case V1_2::OperationType::MEAN:
+            return ConvertMean(operation, model, data);
         case V1_2::OperationType::MINIMUM:
             return ConvertMinimum(operation, model, data);
+        case V1_2::OperationType::MUL:
+            return ConvertMul(operation, model, data);
         case V1_2::OperationType::PAD:
             return ConvertPad(operation, model, data);
         case V1_2::OperationType::PAD_V2:
@@ -157,10 +77,18 @@
             return ConvertReLu1(operation, model, data);
         case V1_2::OperationType::RELU6:
             return ConvertReLu6(operation, model, data);
+        case V1_2::OperationType::RESHAPE:
+            return ConvertReshape(operation, model, data);
         case V1_2::OperationType::RESIZE_BILINEAR:
             return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
         case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
             return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
+        case V1_2::OperationType::SQUEEZE:
+            return ConvertSqueeze(operation, model, data);
+        case V1_2::OperationType::STRIDED_SLICE:
+            return ConvertStridedSlice(operation, model, data);
+        case V1_2::OperationType::TRANSPOSE:
+            return ConvertTranspose(operation, model, data);
         case V1_2::OperationType::TRANSPOSE_CONV_2D:
             return ConvertTransposeConv2d(operation, model, data);
         case V1_2::OperationType::SOFTMAX:
@@ -173,14 +101,18 @@
             return ConvertSub(operation, model, data);
         case V1_2::OperationType::TANH:
             return ConvertTanH(operation, model, data);
-        case V1_2::OperationType::LSTM:
-            return ConvertLstm(operation, model, data);
         default:
             return Fail("%s: Operation type %s not supported in ArmnnDriver",
                         __func__, toString(operation.type).c_str());
     }
 }
 
+bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertAdd()");
+    return ::ConvertAdd<hal_1_2::HalPolicy>(operation, model, data);
+}
+
 bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_2::HalPolicy::ConvertAveragePool2d()");
@@ -517,12 +449,56 @@
     return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
 }
 
+bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertDequantize()");
+    return ::ConvertDequantize<hal_1_2::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertDiv()");
+    return ::ConvertDiv<hal_1_2::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertFloor()");
+    return ::ConvertFloor<hal_1_2::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertFullyConnected()");
+    return ::ConvertFullyConnected<hal_1_2::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertL2Normalization()");
+    return ::ConvertL2Normalization<hal_1_2::HalPolicy>(operation, model, data);
+}
+
 bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_2::HalPolicy::ConvertL2Pool2d()");
     return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
 }
 
+bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
+                                                  const Model& model,
+                                                  ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertLocalResponseNormalization()");
+    return ::ConvertLocalResponseNormalization<hal_1_2::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertLogistic()");
+    return ::ConvertLogistic<hal_1_2::HalPolicy>(operation, model, data);
+}
+
 bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_2::HalPolicy::ConvertMaxPool2d()");
@@ -574,6 +550,12 @@
     return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
 }
 
+bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertMean()");
+    return ::ConvertMean<hal_1_2::HalPolicy>(operation, model, data);
+}
+
 bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_2::HalPolicy::ConvertMinimum()");
@@ -619,6 +601,12 @@
     return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
 }
 
+bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertMul()");
+    return ::ConvertMul<hal_1_2::HalPolicy>(operation, model, data);
+}
+
 bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_2::HalPolicy::ConvertPad()");
@@ -1039,6 +1027,12 @@
     return ::ConvertReLu6<hal_1_2::HalPolicy>(operation, model, data);
 }
 
+bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertReshape()");
+    return ::ConvertReshape<hal_1_2::HalPolicy>(operation, model, data);
+}
+
 bool HalPolicy::ConvertResize(const Operation& operation,
                               const Model& model,
                               ConversionData& data,
@@ -1733,6 +1727,24 @@
             SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 3, *layer, 3, model, data));
 }
 
+bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_1::HalPolicy::ConvertSqueeze()");
+    return ::ConvertSqueeze<hal_1_2::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_1::HalPolicy::ConvertStridedSlice()");
+    return ::ConvertStridedSlice<hal_1_2::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_1::HalPolicy::ConvertTranspose()");
+    return ::ConvertTranspose<hal_1_2::HalPolicy>(operation, model, data);
+}
+
 bool HalPolicy::ConvertTransposeConv2d(const Operation& operation, const Model& model, ConversionData& data)
 {
     LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index a268b3d..4a785d9 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -31,6 +31,8 @@
     static bool ConvertOperation(const Operation& operation, const Model& model, ConversionData& data);
 
 private:
+    static bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data);
@@ -41,14 +43,36 @@
 
     static bool ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertLocalResponseNormalization(const Operation& operation,
+                                                  const Model& model,
+                                                  ConversionData& data);
+
+    static bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertLstm(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertPad(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data);
@@ -65,6 +89,8 @@
 
     static bool ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertResize(const Operation& operation,
                               const Model& model,
                               ConversionData& data,
@@ -76,11 +102,15 @@
 
     static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertTanH(const Operation& operation, const Model& model, ConversionData& data);
 
-    static bool ConvertLstm(const Operation& operation, const Model& model, ConversionData& data);
+    static bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertTransposeConv2d(const Operation& operation, const Model& model, ConversionData& data);
 };
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 32efa54..cfbef5a 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -14,6 +14,8 @@
 #include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
 #include "armnn/src/armnnUtils/Permute.hpp"
 
+#include "1.0/FullyConnected.hpp"
+
 #include <ActivationFunctor.h>
 #include <CpuExecutor.h>
 #include <OperationsUtils.h>
@@ -341,6 +343,20 @@
     return shape;
 }
 
+#ifdef ARMNN_ANDROID_NN_V1_2
+
+Shape GetOperandShape(const V1_2::Operand& operand)
+{
+    Shape shape;
+    shape.type = OperandType(operand.type);
+    shape.dimensions = operand.dimensions;
+    shape.scale = operand.scale;
+    shape.offset = operand.zeroPoint;
+    return shape;
+}
+
+#endif
+
 // ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
 // what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
 // we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
@@ -1420,6 +1436,71 @@
 template<typename HalPolicy,
          typename Operation = typename HalPolicy::Operation,
          typename Model     = typename HalPolicy::Model>
+bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The FuseActivation parameter is always the input index 2
+    // and it should be optional
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!outputOperand)
+    {
+        return false;
+    }
+
+    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
+    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsAdditionSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo0,
+                               inputInfo1,
+                               outputInfo);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
+    armnn::IConnectableLayer* const endLayer   = ProcessActivation(outputInfo, activationFunction, startLayer, data);
+
+    if (endLayer != nullptr)
+    {
+        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
+        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
+    }
+    else
+    {
+        return Fail("%s: ProcessActivation failed", __func__);
+    }
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
 bool ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
 {
     using HalOperand = typename HalPolicy::Operand;
@@ -1918,11 +1999,549 @@
 }
 
 template<typename HalPolicy,
-         typename HalOperation = typename HalPolicy::Operation,
-         typename HalOperand   = typename HalPolicy::Operand,
-         typename HalModel     = typename HalPolicy::Model>
-bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
 {
+    using Operand = typename HalPolicy::Operand;
+
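+    // DEQUANTIZE maps quantized values to float: real = scale * (quantized - zeroPoint).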
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid input", __func__);
+    }
+
+    const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!outputOperand)
+    {
+        return Fail("%s: Operation has invalid outputs", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsDequantizeSupported,
+                               data.m_Backends,
+                               isSupported,
+                               input.GetTensorInfo(),
+                               outputInfo);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The FuseActivation parameter is always the input index 2,
+    // and it is optional.
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsDivisionSupported,
+                               data.m_Backends,
+                               isSupported,
+                               input0.GetTensorInfo(),
+                               input1.GetTensorInfo(),
+                               outputInfo);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
+    armnn::IConnectableLayer* const endLayer   = ProcessActivation(outputInfo, activationFunction, startLayer, data);
+
+    if (endLayer)
+    {
+        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
+        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
+    }
+    return Fail("%s: ProcessActivation failed", __func__);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!outputOperand)
+    {
+        return Fail("%s: Operation has invalid outputs", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsFloorSupported,
+                               data.m_Backends,
+                               isSupported,
+                               input.GetTensorInfo(),
+                               outputInfo);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    // ArmNN does not currently support non-fixed weights or bias
+    ConstTensorPin weightsPin =
+            ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data); // 2D
+    ConstTensorPin biasPin =
+            ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D
+
+    if (!weightsPin.IsValid() || !biasPin.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    armnn::ConstTensor weights = weightsPin.GetConstTensor();
+    armnn::ConstTensor bias    = biasPin.GetConstTensor();
+    armnn::TensorInfo reshapedInfo = inputInfo;
+
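+    // FlattenFullyConnectedInput is expected to collapse the input to the 2D shape implied by the
+    // weights, e.g. a [1, 2, 2, 8] input with [16, 32] weights becomes [1, 32].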
+    try
+    {
+        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
+    }
+    catch (const std::exception& e)
+    {
+        return Fail("%s: %s", __func__, e.what());
+    }
+
+    // Ensure the bias scale is within 1% of the product of the input and weight scales
+    // (small float differences can exist).
+    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);
+
+    ActivationFn activationFunction;
+    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    armnn::FullyConnectedDescriptor desc;
+    desc.m_TransposeWeightMatrix = true;
+    desc.m_BiasEnabled           = true;
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsFullyConnectedSupported,
+                               data.m_Backends,
+                               isSupported,
+                               reshapedInfo,
+                               outputInfo,
+                               weights.GetInfo(),
+                               bias.GetInfo(),
+                               desc);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* startLayer =
+            data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
+    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
+
+    if (endLayer != nullptr)
+    {
+        if (inputInfo.GetNumDimensions() > 2U)
+        {
+            armnn::ReshapeDescriptor reshapeDescriptor;
+            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
+
+            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
+            assert(reshapeLayer != nullptr);
+            input.Connect(reshapeLayer->GetInputSlot(0));
+            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
+            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
+        }
+        else
+        {
+            input.Connect(startLayer->GetInputSlot(0));
+        }
+
+        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
+    }
+    else
+    {
+        return Fail("%s: ProcessActivation failed", __func__);
+    }
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+    if (outputInfo.GetNumDimensions() != 4u)
+    {
+        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
+    }
+
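+    // L2_NORMALIZATION scales each element by the inverse L2 norm along the channel dimension:
+    // out = x / sqrt(sum(x * x)).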
+    armnn::L2NormalizationDescriptor desc;
+    desc.m_DataLayout = armnn::DataLayout::NHWC;
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsL2NormalizationSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               desc);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertLocalResponseNormalization(const Operation& operation,
+                                       const Model& model,
+                                       ConversionData& data)
+{
+    using Operand     = typename HalPolicy::Operand;
+    using OperandType = typename HalPolicy::OperandType;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+    if (outputInfo.GetNumDimensions() != 4u)
+    {
+        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
+    }
+
+    armnn::NormalizationDescriptor descriptor;
+    descriptor.m_DataLayout      = armnn::DataLayout::NHWC;
+    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
+    descriptor.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;
+
+    if (!GetInputScalar<HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
+        !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
+        !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
+        !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // ArmNN expects normSize to be the full size of the normalization
+    // window rather than the radius as in AndroidNN.
+    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
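+    // e.g. a radius of 2 in the model becomes a normSize (window) of 5 here.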
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsNormalizationSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               descriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
+{
+    armnn::ActivationDescriptor desc;
+    desc.m_Function = armnn::ActivationFunction::Sigmoid;
+
+    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
+    if (!axisOperand)
+    {
+        return Fail("%s: Could not read input 1", __func__);
+    }
+
+    std::vector<int32_t> axis;
+    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
+    {
+        return Fail("%s: Input 1 has invalid values", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+
+    // Convert the axis to unsigned int and remove duplicates.
+    unsigned int rank = inputInfo.GetNumDimensions();
+    std::set<unsigned int> uniqueAxis;
+    std::transform(axis.begin(), axis.end(),
+                   std::inserter(uniqueAxis, uniqueAxis.begin()),
+                   [rank](int i) -> unsigned int { return (i + rank) % rank; });
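+    // e.g. for a rank 4 input, an axis value of -1 maps to dimension 3.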
+
+    // Get the "keep dims" flag.
+    int32_t keepDims = 0;
+    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
+    {
+        return Fail("%s: Could not read input 2", __func__);
+    }
+
+    armnn::MeanDescriptor descriptor;
+    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
+    descriptor.m_KeepDims = keepDims > 0;
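+    // e.g. reducing a [2, 3, 4] input over axes { 0, 2 } yields [3], or [1, 3, 1] when keepDims is set.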
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsMeanSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               descriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The FuseActivation parameter is always the input index 2,
+    // and it is optional.
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!outputOperand)
+    {
+        return false;
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsMultiplicationSupported,
+                               data.m_Backends,
+                               isSupported,
+                               input0.GetTensorInfo(),
+                               input1.GetTensorInfo(),
+                               outputInfo);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
+    armnn::IConnectableLayer* const endLayer   = ProcessActivation(outputInfo, activationFunction, startLayer, data);
+
+    if (endLayer != nullptr)
+    {
+        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
+        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
+    }
+    else
+    {
+        return Fail("%s: ProcessActivation failed", __func__);
+    }
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
     if (!input.IsValid())
     {
@@ -1946,7 +2565,7 @@
         descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
     }
 
-    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
     if (!output)
     {
         return Fail("%s: Could not read output", __func__);
@@ -1981,10 +2600,86 @@
 
 template<typename HalPolicy,
          typename Operation = typename HalPolicy::Operation,
-         typename Operand   = typename HalPolicy::Operand,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    const Operand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
+    const Operand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
+    const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
+
+    if (inputOperand == nullptr
+        || requestedShapeOperand == nullptr
+        || outputOperand == nullptr)
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    if (requestedShapeOperand->dimensions.size() != 1)
+    {
+        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
+                    __func__, requestedShapeOperand->dimensions.size());
+    }
+
+    std::vector<int32_t> targetDimensions;
+    if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
+    {
+        return Fail("%s: Could not read values of input 1", __func__);
+    }
+
+    const Shape inputOperandShape = GetOperandShape(*inputOperand);
+
+    Shape requestedShape;
+    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
+    // function that resolves these values into a fully specified tensor shape.
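+    // e.g. reshaping a [2, 6] input with a requested shape of [-1, 3] resolves to [4, 3].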
+    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
+    {
+        return Fail("%s: Failed to resolve the requested shape", __func__);
+    }
+
+    const Shape outputOperandShape = GetOperandShape(*outputOperand);
+    if (!SameShape(requestedShape, outputOperandShape))
+    {
+        return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
+    }
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Could not read input 0", __func__);
+    }
+
+    armnn::ReshapeDescriptor reshapeDescriptor;
+    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
+                                                         requestedShape.dimensions.data());
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsReshapeSupported,
+                               data.m_Backends,
+                               isSupported,
+                               input.GetTensorInfo(),
+                               reshapeDescriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
          typename Model     = typename HalPolicy::Model>
 bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
 {
+    using Operand = typename HalPolicy::Operand;
+
     LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
     LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
 
@@ -2042,6 +2737,274 @@
 }
 
 template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank > 4)
+    {
+        Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
+    // if the operand index is out of bounds.
+    const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
+
+    const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
+
+    std::vector<int32_t> axis;
+    if (!axisOperand)
+    {
+        axis.assign(dimensionSequence, dimensionSequence + rank);
+    }
+    else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
+    {
+        return Fail("%s: Input 1 has invalid values", __func__);
+    }
+
+    std::vector<uint32_t> outputDims;
+    for (unsigned int i = 0; i < rank; i++)
+    {
+        bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
+        auto currentDimension = inputInfo.GetShape()[i];
+        if (skipSqueeze || currentDimension != 1)
+        {
+            outputDims.push_back(currentDimension);
+        }
+    }
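+    // e.g. squeezing a [1, 2, 1, 3] input with no explicit axis yields [2, 3].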
+
+    armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
+
+    armnn::TensorInfo outputInfo = inputInfo;
+    outputInfo.SetShape(outShape);
+
+    armnn::ReshapeDescriptor reshapeDesc;
+    reshapeDesc.m_TargetShape = outputInfo.GetShape();
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsReshapeSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               reshapeDesc);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank > 4)
+    {
+        Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    const Operand* beginOperand   = GetInputOperand<HalPolicy>(operation, 1, model);
+    const Operand* endOperand     = GetInputOperand<HalPolicy>(operation, 2, model);
+    const Operand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
+
+    std::vector<int32_t> beginValues;
+    std::vector<int32_t> endValues;
+    std::vector<int32_t> stridesValues;
+
+    // The beginOperand, endOperand and stridesOperand tensors must each have a length equal to
+    // the rank of the input.
+    auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
+    {
+        if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
+        {
+            return false;
+        }
+
+        if (operandValues.size() != rank)
+        {
+            return false;
+        }
+
+        return true;
+    };
+
+    if (!ValidateInputOperands(*beginOperand, beginValues)
+        || !ValidateInputOperands(*endOperand, endValues)
+        || !ValidateInputOperands(*stridesOperand, stridesValues))
+    {
+        return Fail("%s: Operation has invalid input operand", __func__);
+    }
+
+    // Stride cannot have value '0'
+    if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
+    {
+        return Fail("%s: Stride must be non-zero value.", __func__);
+    }
+
+    armnn::StridedSliceDescriptor descriptor;
+    descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
+    descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
+    descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+    // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
+    if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
+        !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
+        !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
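+    // A set bit i in begin_mask (or end_mask) means begin[i] (or end[i]) is ignored and the widest
+    // possible range is used for that dimension.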
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsStridedSliceSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               descriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank > 4)
+    {
+        Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
+    }
+
+    // NOTE: Perm is an optional input to TRANSPOSE, therefore we do not want to generate a failure
+    // if the operand index is out of bounds.
+    const Operand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
+
+    std::vector<int32_t> perm(rank);
+    if (!permOperand)
+    {
+        // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
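+        // e.g. for a rank 4 input the default permutation is [3, 2, 1, 0] (a full reverse).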
+        for (unsigned int i = rank; i > 0; i--)
+        {
+            perm[rank - i] = boost::numeric_cast<int>(i - 1);
+        }
+    }
+    else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
+    {
+        return Fail("%s: Input 1 has invalid values", __func__);
+    }
+
+    std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
+
+    auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
+    if (!permutationVector.IsEqual(NHWCToArmNN)
+        && !permutationVector.IsEqual(ArmNNToNHWC)
+        && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
+    {
+        return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
+    }
+
+    armnn::PermuteDescriptor permuteDesc;
+    permuteDesc.m_DimMappings = permutationVector;
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsPermuteSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               permuteDesc);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
          typename HalOperation   = typename HalPolicy::Operation,
          typename HalOperand     = typename HalPolicy::Operand,
          typename HalModel       = typename HalPolicy::Model>