IVGCVSW-1806: Refactor Android-NN-Driver ModelToINetworkConverter

* Moved conversion logic into new V1_0 and V1_1 HalPolicy classes
* Extracted common helper functions into ConversionUtils class

Change-Id: I1ab50edc266dd528c0cb22a5cd1aa65e103674d9
diff --git a/1.0/ArmnnDriver.hpp b/1.0/ArmnnDriver.hpp
index 560b0d3..a048973 100644
--- a/1.0/ArmnnDriver.hpp
+++ b/1.0/ArmnnDriver.hpp
@@ -9,67 +9,62 @@
 
 #include "ArmnnDevice.hpp"
 #include "ArmnnDriverImpl.hpp"
+#include "HalPolicy.hpp"
+
 #include "../ArmnnDriverImpl.hpp"
 
 #include <log/log.h>
 
 namespace armnn_driver
 {
-namespace V1_0
+namespace hal_1_0
 {
 
-class ArmnnDriver : public ArmnnDevice, public ::android::hardware::neuralnetworks::V1_0::IDevice
+class ArmnnDriver : public ArmnnDevice, public V1_0::IDevice
 {
 public:
     ArmnnDriver(DriverOptions options)
         : ArmnnDevice(std::move(options))
     {
-        ALOGV("V1_0::ArmnnDriver::ArmnnDriver()");
+        ALOGV("hal_1_0::ArmnnDriver::ArmnnDriver()");
     }
     ~ArmnnDriver() {}
 
 public:
-    Return<void> getCapabilities(
-            ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb cb) override
+    Return<void> getCapabilities(V1_0::IDevice::getCapabilities_cb cb) override
     {
-        ALOGV("V1_0::ArmnnDriver::getCapabilities()");
+        ALOGV("hal_1_0::ArmnnDriver::getCapabilities()");
 
-        return V1_0::ArmnnDriverImpl::getCapabilities(m_Runtime,
-                                                      cb);
+        return hal_1_0::ArmnnDriverImpl::getCapabilities(m_Runtime, cb);
     }
 
-    Return<void> getSupportedOperations(
-            const ::android::hardware::neuralnetworks::V1_0::Model& model,
-            ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb cb) override
+    Return<void> getSupportedOperations(const V1_0::Model& model,
+                                        V1_0::IDevice::getSupportedOperations_cb cb) override
     {
-        ALOGV("V1_0::ArmnnDriver::getSupportedOperations()");
+        ALOGV("hal_1_0::ArmnnDriver::getSupportedOperations()");
 
-        return armnn_driver::ArmnnDriverImpl<HalVersion_1_0>::getSupportedOperations(m_Runtime,
-                                                                                     m_Options,
-                                                                                     model,
-                                                                                     cb);
+        return armnn_driver::ArmnnDriverImpl<HalPolicy>::getSupportedOperations(m_Runtime, m_Options, model, cb);
     }
 
-    Return<ErrorStatus> prepareModel(
-            const ::android::hardware::neuralnetworks::V1_0::Model& model,
-            const android::sp<IPreparedModelCallback>& cb) override
+    Return<ErrorStatus> prepareModel(const V1_0::Model& model,
+                                     const android::sp<IPreparedModelCallback>& cb) override
     {
-        ALOGV("V1_0::ArmnnDriver::prepareModel()");
+        ALOGV("hal_1_0::ArmnnDriver::prepareModel()");
 
-        return armnn_driver::ArmnnDriverImpl<HalVersion_1_0>::prepareModel(m_Runtime,
-                                                                           m_ClTunedParameters,
-                                                                           m_Options,
-                                                                           model,
-                                                                           cb);
+        return armnn_driver::ArmnnDriverImpl<HalPolicy>::prepareModel(m_Runtime,
+                                                                      m_ClTunedParameters,
+                                                                      m_Options,
+                                                                      model,
+                                                                      cb);
     }
 
     Return<DeviceStatus> getStatus() override
     {
-        ALOGV("V1_0::ArmnnDriver::getStatus()");
+        ALOGV("hal_1_0::ArmnnDriver::getStatus()");
 
-        return armnn_driver::ArmnnDriverImpl<HalVersion_1_0>::getStatus();
+        return armnn_driver::ArmnnDriverImpl<HalPolicy>::getStatus();
     }
 };
 
-} // armnn_driver::namespace V1_0
-} // namespace armnn_driver
+} // namespace hal_1_0
+} // namespace armnn_driver
\ No newline at end of file
diff --git a/1.0/ArmnnDriverImpl.cpp b/1.0/ArmnnDriverImpl.cpp
index c7c0f7e..a35bb0e 100644
--- a/1.0/ArmnnDriverImpl.cpp
+++ b/1.0/ArmnnDriverImpl.cpp
@@ -8,33 +8,27 @@
 
 #include <log/log.h>
 
-using namespace std;
-using namespace android;
-using namespace android::nn;
-using namespace android::hardware;
-
 namespace
 {
 
-const char *g_Float32PerformanceExecTimeName = "ArmNN.float32Performance.execTime";
-const char *g_Float32PerformancePowerUsageName = "ArmNN.float32Performance.powerUsage";
-const char *g_Quantized8PerformanceExecTimeName = "ArmNN.quantized8Performance.execTime";
+const char *g_Float32PerformanceExecTimeName      = "ArmNN.float32Performance.execTime";
+const char *g_Float32PerformancePowerUsageName    = "ArmNN.float32Performance.powerUsage";
+const char *g_Quantized8PerformanceExecTimeName   = "ArmNN.quantized8Performance.execTime";
 const char *g_Quantized8PerformancePowerUsageName = "ArmNN.quantized8Performance.powerUsage";
 
 } // anonymous namespace
 
 namespace armnn_driver
 {
-namespace V1_0
+namespace hal_1_0
 {
 
-Return<void> ArmnnDriverImpl::getCapabilities(
-        const armnn::IRuntimePtr& runtime,
-        neuralnetworks::V1_0::IDevice::getCapabilities_cb cb)
+Return<void> ArmnnDriverImpl::getCapabilities(const armnn::IRuntimePtr& runtime,
+                                              V1_0::IDevice::getCapabilities_cb cb)
 {
-    ALOGV("V1_0::ArmnnDriverImpl::getCapabilities()");
+    ALOGV("hal_1_0::ArmnnDriverImpl::getCapabilities()");
 
-    neuralnetworks::V1_0::Capabilities capabilities;
+    V1_0::Capabilities capabilities;
     if (runtime)
     {
         capabilities.float32Performance.execTime =
@@ -53,9 +47,9 @@
     }
     else
     {
-        capabilities.float32Performance.execTime = 0;
-        capabilities.float32Performance.powerUsage = 0;
-        capabilities.quantized8Performance.execTime = 0;
+        capabilities.float32Performance.execTime      = 0;
+        capabilities.float32Performance.powerUsage    = 0;
+        capabilities.quantized8Performance.execTime   = 0;
         capabilities.quantized8Performance.powerUsage = 0;
 
         cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
@@ -64,5 +58,5 @@
     return Void();
 }
 
-} // namespace armnn_driver::V1_0
-} // namespace armnn_driver
+} // namespace hal_1_0
+} // namespace armnn_driver
\ No newline at end of file
diff --git a/1.0/ArmnnDriverImpl.hpp b/1.0/ArmnnDriverImpl.hpp
index a6af74d..7f033e0 100644
--- a/1.0/ArmnnDriverImpl.hpp
+++ b/1.0/ArmnnDriverImpl.hpp
@@ -11,18 +11,18 @@
 
 #include <armnn/ArmNN.hpp>
 
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+
 namespace armnn_driver
 {
-namespace V1_0
+namespace hal_1_0
 {
 
 class ArmnnDriverImpl
 {
 public:
-    static Return<void> getCapabilities(
-            const armnn::IRuntimePtr& runtime,
-            ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb cb);
+    static Return<void> getCapabilities(const armnn::IRuntimePtr& runtime, V1_0::IDevice::getCapabilities_cb cb);
 };
 
-} // namespace armnn_driver::V1_0
+} // namespace hal_1_0
 } // namespace armnn_driver
diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp
new file mode 100644
index 0000000..d3c6dba
--- /dev/null
+++ b/1.0/HalPolicy.cpp
@@ -0,0 +1,1360 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "HalPolicy.hpp"
+
+namespace armnn_driver
+{
+namespace hal_1_0
+{
+
+bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
+{
+    switch (operation.type)
+    {
+        case V1_0::OperationType::ADD:
+            return ConvertAdd(operation, model, data);
+        case V1_0::OperationType::AVERAGE_POOL_2D:
+            return ConvertAveragePool2d(operation, model, data);
+        case V1_0::OperationType::CONCATENATION:
+            return ConvertConcatenation(operation, model, data);
+        case V1_0::OperationType::CONV_2D:
+            return ConvertConv2d(operation, model, data);
+        case V1_0::OperationType::DEPTHWISE_CONV_2D:
+            return ConvertDepthwiseConv2d(operation, model, data);
+        case V1_0::OperationType::FLOOR:
+            return ConvertFloor(operation, model, data);
+        case V1_0::OperationType::FULLY_CONNECTED:
+            return ConvertFullyConnected(operation, model, data);
+        case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
+            return ConvertLocalResponseNormalization(operation, model, data);
+        case V1_0::OperationType::LOGISTIC:
+            return ConvertLogistic(operation, model, data);
+        case V1_0::OperationType::LSTM:
+            return ConvertLstm(operation, model, data);
+        case V1_0::OperationType::L2_NORMALIZATION:
+            return ConvertL2Normalization(operation, model, data);
+        case V1_0::OperationType::L2_POOL_2D:
+            return ConvertL2Pool2d(operation, model, data);
+        case V1_0::OperationType::MAX_POOL_2D:
+            return ConvertMaxPool2d(operation, model, data);
+        case V1_0::OperationType::MUL:
+            return ConvertMul(operation, model, data);
+        case V1_0::OperationType::RELU:
+            return ConvertReLu(operation, model, data);
+        case V1_0::OperationType::RELU1:
+            return ConvertReLu1(operation, model, data);
+        case V1_0::OperationType::RELU6:
+            return ConvertReLu6(operation, model, data);
+        case V1_0::OperationType::SOFTMAX:
+            return ConvertSoftmax(operation, model, data);
+        case V1_0::OperationType::TANH:
+            return ConvertTanH(operation, model, data);
+        case V1_0::OperationType::RESHAPE:
+            return ConvertReshape(operation, model, data);
+        case V1_0::OperationType::RESIZE_BILINEAR:
+            return ConvertResizeBilinear(operation, model, data);
+        default:
+            return Fail("%s: Operation type %s not supported in ArmnnDriver",
+                        __func__, toString(operation.type).c_str());
+    }
+}
+
+bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The FuseActivation parameter is always the input at index 2,
+    // and it is optional.
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return false;
+    }
+
+    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
+
+    if (!IsLayerSupported(__func__,
+                          armnn::IsAdditionSupported,
+                          data.m_Compute,
+                          input0.GetTensorInfo(),
+                          input1.GetTensorInfo(),
+                          outInfo))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
+    armnn::IConnectableLayer* const endLayer   = ProcessActivation(outInfo, activationFunction, startLayer, data);
+
+    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
+    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
+
+    if (endLayer != nullptr)
+    {
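+        // BroadcastTensor connects input0 and input1 to startLayer, reshaping the
+        // lower-rank input (if any) so that the two shapes are broadcast-compatible.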
+        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
+        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
+    }
+    else
+    {
+        return Fail("%s: ProcessActivation failed", __func__);
+    }
+}
+
+bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+    return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
+}
+
+bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
+{
+    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
+    if (operation.inputs.size() <= 1)
+    {
+        return Fail("%s: Operation has insufficient arguments", __func__);
+    }
+
+    // Get inputs and outputs
+    const std::size_t numInputTensors = operation.inputs.size() - 1;
+
+    int32_t concatDim;
+    if (!GetInputScalar(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return Fail("%s: Operation has no outputs", __func__);
+    }
+
+    armnn::TensorInfo  outputInfo  = GetTensorInfoForOperand(*outputOperand);
+    armnn::TensorShape outputShape = outputInfo.GetShape();
+
+    //
+    // handle negative concat dims along the lines of tensorflow as described here:
+    //    https://www.tensorflow.org/api_docs/python/tf/concat
+    // "negative axis refers to axis + rank(values)-th dimension"
+    //
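+    // E.g. for a rank 4 output, concatDim == -1 refers to dimension 3.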
+    if (concatDim < 0)
+    {
+        concatDim += outputShape.GetNumDimensions();
+    }
+
+    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
+    {
+        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
+    }
+
+    std::vector<LayerInputHandle>   inputHandles;
+    std::vector<armnn::TensorShape> inputShapes;
+
+    inputHandles.reserve(numInputTensors);
+    inputShapes.reserve(numInputTensors);
+
+    bool inputsHaveBeenReshaped        = false;
+    unsigned int tensorDimensionsAdded = 0;
+
+    for (uint32_t i = 0; i < numInputTensors; ++i)
+    {
+        const Operand* const operand = GetInputOperand(operation, i, model);
+        if (!operand)
+        {
+            return Fail("%s: Operation has invalid inputs", __func__);
+        }
+
+        armnn::TensorShape operandShape     = GetTensorShapeForOperand(*operand);
+        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle(operation, i, model, data);
+
+        if (operandShape.GetNumDimensions() == 0)
+        {
+            return Fail("%s: Operands with rank 0 are not supported", __func__);
+        }
+
+        if (RequiresReshape(operandShape))
+        {
+            inputsHaveBeenReshaped = true;
+
+            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
+
+            // Expand the tensor to three dimensions
+            if (operandShape.GetNumDimensions() == 2)
+            {
+                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
+                tensorDimensionsAdded = 1;
+            }
+            else
+            {
+                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
+                tensorDimensionsAdded = 2;
+            }
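+            // E.g. [ H, W ] becomes [ 1, H, W ] and [ C ] becomes [ 1, 1, C ].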
+
+            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
+                    *data.m_Network,
+                    operandInputHandle,
+                    reshapeInfo
+            );
+
+            // Point to the reshape operation rather than the input operation
+            operandShape = reshapeInfo.GetShape();
+            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
+        }
+
+        inputShapes.emplace_back(operandShape);
+        inputHandles.emplace_back(operandInputHandle);
+
+        if (!inputHandles.back().IsValid())
+        {
+            return Fail("%s: Operation has invalid inputs", __func__);
+        }
+    }
+
+    BOOST_ASSERT(inputShapes.size() == inputHandles.size());
+
+    if (inputsHaveBeenReshaped)
+    {
+        // Adjust the concatenation dimension by the amount of dimensions added (if any)
+        concatDim += tensorDimensionsAdded;
+
+        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
+        if (tensorDimensionsAdded == 1)
+        {
+            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
+        }
+        else if (tensorDimensionsAdded == 2)
+        {
+            outputShape = armnn::TensorShape({1, 1, outputShape[0], outputShape[1]});
+        }
+    }
+
+    // Get the pair of permutations required for the concatenation
+    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
+            std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
+
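+    // The merger layer only supports concatenation along dimension 0 or 1, so for other axes
+    // the permutation pair swizzles the concat axis into a supported position and back again.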
+    CreatePermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);
+
+    outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
+    outputInfo.SetShape(outputShape);
+
+    // This is a no-op for identity swizzles; otherwise it replaces both
+    // the handles and shapes with the swizzled layer output handles and shapes.
+    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);
+
+    // Create an armnn merger layer descriptor - this will also perform validation on the input shapes
+    armnn::OriginsDescriptor mergerDescriptor;
+    try
+    {
+        // The merger descriptor is always created across the only supported concat
+        // dimension, which is 0 or 1
+        mergerDescriptor =
+            armnn::CreateMergerDescriptorForConcatenation(
+                inputShapes.begin(), inputShapes.end(), concatDim);
+    }
+    catch (const armnn::Exception& error)
+    {
+        return Fail("%s: Error preparing merger descriptor. %s", __func__, error.what());
+    }
+
+    // Validate the output shape is correct given the input shapes based on the
+    // only valid concat dimension which is 0 or 1
+    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
+    {
+        return Fail("%s: Error validating the output shape for concat", __func__);
+    }
+
+    std::vector<const armnn::TensorInfo*> inputTensorInfos;
+    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
+        [](const LayerInputHandle& h) -> const armnn::TensorInfo* { return &h.GetTensorInfo(); });
+    if (!IsLayerSupported(__func__,
+                          armnn::IsMergerSupported,
+                          data.m_Compute,
+                          inputTensorInfos,
+                          mergerDescriptor))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddMergerLayer(mergerDescriptor);
+    assert(layer != nullptr);
+    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    // Connect inputs to the layer
+    const int numInputSlots = layer->GetNumInputSlots();
+    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
+    for (int i = 0; i < numInputSlots; ++i)
+    {
+        // connect the input directly to the merge (concat) layer
+        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
+    }
+
+    // Add permutation layer and connect the output to it, the permutation becomes the output layer
+    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
+                                                               layer->GetOutputSlot(0),
+                                                               permutationPair.second);
+    layer = &deswizzleLayer;
+
+    if (inputsHaveBeenReshaped)
+    {
+        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
+
+        // Undo the reshape knowing the amount of dimensions added
+        if (tensorDimensionsAdded == 1)
+        {
+            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
+                                                          afterConcatInfo.GetShape()[2] }));
+        }
+        else if (tensorDimensionsAdded == 2)
+        {
+            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2],
+                                                          afterConcatInfo.GetShape()[3] }));
+        }
+
+        layer = &AddReshapeLayer(
+                *data.m_Network,
+                layer->GetOutputSlot(0),
+                afterConcatInfo
+        );
+    }
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+}
+
+bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    const armnn::TensorInfo swizzledInputInfo  = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
+    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);
+
+    // ArmNN does not currently support non-fixed weights or bias
+    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data, NHWCToArmNN);
+    const ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin(operation, 2, model, data);
+
+    if (!weightsPin.IsValid() || !biasPin.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    armnn::ConstTensor weights = weightsPin.GetConstTensor();
+    armnn::ConstTensor bias = biasPin.GetConstTensor();
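+    // For quantized tensors the bias scale is expected to equal inputScale * weightsScale;
+    // SanitizeBiasQuantizationScale corrects small float deviations from that product.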
+    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), swizzledInputInfo);
+
+    armnn::Convolution2dDescriptor desc;
+    ActivationFn activation;
+
+    if (operation.inputs.size() == 10)
+    {
+        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data)   ||
+            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data)  ||
+            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data)    ||
+            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
+            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data)   ||
+            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data)   ||
+            !GetInputActivationFunction(operation, 9, activation, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs", __func__);
+        }
+    }
+    else if (operation.inputs.size() == 7)
+    {
+        android::nn::PaddingScheme paddingScheme;
+        if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data)               ||
+            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
+            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
+            !GetInputActivationFunction(operation, 6, activation, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs", __func__);
+        }
+
+        const uint32_t kernelX = weights.GetShape()[3];
+        const uint32_t kernelY = weights.GetShape()[2];
+        const uint32_t inputX  = swizzledInputInfo.GetShape()[3];
+        const uint32_t inputY  = swizzledInputInfo.GetShape()[2];
+
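+        // For the implicit padding scheme, explicit pads are derived from the Android NN rule:
+        // SAME pads so that ceil(input / stride) outputs are produced; VALID uses zero padding.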
+        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
+        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
+    }
+    else
+    {
+        return Fail("%s: Unsupported number of operation inputs", __func__);
+    }
+
+    desc.m_BiasEnabled = true;
+    auto biases = boost::make_optional(bias.GetInfo());
+
+    if (!IsLayerSupported(__func__,
+                          armnn::IsConvolution2dSupported,
+                          data.m_Compute,
+                          swizzledInputInfo,
+                          swizzledOutputInfo,
+                          desc,
+                          weights.GetInfo(),
+                          biases))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc, weights, bias);
+    armnn::IConnectableLayer* endLayer = ProcessActivation(swizzledOutputInfo, activation, startLayer, data);
+
+    if (endLayer != nullptr)
+    {
+        armnn::IConnectableLayer& outSwizzleLayer =
+                SwizzleInDeswizzleOut(*data.m_Network, input, *startLayer, *endLayer);
+        return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer, model, data);
+    }
+    else
+    {
+        return Fail("%s: ProcessActivation failed", __func__);
+    }
+}
+
+bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    const armnn::TensorInfo swizzledInputInfo  = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
+    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);
+
+    // ArmNN does not currently support non-fixed weights or bias
+
+    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
+    // but in ArmNN it needs to be [ M, I, H, W ]
+    const Operand* weightsOperand = GetInputOperand(operation, 1, model);
+
+    if (weightsOperand == nullptr)
+    {
+        return Fail("%s: Operand is invalid", __func__);
+    }
+
+    // Reinterpret weight data as [ H, W, I, M ]
+    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1], weightsOperand->dimensions[2],
+                                      inputInfo.GetShape()[3],
+                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });
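+    // E.g. Android weights of shape [ 1, 3, 3, 16 ] with an input depth I == 8 are reinterpreted
+    // as [ 3, 3, 8, 2 ], i.e. a depth multiplier M == 2.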
+
+    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
+    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };
+    ConstTensorPin weightsPin =
+            ConvertOperationInputToConstTensorPin(operation, 1, model, data, HWIMToMIHW, &weightsShape);
+
+    // Bias is a 1D tensor
+    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);
+
+    if (!weightsPin.IsValid() || !biasPin.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    armnn::ConstTensor weights = weightsPin.GetConstTensor();
+    armnn::ConstTensor bias = biasPin.GetConstTensor();
+    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), swizzledInputInfo);
+
+    armnn::DepthwiseConvolution2dDescriptor desc;
+    ActivationFn activation;
+
+    if (operation.inputs.size() == 11)
+    {
+        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data)         ||
+            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data)        ||
+            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data)          ||
+            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data)       ||
+            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data)         ||
+            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data)         ||
+            !GetInputActivationFunction(operation,  10, activation, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs", __func__);
+        }
+    }
+    else if (operation.inputs.size() == 8)
+    {
+        android::nn::PaddingScheme paddingScheme;
+        if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data)                       ||
+            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data)         ||
+            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data)         ||
+            !GetInputActivationFunction(operation, 7, activation, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs", __func__);
+        }
+
+        const uint32_t kernelX = weights.GetShape()[3];
+        const uint32_t kernelY = weights.GetShape()[2];
+        const uint32_t inputX  = swizzledInputInfo.GetShape()[3];
+        const uint32_t inputY  = swizzledInputInfo.GetShape()[2];
+
+        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
+        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
+    }
+    else
+    {
+        return Fail("%s: Unsupported number of operation inputs", __func__);
+    }
+
+    desc.m_BiasEnabled = true;
+    auto biases = boost::make_optional(bias.GetInfo());
+
+    if (!IsLayerSupported(__func__,
+                          armnn::IsDepthwiseConvolutionSupported,
+                          data.m_Compute,
+                          swizzledInputInfo,
+                          swizzledOutputInfo,
+                          desc,
+                          weights.GetInfo(),
+                          biases))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, bias);
+    armnn::IConnectableLayer* endLayer   = ProcessActivation(swizzledOutputInfo, activation, startLayer, data);
+
+    if (endLayer != nullptr)
+    {
+        armnn::IConnectableLayer& outSwizzleLayer =
+                SwizzleInDeswizzleOut(*data.m_Network, input, *startLayer, *endLayer);
+        return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer, model, data);
+    }
+    else
+    {
+        return Fail("%s: ProcessActivation failed", __func__);
+    }
+}
+
+bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return Fail("%s: Operation has invalid outputs", __func__);
+    }
+
+    if (!IsLayerSupported(__func__,
+                          armnn::IsFloorSupported,
+                          data.m_Compute,
+                          input.GetTensorInfo(),
+                          GetTensorInfoForOperand(*outputOperand)))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+}
+
+bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    // ArmNN does not currently support non-fixed weights or bias
+    ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data); // 2D
+    ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin(operation, 2, model, data);    // 1D
+
+    if (!weightsPin.IsValid() || !biasPin.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    armnn::ConstTensor weights = weightsPin.GetConstTensor();
+    armnn::ConstTensor bias    = biasPin.GetConstTensor();
+
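+    // Inputs of rank > 2 are flattened to 2D so they match the [ batch_size, input_size ] shape
+    // expected by the weights. E.g. an input of shape [ 2, 4, 4 ] with weights of shape [ N, 32 ]
+    // gives dim1 == 16 and divisor == 2, so the input is reshaped to [ 1, 32 ].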
+    armnn::TensorInfo reshapedInfo = inputInfo;
+    if (inputInfo.GetNumDimensions() > 2U)
+    {
+        unsigned int dim0 = inputInfo.GetShape()[0];
+        unsigned int dim1 = inputInfo.GetShape()[1];
+
+        for (unsigned int i = 2U; i < inputInfo.GetNumDimensions(); ++i)
+        {
+            dim1 *= inputInfo.GetShape()[i];
+        }
+
+        unsigned int divisor = weights.GetInfo().GetShape()[1] / dim1;
+        if (dim0 % divisor != 0)
+        {
+            return Fail("%s: Failed to deduce tensor shape", __func__);
+        }
+
+        reshapedInfo.SetShape(armnn::TensorShape({dim0 / divisor, dim1 * divisor}));
+    }
+
+    // Ensure the bias quantization scale is within 1% of inputScale * weightsScale (small float differences can exist)
+    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);
+
+    ActivationFn activationFunction;
+    if (!GetInputActivationFunction(operation, 3, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    armnn::FullyConnectedDescriptor desc;
+    desc.m_TransposeWeightMatrix = true;
+    desc.m_BiasEnabled           = true;
+
+    if (!IsLayerSupported(__func__,
+                          armnn::IsFullyConnectedSupported,
+                          data.m_Compute,
+                          inputInfo,
+                          outputInfo,
+                          weights.GetInfo(),
+                          bias.GetInfo(),
+                          desc))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc, weights, bias);
+    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
+
+    if (endLayer != nullptr)
+    {
+        if (inputInfo.GetNumDimensions() > 2U)
+        {
+            armnn::ReshapeDescriptor reshapeDescriptor;
+            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
+
+            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
+            assert(reshapeLayer != nullptr);
+            input.Connect(reshapeLayer->GetInputSlot(0));
+            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
+            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
+        }
+        else
+        {
+            input.Connect(startLayer->GetInputSlot(0));
+        }
+
+        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
+    }
+    else
+    {
+        return Fail("%s: ProcessActivation failed", __func__);
+    }
+}
+
+bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
+                                                  const Model& model,
+                                                  ConversionData& data)
+{
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    const armnn::TensorInfo swizzledInputInfo  = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
+    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);
+
+    armnn::NormalizationDescriptor descriptor;
+
+    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
+    descriptor.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;
+
+    if (!input.IsValid() ||
+        !GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
+        !GetInputFloat32(operation, 2, descriptor.m_K, model, data) ||
+        !GetInputFloat32(operation, 3, descriptor.m_Alpha, model, data) ||
+        !GetInputFloat32(operation, 4, descriptor.m_Beta, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // ArmNN expects normSize to be the full size of the normalization
+    // window rather than the radius as in AndroidNN.
+    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
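+    // E.g. an AndroidNN radius of 2 becomes an ArmNN window size of 5.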
+
+    if (!IsLayerSupported(__func__,
+                        armnn::IsNormalizationSupported,
+                        data.m_Compute,
+                        swizzledInputInfo,
+                        swizzledOutputInfo,
+                        descriptor))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
+    assert(layer != nullptr);
+    layer->GetOutputSlot(0).SetTensorInfo(swizzledOutputInfo);
+
+    armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*data.m_Network, input, *layer);
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer, model, data);
+}
+
+bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
+{
+    armnn::ActivationDescriptor desc;
+    desc.m_Function = armnn::ActivationFunction::Sigmoid;
+
+    return ConvertToActivation(operation, __func__, desc, model, data);
+}
+
+bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
+{
+    // Inputs:
+    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
+    //      “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Could not read input 0: input", __func__);
+    }
+    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
+    LayerInputHandle outputStateIn = ConvertToLayerInputHandle(operation, 18, model, data);
+    if (!outputStateIn.IsValid())
+    {
+        return Fail("%s: Could not read input 18: outputStateIn", __func__);
+    }
+    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
+    LayerInputHandle cellStateIn = ConvertToLayerInputHandle(operation, 19, model, data);
+    if (!cellStateIn.IsValid())
+    {
+        return Fail("%s: Could not read input 19: cellStateIn", __func__);
+    }
+
+    // Get the mandatory input tensors:
+    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+    //     [num_units, input_size].
+    const ConstTensorPin inputToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);
+    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
+    const ConstTensorPin inputToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 3, model, data);
+    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+    //     [num_units, input_size].
+    const ConstTensorPin inputToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 4, model, data);
+    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+    //     [num_units, output_size].
+    const ConstTensorPin recurrentToForgetWeightsPin =
+            ConvertOperationInputToConstTensorPin(operation, 6, model, data);
+    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+    //     [num_units, output_size].
+    const ConstTensorPin recurrentToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 7, model, data);
+    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+    //     [num_units, output_size].
+    const ConstTensorPin recurrentToOutputWeightsPin =
+            ConvertOperationInputToConstTensorPin(operation, 8, model, data);
+    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+    const ConstTensorPin forgetGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 13, model, data);
+    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+    const ConstTensorPin cellBiasPin = ConvertOperationInputToConstTensorPin(operation, 14, model, data);
+    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+    const ConstTensorPin outputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 15, model, data);
+
+    if (!inputToForgetWeightsPin.IsValid() ||
+        !inputToCellWeightsPin.IsValid() ||
+        !inputToOutputWeightsPin.IsValid() ||
+        !recurrentToForgetWeightsPin.IsValid() ||
+        !recurrentToCellWeightsPin.IsValid() ||
+        !recurrentToOutputWeightsPin.IsValid() ||
+        !forgetGateBiasPin.IsValid() ||
+        !cellBiasPin.IsValid() ||
+        !outputGateBiasPin.IsValid())
+    {
+        return Fail("%s: Operation has invalid tensor inputs", __func__);
+    }
+
+    // Get the optional input tensors:
+    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
+    const ConstTensorPin inputToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data);
+    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
+    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
+    const ConstTensorPin recurrentToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 5, model, data);
+    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+    const ConstTensorPin cellToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 9, model, data);
+    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+    const ConstTensorPin cellToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 10, model, data);
+    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+    const ConstTensorPin cellToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 11, model, data);
+    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+    const ConstTensorPin inputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 12, model, data);
+    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+    //     [output_size, num_units].
+    const ConstTensorPin projectionWeightsPin = ConvertOperationInputToConstTensorPin(operation, 16, model, data);
+    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
+    const ConstTensorPin projectionBiasPin = ConvertOperationInputToConstTensorPin(operation, 17, model, data);
+
+    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
+        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
+        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
+        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
+        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
+        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
+        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
+        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
+    {
+        return Fail("%s: Operation has invalid tensor inputs", __func__);
+    }
+
+    // Get the mandatory input scalars (actually 1-D tensors of size 1):
+    // 20: The activation function: A value indicating the activation function:
+    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
+    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
+    //     If set to 0.0 then clipping is disabled.
+    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
+    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+    ActivationFn activation;
+    float cellClip;
+    float projClip;
+    if (!GetInputActivationFunctionFromTensor(operation, 20, activation, model, data) ||
+        !GetInputScalar(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
+        !GetInputScalar(operation, 22, OperandType::FLOAT32, projClip, model, data))
+    {
+        return Fail("%s: Operation has invalid scalar inputs", __func__);
+    }
+
+    // Outputs:
+    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 3]
+    //     with CIFG, or [batch_size, num_units * 4] without CIFG.
+    const Operand* scratchBuffer = GetOutputOperand(operation, 0, model);
+    if (!scratchBuffer)
+    {
+        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
+    }
+    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
+    const Operand* outputStateOut = GetOutputOperand(operation, 1, model);
+    if (!outputStateOut)
+    {
+        return Fail("%s: Could not read output 1: outputStateOut", __func__);
+    }
+    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
+    const Operand* cellStateOut = GetOutputOperand(operation, 2, model);
+    if (!cellStateOut)
+    {
+        return Fail("%s: Could not read output 2: cellStateOut", __func__);
+    }
+    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
+    //     effectively the same as the current “output state (out)” value.
+    const Operand* output = GetOutputOperand(operation, 3, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 3: output", __func__);
+    }
+
+    // set the params structure for the AddLstmLayer call
+    armnn::LstmInputParams params;
+    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
+    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
+    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
+    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
+    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
+    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
+    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
+    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
+    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
+    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
+    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
+    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
+    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
+    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
+    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
+    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
+    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
+
+    // set the layer descriptor
+    armnn::LstmDescriptor desc;
+    desc.m_ActivationFunc = activation;
+    desc.m_ClippingThresCell = cellClip;
+    desc.m_ClippingThresProj = projClip;
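+    // CIFG ("coupled input and forget gate") is inferred from the absence of the input gate tensors;
+    // peephole and projection are likewise inferred from the presence of their optional tensors.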
+    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
+                          params.m_RecurrentToInputWeights == nullptr ||
+                          params.m_InputGateBias == nullptr);
+    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
+                              params.m_CellToOutputWeights != nullptr);
+    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
+
+    // validate the optional input groups
+    if (desc.m_CifgEnabled &&
+        (params.m_InputToInputWeights != nullptr ||
+         params.m_RecurrentToInputWeights != nullptr ||
+         params.m_InputGateBias != nullptr))
+    {
+        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
+                    " and input gate bias must be provided", __func__);
+    }
+
+    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
+    {
+        return Fail("%s: projection bias should not be provided without projection weights", __func__);
+    }
+
+    if (desc.m_PeepholeEnabled &&
+        (params.m_CellToForgetWeights == nullptr ||
+         params.m_CellToOutputWeights == nullptr ||
+         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
+    {
+        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
+                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
+    }
+
+    // Check if the layer is supported
+    // Inputs
+    const armnn::TensorInfo& inputInfo         = input.GetTensorInfo();
+    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
+    const armnn::TensorInfo& cellStateInInfo   = cellStateIn.GetTensorInfo();
+
+    // Outputs
+    const armnn::TensorInfo& scratchBufferInfo  = GetTensorInfoForOperand(*scratchBuffer);
+    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
+    const armnn::TensorInfo& cellStateOutInfo   = GetTensorInfoForOperand(*cellStateOut);
+    const armnn::TensorInfo& outputInfo         = GetTensorInfoForOperand(*output);
+
+    // Basic parameters
+    const armnn::TensorInfo& inputToForgetWeights = params.m_InputToForgetWeights->GetInfo();
+    const armnn::TensorInfo& inputToCellWeights   = params.m_InputToCellWeights->GetInfo();
+    const armnn::TensorInfo& inputToOutputWeights = params.m_InputToOutputWeights->GetInfo();
+    const armnn::TensorInfo& recurrentToForgetWeights = params.m_RecurrentToForgetWeights->GetInfo();
+    const armnn::TensorInfo& recurrentToCellWeights = params.m_RecurrentToCellWeights->GetInfo();
+    const armnn::TensorInfo& recurrentToOutputWeights = params.m_RecurrentToOutputWeights->GetInfo();
+    const armnn::TensorInfo& forgetGateBias = params.m_ForgetGateBias->GetInfo();
+    const armnn::TensorInfo& cellBias = params.m_CellBias->GetInfo();
+    const armnn::TensorInfo& outputGateBias = params.m_OutputGateBias->GetInfo();
+
+    // Optional parameters
+    const armnn::TensorInfo* inputToInputWeights = nullptr;
+    const armnn::TensorInfo* recurrentToInputWeights = nullptr;
+    const armnn::TensorInfo* cellToInputWeights = nullptr;
+    const armnn::TensorInfo* inputGateBias = nullptr;
+    const armnn::TensorInfo* projectionWeights = nullptr;
+    const armnn::TensorInfo* projectionBias    = nullptr;
+    const armnn::TensorInfo* cellToForgetWeights = nullptr;
+    const armnn::TensorInfo* cellToOutputWeights = nullptr;
+
+    if (!desc.m_CifgEnabled)
+    {
+        inputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
+        recurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
+        if (params.m_CellToInputWeights != nullptr)
+        {
+            cellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
+        }
+        inputGateBias = &(params.m_InputGateBias->GetInfo());
+    }
+
+    if (desc.m_ProjectionEnabled)
+    {
+        projectionWeights = &(params.m_ProjectionWeights->GetInfo());
+        if (params.m_ProjectionBias != nullptr)
+        {
+            projectionBias = &(params.m_ProjectionBias->GetInfo());
+        }
+    }
+
+    if (desc.m_PeepholeEnabled)
+    {
+        cellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
+        cellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
+    }
+
+    if (!IsLayerSupported(__func__,
+                          armnn::IsLstmSupported,
+                          data.m_Compute,
+                          inputInfo,
+                          outputStateInInfo,
+                          cellStateInInfo,
+                          scratchBufferInfo,
+                          outputStateOutInfo,
+                          cellStateOutInfo,
+                          outputInfo,
+                          desc,
+                          inputToForgetWeights,
+                          inputToCellWeights,
+                          inputToOutputWeights,
+                          recurrentToForgetWeights,
+                          recurrentToCellWeights,
+                          recurrentToOutputWeights,
+                          forgetGateBias,
+                          cellBias,
+                          outputGateBias,
+                          inputToInputWeights,
+                          recurrentToInputWeights,
+                          cellToInputWeights,
+                          inputGateBias,
+                          projectionWeights,
+                          projectionBias,
+                          cellToForgetWeights,
+                          cellToOutputWeights))
+    {
+        return false;
+    }
+
+    // Add the layer
+    armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
+
+    input.Connect(layer->GetInputSlot(0));
+    outputStateIn.Connect(layer->GetInputSlot(1));
+    cellStateIn.Connect(layer->GetInputSlot(2));
+
+    return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
+            SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
+            SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
+            SetupAndTrackLayerOutputSlot(operation, 3, *layer, 3, model, data));
+}
+
+bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    const armnn::TensorInfo swizzledInputInfo  = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
+    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);
+
+    if (!IsLayerSupported(__func__,
+                          armnn::IsL2NormalizationSupported,
+                          data.m_Compute,
+                          swizzledInputInfo,
+                          swizzledOutputInfo))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer();
+    assert(layer != nullptr);
+    layer->GetOutputSlot(0).SetTensorInfo(swizzledOutputInfo);
+
+    armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*data.m_Network, input, *layer);
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer, model, data);
+}
+
+bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+    return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
+}
+
+bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+    return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
+}
+
+bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The FuseActivation parameter is always the input at index 2,
+    // and it is optional
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+
+    if (outputOperand == nullptr)
+    {
+        return false;
+    }
+
+    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
+
+    if (!IsLayerSupported(__func__,
+                          armnn::IsMultiplicationSupported,
+                          data.m_Compute,
+                          input0.GetTensorInfo(),
+                          input1.GetTensorInfo(),
+                          outInfo))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
+    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
+
+    if (endLayer != nullptr)
+    {
+        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
+        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
+    }
+    else
+    {
+        return Fail("%s: ProcessActivation failed", __func__);
+    }
+}
+
+bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
+{
+    armnn::ActivationDescriptor desc;
+    desc.m_Function = armnn::ActivationFunction::ReLu;
+
+    return ConvertToActivation(operation, __func__, desc, model, data);
+}
+
+bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
+{
+    armnn::ActivationDescriptor desc;
+    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
+    desc.m_A        = 1.0f;
+    desc.m_B        = -1.0f;
+
+    return ConvertToActivation(operation, __func__, desc, model, data);
+}
+
+bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
+{
+    armnn::ActivationDescriptor desc;
+    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
+    desc.m_A        = 6.0f;
+
+    return ConvertToActivation(operation, __func__, desc, model, data);
+}
+
+bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return Fail("%s: Operation has no outputs", __func__);
+    }
+
+    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
+
+    armnn::SoftmaxDescriptor desc;
+    if (!GetInputFloat32(operation, 1, desc.m_Beta, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    if (!IsLayerSupported(__func__,
+                          armnn::IsSoftmaxSupported,
+                          data.m_Compute,
+                          input.GetTensorInfo(),
+                          outInfo,
+                          desc))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+}
+
+bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
+{
+    armnn::ActivationDescriptor desc;
+    desc.m_Function = armnn::ActivationFunction::TanH;
+    desc.m_A = 1.0f; // Android NN does not support TanH parameters
+    desc.m_B = 1.0f; // set to 1.0f for unity scaling
+
+    return ConvertToActivation(operation, __func__, desc, model, data);
+}
+
+bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
+{
+    const Operand* inputOperand = GetInputOperand(operation, 0, model);
+    const Operand* requestedShapeOperand = GetInputOperand(operation, 1, model);
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+
+    if (inputOperand == nullptr
+        || requestedShapeOperand == nullptr
+        || outputOperand == nullptr)
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    if (requestedShapeOperand->dimensions.size() != 1)
+    {
+        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
+            __func__, requestedShapeOperand->dimensions.size());
+    }
+
+    std::vector<int32_t> targetDimensions;
+    if (!GetTensorInt32Values(*requestedShapeOperand, targetDimensions, model, data))
+    {
+        return Fail("%s: Could not read values of input 1", __func__);
+    }
+
+    const Shape inputOperandShape = GetOperandShape(*inputOperand);
+
+    Shape requestedShape;
+    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
+    // function that resolves these values into a fully specified tensor shape.
+    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
+    {
+        return Fail("%s: Failed to resolve the requested shape", __func__);
+    }
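+    // Illustrative example: an input of shape [2, 3, 4] with targetDimensions [-1, 6]
+    // resolves to the fully specified shape [4, 6] (24 elements / 6 = 4)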
+
+    const Shape outputOperandShape = GetOperandShape(*outputOperand);
+    if (!SameShape(requestedShape, outputOperandShape))
+    {
+        return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
+    }
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Could not read input 0", __func__);
+    }
+
+    if (!IsLayerSupported(__func__,
+                          armnn::IsReshapeSupported,
+                          data.m_Compute,
+                          input.GetTensorInfo()))
+    {
+        return false;
+    }
+
+    armnn::ReshapeDescriptor reshapeDescriptor;
+    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
+                                                         requestedShape.dimensions.data());
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+}
+
+bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Could not read input 0", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
+    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);
+
+    if (!IsLayerSupported(__func__,
+                          armnn::IsResizeBilinearSupported,
+                          data.m_Compute,
+                          swizzledInputInfo))
+    {
+        return false;
+    }
+
+    armnn::ResizeBilinearDescriptor desc;
+
+    if (   !GetInputScalar(operation, 1, OperandType::INT32, desc.m_TargetHeight, model, data)
+        || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_TargetWidth, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddResizeBilinearLayer(desc);
+    assert(layer != nullptr);
+    layer->GetOutputSlot(0).SetTensorInfo(swizzledOutputInfo);
+
+    armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*data.m_Network, input, *layer);
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer, model, data);
+}
+
+} // namespace hal_1_0
+} // namespace armnn_driver
\ No newline at end of file
diff --git a/1.0/HalPolicy.hpp b/1.0/HalPolicy.hpp
new file mode 100644
index 0000000..c596075
--- /dev/null
+++ b/1.0/HalPolicy.hpp
@@ -0,0 +1,75 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "ConversionUtils.hpp"
+
+#include <HalInterfaces.h>
+
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+
+namespace armnn_driver
+{
+namespace hal_1_0
+{
+
+class HalPolicy
+{
+public:
+    using Model                     = V1_0::Model;
+    using Operation                 = V1_0::Operation;
+    using getSupportedOperations_cb = V1_0::IDevice::getSupportedOperations_cb;
+
+    static bool ConvertOperation(const Operation& operation, const Model& model, ConversionData& data);
+
+private:
+    static bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertLocalResponseNormalization(const Operation& operation,
+                                                  const Model& model,
+                                                  ConversionData& data);
+
+    static bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertLstm(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertReLu(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertTanH(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data);
+};
+
+} // namespace hal_1_0
+} // namespace armnn_driver
diff --git a/1.1/ArmnnDriver.hpp b/1.1/ArmnnDriver.hpp
index 3824805..ef8bca8 100644
--- a/1.1/ArmnnDriver.hpp
+++ b/1.1/ArmnnDriver.hpp
@@ -9,114 +9,109 @@
 
 #include "ArmnnDevice.hpp"
 #include "ArmnnDriverImpl.hpp"
+#include "HalPolicy.hpp"
+
 #include "../ArmnnDriverImpl.hpp"
 #include "../1.0/ArmnnDriverImpl.hpp"
+#include "../1.0/HalPolicy.hpp"
 
 #include <log/log.h>
 
 namespace armnn_driver
 {
-namespace V1_1
+namespace hal_1_1
 {
 
-class ArmnnDriver : public ArmnnDevice, public ::android::hardware::neuralnetworks::V1_1::IDevice
+class ArmnnDriver : public ArmnnDevice, public V1_1::IDevice
 {
 public:
     ArmnnDriver(DriverOptions options)
         : ArmnnDevice(std::move(options))
     {
-        ALOGV("V1_1::ArmnnDriver::ArmnnDriver()");
+        ALOGV("hal_1_1::ArmnnDriver::ArmnnDriver()");
     }
     ~ArmnnDriver() {}
 
 public:
-    Return<void> getCapabilities(
-            ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb cb) override
+    Return<void> getCapabilities(V1_0::IDevice::getCapabilities_cb cb) override
     {
-        ALOGV("V1_1::ArmnnDriver::getCapabilities()");
+        ALOGV("hal_1_1::ArmnnDriver::getCapabilities()");
 
-        return V1_0::ArmnnDriverImpl::getCapabilities(m_Runtime,
-                                                      cb);
+        return hal_1_0::ArmnnDriverImpl::getCapabilities(m_Runtime, cb);
     }
 
-    Return<void> getSupportedOperations(
-            const ::android::hardware::neuralnetworks::V1_0::Model& model,
-            ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb cb) override
+    Return<void> getSupportedOperations(const V1_0::Model& model,
+                                        V1_0::IDevice::getSupportedOperations_cb cb) override
     {
-        ALOGV("V1_1::ArmnnDriver::getSupportedOperations()");
+        ALOGV("hal_1_1::ArmnnDriver::getSupportedOperations()");
 
-        return armnn_driver::ArmnnDriverImpl<HalVersion_1_0>::getSupportedOperations(m_Runtime,
-                                                                                     m_Options,
-                                                                                     model,
-                                                                                     cb);
+        return armnn_driver::ArmnnDriverImpl<hal_1_0::HalPolicy>::getSupportedOperations(m_Runtime,
+                                                                                         m_Options,
+                                                                                         model,
+                                                                                         cb);
     }
 
-    Return<ErrorStatus> prepareModel(
-            const ::android::hardware::neuralnetworks::V1_0::Model& model,
-            const android::sp<IPreparedModelCallback>& cb) override
+    Return<ErrorStatus> prepareModel(const V1_0::Model& model,
+                                     const android::sp<IPreparedModelCallback>& cb) override
     {
-        ALOGV("V1_1::ArmnnDriver::prepareModel()");
+        ALOGV("hal_1_1::ArmnnDriver::prepareModel()");
 
-        return armnn_driver::ArmnnDriverImpl<HalVersion_1_0>::prepareModel(m_Runtime,
-                                                                           m_ClTunedParameters,
-                                                                           m_Options,
-                                                                           model,
-                                                                           cb);
+        return armnn_driver::ArmnnDriverImpl<hal_1_0::HalPolicy>::prepareModel(m_Runtime,
+                                                                               m_ClTunedParameters,
+                                                                               m_Options,
+                                                                               model,
+                                                                               cb);
     }
 
-    Return<void> getCapabilities_1_1(
-            ::android::hardware::neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb cb) override
+    Return<void> getCapabilities_1_1(V1_1::IDevice::getCapabilities_1_1_cb cb) override
     {
-        ALOGV("V1_1::ArmnnDriver::getCapabilities_1_1()");
+        ALOGV("hal_1_1::ArmnnDriver::getCapabilities_1_1()");
 
-        return V1_1::ArmnnDriverImpl::getCapabilities_1_1(m_Runtime,
-                                                          cb);
+        return hal_1_1::ArmnnDriverImpl::getCapabilities_1_1(m_Runtime, cb);
     }
 
-    Return<void> getSupportedOperations_1_1(
-            const ::android::hardware::neuralnetworks::V1_1::Model& model,
-            ::android::hardware::neuralnetworks::V1_1::IDevice::getSupportedOperations_1_1_cb cb) override
+    Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
+                                            V1_1::IDevice::getSupportedOperations_1_1_cb cb) override
     {
-        ALOGV("V1_1::ArmnnDriver::getSupportedOperations_1_1()");
+        ALOGV("hal_1_1::ArmnnDriver::getSupportedOperations_1_1()");
 
-        return armnn_driver::ArmnnDriverImpl<HalVersion_1_1>::getSupportedOperations(m_Runtime,
-                                                                                     m_Options,
-                                                                                     model,
-                                                                                     cb);
+        return armnn_driver::ArmnnDriverImpl<hal_1_1::HalPolicy>::getSupportedOperations(m_Runtime,
+                                                                                         m_Options,
+                                                                                         model,
+                                                                                         cb);
     }
 
-    Return<ErrorStatus> prepareModel_1_1(
-            const ::android::hardware::neuralnetworks::V1_1::Model& model,
-            ::android::hardware::neuralnetworks::V1_1::ExecutionPreference preference,
-            const android::sp<IPreparedModelCallback>& cb) override
+    Return<ErrorStatus> prepareModel_1_1(const V1_1::Model& model,
+                                         V1_1::ExecutionPreference preference,
+                                         const android::sp<IPreparedModelCallback>& cb) override
     {
-        ALOGV("V1_1::ArmnnDriver::prepareModel_1_1()");
+        ALOGV("hal_1_1::ArmnnDriver::prepareModel_1_1()");
 
         if (!(preference == ExecutionPreference::LOW_POWER ||
               preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
               preference == ExecutionPreference::SUSTAINED_SPEED))
         {
-            ALOGV("V1_1::ArmnnDriver::prepareModel_1_1: Invalid execution preference");
+            ALOGV("hal_1_1::ArmnnDriver::prepareModel_1_1: Invalid execution preference");
             cb->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
             return ErrorStatus::INVALID_ARGUMENT;
         }
 
-        return armnn_driver::ArmnnDriverImpl<HalVersion_1_1>::prepareModel(m_Runtime,
-                                                                           m_ClTunedParameters,
-                                                                           m_Options,
-                                                                           model,
-                                                                           cb,
-                                                                           model.relaxComputationFloat32toFloat16
-                                                                           && m_Options.GetFp16Enabled());
+        return armnn_driver::ArmnnDriverImpl<hal_1_1::HalPolicy>::prepareModel(m_Runtime,
+                                                                               m_ClTunedParameters,
+                                                                               m_Options,
+                                                                               model,
+                                                                               cb,
+                                                                               model.relaxComputationFloat32toFloat16
+                                                                               && m_Options.GetFp16Enabled());
     }
 
     Return<DeviceStatus> getStatus() override
     {
-        ALOGV("V1_1::ArmnnDriver::getStatus()");
+        ALOGV("hal_1_1::ArmnnDriver::getStatus()");
 
-        return armnn_driver::ArmnnDriverImpl<HalVersion_1_1>::getStatus();
+        return armnn_driver::ArmnnDriverImpl<hal_1_1::HalPolicy>::getStatus();
     }
 };
 
-} // armnn_driver::namespace V1_1
-} // namespace armnn_driver
+} // namespace hal_1_1
+} // namespace armnn_driver
\ No newline at end of file
diff --git a/1.1/ArmnnDriverImpl.cpp b/1.1/ArmnnDriverImpl.cpp
index 0a68953..d8939a0 100644
--- a/1.1/ArmnnDriverImpl.cpp
+++ b/1.1/ArmnnDriverImpl.cpp
@@ -8,34 +8,28 @@
 
 #include <log/log.h>
 
-using namespace std;
-using namespace android;
-using namespace android::nn;
-using namespace android::hardware;
-
 namespace
 {
 
-const char *g_Float32PerformanceExecTimeName = "ArmNN.float32Performance.execTime";
-const char *g_Float32PerformancePowerUsageName = "ArmNN.float32Performance.powerUsage";
-const char *g_Quantized8PerformanceExecTimeName = "ArmNN.quantized8Performance.execTime";
-const char *g_Quantized8PerformancePowerUsageName = "ArmNN.quantized8Performance.powerUsage";
+const char *g_Float32PerformanceExecTimeName             = "ArmNN.float32Performance.execTime";
+const char *g_Float32PerformancePowerUsageName           = "ArmNN.float32Performance.powerUsage";
+const char *g_Quantized8PerformanceExecTimeName          = "ArmNN.quantized8Performance.execTime";
+const char *g_Quantized8PerformancePowerUsageName        = "ArmNN.quantized8Performance.powerUsage";
 const char *g_RelaxedFloat32toFloat16PerformanceExecTime = "ArmNN.relaxedFloat32toFloat16Performance.execTime";
 
 } // anonymous namespace
 
 namespace armnn_driver
 {
-namespace V1_1
+namespace hal_1_1
 {
 
-Return<void> ArmnnDriverImpl::getCapabilities_1_1(
-        const armnn::IRuntimePtr& runtime,
-        neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb cb)
+Return<void> ArmnnDriverImpl::getCapabilities_1_1(const armnn::IRuntimePtr& runtime,
+                                                  V1_1::IDevice::getCapabilities_1_1_cb cb)
 {
-    ALOGV("V1_1::ArmnnDriverImpl::getCapabilities()");
+    ALOGV("hal_1_1::ArmnnDriverImpl::getCapabilities()");
 
-    neuralnetworks::V1_1::Capabilities capabilities;
+    V1_1::Capabilities capabilities;
     if (runtime)
     {
         capabilities.float32Performance.execTime =
@@ -57,10 +51,10 @@
     }
     else
     {
-        capabilities.float32Performance.execTime = 0;
-        capabilities.float32Performance.powerUsage = 0;
-        capabilities.quantized8Performance.execTime = 0;
-        capabilities.quantized8Performance.powerUsage = 0;
+        capabilities.float32Performance.execTime                 = 0;
+        capabilities.float32Performance.powerUsage               = 0;
+        capabilities.quantized8Performance.execTime              = 0;
+        capabilities.quantized8Performance.powerUsage            = 0;
         capabilities.relaxedFloat32toFloat16Performance.execTime = 0;
 
         cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
@@ -69,5 +63,5 @@
     return Void();
 }
 
-} // namespace armnn_driver::V1_1
-} // namespace armnn_driver
+} // namespace hal_1_1
+} // namespace armnn_driver
\ No newline at end of file
diff --git a/1.1/ArmnnDriverImpl.hpp b/1.1/ArmnnDriverImpl.hpp
index bdb2585..4308bac 100644
--- a/1.1/ArmnnDriverImpl.hpp
+++ b/1.1/ArmnnDriverImpl.hpp
@@ -13,16 +13,15 @@
 
 namespace armnn_driver
 {
-namespace V1_1
+namespace hal_1_1
 {
 
 class ArmnnDriverImpl
 {
 public:
-    static Return<void> getCapabilities_1_1(
-            const armnn::IRuntimePtr& runtime,
-            ::android::hardware::neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb cb);
+    static Return<void> getCapabilities_1_1(const armnn::IRuntimePtr& runtime,
+                                            V1_1::IDevice::getCapabilities_1_1_cb cb);
 };
 
-} // namespace armnn_driver::V1_1
+} // namespace hal_1_1
 } // namespace armnn_driver
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
new file mode 100644
index 0000000..0e66943
--- /dev/null
+++ b/1.1/HalPolicy.cpp
@@ -0,0 +1,89 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "HalPolicy.hpp"
+
+#include "../1.0/HalPolicy.hpp"
+
+namespace armnn_driver
+{
+namespace hal_1_1
+{
+
+bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
+{
+    if (compliantWithV1_0(operation))
+    {
+        hal_1_0::HalPolicy::Operation v10Operation = convertToV1_0(operation);
+        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
+
+        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
+    }
+    else
+    {
+        switch (operation.type)
+        {
+            case V1_1::OperationType::DIV:
+                return ConvertDiv(operation, model, data);
+            default:
+                return Fail("%s: Operation type %s not supported in ArmnnDriver",
+                            __func__, toString(operation.type).c_str());
+        }
+    }
+}
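+
+// For example, an ADD operation in a V1_1 model uses only V1_0 features, so it is
+// downgraded above and converted by hal_1_0::HalPolicy; DIV exists only from V1_1
+// onwards and is handled by ConvertDiv below.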
+
+bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The FuseActivation parameter is always the input at index 2,
+    // and it is optional
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return false;
+    }
+
+    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
+
+    if (!IsLayerSupported(__func__,
+                          armnn::IsDivisionSupported,
+                          data.m_Compute,
+                          input0.GetTensorInfo(),
+                          input1.GetTensorInfo(),
+                          outInfo))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
+    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
+
+    if (endLayer)
+    {
+        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
+        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
+    }
+
+    return Fail("%s: ProcessActivation failed", __func__);
+}
+
+} // namespace hal_1_1
+} // namespace armnn_driver
\ No newline at end of file
diff --git a/1.1/HalPolicy.hpp b/1.1/HalPolicy.hpp
new file mode 100644
index 0000000..3722d49
--- /dev/null
+++ b/1.1/HalPolicy.hpp
@@ -0,0 +1,31 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "ConversionUtils.hpp"
+
+#include <HalInterfaces.h>
+
+namespace armnn_driver
+{
+namespace hal_1_1
+{
+
+class HalPolicy
+{
+public:
+    using Model                     = V1_1::Model;
+    using Operation                 = V1_1::Operation;
+    using getSupportedOperations_cb = V1_1::IDevice::getSupportedOperations_1_1_cb;
+
+    static bool ConvertOperation(const Operation& operation, const Model& model, ConversionData& data);
+
+private:
+    static bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data);
+};
+
+} // namespace hal_1_1
+} // namespace armnn_driver
diff --git a/Android.mk b/Android.mk
index bcfa447..f9d59c8 100644
--- a/Android.mk
+++ b/Android.mk
@@ -48,13 +48,15 @@
 
 LOCAL_SRC_FILES := \
         1.0/ArmnnDriverImpl.cpp \
+        1.0/HalPolicy.cpp \
         ArmnnDriverImpl.cpp \
         DriverOptions.cpp \
         ArmnnDevice.cpp \
         ArmnnPreparedModel.cpp \
         ModelToINetworkConverter.cpp \
         RequestThread.cpp \
-        Utils.cpp
+        Utils.cpp \
+        ConversionUtils.cpp
 
 LOCAL_STATIC_LIBRARIES := \
         libneuralnetworks_common \
@@ -120,14 +122,17 @@
 
 LOCAL_SRC_FILES := \
         1.0/ArmnnDriverImpl.cpp \
+        1.0/HalPolicy.cpp \
         1.1/ArmnnDriverImpl.cpp \
+        1.1/HalPolicy.cpp \
         ArmnnDriverImpl.cpp \
         DriverOptions.cpp \
         ArmnnDevice.cpp \
         ArmnnPreparedModel.cpp \
         ModelToINetworkConverter.cpp \
         RequestThread.cpp \
-        Utils.cpp
+        Utils.cpp \
+        ConversionUtils.cpp
 
 LOCAL_STATIC_LIBRARIES := \
         libneuralnetworks_common \
diff --git a/ArmnnDriver.hpp b/ArmnnDriver.hpp
index 2bf47eb..fd5cfad 100644
--- a/ArmnnDriver.hpp
+++ b/ArmnnDriver.hpp
@@ -9,18 +9,18 @@
 
 #include <log/log.h>
 
-#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
+#if defined(ARMNN_ANDROID_NN_V1_1)
 
 #include "1.1/ArmnnDriver.hpp"
 
 namespace armnn_driver
 {
 
-class ArmnnDriver : public V1_1::ArmnnDriver
+class ArmnnDriver : public hal_1_1::ArmnnDriver
 {
 public:
     ArmnnDriver(DriverOptions options)
-        : V1_1::ArmnnDriver(std::move(options))
+        : hal_1_1::ArmnnDriver(std::move(options))
     {
         ALOGV("ArmnnDriver::ArmnnDriver()");
     }
@@ -36,11 +36,11 @@
 namespace armnn_driver
 {
 
-class ArmnnDriver : public V1_0::ArmnnDriver
+class ArmnnDriver : public hal_1_0::ArmnnDriver
 {
 public:
     ArmnnDriver(DriverOptions options)
-        : V1_0::ArmnnDriver(std::move(options))
+        : hal_1_0::ArmnnDriver(std::move(options))
     {
         ALOGV("ArmnnDriver::ArmnnDriver()");
     }
diff --git a/ArmnnDriverImpl.cpp b/ArmnnDriverImpl.cpp
index c894aef..10da1dd 100644
--- a/ArmnnDriverImpl.cpp
+++ b/ArmnnDriverImpl.cpp
@@ -6,8 +6,8 @@
 #define LOG_TAG "ArmnnDriver"
 
 #include "ArmnnDriverImpl.hpp"
-#include "ModelToINetworkConverter.hpp"
 #include "ArmnnPreparedModel.hpp"
+#include "ModelToINetworkConverter.hpp"
 #include "SystemPropertiesUtils.hpp"
 
 #if defined(ARMNN_ANDROID_P)
@@ -53,12 +53,11 @@
 namespace armnn_driver
 {
 
-template <typename HalVersion>
-Return<void> ArmnnDriverImpl<HalVersion>::getSupportedOperations(
-        const armnn::IRuntimePtr& runtime,
-        const DriverOptions& options,
-        const HalModel& model,
-        HalGetSupportedOperations_cb cb)
+template<typename HalPolicy>
+Return<void> ArmnnDriverImpl<HalPolicy>::getSupportedOperations(const armnn::IRuntimePtr& runtime,
+                                                                const DriverOptions& options,
+                                                                const HalModel& model,
+                                                                HalGetSupportedOperations_cb cb)
 {
     ALOGV("ArmnnDriverImpl::getSupportedOperations()");
 
@@ -78,7 +77,7 @@
     }
 
     // Attempt to convert the model to an ArmNN input network (INetwork).
-    ModelToINetworkConverter<HalVersion> modelConverter(options.GetComputeDevice(),
+    ModelToINetworkConverter<HalPolicy> modelConverter(options.GetComputeDevice(),
-                                                        model,
-                                                        options.GetForcedUnsupportedOperations());
+                                                       model,
+                                                       options.GetForcedUnsupportedOperations());
 
@@ -102,8 +101,8 @@
     return Void();
 }
 
-template <typename HalVersion>
-Return<ErrorStatus> ArmnnDriverImpl<HalVersion>::prepareModel(
+template<typename HalPolicy>
+Return<ErrorStatus> ArmnnDriverImpl<HalPolicy>::prepareModel(
         const armnn::IRuntimePtr& runtime,
         const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
         const DriverOptions& options,
@@ -135,7 +134,7 @@
     // at this point we're being asked to prepare a model that we've already declared support for
     // and the operation indices may be different to those in getSupportedOperations anyway.
     set<unsigned int> unsupportedOperations;
-    ModelToINetworkConverter<HalVersion> modelConverter(options.GetComputeDevice(),
+    ModelToINetworkConverter<HalPolicy> modelConverter(options.GetComputeDevice(),
-                                                        model,
-                                                        unsupportedOperations);
+                                                       model,
+                                                       unsupportedOperations);
 
@@ -196,8 +195,8 @@
         return ErrorStatus::NONE;
     }
 
-    unique_ptr<ArmnnPreparedModel<HalVersion>> preparedModel(
-                new ArmnnPreparedModel<HalVersion>(
+    unique_ptr<ArmnnPreparedModel<HalPolicy>> preparedModel(
+                new ArmnnPreparedModel<HalPolicy>(
                     netId,
                     runtime.get(),
                     model,
@@ -228,19 +227,22 @@
     return ErrorStatus::NONE;
 }
 
-template <typename HalVersion>
-Return<DeviceStatus> ArmnnDriverImpl<HalVersion>::getStatus()
+template<typename HalPolicy>
+Return<DeviceStatus> ArmnnDriverImpl<HalPolicy>::getStatus()
 {
     ALOGV("ArmnnDriver::getStatus()");
 
     return DeviceStatus::AVAILABLE;
 }
 
-// Class template specializations
-template class ArmnnDriverImpl<HalVersion_1_0>;
+///
+/// Class template specializations
+///
 
-#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
-template class ArmnnDriverImpl<HalVersion_1_1>;
+template class ArmnnDriverImpl<hal_1_0::HalPolicy>;
+
+#if defined(ARMNN_ANDROID_NN_V1_1)
+template class ArmnnDriverImpl<hal_1_1::HalPolicy>;
 #endif
 
-} // namespace armnn_driver
+} // namespace armnn_driver
\ No newline at end of file
diff --git a/ArmnnDriverImpl.hpp b/ArmnnDriverImpl.hpp
index fbfbc43..7f1c9b9 100644
--- a/ArmnnDriverImpl.hpp
+++ b/ArmnnDriverImpl.hpp
@@ -5,41 +5,26 @@
 
 #pragma once
 
-#include <HalInterfaces.h>
-
 #include "DriverOptions.hpp"
 
-#include <armnn/ArmNN.hpp>
+#include <HalInterfaces.h>
 
 namespace armnn_driver
 {
 
-struct HalVersion_1_0
-{
-    using Model = ::android::hardware::neuralnetworks::V1_0::Model;
-    using getSupportedOperations_cb = ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb;
-};
-
-#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
-struct HalVersion_1_1
-{
-    using Model = ::android::hardware::neuralnetworks::V1_1::Model;
-    using getSupportedOperations_cb = ::android::hardware::neuralnetworks::V1_1::IDevice::getSupportedOperations_1_1_cb;
-};
-#endif
-
-template <typename HalVersion>
+template<typename HalPolicy>
 class ArmnnDriverImpl
 {
 public:
-    using HalModel = typename HalVersion::Model;
-    using HalGetSupportedOperations_cb = typename HalVersion::getSupportedOperations_cb;
+    using HalModel                     = typename HalPolicy::Model;
+    using HalGetSupportedOperations_cb = typename HalPolicy::getSupportedOperations_cb;
 
     static Return<void> getSupportedOperations(
             const armnn::IRuntimePtr& runtime,
             const DriverOptions& options,
             const HalModel& model,
             HalGetSupportedOperations_cb);
+
     static Return<ErrorStatus> prepareModel(
             const armnn::IRuntimePtr& runtime,
             const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
@@ -47,6 +32,7 @@
             const HalModel& model,
             const android::sp<IPreparedModelCallback>& cb,
             bool float32ToFloat16 = false);
+
     static Return<DeviceStatus> getStatus();
 };
 
diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp
index 7a275af..e4a8b14 100644
--- a/ArmnnPreparedModel.cpp
+++ b/ArmnnPreparedModel.cpp
@@ -295,11 +295,14 @@
     }
 }
 
-// Class template specializations
-template class ArmnnPreparedModel<HalVersion_1_0>;
+///
+/// Class template specializations
+///
 
-#ifdef ARMNN_ANDROID_NN_V1_1 // Using ::android::hardware::neuralnetworks::V1_1.
-template class ArmnnPreparedModel<HalVersion_1_1>;
+template class ArmnnPreparedModel<hal_1_0::HalPolicy>;
+
+#if defined(ARMNN_ANDROID_NN_V1_1)
+template class ArmnnPreparedModel<hal_1_1::HalPolicy>;
 #endif
 
-} // namespace armnn_driver
+} // namespace armnn_driver
\ No newline at end of file
diff --git a/ArmnnPreparedModel.hpp b/ArmnnPreparedModel.hpp
index a7f004c..3c4b32b 100644
--- a/ArmnnPreparedModel.hpp
+++ b/ArmnnPreparedModel.hpp
@@ -5,10 +5,9 @@
 
 #pragma once
 
-#include "RequestThread.hpp"
-
 #include "ArmnnDriver.hpp"
 #include "ArmnnDriverImpl.hpp"
+#include "RequestThread.hpp"
 
 #include <NeuralNetworks.h>
 #include <armnn/ArmNN.hpp>
diff --git a/ConversionUtils.cpp b/ConversionUtils.cpp
new file mode 100644
index 0000000..60d1a1f
--- /dev/null
+++ b/ConversionUtils.cpp
@@ -0,0 +1,172 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ConversionUtils.hpp"
+
+///
+/// Helper classes
+///
+
+namespace armnn_driver
+{
+
+LayerInputHandle::LayerInputHandle()
+    : m_OutputSlot(nullptr)
+    , m_Valid(false)
+{}
+
+LayerInputHandle::LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo)
+    : m_OutputSlot(outputSlot)
+    , m_Valid(valid)
+    , m_TensorInfo(tensorInfo)
+{}
+
+bool LayerInputHandle::IsValid() const
+{
+    return m_Valid;
+}
+
+void LayerInputHandle::Connect(armnn::IInputSlot& inputSlot)
+{
+    BOOST_ASSERT(IsValid());
+    if (m_OutputSlot)
+    {
+        m_OutputSlot->Connect(inputSlot);
+    }
+}
+
+const armnn::TensorInfo& LayerInputHandle::GetTensorInfo() const
+{
+    return m_TensorInfo;
+}
+
+ConstTensorPin::ConstTensorPin(bool optional)
+    : m_Optional(optional)
+{}
+
+ConstTensorPin::ConstTensorPin(const armnn::TensorInfo& tensorInfo,
+                               const void* valueStart,
+                               uint32_t numBytes,
+                               const armnn::PermutationVector& mappings)
+{
+    boost::ignore_unused(numBytes);
+    assert(tensorInfo.GetNumBytes() == numBytes);
+
+    const bool needsSwizzling = (mappings.GetSize() > 0);
+    if (needsSwizzling)
+    {
+        m_SwizzledTensorData.resize(tensorInfo.GetNumBytes());
+        SwizzleAndroidNn4dTensorToArmNn(tensorInfo, valueStart, m_SwizzledTensorData.data(), mappings);
+
+        m_ConstTensor = armnn::ConstTensor(armnnUtils::Permuted(tensorInfo, mappings), m_SwizzledTensorData.data());
+    }
+    else
+    {
+        m_ConstTensor = armnn::ConstTensor(tensorInfo, valueStart);
+    }
+}
+
+bool ConstTensorPin::IsValid() const
+{
+    return m_ConstTensor.GetMemoryArea() != nullptr;
+}
+
+bool ConstTensorPin::IsOptional() const
+{
+    return m_Optional;
+}
+
+const armnn::ConstTensor& ConstTensorPin::GetConstTensor() const
+{
+    return m_ConstTensor;
+}
+
+const armnn::ConstTensor* ConstTensorPin::GetConstTensorPtr() const
+{
+    if (IsValid() && m_ConstTensor.GetNumElements() > 0)
+    {
+        return &m_ConstTensor;
+    }
+    // tensor is either invalid, or has no elements (indicating an optional tensor that was not provided)
+    return nullptr;
+}
+
+///
+/// Utility functions
+///
+
+armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
+                                            ActivationFn activation,
+                                            armnn::IConnectableLayer* prevLayer,
+                                            ConversionData& data)
+{
+    BOOST_ASSERT(prevLayer->GetNumOutputSlots() == 1);
+
+    prevLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    armnn::IConnectableLayer* activationLayer = prevLayer;
+
+    if (activation != ActivationFn::kActivationNone)
+    {
+        armnn::ActivationDescriptor activationDesc;
+        switch (activation)
+        {
+            case ActivationFn::kActivationRelu:
+            {
+                activationDesc.m_Function = armnn::ActivationFunction::ReLu;
+                break;
+            }
+            case ActivationFn::kActivationRelu1:
+            {
+                activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+                activationDesc.m_A = 1.0f;
+                activationDesc.m_B = -1.0f;
+                break;
+            }
+            case ActivationFn::kActivationRelu6:
+            {
+                activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+                activationDesc.m_A = 6.0f;
+                break;
+            }
+            case ActivationFn::kActivationSigmoid:
+            {
+                activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
+                break;
+            }
+            case ActivationFn::kActivationTanh:
+            {
+                activationDesc.m_Function = armnn::ActivationFunction::TanH;
+                activationDesc.m_A = 1.0f;
+                activationDesc.m_B = 1.0f;
+                break;
+            }
+            default:
+            {
+                Fail("%s: Invalid activation enum value %i", __func__, activation);
+                return nullptr;
+            }
+        }
+
+        if (!IsLayerSupported(__func__,
+                              armnn::IsActivationSupported,
+                              data.m_Compute,
+                              prevLayer->GetOutputSlot(0).GetTensorInfo(),
+                              tensorInfo,
+                              activationDesc))
+        {
+            return nullptr;
+        }
+
+        activationLayer = data.m_Network->AddActivationLayer(activationDesc);
+
+        prevLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
+        activationLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+    }
+
+    return activationLayer;
+}
+
+} // namespace armnn_driver
\ No newline at end of file
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
new file mode 100644
index 0000000..a812183
--- /dev/null
+++ b/ConversionUtils.hpp
@@ -0,0 +1,1039 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+
+#include "armnn/src/armnnUtils/Permute.hpp"
+#include "Utils.hpp"
+
+#include <ActivationFunctor.h>
+#include <CpuExecutor.h>
+#include <OperationsUtils.h>
+
+#include <boost/assert.hpp>
+#include <boost/core/ignore_unused.hpp>
+#include <boost/test/tools/floating_point_comparison.hpp>
+
+#include <log/log.h>
+
+namespace armnn_driver
+{
+
+///
+/// Helper classes
+///
+
+struct ConversionData
+{
+    ConversionData(armnn::Compute compute)
+            : m_Compute(compute)
+            , m_Network(nullptr, nullptr)
+    {}
+
+    const armnn::Compute                      m_Compute;
+    armnn::INetworkPtr                        m_Network;
+    std::vector<armnn::IOutputSlot*>          m_OutputSlotForOperand;
+    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
+};
+
+class LayerInputHandle
+{
+public:
+    LayerInputHandle();
+    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);
+
+    bool IsValid() const;
+
+    void Connect(armnn::IInputSlot& inputSlot);
+
+    const armnn::TensorInfo& GetTensorInfo() const;
+
+private:
+    armnn::IOutputSlot* m_OutputSlot;
+    bool                m_Valid;
+    armnn::TensorInfo   m_TensorInfo;
+};
+
+class ConstTensorPin
+{
+public:
+    // Creates an invalid tensor pin (can be used to signal errors)
+    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
+    ConstTensorPin(bool optional = false);
+
+    // @param tensorInfo TensorInfo associated with the tensor.
+    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
+    // the model being converted.
+    // @param numBytes Number of bytes for the tensor data.
+    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
+                   const armnn::PermutationVector& mappings);
+
+    ConstTensorPin(const ConstTensorPin& other) = delete;
+    ConstTensorPin(ConstTensorPin&& other)      = default;
+
+    bool IsValid() const;
+    bool IsOptional() const;
+
+    const armnn::ConstTensor& GetConstTensor() const;
+    const armnn::ConstTensor* GetConstTensorPtr() const;
+
+private:
+    armnn::ConstTensor m_ConstTensor;
+
+    // Owned memory for swizzled tensor data, only required if the tensor needed
+    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
+    // the pools associated with the model being converted.
+    std::vector<uint8_t> m_SwizzledTensorData;
+
+    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
+    bool m_Optional;
+};
+
+} // namespace armnn_driver
+
+///
+/// Utility functions
+///
+
+namespace
+{
+
+using namespace armnn_driver;
+using namespace android::nn;
+
+// Convenience function to log the reason for failing to convert a model.
+// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
+template<class... Args>
+static bool Fail(const char* formatStr, Args&&... args)
+{
+    ALOGD(formatStr, std::forward<Args>(args)...);
+    return false;
+}
+
+// Convenience function to call an Is*Supported function and log caller name together with reason for lack of support.
+// Called as: IsLayerSupported(__func__, Is*Supported, a, b, c, d, e)
+template<typename IsLayerSupportedFunc, typename ... Args>
+bool IsLayerSupported(const char* funcName, IsLayerSupportedFunc f, Args&&... args)
+{
+    std::vector<char> unsupportedReason(1024+1);
+    bool isSupported = f(std::forward<Args>(args)..., unsupportedReason.data(), unsupportedReason.size()-1);
+    if (isSupported)
+    {
+        return true;
+    }
+    else
+    {
+        std::string sUnsupportedReason(unsupportedReason.data());
+        if (sUnsupportedReason.size() > 0)
+        {
+            ALOGD("%s: not supported by armnn: %s", funcName, sUnsupportedReason.c_str());
+        }
+        else
+        {
+            ALOGD("%s: not supported by armnn", funcName);
+        }
+        return false;
+    }
+}
+
+armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
+{
+    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
+}
+
+inline bool IsOperandTypeSupportedForTensors(OperandType type)
+{
+    return type == OperandType::TENSOR_FLOAT32      ||
+           type == OperandType::TENSOR_QUANT8_ASYMM ||
+           type == OperandType::TENSOR_INT32;
+}
+
+void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1, armnn::IConnectableLayer* startLayer,
+                     armnn::INetwork& network)
+{
+    BOOST_ASSERT(startLayer != nullptr);
+    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
+    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
+
+    if (inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
+    {
+        // If the number of dimensions do not match then we need to add degenerate dimensions
+        // to the "smaller" tensor using a reshape:
+        //   Small  Big
+        //     |     |
+        //  Reshape  |
+        //      \   /
+        //       Add
+        bool input0IsBigger = inputTensorInfo0.GetNumDimensions() > inputTensorInfo1.GetNumDimensions();
+
+        LayerInputHandle& smallTensorHandle = input0IsBigger ? input1 : input0;
+        const armnn::TensorInfo& smallTensorDims = smallTensorHandle.GetTensorInfo();
+
+        LayerInputHandle& bigTensorHandle = input0IsBigger ? input0 : input1;
+        const armnn::TensorInfo& bigTensorDims = bigTensorHandle.GetTensorInfo();
+
+        const unsigned int bigTensorDimsNumber = bigTensorDims.GetNumDimensions();
+        std::vector<unsigned int> reshapedDims(bigTensorDimsNumber, 1);
+        unsigned int sizeDifference = bigTensorDimsNumber - smallTensorDims.GetNumDimensions();
+        for (unsigned i = sizeDifference; i < bigTensorDimsNumber; ++i)
+        {
+            reshapedDims[i] = smallTensorDims.GetShape()[i-sizeDifference];
+        }
+        armnn::TensorInfo reshapedInfo = smallTensorDims;
+        reshapedInfo.SetShape(armnn::TensorShape{ static_cast<unsigned int>(reshapedDims.size()),
+                                                  reshapedDims.data() });
+
+        armnn::ReshapeDescriptor reshapeDesc;
+        reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
+        armnn::IConnectableLayer* const reshapeLayer = network.AddReshapeLayer(reshapeDesc);
+        smallTensorHandle.Connect(reshapeLayer->GetInputSlot(0));
+        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
+
+        // Connect the outputs from new reshape and original input layer
+        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
+        bigTensorHandle.Connect(startLayer->GetInputSlot(1));
+    }
+    else
+    {
+        input0.Connect(startLayer->GetInputSlot(0));
+        input1.Connect(startLayer->GetInputSlot(1));
+    }
+}
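+
+// Example of the reshape branch above (illustrative shapes): broadcasting a bias of shape [4]
+// against an input of shape [2, 3, 4] reshapes the bias to [1, 1, 4] first, so that both
+// tensors have the same rank before they are connected to the binary layer.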
+
+void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
+                 android::nn::PaddingScheme scheme)
+{
+    int32_t padHead;
+    int32_t padTail;
+    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
+    outPadHead = boost::numeric_cast<uint32_t>(padHead);
+    outPadTail = boost::numeric_cast<uint32_t>(padTail);
+}
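+
+// Worked example for CalcPadding (illustrative values, SAME padding scheme): for input=7,
+// kernel=3, stride=2 the output size is ceil(7/2) = 4, the required input size is
+// (4-1)*2 + 3 = 9, so the total padding is 2, split as outPadHead=1 and outPadTail=1.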
+
+Shape GetOperandShape(const Operand& operand)
+{
+    Shape shape;
+    shape.type = operand.type;
+    shape.dimensions = operand.dimensions;
+    shape.scale = operand.scale;
+    shape.offset = operand.zeroPoint;
+    return shape;
+}
+
+// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
+// what AndroidNN requires. However, for some of the AndroidNN tests the values don't exactly match, so
+// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies, as it is up to the user
+// (us, in this case) to ensure they match.
+void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
+                                   const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
+{
+    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
+    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
+    {
+        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
+        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
+        {
+            ALOGW("Bias quantization scale has been modified to match input*weights");
+            biasInfo.SetQuantizationScale(expectedBiasScale);
+        }
+    }
+}
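+
+// For example (illustrative values): inputScale=0.5 and weightScale=0.25 give an expected
+// bias scale of 0.125; a bias scale of 0.124 is within the 1% tolerance and is rewritten
+// to 0.125, while a bias scale of 0.2 is left unchanged and ArmNN is expected to reject it.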
+
+// 4D Tensor Permutations
+const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
+const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
+const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
+const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });
+
+// 3D Permutation Vectors
+const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
+const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
+const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
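+
+// For example (assuming ArmNN's PermutationVector maps source dimension i to destination
+// dimension mappings[i]): NHWCToArmNN turns an NHWC shape [1, 16, 8, 3] into the
+// NCHW shape [1, 3, 16, 8].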
+
+template<typename OSlot>
+armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
+                                          const armnn::PermutationVector& mappings)
+{
+    // Add swizzle layer
+    armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
+
+    BOOST_ASSERT(layer != nullptr);
+
+    // Connect input to swizzle layer
+    input.Connect(layer->GetInputSlot(0));
+
+    // Setup swizzled output
+    const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
+    layer->GetOutputSlot(0).SetTensorInfo(outInfo);
+
+    return *layer;
+}
+
+void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
+{
+    // Add swizzle layer
+    armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
+    // Connect swizzled input to layer
+    swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
+}
+
+armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
+{
+    // Add deswizzle layer
+    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
+    return deswizzleLayer;
+}
+
+// Only suitable for input/output slot index 0; for other slots, use SwizzleIn and DeswizzleOut directly
+armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
+                                                LayerInputHandle& input,
+                                                armnn::IConnectableLayer& firstLayer,
+                                                armnn::IConnectableLayer& lastLayer)
+{
+    SwizzleIn(network, input, firstLayer, 0);
+    return DeswizzleOut(network, lastLayer, 0);
+}
+
+// Only suitable for input/output slot index 0; for other slots, use SwizzleIn and DeswizzleOut directly
+armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
+                                                armnn::IConnectableLayer& layer)
+{
+    return SwizzleInDeswizzleOut(network, input, layer, layer);
+}
+
+bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
+                               const armnn::TensorShape & outputShape,
+                               uint32_t concatDim)
+{
+    // Validate the output shape is correct given the input shapes (which have just been validated)
+    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
+    if (outputShape.GetNumDimensions() != numDimensions)
+    {
+        return Fail("%s: Output shape has wrong number of dimensions", __func__);
+    }
+
+    unsigned int outputSizeAlongConcatenatedDimension = 0;
+    for (unsigned int i = 0; i < inputShapes.size(); i++)
+    {
+        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
+    }
+
+    for (unsigned int i = 0; i < numDimensions; ++i)
+    {
+        if (i == concatDim)
+        {
+            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
+            {
+                return Fail(
+                        "%s: Invalid output shape for dimension %d (%d != %d)",
+                        __func__,
+                        i,
+                        outputShape[i],
+                        outputSizeAlongConcatenatedDimension);
+            }
+        }
+        else
+        {
+            if (outputShape[i] != inputShapes[0][i])
+            {
+                return Fail("%s: Invalid output shape", __func__);
+            }
+        }
+    }
+
+    return true;
+}
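+
+// For example (illustrative shapes): concatenating inputs of shapes [1, 2, 3] and [1, 4, 3]
+// along concatDim=1 is only valid if the output shape is [1, 6, 3]; every other dimension
+// must match the inputs exactly.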
+
+bool RequiresReshape(armnn::TensorShape & inputShape)
+{
+    return inputShape.GetNumDimensions() < 3;
+}
+
+template<typename OSlot>
+armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, OSlot& inputLayer,
+                                          armnn::TensorInfo reshapeInfo)
+{
+    armnn::ReshapeDescriptor reshapeDescriptor;
+    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
+
+    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
+    BOOST_ASSERT(reshapeLayer != nullptr);
+
+    // Attach the input layer to the reshape layer
+    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
+    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
+
+    return *reshapeLayer;
+}
+
+void SwizzleInputs(armnn::INetwork& network,
+                   std::vector<LayerInputHandle>& inputs,
+                   std::vector<armnn::TensorShape>& inputShapes,
+                   const armnn::PermutationVector& mapping)
+{
+    if (!mapping.IsEqual(IdentityPermutation4D))
+    {
+        size_t nInputs = inputs.size();
+        for (size_t i=0; i<nInputs; ++i)
+        {
+            // add swizzle layer
+            armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
+            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
+            auto& outputInfo = outputSlot.GetTensorInfo();
+            // replace inputs with the swizzled ones
+            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
+            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
+        }
+    }
+}
+
+void CreatePermutationParameters(const unsigned int numberOfDimensions,
+                                 int32_t & concatDimension,
+                                 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
+{
+    BOOST_ASSERT(numberOfDimensions >= 3);
+
+    // ArmNN uses Compute Library subtensors to perform concatenation
+    // This only works when concatenating along dimension 0 or 1 for a 4-D tensor,
+    // or along dimension 0 for a 3-D tensor.
+    if (numberOfDimensions == 4)
+    {
+        if (concatDimension == 3)
+        {
+            concatDimension = 1;
+            permutationPair = std::make_pair(NHWCToArmNN, ArmNNToNHWC);
+        }
+        else if (concatDimension == 2)
+        {
+            concatDimension = 1;
+            permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
+        }
+        else
+        {
+            permutationPair = std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
+        }
+
+    }
+    else if (numberOfDimensions == 3)
+    {
+        if (concatDimension == 2)
+        {
+            concatDimension = 0;
+            permutationPair = std::make_pair(RotateTensorRight, RotateTensorLeft);
+        }
+        else if (concatDimension == 1)
+        {
+            concatDimension = 0;
+            permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
+        }
+        else
+        {
+            permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
+        }
+    }
+}
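+
+// Worked example: for a 4-D NHWC tensor with concatDimension == 3 (channels), the pair
+// (NHWCToArmNN, ArmNNToNHWC) moves the channels to dimension 1, so the concatenation
+// is performed along dimension 1 in NCHW order and the result is permuted back to
+// NHWC afterwards.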
+
+} // anonymous namespace
+
+namespace armnn_driver
+{
+
+/// Creates an ArmNN activation layer and connects it to the given layer, if the
+/// passed in AndroidNN activation function requires it.
+/// @return The end layer of the sequence of layers built for the given AndroidNN
+/// activation function, or nullptr if an error occurred (e.g. unsupported activation).
+/// Note that the end layer matches the input layer if no activation is required
+/// (the sequence of layers has length 1).
+armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
+                                            ActivationFn activation,
+                                            armnn::IConnectableLayer* prevLayer,
+                                            ConversionData& data);
+
+} // namespace armnn_driver
+
+///
+/// Utility templates
+///
+
+namespace armnn_driver
+{
+
+using namespace android::nn;
+
+template<typename HalOperation, typename HalModel>
+const Operand* GetInputOperand(const HalOperation& operation, uint32_t inputIndex, const HalModel& model)
+{
+    if (inputIndex >= operation.inputs.size())
+    {
+        Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
+        return nullptr;
+    }
+
+    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
+    return &model.operands[operation.inputs[inputIndex]];
+}
+
+template<typename HalOperation, typename HalModel>
+const Operand* GetOutputOperand(const HalOperation& operation, uint32_t outputIndex, const HalModel& model)
+{
+    if (outputIndex >= operation.outputs.size())
+    {
+        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
+        return nullptr;
+    }
+
+    // Model should have been validated beforehand
+    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());
+
+    return &model.operands[operation.outputs[outputIndex]];
+}
+
+template<typename HalModel>
+ConstTensorPin ConvertOperandToConstTensorPin(const Operand& operand,
+                                              const HalModel& model,
+                                              const ConversionData& data,
+                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
+                                              const armnn::TensorShape* overrideTensorShape = nullptr,
+                                              bool optional = false)
+{
+    if (!IsOperandTypeSupportedForTensors(operand.type))
+    {
+        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
+        return ConstTensorPin();
+    }
+
+    if (operand.lifetime != OperandLifeTime::CONSTANT_COPY && operand.lifetime != OperandLifeTime::CONSTANT_REFERENCE)
+    {
+        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
+        return ConstTensorPin();
+    }
+
+    const void* const valueStart = GetOperandValueReadOnlyAddress(operand, model, data);
+    if (!valueStart)
+    {
+        if (optional)
+        {
+            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
+            return ConstTensorPin(true);
+        }
+        // mandatory tensor with no values
+        Fail("%s: failed to get operand address", __func__);
+        return ConstTensorPin();
+    }
+
+    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
+    if (overrideTensorShape != nullptr)
+    {
+        tensorInfo.SetShape(*overrideTensorShape);
+    }
+    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
+}
+
+template<typename HalOperation, typename HalModel>
+ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
+                                                     uint32_t inputIndex,
+                                                     const HalModel& model,
+                                                     const ConversionData& data,
+                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
+                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
+                                                     bool optional = false)
+{
+    const Operand* operand = GetInputOperand(operation, inputIndex, model);
+    if (!operand)
+    {
+        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
+        return ConstTensorPin();
+    }
+    return ConvertOperandToConstTensorPin(*operand,
+                                          model,
+                                          data,
+                                          dimensionMappings,
+                                          overrideTensorShape,
+                                          optional);
+}
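+
+// Illustrative use (hypothetical convolution conversion; the permutation choice is an
+// assumption for the example): reading the weights operand (input 1) of a CONV_2D
+// operation as a constant tensor, swizzled from NHWC to ArmNN's layout:
+//
+//     const ConstTensorPin weightsPin =
+//         ConvertOperationInputToConstTensorPin(operation, 1, model, data, NHWCToArmNN);
+//     if (!weightsPin.IsValid()) { return Fail("%s: invalid weights", __func__); }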
+
+template<typename HalModel>
+const void* GetOperandValueReadOnlyAddress(const Operand& operand, const HalModel& model, const ConversionData& data)
+{
+    const void* valueStart = nullptr;
+
+    switch (operand.lifetime)
+    {
+        case OperandLifeTime::CONSTANT_COPY:
+        {
+            // Constant found in model.operandValues
+            valueStart = &model.operandValues[operand.location.offset];
+            break;
+        }
+        case OperandLifeTime::CONSTANT_REFERENCE:
+        {
+            // Constant specified via a Memory object
+            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
+            break;
+        }
+        default:
+        {
+            // Unsupported/invalid (e.g. can't get value of an input to the model)
+            Fail("%s: unsupported/invalid operand lifetime: %s",
+                 __func__, toString(operand.lifetime).c_str());
+            valueStart = nullptr;
+        }
+    }
+
+    return valueStart;
+}
+
+template<typename HalOperation, typename HalModel, typename OutputType>
+bool GetInputScalar(const HalOperation& operation,
+                    uint32_t inputIndex,
+                    OperandType type,
+                    OutputType& outValue,
+                    const HalModel& model,
+                    const ConversionData& data)
+{
+    const Operand* operand = GetInputOperand(operation, inputIndex, model);
+    if (!operand)
+    {
+        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
+    }
+
+    if (operand->type != type)
+    {
+        return Fail("%s: unexpected operand type: %s (should be %s)",
+                    __func__, toString(operand->type).c_str(), toString(type).c_str());
+    }
+
+    if (operand->location.length != sizeof(OutputType))
+    {
+        return Fail("%s: incorrect operand location length: %i (should be %i)",
+                    __func__, operand->location.length, sizeof(OutputType));
+    }
+
+    const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
+    if (!valueAddress)
+    {
+        return Fail("%s: failed to get address for operand", __func__);
+    }
+
+    outValue = *(static_cast<const OutputType*>(valueAddress));
+    return true;
+}
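+
+// Illustrative use (hypothetical pooling descriptor 'desc'): reading the x-stride from
+// input index 2 of an operation:
+//
+//     if (!GetInputScalar(operation, 2, OperandType::INT32, desc.m_StrideX, model, data))
+//     {
+//         return Fail("%s: invalid stride", __func__);
+//     }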
+
+template<typename HalOperation, typename HalModel>
+bool GetInputInt32(const HalOperation& operation,
+                   uint32_t inputIndex,
+                   int32_t& outValue,
+                   const HalModel& model,
+                   const ConversionData& data)
+{
+    return GetInputScalar(operation, inputIndex, OperandType::INT32, outValue, model, data);
+}
+
+template<typename HalOperation, typename HalModel>
+bool GetInputFloat32(const HalOperation& operation,
+                     uint32_t inputIndex,
+                     float& outValue,
+                     const HalModel& model,
+                     const ConversionData& data)
+{
+    return GetInputScalar(operation, inputIndex, OperandType::FLOAT32, outValue, model, data);
+}
+
+template<typename HalOperation, typename HalModel>
+bool GetInputActivationFunctionImpl(const HalOperation& operation,
+                                    uint32_t inputIndex,
+                                    OperandType type,
+                                    ActivationFn& outActivationFunction,
+                                    const HalModel& model,
+                                    const ConversionData& data)
+{
+    if (type != OperandType::INT32 && type != OperandType::TENSOR_INT32)
+    {
+        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
+                    __func__,
+                    toString(type).c_str(),
+                    toString(OperandType::INT32).c_str(),
+                    toString(OperandType::TENSOR_INT32).c_str());
+    }
+
+    int32_t activationFunctionAsInt;
+    if (!GetInputScalar(operation, inputIndex, type, activationFunctionAsInt, model, data))
+    {
+        return Fail("%s: failed to get activation input value", __func__);
+    }
+    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
+    return true;
+}
+
+template<typename HalOperation, typename HalModel>
+bool GetInputActivationFunction(const HalOperation& operation,
+                                uint32_t inputIndex,
+                                ActivationFn& outActivationFunction,
+                                const HalModel& model,
+                                const ConversionData& data)
+{
+    return GetInputActivationFunctionImpl(operation,
+                                          inputIndex,
+                                          OperandType::INT32,
+                                          outActivationFunction,
+                                          model,
+                                          data);
+}
+
+template<typename HalOperation, typename HalModel>
+bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
+                                          uint32_t inputIndex,
+                                          ActivationFn& outActivationFunction,
+                                          const HalModel& model,
+                                          const ConversionData& data)
+{
+    // This only accepts a 1-D tensor of size 1
+    return GetInputActivationFunctionImpl(operation,
+                                          inputIndex,
+                                          OperandType::INT32,
+                                          outActivationFunction,
+                                          model,
+                                          data);
+}
+
+template<typename HalOperation, typename HalModel>
+bool GetOptionalInputActivation(const HalOperation& operation,
+                                uint32_t inputIndex,
+                                ActivationFn& activationFunction,
+                                const HalModel& model,
+                                const ConversionData& data)
+{
+    if (operation.inputs.size() <= inputIndex)
+    {
+        activationFunction = ActivationFn::kActivationNone;
+    }
+    else
+    {
+        if (!GetInputActivationFunction(operation, inputIndex, activationFunction, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs", __func__);
+        }
+    }
+    return true;
+}
+
+template<typename HalModel>
+bool GetTensorInt32Values(const Operand& operand,
+                          std::vector<int32_t>& outValues,
+                          const HalModel& model,
+                          const ConversionData& data)
+{
+    if (operand.type != OperandType::TENSOR_INT32)
+    {
+        return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
+    }
+
+    const void* startAddress = GetOperandValueReadOnlyAddress(operand, model, data);
+    if (!startAddress)
+    {
+        return Fail("%s: failed to get operand address", __func__, operand.type);
+    }
+
+    // Check number of bytes is sensible
+    const uint32_t numBytes = operand.location.length;
+    if (numBytes % sizeof(int32_t) != 0)
+    {
+        return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
+                    __func__, numBytes, sizeof(int32_t));
+    }
+
+    outValues.resize(numBytes / sizeof(int32_t));
+    memcpy(outValues.data(), startAddress, numBytes);
+    return true;
+}
+
+template<typename HalOperation, typename HalModel>
+bool GetInputPaddingScheme(const HalOperation& operation,
+                           uint32_t inputIndex,
+                           PaddingScheme& outPaddingScheme,
+                           const HalModel& model,
+                           const ConversionData& data)
+{
+    int32_t paddingSchemeAsInt;
+    if (!GetInputInt32(operation, inputIndex, paddingSchemeAsInt, model, data))
+    {
+        return Fail("%s: failed to get padding scheme input value", __func__);
+    }
+
+    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
+    return true;
+}
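+
+// Note: the NNAPI encodes the scheme as an INT32 scalar (1 == SAME, 2 == VALID);
+// CalcPadding later expands it into explicit head/tail padding values.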
+
+template<typename HalOperation, typename HalModel>
+LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
+                                           uint32_t inputIndex,
+                                           const HalModel& model,
+                                           ConversionData& data)
+{
+    const Operand* operand = GetInputOperand(operation, inputIndex, model);
+    if (!operand)
+    {
+        Fail("%s: failed to get input operand %i", __func__, inputIndex);
+        return LayerInputHandle();
+    }
+
+    if (!IsOperandTypeSupportedForTensors(operand->type))
+    {
+        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
+        return LayerInputHandle();
+    }
+
+    armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
+
+    switch (operand->lifetime)
+    {
+        case OperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
+        case OperandLifeTime::MODEL_INPUT:
+        {
+            // The tensor is either an operand internal to the model, or a model input.
+            // It can be associated with an ArmNN output slot for an existing layer.
+
+            // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
+            const uint32_t operandIndex = operation.inputs[inputIndex];
+            return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
+        }
+        case OperandLifeTime::CONSTANT_COPY:
+        case OperandLifeTime::CONSTANT_REFERENCE:
+        {
+            // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
+            ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand, model, data);
+            if (tensorPin.IsValid())
+            {
+                if (!IsLayerSupported(__func__,
+                                      armnn::IsConstantSupported,
+                                      data.m_Compute,
+                                      tensorPin.GetConstTensor().GetInfo()))
+                {
+                    return LayerInputHandle();
+                }
+
+                armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
+                armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
+                outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());
+
+                return LayerInputHandle(true, &outputSlot, operandTensorInfo);
+            }
+            else
+            {
+                Fail("%s: invalid operand tensor", __func__);
+                return LayerInputHandle();
+            }
+        }
+        default:
+        {
+            // Unsupported lifetime for an input tensor
+            Fail("%s: unsupported lifetime for input tensor: %s",
+                 __func__, toString(operand->lifetime).c_str());
+            return LayerInputHandle();
+        }
+    }
+}
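+
+// Illustrative use (hypothetical 'layer' variable): converting operand 0 of an
+// operation and wiring it into a newly created layer:
+//
+//     LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+//     if (!input.IsValid()) { return Fail("%s: Input 0 is invalid", __func__); }
+//     input.Connect(layer->GetInputSlot(0));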
+
+template<typename HalOperation, typename HalModel>
+bool ConvertToActivation(const HalOperation& operation,
+                         const char* operationName,
+                         const armnn::ActivationDescriptor& activationDesc,
+                         const HalModel& model,
+                         ConversionData& data)
+{
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Input 0 is invalid", operationName);
+    }
+
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return false;
+    }
+    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
+    if (!IsLayerSupported(__func__,
+                          armnn::IsActivationSupported,
+                          data.m_Compute,
+                          input.GetTensorInfo(),
+                          outInfo,
+                          activationDesc))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
+    BOOST_ASSERT(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+}
+
+template<typename HalOperation, typename HalModel>
+bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
+                                  uint32_t operationOutputIndex,
+                                  armnn::IConnectableLayer& layer,
+                                  uint32_t layerOutputIndex,
+                                  const HalModel& model,
+                                  ConversionData& data)
+{
+    const Operand* outputOperand = GetOutputOperand(operation, operationOutputIndex, model);
+    if ((outputOperand == nullptr) || (layerOutputIndex >= layer.GetNumOutputSlots()))
+    {
+        return false;
+    }
+
+    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
+
+    const uint32_t operandIndex = operation.outputs[operationOutputIndex];
+    data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
+
+    outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
+
+    return true;
+}
+
+template<typename HalOperation, typename HalModel>
+bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
+                                  uint32_t outputIndex,
+                                  armnn::IConnectableLayer& layer,
+                                  const HalModel& model,
+                                  ConversionData& data)
+{
+    return SetupAndTrackLayerOutputSlot(operation, outputIndex, layer, outputIndex, model, data);
+}
+
+template<typename HalOperation, typename HalModel>
+bool ConvertPooling2d(const HalOperation& operation,
+                      const char* operationName,
+                      armnn::PoolingAlgorithm poolType,
+                      const HalModel& model,
+                      ConversionData& data)
+{
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Could not read input 0", operationName);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    const armnn::TensorInfo swizzledInputInfo  = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
+    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);
+
+    armnn::Pooling2dDescriptor desc;
+    desc.m_PoolType = poolType;
+    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
+
+    ActivationFn activation;
+
+    if (operation.inputs.size() == 7)
+    {
+        // one input, 6 parameters (padding scheme, stride x, stride y, pool width, pool height, activation type)
+        android::nn::PaddingScheme scheme;
+        if (!GetInputPaddingScheme(operation, 1, scheme, model, data)
+            || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_StrideX, model, data)
+            || !GetInputScalar(operation, 3, OperandType::INT32, desc.m_StrideY, model, data)
+            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PoolWidth, model, data)
+            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PoolHeight, model, data)
+            || !GetInputActivationFunction(operation, 6, activation, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs", operationName);
+        }
+
+        const unsigned int inputWidth  = swizzledInputInfo.GetShape()[3];
+        const unsigned int inputHeight = swizzledInputInfo.GetShape()[2];
+
+        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
+        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
+    }
+    else
+    {
+        // one input, 9 parameters (padding left/right/top/bottom, stride x, stride y, width, height, activation type)
+        if (!GetInputScalar(operation, 1, OperandType::INT32, desc.m_PadLeft, model, data)
+            || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_PadRight, model, data)
+            || !GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadTop, model, data)
+            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadBottom, model, data)
+            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideX, model, data)
+            || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_StrideY, model, data)
+            || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_PoolWidth, model, data)
+            || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_PoolHeight, model, data)
+            || !GetInputActivationFunction(operation, 9, activation, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs", operationName);
+        }
+    }
+
+    // ArmNN does not accept a pool size of 1, but the driver is still expected to
+    // handle it; map it to a trivial single-view splitter instead.
+    armnn::IConnectableLayer* startLayer = nullptr;
+    if (desc.m_PoolWidth != 1 || desc.m_PoolHeight != 1)
+    {
+        if (!IsLayerSupported(__func__,
+                              armnn::IsPooling2dSupported,
+                              data.m_Compute,
+                              swizzledInputInfo,
+                              swizzledOutputInfo,
+                              desc))
+        {
+            return false;
+        }
+
+        startLayer = data.m_Network->AddPooling2dLayer(desc);
+    }
+    else
+    {
+        const unsigned int numDims = swizzledOutputInfo.GetNumDimensions();
+
+        armnn::ViewsDescriptor viewsDesc(1, numDims);
+
+        for (unsigned int i = 0; i < numDims; ++i)
+        {
+            viewsDesc.SetViewOriginCoord(0, i, 0);
+            viewsDesc.SetViewSize(0, i, swizzledOutputInfo.GetShape()[i]);
+        }
+
+        if (!IsLayerSupported(__func__,
+                              armnn::IsSplitterSupported,
+                              data.m_Compute,
+                              swizzledInputInfo,
+                              viewsDesc))
+        {
+            return false;
+        }
+
+        startLayer = data.m_Network->AddSplitterLayer(viewsDesc);
+    }
+
+    armnn::IConnectableLayer* endLayer = ProcessActivation(swizzledOutputInfo, activation, startLayer, data);
+
+    if (endLayer != nullptr)
+    {
+        armnn::IConnectableLayer& outSwizzleLayer =
+                SwizzleInDeswizzleOut(*data.m_Network, input, *startLayer, *endLayer);
+        return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer, model, data);
+    }
+    else
+    {
+        return Fail("%s: ProcessActivation failed", operationName);
+    }
+}
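+
+// Illustrative use from a HalPolicy (the wrapper signature is an assumption for the
+// example): average pooling maps directly onto this helper:
+//
+//     bool ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
+//     {
+//         return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
+//     }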
+
+} // namespace armnn_driver
\ No newline at end of file
diff --git a/ModelToINetworkConverter.cpp b/ModelToINetworkConverter.cpp
index 70873b8..1a63280 100644
--- a/ModelToINetworkConverter.cpp
+++ b/ModelToINetworkConverter.cpp
@@ -6,466 +6,19 @@
 #define LOG_TAG "ArmnnDriver"
 
 #include "ModelToINetworkConverter.hpp"
-#include <OperationsUtils.h>
-
-#include <armnn/LayerSupport.hpp>
-#include <Permute.hpp>
 
 #include <log/log.h>
-#include <cassert>
-
-#include <boost/format.hpp>
-#include <boost/core/ignore_unused.hpp>
-#include <boost/test/tools/floating_point_comparison.hpp>
-#include <boost/cast.hpp>
-#include <boost/optional.hpp>
-
-using namespace android::hardware;
 
 namespace armnn_driver
 {
 
-class LayerInputHandle
-{
-public:
-    LayerInputHandle()
-        : m_OutputSlot(nullptr)
-        , m_Valid(false)
-    {}
-
-    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo)
-        : m_OutputSlot(outputSlot)
-        , m_Valid(valid)
-        , m_TensorInfo(tensorInfo)
-    {}
-
-    bool IsValid() const { return m_Valid; }
-    void Connect(armnn::IInputSlot& inputSlot)
-    {
-        assert(IsValid());
-
-        if (m_OutputSlot)
-        {
-            m_OutputSlot->Connect(inputSlot);
-        }
-    }
-    const armnn::TensorInfo& GetTensorInfo() const { return m_TensorInfo; }
-
-private:
-    armnn::IOutputSlot* m_OutputSlot;
-    bool m_Valid;
-    armnn::TensorInfo m_TensorInfo;
-};
-
-} // namespace armnn_driver
-
-namespace
-{
-
-using namespace armnn_driver;
-using namespace android::nn;
-
-// Convenience function to log the reason for failing to convert a model.
-// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
-template<class... Args>
-static bool Fail(const char* formatStr, Args&&... args)
-{
-    ALOGD(formatStr, std::forward<Args>(args)...);
-    return false;
-}
-
-// Convenience function to call an Is*Supported function and log caller name together with reason for lack of support.
-// Called as: IsLayerSupported(__func__, Is*Supported, a, b, c, d, e)
-template<typename IsLayerSupportedFunc, typename ... Args>
-bool IsLayerSupported(const char* funcName, IsLayerSupportedFunc f, Args&&... args)
-{
-    std::vector<char> unsupportedReason(1024+1);
-    bool isSupported = f(std::forward<Args>(args)..., unsupportedReason.data(), unsupportedReason.size()-1);
-    if(isSupported)
-    {
-        return true;
-    }
-    else
-    {
-        std::string sUnsupportedReason(unsupportedReason.data());
-        if (sUnsupportedReason.size() > 0)
-        {
-            ALOGD("%s: not supported by armnn: %s", funcName, sUnsupportedReason.c_str());
-        } else
-        {
-            ALOGD("%s: not supported by armnn", funcName);
-        }
-        return false;
-    }
-}
-
-armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
-{
-    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
-}
-
-inline bool IsOperandTypeSupportedForTensors(OperandType type)
-{
-    return type == OperandType::TENSOR_FLOAT32      ||
-           type == OperandType::TENSOR_QUANT8_ASYMM ||
-           type == OperandType::TENSOR_INT32;
-}
-
-void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1, armnn::IConnectableLayer* startLayer,
-                     armnn::INetwork& network)
-{
-    BOOST_ASSERT(startLayer != nullptr);
-    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
-    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
-
-    if (inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
-    {
-        // If the number of dimensions do not match then we need to add degenerate dimensions
-        // to the "smaller" tensor using a reshape:
-        //   Small  Big
-        //     |     |
-        //  Reshape  |
-        //      \   /
-        //       Add
-        bool input0IsBigger = inputTensorInfo0.GetNumDimensions() > inputTensorInfo1.GetNumDimensions();
-
-        LayerInputHandle& smallTensorHandle = input0IsBigger ? input1 : input0;
-        const armnn::TensorInfo& smallTensorDims = smallTensorHandle.GetTensorInfo();
-
-        LayerInputHandle& bigTensorHandle =  input0IsBigger ? input0 : input1;
-        const armnn::TensorInfo& bigTensorDims = bigTensorHandle.GetTensorInfo();
-
-        const unsigned int bigTensorDimsNumber = bigTensorDims.GetNumDimensions();
-        std::vector<unsigned int> reshapedDims(bigTensorDimsNumber, 1);
-        unsigned int sizeDifference = bigTensorDimsNumber - smallTensorDims.GetNumDimensions();
-        for (unsigned i = sizeDifference; i < bigTensorDimsNumber; ++i)
-        {
-            reshapedDims[i] = smallTensorDims.GetShape()[i-sizeDifference];
-        }
-        armnn::TensorInfo reshapedInfo = smallTensorDims;
-        reshapedInfo.SetShape(armnn::TensorShape{ static_cast<unsigned int>(reshapedDims.size()),
-                                                  reshapedDims.data() });
-
-        armnn::ReshapeDescriptor reshapeDesc;
-        reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
-        armnn::IConnectableLayer* const reshapeLayer = network.AddReshapeLayer(reshapeDesc);
-        smallTensorHandle.Connect(reshapeLayer->GetInputSlot(0));
-        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
-
-        // Connect the outputs from new reshape and original input layer
-        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
-        bigTensorHandle.Connect(startLayer->GetInputSlot(1));
-    }
-    else
-    {
-        input0.Connect(startLayer->GetInputSlot(0));
-        input1.Connect(startLayer->GetInputSlot(1));
-    }
-}
-
-void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
-                 android::nn::PaddingScheme scheme)
-{
-    int32_t padHead;
-    int32_t padTail;
-    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
-    outPadHead = boost::numeric_cast<uint32_t>(padHead);
-    outPadTail = boost::numeric_cast<uint32_t>(padTail);
-}
-
-Shape GetOperandShape(const Operand& operand)
-{
-    Shape shape;
-    shape.type = operand.type;
-    shape.dimensions = operand.dimensions;
-    shape.scale = operand.scale;
-    shape.offset = operand.zeroPoint;
-    return shape;
-}
-
-// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
-// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
-// we accept some tolerance. We don't want to ArmNN itself to accept these inconsistencies as it is up to the user
-// (us, in this case) to ensure they match.
-void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
-    const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
-{
-    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
-    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
-    {
-        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
-        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
-        {
-            ALOGW("Bias quantization scale has been modified to match input*weights");
-            biasInfo.SetQuantizationScale(expectedBiasScale);
-        }
-    }
-}
-
-// 4D Tensor Permutations
-const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
-const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
-const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
-const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });
-
-// 3D Permutation Vectors
-const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
-const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
-const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
-
-template<typename OSlot>
-armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
-                                          const armnn::PermutationVector& mappings)
-{
-    // Add swizzle layer
-    armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
-
-    assert(layer != nullptr);
-
-    // Connect input to swizzle layer
-    input.Connect(layer->GetInputSlot(0));
-
-    // Setup swizzled output
-    const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
-    layer->GetOutputSlot(0).SetTensorInfo(outInfo);
-
-    return *layer;
-}
-
-void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
-{
-    // Add swizzle layer
-    armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
-    // Connect swizzled input to layer
-    swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
-}
-
-armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
-{
-    // Add deswizzle layer
-    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
-    return deswizzleLayer;
-}
-
-// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
-armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
-                                                LayerInputHandle& input,
-                                                armnn::IConnectableLayer& firstLayer,
-                                                armnn::IConnectableLayer& lastLayer)
-{
-    SwizzleIn(network, input, firstLayer, 0);
-    return DeswizzleOut(network, lastLayer, 0);
-}
-
-// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
-armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
-                                                armnn::IConnectableLayer& layer)
-{
-    return SwizzleInDeswizzleOut(network, input, layer, layer);
-}
-
-bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
-                               const armnn::TensorShape & outputShape,
-                               uint32_t concatDim)
-{
-    // Validate the output shape is correct given the input shapes (which have just been validated)
-    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
-    if (outputShape.GetNumDimensions() != numDimensions)
-    {
-        return Fail("%s: Output shape has wrong number of dimensions", __func__);
-    }
-
-    unsigned int outputSizeAlongConcatenatedDimension = 0;
-    for (unsigned int i = 0; i < inputShapes.size(); i++)
-    {
-        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
-    }
-
-    for (unsigned int i = 0; i < numDimensions; ++i)
-    {
-        if (i == concatDim)
-        {
-            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
-            {
-                return Fail(
-                    "%s: Invalid output shape for dimension %d (%d != %d)",
-                    __func__,
-                    i,
-                    outputShape[i],
-                    outputSizeAlongConcatenatedDimension);
-            }
-        }
-        else
-        {
-            if (outputShape[i] != inputShapes[0][i])
-            {
-                return Fail("%s: Invalid output shape", __func__);
-            }
-        }
-    }
-
-    return true;
-}
-
-bool RequiresReshape(armnn::TensorShape & inputShape)
-{
-    return inputShape.GetNumDimensions() < 3;
-}
-
-template<typename OSlot>
-armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, OSlot& inputLayer,
-                                          armnn::TensorInfo reshapeInfo)
-{
-    armnn::ReshapeDescriptor reshapeDescriptor;
-    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
-
-    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
-    assert(reshapeLayer != nullptr);
-
-    // Attach the input layer to the reshape layer
-    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
-    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
-
-    return *reshapeLayer;
-}
-
-void SwizzleInputs(armnn::INetwork& network,
-                   std::vector<LayerInputHandle>& inputs,
-                   std::vector<armnn::TensorShape>& inputShapes,
-                   const armnn::PermutationVector& mapping)
-{
-    if (!mapping.IsEqual(IdentityPermutation4D))
-    {
-        size_t nInputs = inputs.size();
-        for (size_t i=0; i<nInputs; ++i)
-        {
-            // add swizzle layer
-            armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
-            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
-            auto& outputInfo = outputSlot.GetTensorInfo();
-            // replace inputs with the swizzled ones
-            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
-            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
-        }
-    }
-}
-
-void CreatePermutationParameters(const unsigned int numberOfDimensions,
-                       int32_t & concatDimension,
-                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
-{
-    assert(numberOfDimensions >= 3);
-
-    // ArmNN uses Compute Library subtensors to perform concatenation
-    // This only works when concatenating along dimension 0 or 1 for a 4-D tensor,
-    // or along dimension 0 for a 3-D tensor.
-    if (numberOfDimensions == 4)
-    {
-        if (concatDimension == 3)
-        {
-            concatDimension = 1;
-            permutationPair = std::make_pair(NHWCToArmNN, ArmNNToNHWC);
-        }
-        else if (concatDimension == 2)
-        {
-            concatDimension = 1;
-            permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
-        }
-        else
-        {
-            permutationPair = std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
-        }
-
-    }
-    else if (numberOfDimensions == 3)
-    {
-        if (concatDimension == 2)
-        {
-            concatDimension = 0;
-            permutationPair = std::make_pair(RotateTensorRight, RotateTensorLeft);
-        }
-        else if (concatDimension == 1)
-        {
-            concatDimension = 0;
-            permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
-        }
-        else
-        {
-            permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
-        }
-    }
-}
-
-} // anonymous namespace
-
-namespace armnn_driver
-{
-
-class ConstTensorPin
-{
-public:
-    // Creates an invalid tensor pin (can be used to signal errors)
-    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
-    ConstTensorPin(bool optional = false) : m_Optional(optional) {}
-
-    // @param tensorInfo TensorInfo associated with the tensor.
-    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
-    // the model being converted.
-    // @param numBytes Number of bytes for the tensor data.
-    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
-                   const armnn::PermutationVector& mappings)
-    {
-        boost::ignore_unused(numBytes);
-        assert(tensorInfo.GetNumBytes() == numBytes);
-
-        const bool needsSwizzling = (mappings.GetSize() > 0);
-        if (needsSwizzling)
-        {
-            m_SwizzledTensorData.resize(tensorInfo.GetNumBytes());
-            SwizzleAndroidNn4dTensorToArmNn(tensorInfo, valueStart, m_SwizzledTensorData.data(), mappings);
-
-            m_ConstTensor = armnn::ConstTensor(armnnUtils::Permuted(tensorInfo, mappings), m_SwizzledTensorData.data());
-        }
-        else
-        {
-            m_ConstTensor = armnn::ConstTensor(tensorInfo, valueStart);
-        }
-    }
-
-    ConstTensorPin(const ConstTensorPin& other) = delete;
-    ConstTensorPin(ConstTensorPin&& other) = default;
-
-    bool IsValid() const { return m_ConstTensor.GetMemoryArea() != nullptr; }
-    bool IsOptional() const { return m_Optional; }
-    const armnn::ConstTensor& GetConstTensor() const { return m_ConstTensor; }
-    const armnn::ConstTensor* GetConstTensorPtr() const
-    {
-        if (IsValid() && m_ConstTensor.GetNumElements() > 0)
-        {
-            return &m_ConstTensor;
-        }
-        // tensor is either invalid, or has no elements (indicating an optional tensor that was not provided)
-        return nullptr;
-    }
-
-private:
-    armnn::ConstTensor m_ConstTensor;
-    // Owned memory for swizzled tensor data, only required if the tensor needed
-    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
-    // the pools associated with the model being converted.
-    std::vector<uint8_t> m_SwizzledTensorData;
-    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
-    bool m_Optional;
-};
-
-template<typename HalVersion>
-ModelToINetworkConverter<HalVersion>::ModelToINetworkConverter(armnn::Compute compute,
+template<typename HalPolicy>
+ModelToINetworkConverter<HalPolicy>::ModelToINetworkConverter(armnn::Compute compute,
     const HalModel& model,
     const std::set<unsigned int>& forcedUnsupportedOperations)
-    : m_Compute(compute)
+    : m_Data(compute)
     , m_Model(model)
     , m_ForcedUnsupportedOperations(forcedUnsupportedOperations)
-    , m_Network(nullptr, nullptr)
     , m_ConversionResult(ConversionResult::Success)
 {
     try
@@ -480,16 +33,16 @@
     }
 }
 
-template<typename HalVersion>
-void ModelToINetworkConverter<HalVersion>::Convert()
+template<typename HalPolicy>
+void ModelToINetworkConverter<HalPolicy>::Convert()
 {
-    using HalModel = typename HalVersion::Model;
+    using HalModel = typename HalPolicy::Model;
 
     ALOGV("ModelToINetworkConverter::Convert(): %s", GetModelSummary<HalModel>(m_Model).c_str());
 
     // map the memory pool into shared pointers
-    m_MemPools.clear();
-    if (!setRunTimePoolInfosFromHidlMemories(&m_MemPools, m_Model.pools))
+    m_Data.m_MemPools.clear();
+    if (!setRunTimePoolInfosFromHidlMemories(&m_Data.m_MemPools, m_Model.pools))
     {
         Fail("%s: Setting of run time pool infos from Hidl Memories has failed.", __func__);
         m_ConversionResult = ConversionResult::ErrorMappingPools;
@@ -503,11 +56,11 @@
     }
 
     // Create armnn::INetwork
-    m_Network = armnn::INetwork::Create();
+    m_Data.m_Network = armnn::INetwork::Create();
 
     // add operations to it
     // track which layer outputs each operand
-    m_OutputSlotForOperand = std::vector<armnn::IOutputSlot*>(m_Model.operands.size(), nullptr);
+    m_Data.m_OutputSlotForOperand = std::vector<armnn::IOutputSlot*>(m_Model.operands.size(), nullptr);
 
     try
     {
@@ -517,13 +70,13 @@
             uint32_t inputIndex = m_Model.inputIndexes[i];
             const Operand& operand = m_Model.operands[inputIndex];
             const armnn::TensorInfo& tensor = GetTensorInfoForOperand(operand);
-            armnn::IConnectableLayer* layer = m_Network->AddInputLayer(i);
+            armnn::IConnectableLayer* layer = m_Data.m_Network->AddInputLayer(i);
 
             armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
             outputSlot.SetTensorInfo(GetTensorInfoForOperand(operand));
 
             // store for later layers
-            m_OutputSlotForOperand[inputIndex] = &outputSlot;
+            m_Data.m_OutputSlotForOperand[inputIndex] = &outputSlot;
         }
     }
     catch (UnsupportedOperand& e)
@@ -552,7 +105,7 @@
         {
             try
             {
-                ok = ConvertOperation(operation);
+                ok = HalPolicy::ConvertOperation(operation, m_Model, m_Data);
             }
             catch (UnsupportedOperand& e)
             {
@@ -586,10 +139,10 @@
                 uint32_t outputIndex = m_Model.outputIndexes[i];
                 const Operand& operand = m_Model.operands[outputIndex];
                 const armnn::TensorInfo& tensor = GetTensorInfoForOperand(operand);
-                armnn::IConnectableLayer* layer = m_Network->AddOutputLayer(i);
+                armnn::IConnectableLayer* layer = m_Data.m_Network->AddOutputLayer(i);
 
-                assert(m_OutputSlotForOperand[outputIndex]);
-                m_OutputSlotForOperand[outputIndex]->Connect(layer->GetInputSlot(0));
+                assert(m_Data.m_OutputSlotForOperand[outputIndex]);
+                m_Data.m_OutputSlotForOperand[outputIndex]->Connect(layer->GetInputSlot(0));
             }
         }
     }
@@ -600,2067 +153,22 @@
     }
 }
 
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertOperation(const neuralnetworks::V1_0::Operation& operation)
-{
-    switch (operation.type)
-    {
-        case neuralnetworks::V1_0::OperationType::ADD:
-            return ConvertAdd(operation);
-        case neuralnetworks::V1_0::OperationType::AVERAGE_POOL_2D:
-            return ConvertAveragePool2d(operation);
-        case neuralnetworks::V1_0::OperationType::CONCATENATION:
-            return ConvertConcatenation(operation);
-        case neuralnetworks::V1_0::OperationType::CONV_2D:
-            return ConvertConv2d(operation);
-        case neuralnetworks::V1_0::OperationType::DEPTHWISE_CONV_2D:
-            return ConvertDepthwiseConv2d(operation);
-        case neuralnetworks::V1_0::OperationType::FLOOR:
-            return ConvertFloor(operation);
-        case neuralnetworks::V1_0::OperationType::FULLY_CONNECTED:
-            return ConvertFullyConnected(operation);
-        case neuralnetworks::V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
-            return ConvertLocalResponseNormalization(operation);
-        case neuralnetworks::V1_0::OperationType::LOGISTIC:
-            return ConvertLogistic(operation);
-        case neuralnetworks::V1_0::OperationType::LSTM:
-            return ConvertLstm(operation);
-        case neuralnetworks::V1_0::OperationType::L2_NORMALIZATION:
-            return ConvertL2Normalization(operation);
-        case neuralnetworks::V1_0::OperationType::L2_POOL_2D:
-            return ConvertL2Pool2d(operation);
-        case neuralnetworks::V1_0::OperationType::MAX_POOL_2D:
-            return ConvertMaxPool2d(operation);
-        case neuralnetworks::V1_0::OperationType::MUL:
-            return ConvertMul(operation);
-        case neuralnetworks::V1_0::OperationType::RELU:
-            return ConvertReLu(operation);
-        case neuralnetworks::V1_0::OperationType::RELU1:
-            return ConvertReLu1(operation);
-        case neuralnetworks::V1_0::OperationType::RELU6:
-            return ConvertReLu6(operation);
-        case neuralnetworks::V1_0::OperationType::SOFTMAX:
-            return ConvertSoftmax(operation);
-        case neuralnetworks::V1_0::OperationType::TANH:
-            return ConvertTanH(operation);
-        case neuralnetworks::V1_0::OperationType::RESHAPE:
-            return ConvertReshape(operation);
-        case neuralnetworks::V1_0::OperationType::RESIZE_BILINEAR:
-            return ConvertResizeBilinear(operation);
-        default:
-            return Fail("%s: Operation type %s not supported in ArmnnDriver",
-                        __func__, toString(operation.type).c_str());
-    }
-}
-
-#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertOperation(const neuralnetworks::V1_1::Operation& operation)
-{
-    if (compliantWithV1_0(operation))
-    {
-        neuralnetworks::V1_0::Operation v1Operation = convertToV1_0(operation);
-        return ConvertOperation(v1Operation);
-    }
-    else
-    {
-        switch (operation.type)
-        {
-            case neuralnetworks::V1_1::OperationType::DIV:
-                return ConvertDiv(operation);
-            default:
-                return Fail("%s: Operation type %s not supported in ArmnnDriver",
-                            __func__, toString(operation.type).c_str());
-        }
-    }
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertDiv(const neuralnetworks::V1_1::Operation& operation)
-{
-    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0);
-    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1);
-
-    if (!input0.IsValid() || !input1.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    // The FuseActivation parameter is always the input index 2
-    // and it should be optional
-    ActivationFn activationFunction;
-    if (!GetOptionalInputActivation(operation, 2, activationFunction))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* outputOperand = GetOutputOperand(operation, 0);
-    if (!outputOperand)
-    {
-        return false;
-    }
-
-    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
-
-    if (!IsLayerSupported(__func__,
-                          armnn::IsDivisionSupported,
-                          m_Compute,
-                          input0.GetTensorInfo(),
-                          input1.GetTensorInfo(),
-                          outInfo))
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* const startLayer = m_Network->AddDivisionLayer();
-    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer);
-
-    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
-    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
-
-    if (endLayer)
-    {
-        BroadcastTensor(input0, input1, startLayer, *m_Network);
-        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer);
-    }
-
-    return Fail("%s: ProcessActivation failed", __func__);
-}
-#endif
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertAdd(const neuralnetworks::V1_0::Operation& operation)
-{
-    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0);
-    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1);
-
-    if (!input0.IsValid() || !input1.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    // The FuseActivation parameter is always the input index 2
-    // and it should be optional
-    ActivationFn activationFunction;
-    if (!GetOptionalInputActivation(operation, 2, activationFunction))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* outputOperand = GetOutputOperand(operation, 0);
-    if (!outputOperand)
-    {
-        return false;
-    }
-
-    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
-
-    if (!IsLayerSupported(__func__,
-                          armnn::IsAdditionSupported,
-                          m_Compute,
-                          input0.GetTensorInfo(),
-                          input1.GetTensorInfo(),
-                          outInfo))
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* const startLayer = m_Network->AddAdditionLayer();
-    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer);
-
-    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
-    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
-
-    if (endLayer != nullptr)
-    {
-        BroadcastTensor(input0, input1, startLayer, *m_Network);
-        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer);
-    }
-    else
-    {
-        return Fail("%s: ProcessActivation failed", __func__);
-    }
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertAveragePool2d(const neuralnetworks::V1_0::Operation& operation)
-{
-    return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Average);
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertConcatenation(const neuralnetworks::V1_0::Operation& operation)
-{
-    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
-    if (operation.inputs.size() <= 1)
-    {
-        return Fail("%s: Operation has insufficient arguments", __func__);
-    }
-
-    // Get inputs and outputs
-    const std::size_t numInputTensors = operation.inputs.size() - 1;
-
-    int32_t concatDim;
-    if (!GetInputScalar(operation, numInputTensors, OperandType::INT32, concatDim))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* const outputOperand = GetOutputOperand(operation, 0);
-    if (!outputOperand)
-    {
-        return Fail("%s: Operation has no outputs", __func__);
-    }
-
-
-    armnn::TensorInfo  outputInfo  = GetTensorInfoForOperand(*outputOperand);
-    armnn::TensorShape outputShape = outputInfo.GetShape();
-
-    //
-    // handle negative concat dims along the lines of tensorflow as described here:
-    //    https://www.tensorflow.org/api_docs/python/tf/concat
-    // "negative axis refers to axis + rank(values)-th dimension"
-    //
-    if (concatDim < 0)
-    {
-        concatDim += outputShape.GetNumDimensions();
-    }
-
-    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
-    {
-        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
-    }
-
-    std::vector<LayerInputHandle> inputHandles;
-    std::vector<armnn::TensorShape> inputShapes;
-
-    inputHandles.reserve(numInputTensors);
-    inputShapes.reserve(numInputTensors);
-
-    bool inputsHaveBeenReshaped = false;
-    unsigned int tensorDimensionsAdded = 0;
-
-    for (uint32_t i = 0; i < numInputTensors; ++i)
-    {
-        const Operand* const operand = GetInputOperand(operation, i);
-        if (!operand)
-        {
-            return Fail("%s: Operation has invalid inputs", __func__);
-        }
-
-        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
-        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle(operation, i);
-
-        if (operandShape.GetNumDimensions() == 0)
-        {
-            return Fail("%s: Operands with rank 0 are not supported", __func__);
-        }
-
-        if (RequiresReshape(operandShape))
-        {
-            inputsHaveBeenReshaped = true;
-
-            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
-
-            // Expand the tensor to three dimensions
-            if (operandShape.GetNumDimensions() == 2)
-            {
-                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
-                tensorDimensionsAdded = 1;
-            }
-            else
-            {
-                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
-                tensorDimensionsAdded = 2;
-            }
-
-            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
-                    *m_Network,
-                    operandInputHandle,
-                    reshapeInfo
-            );
-
-            // Point to the reshape operation rather than the input operation
-            operandShape = reshapeInfo.GetShape();
-            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
-        }
-
-        inputShapes.emplace_back(operandShape);
-        inputHandles.emplace_back(operandInputHandle);
-
-        if (!inputHandles.back().IsValid())
-        {
-            return Fail("%s: Operation has invalid inputs", __func__);
-        }
-    }
-
-    assert(inputShapes.size() == inputHandles.size());
-
-    if (inputsHaveBeenReshaped)
-    {
-        // Adjust the concatenation dimension by the amount of dimensions added (if any)
-        concatDim += tensorDimensionsAdded;
-
-        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
-        if (tensorDimensionsAdded == 1)
-        {
-            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
-        }
-        else if (tensorDimensionsAdded == 2)
-        {
-            outputShape = armnn::TensorShape({1, 1, outputShape[0], outputShape[1]});
-        }
-    }
-
-    // Get the pair of permutations required for the concatenation
-    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
-            std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
-
-    CreatePermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);
-
-    outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
-    outputInfo.SetShape(outputShape);
-
-    // This is a no-op for identity swizzles; otherwise it replaces both
-    // the handles and shapes with the swizzled layer output handles and shapes
-    SwizzleInputs(*m_Network, inputHandles, inputShapes, permutationPair.first);
-
-    // Create an armnn merger layer descriptor - this will also perform validation on the input shapes
-    armnn::OriginsDescriptor mergerDescriptor;
-    try
-    {
-        // The merger descriptor is always created across the only supported concat
-        // dimension, which is 0 or 1
-        mergerDescriptor =
-            armnn::CreateMergerDescriptorForConcatenation(
-                inputShapes.begin(), inputShapes.end(), concatDim);
-    }
-    catch (const armnn::Exception& error)
-    {
-        return Fail("%s: Error preparing merger descriptor. %s", __func__, error.what());
-    }
-
-    // Validate the output shape is correct given the input shapes based on the
-    // only valid concat dimension which is 0 or 1
-    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
-    {
-        return Fail("%s: Error validating the output shape for concat", __func__);
-    }
-
-    std::vector<const armnn::TensorInfo*> inputTensorInfos;
-    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
-        [](const LayerInputHandle& h) -> const armnn::TensorInfo* { return &h.GetTensorInfo(); });
-    if (!IsLayerSupported(__func__,
-                          armnn::IsMergerSupported,
-                          m_Compute,
-                          inputTensorInfos,
-                          mergerDescriptor))
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* layer = m_Network->AddMergerLayer(mergerDescriptor);
-    assert(layer != nullptr);
-    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
-
-    // Connect inputs to the layer
-    const unsigned int numInputSlots = layer->GetNumInputSlots();
-    assert(numInputSlots == inputHandles.size());
-    for (unsigned int i = 0; i < numInputSlots; ++i)
-    {
-        // Connect the input directly to the merger (concat) layer
-        inputHandles[i].Connect(layer->GetInputSlot(i));
-    }
-
-    // Add permutation layer and connect the output to it, the permutation becomes the output layer
-    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*m_Network,
-                                                               layer->GetOutputSlot(0),
-                                                               permutationPair.second);
-    layer = &deswizzleLayer;
-
-    if (inputsHaveBeenReshaped)
-    {
-        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
-
-        // Undo the reshape knowing the amount of dimensions added
-        if (tensorDimensionsAdded == 1)
-        {
-            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
-                                                          afterConcatInfo.GetShape()[2] }));
-        }
-        else if (tensorDimensionsAdded == 2)
-        {
-            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2],
-                                                          afterConcatInfo.GetShape()[3] }));
-        }
-
-        layer = &AddReshapeLayer(
-                *m_Network,
-                layer->GetOutputSlot(0),
-                afterConcatInfo
-        );
-    }
-
-    return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertConv2d(const neuralnetworks::V1_0::Operation& operation)
-{
-    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* output = GetOutputOperand(operation, 0);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
-    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
-    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);
-
-    // ArmNN does not currently support non-fixed weights or bias
-    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, NHWCToArmNN);
-    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2);
-
-    if (!weightsPin.IsValid() || !biasPin.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    armnn::ConstTensor weights = weightsPin.GetConstTensor();
-    armnn::ConstTensor bias = biasPin.GetConstTensor();
-    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), swizzledInputInfo);
-
-    armnn::Convolution2dDescriptor desc;
-    ActivationFn activation;
-
-    if (operation.inputs.size() == 10)
-    {
-        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft)   ||
-            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight)  ||
-            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop)    ||
-            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom) ||
-            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX)   ||
-            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY)   ||
-            !GetInputActivationFunction(operation, 9, activation))
-        {
-            return Fail("%s: Operation has invalid inputs", __func__);
-        }
-    }
-    else if (operation.inputs.size() == 7)
-    {
-        android::nn::PaddingScheme paddingScheme;
-
-        if (!GetInputPaddingScheme(operation, 3, paddingScheme)               ||
-            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX) ||
-            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY) ||
-            !GetInputActivationFunction(operation, 6, activation))
-        {
-            return Fail("%s: Operation has invalid inputs", __func__);
-        }
-
-        const uint32_t kernelX = weights.GetShape()[3];
-        const uint32_t kernelY = weights.GetShape()[2];
-        const uint32_t inputX  = swizzledInputInfo.GetShape()[3];
-        const uint32_t inputY  = swizzledInputInfo.GetShape()[2];
-
-        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
-        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
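-        // Illustrative arithmetic: with the SAME scheme, CalcPadding distributes enough padding
-        // for outputSize == ceil(inputSize / stride); with VALID it adds none. E.g. inputX = 7,
-        // kernelX = 3, strideX = 2 under SAME yields padLeft + padRight = 2, giving an output
-        // width of ceil(7 / 2) = 4.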
-    }
-    else
-    {
-        return Fail("%s: Unsupported number of operation inputs", __func__);
-    }
-
-    desc.m_BiasEnabled = true;
-    auto biases = boost::make_optional(bias.GetInfo());
-
-    if (!IsLayerSupported(__func__,
-                          armnn::IsConvolution2dSupported,
-                          m_Compute,
-                          swizzledInputInfo,
-                          swizzledOutputInfo,
-                          desc,
-                          weights.GetInfo(),
-                          biases))
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* startLayer = m_Network->AddConvolution2dLayer(desc, weights, bias);
-    armnn::IConnectableLayer* endLayer = ProcessActivation(swizzledOutputInfo, activation, startLayer);
-
-    if (endLayer != nullptr)
-    {
-        armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *startLayer, *endLayer);
-        return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
-    }
-    else
-    {
-        return Fail("%s: ProcessActivation failed", __func__);
-    }
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertDepthwiseConv2d(const neuralnetworks::V1_0::Operation& operation)
-{
-    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* output = GetOutputOperand(operation, 0);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
-    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
-    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);
-
-    // ArmNN does not currently support non-fixed weights or bias
-
-    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
-    // but in ArmNN it needs to be [ M, I, H, W ]
-    const Operand* weightsOperand = GetInputOperand(operation, 1);
-
-    if (weightsOperand == nullptr)
-    {
-        return Fail("%s: Operand is invalid", __func__);
-    }
-
-    // Reinterpret weight data as [ H, W, I, M ]
-    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1], weightsOperand->dimensions[2],
-                                      inputInfo.GetShape()[3],
-                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });
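-    // Worked example (illustrative): with I = 16 input channels and an Android NN weights
-    // tensor of shape [ 1, 3, 3, 32 ], the depth multiplier is M = 32 / 16 = 2, so the data is
-    // reinterpreted as [ 3, 3, 16, 2 ] and the HWIMToMIHW swizzle below yields [ 2, 16, 3, 3 ].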
-
-    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
-    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };
-    ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, HWIMToMIHW, &weightsShape);
-
-    // Bias is a 1D tensor
-    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2);
-
-    if (!weightsPin.IsValid() || !biasPin.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    armnn::ConstTensor weights = weightsPin.GetConstTensor();
-    armnn::ConstTensor bias = biasPin.GetConstTensor();
-    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), swizzledInputInfo);
-
-    armnn::DepthwiseConvolution2dDescriptor desc;
-    ActivationFn activation;
-
-    if (operation.inputs.size() == 11)
-    {
-        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft)         ||
-            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight)        ||
-            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop)          ||
-            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom)       ||
-            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX)         ||
-            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY)         ||
-            !GetInputActivationFunction(operation, 10, activation))
-        {
-            return Fail("%s: Operation has invalid inputs", __func__);
-        }
-    }
-    else if (operation.inputs.size() == 8)
-    {
-        android::nn::PaddingScheme paddingScheme;
-
-        if (!GetInputPaddingScheme(operation, 3, paddingScheme)                       ||
-            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX)         ||
-            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY)         ||
-            !GetInputActivationFunction(operation, 7, activation))
-        {
-            return Fail("%s: Operation has invalid inputs", __func__);
-        }
-
-        const uint32_t kernelX = weights.GetShape()[3];
-        const uint32_t kernelY = weights.GetShape()[2];
-        const uint32_t inputX  = swizzledInputInfo.GetShape()[3];
-        const uint32_t inputY  = swizzledInputInfo.GetShape()[2];
-
-        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
-        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
-    }
-    else
-    {
-        return Fail("%s: Unsupported number of operation inputs", __func__);
-    }
-
-    desc.m_BiasEnabled = true;
-    auto biases = boost::make_optional(bias.GetInfo());
-
-    if (!IsLayerSupported(__func__,
-                          armnn::IsDepthwiseConvolutionSupported,
-                          m_Compute,
-                          swizzledInputInfo,
-                          swizzledOutputInfo,
-                          desc,
-                          weights.GetInfo(),
-                          biases))
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* startLayer = m_Network->AddDepthwiseConvolution2dLayer(desc, weights, bias);
-    armnn::IConnectableLayer* endLayer = ProcessActivation(swizzledOutputInfo, activation, startLayer);
-
-    if (endLayer != nullptr)
-    {
-        armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *startLayer, *endLayer);
-        return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
-    }
-    else
-    {
-        return Fail("%s: ProcessActivation failed", __func__);
-    }
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertFloor(const neuralnetworks::V1_0::Operation& operation)
-{
-    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* const outputOperand = GetOutputOperand(operation, 0);
-    if (!outputOperand)
-    {
-        return Fail("%s: Operation has invalid outputs", __func__);
-    }
-
-    if (!IsLayerSupported(__func__,
-                          armnn::IsFloorSupported,
-                          m_Compute,
-                          input.GetTensorInfo(),
-                          GetTensorInfoForOperand(*outputOperand)))
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* layer = m_Network->AddFloorLayer();
-    assert(layer != nullptr);
-    input.Connect(layer->GetInputSlot(0));
-
-    return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertFullyConnected(const neuralnetworks::V1_0::Operation& operation)
-{
-    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* output = GetOutputOperand(operation, 0);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
-    // ArmNN does not currently support non-fixed weights or bias
-    ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1); // 2D
-    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2);    // 1D
-
-    if (!weightsPin.IsValid() || !biasPin.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    armnn::ConstTensor weights = weightsPin.GetConstTensor();
-    armnn::ConstTensor bias = biasPin.GetConstTensor();
-
-    armnn::TensorInfo reshapedInfo = inputInfo;
-    if (inputInfo.GetNumDimensions() > 2U)
-    {
-        unsigned int dim0 = inputInfo.GetShape()[0];
-        unsigned int dim1 = inputInfo.GetShape()[1];
-
-        for (unsigned int i = 2U; i < inputInfo.GetNumDimensions(); ++i)
-        {
-            dim1 *= inputInfo.GetShape()[i];
-        }
-
-        unsigned int divisor = weights.GetInfo().GetShape()[1] / dim1;
-        if (dim0 % divisor != 0)
-        {
-            return Fail("%s: Failed to deduce tensor shape", __func__);
-        }
-
-        reshapedInfo.SetShape(armnn::TensorShape({dim0 / divisor, dim1 * divisor}));
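-        // Worked example (illustrative): an input of shape [ 8, 2, 4 ] gives dim0 = 8 and
-        // dim1 = 2 * 4 = 8; with a weights shape of [ numUnits, 16 ] the divisor is 16 / 8 = 2,
-        // so the input is reshaped to [ 4, 16 ].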
-    }
-
-    // Ensure that the bias quantization scale is within 1% of inputScale * weightsScale
-    // (small float differences can exist)
-    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);
-
-    ActivationFn activationFunction;
-    if (!GetInputActivationFunction(operation, 3, activationFunction))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    armnn::FullyConnectedDescriptor desc;
-    desc.m_TransposeWeightMatrix = true;
-    desc.m_BiasEnabled           = true;
-
-    if (!IsLayerSupported(__func__,
-                          armnn::IsFullyConnectedSupported,
-                          m_Compute,
-                          inputInfo,
-                          outputInfo,
-                          weights.GetInfo(),
-                          bias.GetInfo(),
-                          desc))
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* startLayer = m_Network->AddFullyConnectedLayer(desc, weights, bias);
-    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer);
-
-    if (endLayer != nullptr)
-    {
-        if (inputInfo.GetNumDimensions() > 2U)
-        {
-            armnn::ReshapeDescriptor reshapeDescriptor;
-            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
-
-            armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor);
-            assert(reshapeLayer != nullptr);
-            input.Connect(reshapeLayer->GetInputSlot(0));
-            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
-            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
-        }
-        else
-        {
-            input.Connect(startLayer->GetInputSlot(0));
-        }
-
-        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer);
-    }
-    else
-    {
-        return Fail("%s: ProcessActivation failed", __func__);
-    }
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertLocalResponseNormalization(
-    const neuralnetworks::V1_0::Operation& operation)
-{
-    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* output = GetOutputOperand(operation, 0);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
-    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
-    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);
-
-    armnn::NormalizationDescriptor descriptor;
-
-    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
-    descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
-
-    if (!GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize) ||
-        !GetInputFloat32(operation, 2, descriptor.m_K) ||
-        !GetInputFloat32(operation, 3, descriptor.m_Alpha) ||
-        !GetInputFloat32(operation, 4, descriptor.m_Beta))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    // ArmNN expects normSize to be the full size of the normalization
-    // window rather than the radius as in AndroidNN.
-    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
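-    // For example, an Android NN radius of 2 becomes an ArmNN normSize of 5 (a five-element window).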
-
-    if (!IsLayerSupported(__func__,
-                          armnn::IsNormalizationSupported,
-                          m_Compute,
-                          swizzledInputInfo,
-                          swizzledOutputInfo,
-                          descriptor))
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* layer = m_Network->AddNormalizationLayer(descriptor);
-    assert(layer != nullptr);
-    layer->GetOutputSlot(0).SetTensorInfo(swizzledOutputInfo);
-
-    armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *layer);
-
-    return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertLogistic(const neuralnetworks::V1_0::Operation& operation)
-{
-    armnn::ActivationDescriptor desc;
-    desc.m_Function = armnn::ActivationFunction::Sigmoid;
-
-    return ConvertToActivation(operation, __func__, desc);
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertL2Normalization(const neuralnetworks::V1_0::Operation& operation)
-{
-    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* output = GetOutputOperand(operation, 0);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
-    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
-    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);
-
-    if (!IsLayerSupported(__func__,
-                          armnn::IsL2NormalizationSupported,
-                          m_Compute,
-                          swizzledInputInfo,
-                          swizzledOutputInfo))
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* layer = m_Network->AddL2NormalizationLayer();
-    assert(layer != nullptr);
-    layer->GetOutputSlot(0).SetTensorInfo(swizzledOutputInfo);
-
-    armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *layer);
-
-    return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertL2Pool2d(const neuralnetworks::V1_0::Operation& operation)
-{
-    return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::L2);
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertMaxPool2d(const neuralnetworks::V1_0::Operation& operation)
-{
-    return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Max);
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertMul(const neuralnetworks::V1_0::Operation& operation)
-{
-    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0);
-    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1);
-
-    if (!input0.IsValid() || !input1.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    // The FuseActivation parameter is always at input index 2 and it should be treated as optional.
-    ActivationFn activationFunction;
-    if (!GetOptionalInputActivation(operation, 2, activationFunction))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* outputOperand = GetOutputOperand(operation, 0);
-    if (!outputOperand)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
-
-    if (!IsLayerSupported(__func__,
-                          armnn::IsMultiplicationSupported,
-                          m_Compute,
-                          input0.GetTensorInfo(),
-                          input1.GetTensorInfo(),
-                          outInfo))
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* const startLayer = m_Network->AddMultiplicationLayer();
-    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer);
-
-    if (endLayer != nullptr)
-    {
-        BroadcastTensor(input0, input1, startLayer, *m_Network);
-        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer);
-    }
-    else
-    {
-        return Fail("%s: ProcessActivation failed", __func__);
-    }
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertReLu(const neuralnetworks::V1_0::Operation& operation)
-{
-    armnn::ActivationDescriptor desc;
-    desc.m_Function = armnn::ActivationFunction::ReLu;
-
-    return ConvertToActivation(operation, __func__, desc);
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertReLu1(const neuralnetworks::V1_0::Operation& operation)
-{
-    armnn::ActivationDescriptor desc;
-    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
-    desc.m_A        = 1.0f;
-    desc.m_B        = -1.0f;
-
-    return ConvertToActivation(operation, __func__, desc);
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertReLu6(const neuralnetworks::V1_0::Operation& operation)
-{
-    armnn::ActivationDescriptor desc;
-    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
-    desc.m_A        = 6.0f;
-
-    return ConvertToActivation(operation, __func__, desc);
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertSoftmax(const neuralnetworks::V1_0::Operation& operation)
-{
-    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* outputOperand = GetOutputOperand(operation, 0);
-    if (!outputOperand)
-    {
-        return Fail("%s: Operation has no outputs", __func__);
-    }
-
-    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
-
-    armnn::SoftmaxDescriptor desc;
-    if (!GetInputFloat32(operation, 1, desc.m_Beta))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    if (!IsLayerSupported(__func__,
-                          armnn::IsSoftmaxSupported,
-                          m_Compute,
-                          input.GetTensorInfo(),
-                          outInfo,
-                          desc))
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* layer = m_Network->AddSoftmaxLayer(desc);
-    assert(layer != nullptr);
-    input.Connect(layer->GetInputSlot(0));
-
-    return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertTanH(const neuralnetworks::V1_0::Operation& operation)
-{
-    armnn::ActivationDescriptor desc;
-    desc.m_Function = armnn::ActivationFunction::TanH;
-    desc.m_A = 1.0f; // Android NN does not support TanH parameters
-    desc.m_B = 1.0f; // set to 1.0f for unity scaling
-
-    return ConvertToActivation(operation, __func__, desc);
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertReshape(const neuralnetworks::V1_0::Operation& operation)
-{
-    const Operand* inputOperand = GetInputOperand(operation, 0);
-    const Operand* requestedShapeOperand = GetInputOperand(operation, 1);
-    const Operand* outputOperand = GetOutputOperand(operation, 0);
-
-    if (inputOperand == nullptr
-        || requestedShapeOperand == nullptr
-        || outputOperand == nullptr)
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    if (requestedShapeOperand->dimensions.size() != 1)
-    {
-        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
-            __func__, requestedShapeOperand->dimensions.size());
-    }
-
-    std::vector<int32_t> targetDimensions;
-    if (!GetTensorInt32Values(*requestedShapeOperand, targetDimensions))
-    {
-        return Fail("%s: Could not read values of input 1", __func__);
-    }
-
-    const Shape inputOperandShape = GetOperandShape(*inputOperand);
-
-    Shape requestedShape;
-    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
-    // function that resolves these values into a fully specified tensor shape.
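-    // For example, an input of shape [ 4, 6 ] with requested shape [ -1, 3 ] resolves to [ 8, 3 ].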
-    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
-    {
-        return Fail("%s: Failed to resolve the requested shape", __func__);
-    }
-
-    const Shape outputOperandShape = GetOperandShape(*outputOperand);
-    if (!SameShape(requestedShape, outputOperandShape))
-    {
-        return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
-    }
-
-    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Could not read input 0", __func__);
-    }
-
-    if (!IsLayerSupported(__func__,
-                          armnn::IsReshapeSupported,
-                          m_Compute,
-                          input.GetTensorInfo()))
-    {
-        return false;
-    }
-
-    armnn::ReshapeDescriptor reshapeDescriptor;
-    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
-                                                         requestedShape.dimensions.data());
-
-    armnn::IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDescriptor);
-    assert(layer != nullptr);
-    input.Connect(layer->GetInputSlot(0));
-
-    return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertResizeBilinear(const neuralnetworks::V1_0::Operation& operation)
-{
-    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Could not read input 0", __func__);
-    }
-
-    const Operand* output = GetOutputOperand(operation, 0);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
-    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
-    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);
-
-    if (!IsLayerSupported(__func__,
-                          armnn::IsResizeBilinearSupported,
-                          m_Compute,
-                          swizzledInputInfo))
-    {
-        return false;
-    }
-
-    armnn::ResizeBilinearDescriptor desc;
-
-    if (   !GetInputScalar(operation, 1, OperandType::INT32, desc.m_TargetHeight)
-        || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_TargetWidth))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    armnn::IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc);
-    assert(layer != nullptr);
-    layer->GetOutputSlot(0).SetTensorInfo(swizzledOutputInfo);
-
-    armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *layer);
-
-    return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertLstm(const neuralnetworks::V1_0::Operation& operation)
-{
-    // Inputs:
-    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
-    //      “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
-    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Could not read input 0: input", __func__);
-    }
-    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
-    LayerInputHandle outputStateIn = ConvertToLayerInputHandle(operation, 18);
-    if (!outputStateIn.IsValid())
-    {
-        return Fail("%s: Could not read input 18: outputStateIn", __func__);
-    }
-    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
-    LayerInputHandle cellStateIn = ConvertToLayerInputHandle(operation, 19);
-    if (!cellStateIn.IsValid())
-    {
-        return Fail("%s: Could not read input 19: cellStateIn", __func__);
-    }
-
-    // Get the mandatory input tensors:
-    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
-    //     [num_units, input_size].
-    const ConstTensorPin inputToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 2);
-    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
-    const ConstTensorPin inputToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 3);
-    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
-    //     [num_units, input_size].
-    const ConstTensorPin inputToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 4);
-    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
-    //     [num_units, output_size].
-    const ConstTensorPin recurrentToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 6);
-    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
-    //     [num_units, output_size].
-    const ConstTensorPin recurrentToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 7);
-    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
-    //     [num_units, output_size].
-    const ConstTensorPin recurrentToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 8);
-    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
-    const ConstTensorPin forgetGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 13);
-    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
-    const ConstTensorPin cellBiasPin = ConvertOperationInputToConstTensorPin(operation, 14);
-    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
-    const ConstTensorPin outputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 15);
-
-    if (!inputToForgetWeightsPin.IsValid() ||
-        !inputToCellWeightsPin.IsValid() ||
-        !inputToOutputWeightsPin.IsValid() ||
-        !recurrentToForgetWeightsPin.IsValid() ||
-        !recurrentToCellWeightsPin.IsValid() ||
-        !recurrentToOutputWeightsPin.IsValid() ||
-        !forgetGateBiasPin.IsValid() ||
-        !cellBiasPin.IsValid() ||
-        !outputGateBiasPin.IsValid())
-    {
-        return Fail("%s: Operation has invalid tensor inputs", __func__);
-    }
-
-    // Get the optional input tensors:
-    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
-    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
-    const ConstTensorPin inputToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 1);
-    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
-    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
-    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
-    const ConstTensorPin recurrentToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 5);
-    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
-    const ConstTensorPin cellToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 9);
-    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
-    const ConstTensorPin cellToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 10);
-    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
-    const ConstTensorPin cellToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 11);
-    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
-    const ConstTensorPin inputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 12);
-    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
-    //     [output_size, num_units].
-    const ConstTensorPin projectionWeightsPin = ConvertOperationInputToConstTensorPin(operation, 16);
-    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
-    const ConstTensorPin projectionBiasPin = ConvertOperationInputToConstTensorPin(operation, 17);
-
-    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
-        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
-        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
-        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
-        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
-        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
-        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
-        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
-    {
-        return Fail("%s: Operation has invalid tensor inputs", __func__);
-    }
-
-    // Get the mandatory input scalars (actually 1-D tensors of size 1):
-    // 20: The activation function: A value indicating the activation function:
-    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
-    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
-    //     If set to 0.0 then clipping is disabled.
-    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
-    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
-    ActivationFn activation;
-    float cellClip;
-    float projClip;
-    if (!GetInputActivationFunctionFromTensor(operation, 20, activation) ||
-        !GetInputScalar(operation, 21, OperandType::FLOAT32, cellClip) ||
-        !GetInputScalar(operation, 22, OperandType::FLOAT32, projClip))
-    {
-        return Fail("%s: Operation has invalid scalar inputs", __func__);
-    }
-
-    // Outputs:
-    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
-    //     CIFG, or [batch_size, num_units * 3] without CIFG.
-    const Operand* scratchBuffer = GetOutputOperand(operation, 0);
-    if (!scratchBuffer)
-    {
-        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
-    }
-    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
-    const Operand* outputStateOut = GetOutputOperand(operation, 1);
-    if (!outputStateOut)
-    {
-        return Fail("%s: Could not read output 1: outputStateOut", __func__);
-    }
-    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
-    const Operand* cellStateOut = GetOutputOperand(operation, 2);
-    if (!cellStateOut)
-    {
-        return Fail("%s: Could not read output 2: cellStateOut", __func__);
-    }
-    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
-    //     effectively the same as the current “output state (out)” value.
-    const Operand* output = GetOutputOperand(operation, 3);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 3: output", __func__);
-    }
-
-    // set the params structure for the AddLstmLayer call
-    armnn::LstmInputParams params;
-    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
-    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
-    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
-    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
-    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
-    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
-    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
-    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
-    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
-    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
-    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
-    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
-    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
-    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
-    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
-    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
-    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
-
-    // set the layer descriptor
-    armnn::LstmDescriptor desc;
-    desc.m_ActivationFunc = activation;
-    desc.m_ClippingThresCell = cellClip;
-    desc.m_ClippingThresProj = projClip;
-    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
-                          params.m_RecurrentToInputWeights == nullptr ||
-                          params.m_InputGateBias == nullptr);
-    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
-                              params.m_CellToOutputWeights != nullptr);
-    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
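-    // Note: CIFG (Coupled Input and Forget Gate) derives the input gate from the forget gate,
-    // so the input-to-input weights, recurrent-to-input weights and input gate bias are all
-    // omitted together; peephole and projection are enabled by the presence of their tensors.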
-
-    // validate the optional input groups
-    if (desc.m_CifgEnabled &&
-        (params.m_InputToInputWeights != nullptr ||
-         params.m_RecurrentToInputWeights != nullptr ||
-         params.m_InputGateBias != nullptr))
-    {
-        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
-                    " and input gate bias must be provided", __func__);
-    }
-
-    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
-    {
-        return Fail("%s: projection bias should not be provided without projection weights", __func__);
-    }
-
-    if (desc.m_PeepholeEnabled &&
-        (params.m_CellToForgetWeights == nullptr ||
-         params.m_CellToOutputWeights == nullptr ||
-         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
-    {
-        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
-                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
-    }
-
-    // Check if the layer is supported
-    // Inputs
-    const armnn::TensorInfo& inputInfo         = input.GetTensorInfo();
-    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
-    const armnn::TensorInfo& cellStateInInfo   = cellStateIn.GetTensorInfo();
-
-    // Outputs
-    const armnn::TensorInfo& scratchBufferInfo  = GetTensorInfoForOperand(*scratchBuffer);
-    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
-    const armnn::TensorInfo& cellStateOutInfo   = GetTensorInfoForOperand(*cellStateOut);
-    const armnn::TensorInfo& outputInfo         = GetTensorInfoForOperand(*output);
-
-    // Basic parameters
-    const armnn::TensorInfo& inputToForgetWeights = params.m_InputToForgetWeights->GetInfo();
-    const armnn::TensorInfo& inputToCellWeights   = params.m_InputToCellWeights->GetInfo();
-    const armnn::TensorInfo& inputToOutputWeights = params.m_InputToOutputWeights->GetInfo();
-    const armnn::TensorInfo& recurrentToForgetWeights = params.m_RecurrentToForgetWeights->GetInfo();
-    const armnn::TensorInfo& recurrentToCellWeights = params.m_RecurrentToCellWeights->GetInfo();
-    const armnn::TensorInfo& recurrentToOutputWeights = params.m_RecurrentToOutputWeights->GetInfo();
-    const armnn::TensorInfo& forgetGateBias = params.m_ForgetGateBias->GetInfo();
-    const armnn::TensorInfo& cellBias = params.m_CellBias->GetInfo();
-    const armnn::TensorInfo& outputGateBias = params.m_OutputGateBias->GetInfo();
-
-    // Optional parameters
-    const armnn::TensorInfo* inputToInputWeights = nullptr;
-    const armnn::TensorInfo* recurrentToInputWeights = nullptr;
-    const armnn::TensorInfo* cellToInputWeights = nullptr;
-    const armnn::TensorInfo* inputGateBias = nullptr;
-    const armnn::TensorInfo* projectionWeights = nullptr;
-    const armnn::TensorInfo* projectionBias    = nullptr;
-    const armnn::TensorInfo* cellToForgetWeights = nullptr;
-    const armnn::TensorInfo* cellToOutputWeights = nullptr;
-
-    if (!desc.m_CifgEnabled)
-    {
-        inputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
-        recurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
-        if (params.m_CellToInputWeights != nullptr)
-        {
-            cellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
-        }
-        inputGateBias = &(params.m_InputGateBias->GetInfo());
-    }
-
-    if (desc.m_ProjectionEnabled)
-    {
-        projectionWeights = &(params.m_ProjectionWeights->GetInfo());
-        if (params.m_ProjectionBias != nullptr)
-        {
-            projectionBias = &(params.m_ProjectionBias->GetInfo());
-        }
-    }
-
-    if (desc.m_PeepholeEnabled)
-    {
-        cellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
-        cellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
-    }
-
-    if (!IsLayerSupported(__func__,
-                          armnn::IsLstmSupported,
-                          m_Compute,
-                          inputInfo,
-                          outputStateInInfo,
-                          cellStateInInfo,
-                          scratchBufferInfo,
-                          outputStateOutInfo,
-                          cellStateOutInfo,
-                          outputInfo,
-                          desc,
-                          inputToForgetWeights,
-                          inputToCellWeights,
-                          inputToOutputWeights,
-                          recurrentToForgetWeights,
-                          recurrentToCellWeights,
-                          recurrentToOutputWeights,
-                          forgetGateBias,
-                          cellBias,
-                          outputGateBias,
-                          inputToInputWeights,
-                          recurrentToInputWeights,
-                          cellToInputWeights,
-                          inputGateBias,
-                          projectionWeights,
-                          projectionBias,
-                          cellToForgetWeights,
-                          cellToOutputWeights))
-    {
-        return false;
-    }
-
-    // Add the layer
-    armnn::IConnectableLayer* layer = m_Network->AddLstmLayer(desc, params, "Lstm");
-
-    input.Connect(layer->GetInputSlot(0));
-    outputStateIn.Connect(layer->GetInputSlot(1));
-    cellStateIn.Connect(layer->GetInputSlot(2));
-
-    return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0) &&
-            SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1) &&
-            SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2) &&
-            SetupAndTrackLayerOutputSlot(operation, 3, *layer, 3));
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertToActivation(const neuralnetworks::V1_0::Operation& operation,
-    const char* operationName,
-    const armnn::ActivationDescriptor& activationDesc)
-{
-    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Input 0 is invalid", operationName);
-    }
-
-    const Operand* outputOperand = GetOutputOperand(operation, 0);
-    if (!outputOperand)
-    {
-        return Fail("%s: Operation has invalid outputs", operationName);
-    }
-    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
-    if (!IsLayerSupported(__func__,
-                          armnn::IsActivationSupported,
-                          m_Compute,
-                          input.GetTensorInfo(),
-                          outInfo,
-                          activationDesc))
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* layer = m_Network->AddActivationLayer(activationDesc);
-    assert(layer != nullptr);
-    input.Connect(layer->GetInputSlot(0));
-
-    return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::ConvertPooling2d(const neuralnetworks::V1_0::Operation& operation,
-    const char* operationName,
-    armnn::PoolingAlgorithm poolType)
-{
-    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Could not read input 0", operationName);
-    }
-
-    const Operand* output = GetOutputOperand(operation, 0);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
-    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
-    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);
-
-    armnn::Pooling2dDescriptor desc;
-    desc.m_PoolType = poolType;
-    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
-
-    ActivationFn activation;
-
-    if (operation.inputs.size() == 7)
-    {
-        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
-        android::nn::PaddingScheme scheme;
-
-        if (   !GetInputPaddingScheme(operation, 1, scheme)
-            || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_StrideX)
-            || !GetInputScalar(operation, 3, OperandType::INT32, desc.m_StrideY)
-            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PoolWidth)
-            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PoolHeight)
-            || !GetInputActivationFunction(operation, 6, activation))
-        {
-            return Fail("%s: Operation has invalid inputs", operationName);
-        }
-
-        const unsigned int inputWidth = swizzledInputInfo.GetShape()[3];
-        const unsigned int inputHeight = swizzledInputInfo.GetShape()[2];
-
-        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
-        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
-    }
-    else
-    {
-        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
-        if (   !GetInputScalar(operation, 1, OperandType::INT32, desc.m_PadLeft)
-            || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_PadRight)
-            || !GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadTop)
-            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadBottom)
-            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideX)
-            || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_StrideY)
-            || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_PoolWidth)
-            || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_PoolHeight)
-            || !GetInputActivationFunction(operation, 9, activation))
-        {
-            return Fail("%s: Operation has invalid inputs", operationName);
-        }
-    }
-
-    // ArmNN does not accept a pool size of 1, but the ArmNN driver is expected to cope.
-    // This is mapped to a trivial splitter instead.
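-    // A splitter with a single view spanning the whole output shape forwards the tensor
-    // unchanged, which matches a 1x1 pooling window (an identity operation when the stride is 1).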
-    armnn::IConnectableLayer* startLayer = nullptr;
-    if (desc.m_PoolWidth != 1 || desc.m_PoolHeight != 1)
-    {
-        if (!IsLayerSupported(__func__,
-                              armnn::IsPooling2dSupported,
-                              m_Compute,
-                              swizzledInputInfo,
-                              swizzledOutputInfo,
-                              desc))
-        {
-            return false;
-        }
-
-        startLayer = m_Network->AddPooling2dLayer(desc);
-    }
-    else
-    {
-        const unsigned int numDims = swizzledOutputInfo.GetNumDimensions();
-
-        armnn::ViewsDescriptor viewsDesc(1, numDims);
-
-        for (unsigned int i = 0; i < numDims; ++i)
-        {
-            viewsDesc.SetViewOriginCoord(0, i, 0);
-            viewsDesc.SetViewSize(0, i, swizzledOutputInfo.GetShape()[i]);
-        }
-
-        if (!IsLayerSupported(__func__,
-                              armnn::IsSplitterSupported,
-                              m_Compute,
-                              swizzledInputInfo,
-                              viewsDesc))
-        {
-            return false;
-        }
-
-        startLayer = m_Network->AddSplitterLayer(viewsDesc);
-    }
-
-    armnn::IConnectableLayer* endLayer = ProcessActivation(swizzledOutputInfo, activation, startLayer);
-
-    if (endLayer != nullptr)
-    {
-        armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *startLayer, *endLayer);
-        return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
-    }
-    else
-    {
-        return Fail("%s: ProcessActivation failed", operationName);
-    }
-}
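-
-// Note: ConvertAveragePool2d, ConvertL2Pool2d and ConvertMaxPool2d all delegate to
-// ConvertPooling2d above, differing only in the armnn::PoolingAlgorithm they pass in.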
-
-template<typename HalVersion>
-const void* ModelToINetworkConverter<HalVersion>::GetOperandValueReadOnlyAddress(const Operand& operand) const
-{
-    const void* valueStart = nullptr;
-
-    switch (operand.lifetime)
-    {
-        case OperandLifeTime::CONSTANT_COPY:
-        {
-            // Constant found in model.operandValues
-            valueStart = &m_Model.operandValues[operand.location.offset];
-            break;
-        }
-        case OperandLifeTime::CONSTANT_REFERENCE:
-        {
-            // Constant specified via a Memory object
-            valueStart = GetMemoryFromPool(operand.location, m_MemPools);
-            break;
-        }
-        default:
-        {
-            // Unsupported/invalid (e.g. can't get value of an input to the model)
-            Fail("%s: unsupported/invalid operand lifetime: %s",
-                __func__, toString(operand.lifetime).c_str());
-            valueStart = nullptr;
-        }
-    }
-
-    return valueStart;
-}
-
-template<typename HalVersion>
-template<typename HalOperation>
-const Operand* ModelToINetworkConverter<HalVersion>::GetInputOperand(const HalOperation& operation,
-                                                                     uint32_t inputIndex) const
-{
-    if (inputIndex >= operation.inputs.size())
-    {
-        Fail("%s: invalid input index: %u out of %zu", __func__, inputIndex, operation.inputs.size());
-        return nullptr;
-    }
-
-    assert(operation.inputs[inputIndex] < m_Model.operands.size()); // Model should have been validated beforehand
-    return &m_Model.operands[operation.inputs[inputIndex]];
-}
-
-template<typename HalVersion>
-template<typename HalOperation>
-const Operand* ModelToINetworkConverter<HalVersion>::GetOutputOperand(const HalOperation& operation,
-                                                                      uint32_t outputIndex) const
-{
-    if (outputIndex >= operation.outputs.size())
-    {
-        Fail("%s: invalid output index: %u out of %zu", __func__, outputIndex, operation.outputs.size());
-        return nullptr;
-    }
-
-    assert(operation.outputs[outputIndex] < m_Model.operands.size()); // Model should have been validated beforehand
-    return &m_Model.operands[operation.outputs[outputIndex]];
-}
-
-template<typename HalVersion>
-template<typename HalOperation, typename T>
-bool ModelToINetworkConverter<HalVersion>::GetInputScalar(const HalOperation& operation,
-                                                          uint32_t inputIndex,
-                                                          OperandType type,
-                                                          T& outValue) const
-{
-    const Operand* operand = GetInputOperand(operation, inputIndex);
-    if (!operand)
-    {
-        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
-    }
-
-    if (operand->type != type)
-    {
-        return Fail("%s: unexpected operand type: %s (should be %s)",
-            __func__, toString(operand->type).c_str(), toString(type).c_str());
-    }
-
-    if (operand->location.length != sizeof(T))
-    {
-        return Fail("%s: incorrect operand location length: %u (should be %zu)",
-            __func__, operand->location.length, sizeof(T));
-    }
-
-    const void* valueAddress = GetOperandValueReadOnlyAddress(*operand);
-    if (!valueAddress)
-    {
-        return Fail("%s: failed to get address for operand", __func__);
-    }
-
-    outValue = *(static_cast<const T*>(valueAddress));
-    return true;
-}
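-
-// Example use (sketch, mirroring ConvertPooling2d above):
-//     uint32_t strideX = 0;
-//     if (!GetInputScalar(operation, 2, OperandType::INT32, strideX)) { /* fail */ }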
-
-template<typename HalVersion>
-template<typename HalOperation>
-bool ModelToINetworkConverter<HalVersion>::GetInputInt32(const HalOperation& operation,
-                                                         uint32_t inputIndex,
-                                                         int32_t& outValue) const
-{
-    return GetInputScalar(operation, inputIndex, OperandType::INT32, outValue);
-}
-
-template<typename HalVersion>
-template<typename HalOperation>
-bool ModelToINetworkConverter<HalVersion>::GetInputFloat32(const HalOperation& operation,
-                                                           uint32_t inputIndex,
-                                                           float& outValue) const
-{
-    return GetInputScalar(operation, inputIndex, OperandType::FLOAT32, outValue);
-}
-
-template<typename HalVersion>
-template<typename HalOperation>
-bool ModelToINetworkConverter<HalVersion>::GetInputActivationFunctionImpl(const HalOperation& operation,
-                                                                          uint32_t inputIndex,
-                                                                          OperandType type,
-                                                                          ActivationFn& outActivationFunction) const
-{
-    if (type != OperandType::INT32 && type != OperandType::TENSOR_INT32)
-    {
-        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
-                    __func__,
-                    toString(type).c_str(),
-                    toString(OperandType::INT32).c_str(),
-                    toString(OperandType::TENSOR_INT32).c_str());
-    }
-
-    int32_t activationFunctionAsInt;
-    if (!GetInputScalar(operation, inputIndex, type, activationFunctionAsInt))
-    {
-        return Fail("%s: failed to get activation input value", __func__);
-    }
-    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
-    return true;
-}
-
-template<typename HalVersion>
-template<typename HalOperation>
-bool ModelToINetworkConverter<HalVersion>::GetInputActivationFunction(const HalOperation& operation,
-                                                                      uint32_t inputIndex,
-                                                                      ActivationFn& outActivationFunction) const
-{
-    return GetInputActivationFunctionImpl(operation, inputIndex, OperandType::INT32, outActivationFunction);
-}
-
-template<typename HalVersion>
-template<typename HalOperation>
-bool ModelToINetworkConverter<HalVersion>::GetInputActivationFunctionFromTensor(
-    const HalOperation& operation,
-    uint32_t inputIndex,
-    ActivationFn& outActivationFunction) const
-{
-    // This only accepts a 1-D tensor of size 1
-    return GetInputActivationFunctionImpl(operation, inputIndex, OperandType::INT32, outActivationFunction);
-}
-
-template<typename HalVersion>
-template<typename HalOperation>
-bool ModelToINetworkConverter<HalVersion>::GetOptionalInputActivation(const HalOperation& operation,
-                                                                      uint32_t inputIndex,
-                                                                      ActivationFn& activationFunction) const
-{
-    if (operation.inputs.size() <= inputIndex)
-    {
-        activationFunction = ActivationFn::kActivationNone;
-    }
-    else
-    {
-        if (!GetInputActivationFunction(operation, inputIndex, activationFunction))
-        {
-            return Fail("%s: Operation has invalid inputs", __func__);
-        }
-    }
-    return true;
-}
-
-template<typename HalVersion>
-template<typename HalOperation>
-bool ModelToINetworkConverter<HalVersion>::GetInputPaddingScheme(const HalOperation& operation,
-                                                                 uint32_t inputIndex,
-                                                                 PaddingScheme& outPaddingScheme) const
-{
-    int32_t paddingSchemeAsInt;
-    if (!GetInputInt32(operation, inputIndex, paddingSchemeAsInt))
-    {
-        return Fail("%s: failed to get padding scheme input value", __func__);
-    }
-
-    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
-    return true;
-}
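-
-// The decoded value is one of the android::nn padding schemes: kPaddingSame (TF-style
-// implicit SAME padding) or kPaddingValid (no implicit padding).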
-
-template<typename HalVersion>
-template<typename HalOperation>
-LayerInputHandle ModelToINetworkConverter<HalVersion>::ConvertToLayerInputHandle(const HalOperation& operation,
-                                                                                 uint32_t inputIndex)
-{
-    const Operand* operand = GetInputOperand(operation, inputIndex);
-    if (!operand)
-    {
-        Fail("%s: failed to get input operand %i", __func__, inputIndex);
-        return LayerInputHandle();
-    }
-
-    if (!IsOperandTypeSupportedForTensors(operand->type))
-    {
-        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
-        return LayerInputHandle();
-    }
-
-    armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
-
-    switch (operand->lifetime)
-    {
-        case OperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
-        case OperandLifeTime::MODEL_INPUT:
-        {
-            // The tensor is either an operand internal to the model, or a model input.
-            // It can be associated with an ArmNN output slot for an existing layer.
-
-            // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
-            const uint32_t operandIndex = operation.inputs[inputIndex];
-            return LayerInputHandle(true, m_OutputSlotForOperand[operandIndex], operandTensorInfo);
-        }
-        case OperandLifeTime::CONSTANT_COPY:
-        case OperandLifeTime::CONSTANT_REFERENCE:
-        {
-            // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
-            ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand);
-            if (tensorPin.IsValid())
-            {
-                if (!IsLayerSupported(__func__,
-                                      armnn::IsConstantSupported,
-                                      m_Compute,
-                                      tensorPin.GetConstTensor().GetInfo()))
-                {
-                    return LayerInputHandle();
-                }
-
-                armnn::IConnectableLayer* constantLayer = m_Network->AddConstantLayer(tensorPin.GetConstTensor());
-                armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
-                outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());
-
-                return LayerInputHandle(true, &outputSlot, operandTensorInfo);
-            }
-            else
-            {
-                Fail("%s: invalid operand tensor", __func__);
-                return LayerInputHandle();
-            }
-        }
-        default:
-        {
-            // Unsupported lifetime for an input tensor
-            Fail("%s: unsupported lifetime for input tensor: %s",
-                __func__, toString(operand->lifetime).c_str());
-            return LayerInputHandle();
-        }
-    }
-}
-
-template<typename HalVersion>
-template<typename HalOperation>
-ConstTensorPin ModelToINetworkConverter<HalVersion>::ConvertOperationInputToConstTensorPin(
-    const HalOperation& operation,
-    uint32_t inputIndex,
-    const armnn::PermutationVector& dimensionMappings,
-    const armnn::TensorShape* overrideTensorShape,
-    bool optional)
-{
-    const Operand* operand = GetInputOperand(operation, inputIndex);
-    if (!operand)
-    {
-        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
-        return ConstTensorPin();
-    }
-    return ConvertOperandToConstTensorPin(*operand, dimensionMappings, overrideTensorShape, optional);
-}
-
-template<typename HalVersion>
-ConstTensorPin ModelToINetworkConverter<HalVersion>::ConvertOperandToConstTensorPin(const Operand& operand,
-    const armnn::PermutationVector& dimensionMappings, const armnn::TensorShape* overrideTensorShape, bool optional)
-{
-    if (!IsOperandTypeSupportedForTensors(operand.type))
-    {
-        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
-        return ConstTensorPin();
-    }
-
-    if (operand.lifetime != OperandLifeTime::CONSTANT_COPY && operand.lifetime != OperandLifeTime::CONSTANT_REFERENCE)
-    {
-        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
-        return ConstTensorPin();
-    }
-
-    const void* const valueStart = GetOperandValueReadOnlyAddress(operand);
-    if (!valueStart)
-    {
-        if (optional)
-        {
-            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
-            return ConstTensorPin(true);
-        }
-        // mandatory tensor with no values
-        Fail("%s: failed to get operand address", __func__);
-        return ConstTensorPin();
-    }
-
-    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
-    if (overrideTensorShape != nullptr)
-    {
-        tensorInfo.SetShape(*overrideTensorShape);
-    }
-    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
-}
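-
-// Example use (sketch): a convolution converter would typically fetch its weights with
-//     ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1);
-// and treat an invalid, non-optional pin as a conversion failure.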
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::GetTensorInt32Values(const Operand& operand,
-    std::vector<int32_t>& outValues) const
-{
-    if (operand.type != OperandType::TENSOR_INT32)
-    {
-        return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
-    }
-
-    const void* startAddress = GetOperandValueReadOnlyAddress(operand);
-    if (!startAddress)
-    {
-        return Fail("%s: failed to get operand address", __func__);
-    }
-
-    // Check number of bytes is sensible
-    const uint32_t numBytes = operand.location.length;
-    if (numBytes % sizeof(int32_t) != 0)
-    {
-        return Fail("%s: invalid number of bytes: %u, expected to be a multiple of %zu",
-            __func__, numBytes, sizeof(int32_t));
-    }
-
-    outValues.resize(numBytes / sizeof(int32_t));
-    memcpy(outValues.data(), startAddress, numBytes);
-    return true;
-}
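-
-// Example use (sketch; 'shapeOperand' stands for an operand fetched via GetInputOperand):
-//     std::vector<int32_t> targetShape;
-//     if (!GetTensorInt32Values(*shapeOperand, targetShape)) { /* fail */ }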
-
-// Creates an ArmNN activation layer and connects it to the given layer, if the
-// passed in AndroidNN activation function requires so.
-// @return The end layer of the sequence of layers built for the given AndroidNN
-// activation function or nullptr if an error occurred (e.g. unsupported activation).
-// Note that the end layer matches the input layer if no activation is required
-// (the sequence of layers has length 1).
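-// For example, kActivationRelu6 is realised as a BoundedReLu activation layer with upper
-// bound m_A = 6.0f connected after prevLayer, while kActivationNone adds no layer at all.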
-template<typename HalVersion>
-armnn::IConnectableLayer* ModelToINetworkConverter<HalVersion>::ProcessActivation(const armnn::TensorInfo& tensorInfo,
-    ActivationFn activation, armnn::IConnectableLayer* prevLayer)
-{
-    assert(prevLayer->GetNumOutputSlots() == 1);
-
-    prevLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    armnn::IConnectableLayer* activationLayer = prevLayer;
-
-    if (activation != ActivationFn::kActivationNone)
-    {
-        armnn::ActivationDescriptor activationDesc;
-        switch (activation)
-        {
-            case ActivationFn::kActivationRelu:
-            {
-                activationDesc.m_Function = armnn::ActivationFunction::ReLu;
-                break;
-            }
-            case ActivationFn::kActivationRelu1:
-            {
-                activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
-                activationDesc.m_A = 1.0f;
-                activationDesc.m_B = -1.0f;
-                break;
-            }
-            case ActivationFn::kActivationRelu6:
-            {
-                activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
-                activationDesc.m_A = 6.0f;
-                break;
-            }
-            case ActivationFn::kActivationSigmoid:
-            {
-                activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
-                break;
-            }
-            case ActivationFn::kActivationTanh:
-            {
-                activationDesc.m_Function = armnn::ActivationFunction::TanH;
-                activationDesc.m_A = 1.0f;
-                activationDesc.m_B = 1.0f;
-                break;
-            }
-            default:
-            {
-                Fail("%s: Invalid activation enum value %i", __func__, activation);
-                return nullptr;
-            }
-        }
-
-        if (!IsLayerSupported(__func__, armnn::IsActivationSupported, m_Compute,
-                              prevLayer->GetOutputSlot(0).GetTensorInfo(), tensorInfo, activationDesc))
-        {
-            return nullptr;
-        }
-
-        activationLayer = m_Network->AddActivationLayer(activationDesc);
-
-        prevLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
-        activationLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-    }
-
-    return activationLayer;
-}
-
-template<typename HalVersion>
-template<typename HalOperation>
-bool ModelToINetworkConverter<HalVersion>::SetupAndTrackLayerOutputSlot(const HalOperation& operation,
-                                                                        uint32_t operationOutputIndex,
-                                                                        armnn::IConnectableLayer& layer,
-                                                                        uint32_t layerOutputIndex)
-{
-    const Operand* outputOperand = GetOutputOperand(operation, operationOutputIndex);
-
-    if ((outputOperand == nullptr) || (layerOutputIndex >= layer.GetNumOutputSlots()))
-    {
-        return false;
-    }
-
-    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
-
-    const uint32_t operandIndex = operation.outputs[operationOutputIndex];
-    m_OutputSlotForOperand[operandIndex] = &outputSlot;
-
-    outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
-
-    return true;
-}
-
-template<typename HalVersion>
-template<typename HalOperation>
-bool ModelToINetworkConverter<HalVersion>::SetupAndTrackLayerOutputSlot(const HalOperation& operation,
-                                                                        uint32_t outputIndex,
-                                                                        armnn::IConnectableLayer& layer)
-{
-    return SetupAndTrackLayerOutputSlot(operation, outputIndex, layer, outputIndex);
-}
-
-template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::IsOperationSupported(uint32_t operationIndex) const
+template<typename HalPolicy>
+bool ModelToINetworkConverter<HalPolicy>::IsOperationSupported(uint32_t operationIndex) const
 {
     std::map<uint32_t, bool>::const_iterator it = m_OperationSupported.find(operationIndex);
     assert(it != m_OperationSupported.end());
     return it->second;
 }
 
-template class ModelToINetworkConverter<HalVersion_1_0>;
+///
+/// Class template specializations
+///
 
-#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
-template class ModelToINetworkConverter<HalVersion_1_1>;
+template class ModelToINetworkConverter<hal_1_0::HalPolicy>;
+
+#if defined(ARMNN_ANDROID_NN_V1_1)
+template class ModelToINetworkConverter<hal_1_1::HalPolicy>;
 #endif
 
 } // armnn_driver
diff --git a/ModelToINetworkConverter.hpp b/ModelToINetworkConverter.hpp
index 5cdfeb5..a3758fd 100644
--- a/ModelToINetworkConverter.hpp
+++ b/ModelToINetworkConverter.hpp
@@ -6,27 +6,15 @@
 #pragma once
 
 #include "ArmnnDriver.hpp"
-#include "ArmnnDriverImpl.hpp"
-
-#include <NeuralNetworks.h>
-#include <ActivationFunctor.h>
+#include "ConversionUtils.hpp"
 
 #include <armnn/ArmNN.hpp>
-#include <armnn/INetwork.hpp>
-#include <CpuExecutor.h>
 
-#include "Utils.hpp"
-
-#include <memory>
-#include <vector>
 #include <set>
 
 namespace armnn_driver
 {
 
-class ConstTensorPin;
-class LayerInputHandle;
-
 enum class ConversionResult
 {
     Success,
@@ -34,13 +22,13 @@
     UnsupportedFeature
 };
 
-// A helper performing the conversion from an AndroidNN driver Model representation,
+// A helper template class performing the conversion from an AndroidNN driver Model representation,
 // to an armnn::INetwork object
-template<typename HalVersion>
+template<typename HalPolicy>
 class ModelToINetworkConverter
 {
 public:
-    using HalModel = typename HalVersion::Model;
+    using HalModel = typename HalPolicy::Model;
 
     ModelToINetworkConverter(armnn::Compute compute,
                              const HalModel& model,
@@ -49,160 +37,26 @@
     ConversionResult GetConversionResult() const { return m_ConversionResult; }
 
     // Returns the ArmNN INetwork corresponding to the input model, if preparation went smoothly; nullptr otherwise.
-    armnn::INetwork* GetINetwork() const { return m_Network.get(); }
+    armnn::INetwork* GetINetwork() const { return m_Data.m_Network.get(); }
 
     bool IsOperationSupported(uint32_t operationIndex) const;
 
 private:
     void Convert();
 
-#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
-    bool ConvertOperation(const ::android::hardware::neuralnetworks::V1_1::Operation& operation);
-
-    bool ConvertDiv(const ::android::hardware::neuralnetworks::V1_1::Operation& operation);
-#endif
-
-    bool ConvertOperation(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertAdd(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertAveragePool2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertConcatenation(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertConv2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertDepthwiseConv2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertFloor(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertFullyConnected(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertLogistic(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertLocalResponseNormalization(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertL2Normalization(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertL2Pool2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertMaxPool2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertMul(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertReLu(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertReLu1(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertReLu6(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertSoftmax(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertTanH(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertReshape(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertResizeBilinear(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertLstm(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
-    bool ConvertToActivation(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
-                             const char* operationName,
-                             const armnn::ActivationDescriptor& activationDesc);
-
-    bool ConvertPooling2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
-                          const char* name, armnn::PoolingAlgorithm poolType);
-
-    const void* GetOperandValueReadOnlyAddress(const Operand& operand) const;
-
-    template<typename HalOperation>
-    const Operand* GetInputOperand(const HalOperation& operation, uint32_t inputIndex) const;
-
-    template<typename HalOperation>
-    const Operand* GetOutputOperand(const HalOperation& operation, uint32_t outputIndex) const;
-
-    template<typename HalOperation, typename T>
-    bool GetInputScalar(const HalOperation& operation, uint32_t inputIndex, OperandType type, T& outValue) const;
-
-    template<typename HalOperation>
-    bool GetInputInt32(const HalOperation& operation, uint32_t inputIndex, int32_t& outValue) const;
-
-    template<typename HalOperation>
-    bool GetInputFloat32(const HalOperation& operation, uint32_t inputIndex, float& outValue) const;
-
-    template<typename HalOperation>
-    bool GetInputActivationFunctionImpl(const HalOperation& operation,
-                                        uint32_t inputIndex,
-                                        OperandType type,
-                                        ActivationFn& outActivationFunction) const;
-
-    template<typename HalOperation>
-    bool GetInputActivationFunction(const HalOperation& operation,
-                                    uint32_t inputIndex,
-                                    ActivationFn& outActivationFunction) const;
-
-    template<typename HalOperation>
-    bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
-                                              uint32_t inputIndex,
-                                              ActivationFn& outActivationFunction) const;
-
-    template<typename HalOperation>
-    bool GetOptionalInputActivation(const HalOperation& operation,
-                                    uint32_t inputIndex,
-                                    ActivationFn& activationFunction) const;
-
-    template<typename HalOperation>
-    bool GetInputPaddingScheme(const HalOperation& operation,
-                               uint32_t inputIndex,
-                               android::nn::PaddingScheme& outPaddingScheme) const;
-
-    template<typename HalOperation>
-    LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation, uint32_t inputIndex);
-
-    template<typename HalOperation>
-    ConstTensorPin ConvertOperationInputToConstTensorPin(
-        const HalOperation& operation,
-        uint32_t inputIndex,
-        const armnn::PermutationVector& dimensionMappings = g_DontPermute,
-        const armnn::TensorShape* overrideTensorShape = nullptr,
-        bool optional = false);
-
-    ConstTensorPin ConvertOperandToConstTensorPin(
-        const Operand& operand,
-        const armnn::PermutationVector& dimensionMappings = g_DontPermute,
-        const armnn::TensorShape* overrideTensorShape = nullptr,
-        bool optional = false);
-
-    bool GetTensorInt32Values(const Operand& operand, std::vector<int32_t>& outValues) const;
-
-    armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
-                                                ActivationFn activation,
-                                                armnn::IConnectableLayer* prevLayer);
-
-    template<typename HalOperation>
-    bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
-                                      uint32_t operationOutputIndex,
-                                      armnn::IConnectableLayer& layer,
-                                      uint32_t layerOutputIndex);
-
-    template<typename HalOperation>
-    bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
-                                      uint32_t outputIndex,
-                                      armnn::IConnectableLayer& layer);
+    // Shared aggregate input/output/internal data
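+    // (Defined in ConversionUtils.hpp; it bundles what used to be separate members here:
+    // the target compute device, the INetworkPtr under construction, the output slot
+    // tracked for each operand, and the memory pools backing CONSTANT_REFERENCE operands.)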
+    ConversionData m_Data;
 
     // Input data
-    armnn::Compute                m_Compute;
     const HalModel&               m_Model;
     const std::set<unsigned int>& m_ForcedUnsupportedOperations;
 
     // Output data
-    armnn::INetworkPtr       m_Network;
     ConversionResult         m_ConversionResult;
     std::map<uint32_t, bool> m_OperationSupported;
-
-    // Working/intermediate data
-    std::vector<armnn::IOutputSlot*>          m_OutputSlotForOperand;
-    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
 };
 
 } // armnn_driver
diff --git a/RequestThread.cpp b/RequestThread.cpp
index aedd607..0b06b51 100644
--- a/RequestThread.cpp
+++ b/RequestThread.cpp
@@ -8,10 +8,10 @@
 #include "RequestThread.hpp"
 #include "ArmnnPreparedModel.hpp"
 
-#include <log/log.h>
-
 #include <boost/assert.hpp>
 
+#include <log/log.h>
+
 using namespace android;
 
 namespace armnn_driver
@@ -131,12 +131,14 @@
     }
 }
 
-// Class template specializations
-template class RequestThread<HalVersion_1_0>;
+///
+/// Class template specializations
+///
 
-#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
-template class RequestThread<HalVersion_1_1>;
+template class RequestThread<hal_1_0::HalPolicy>;
+
+#if defined(ARMNN_ANDROID_NN_V1_1)
+template class RequestThread<hal_1_1::HalPolicy>;
 #endif
 
-} // namespace armnn_driver
-
+} // namespace armnn_driver
diff --git a/RequestThread.hpp b/RequestThread.hpp
index 23b71e5..53f145b 100644
--- a/RequestThread.hpp
+++ b/RequestThread.hpp
@@ -104,5 +104,4 @@
     std::condition_variable m_Cv;
 };
 
-} // namespace armnn_driver
-
+} // namespace armnn_driver
diff --git a/Utils.hpp b/Utils.hpp
index a4402f2..812dfbd 100644
--- a/Utils.hpp
+++ b/Utils.hpp
@@ -5,12 +5,11 @@
 
 #pragma once
 
-#include "ArmnnDriver.hpp"
-
-#include <NeuralNetworks.h>
-
 #include <armnn/ArmNN.hpp>
+
 #include <CpuExecutor.h>
+#include <HalInterfaces.h>
+#include <NeuralNetworks.h>
 
 #include <boost/format.hpp>
 #include <log/log.h>
@@ -131,4 +130,4 @@
     }
 }
 
-}
+} // namespace armnn_driver