Add ConstTensorsAsInput support for Conv3d

 * Constant weights and biases are now stored as Constant layers
   and connected to Conv3d as inputs (see the sketch below).
 * Updated Serializer, Deserializer and unit tests to reflect this.
 * Updated TfLiteParser.
 * Updated Ref backend to handle constant weights and
   biases as inputs rather than reading them from member variables.
 * Added Conv3d EndToEnd test.
 * Added NCDHW DataLayout and unit tests.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I10cdd354ca5f1c748730f92ffdb36bf810f83c8e
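
For reference, a minimal sketch (not part of the patch) of the new graph wiring,
following the pattern used by the end-to-end test added below: weights and biases
are added as Constant layers and connected to input slots 1 and 2 of the Conv3d
layer, instead of being set as m_Weight/m_Bias on the queue descriptor. The helper
name and the stride/layout values are illustrative only.

    #include <armnn/INetwork.hpp>

    // Illustrative helper (hypothetical, not part of this change): builds a graph
    // where constant weights and bias feed Conv3d input slots 1 and 2.
    // Note: the TensorInfos backing 'weights' and 'biases' must be marked constant,
    // as in the end-to-end test below.
    armnn::INetworkPtr BuildConv3dGraph(const armnn::TensorInfo& inputInfo,
                                        const armnn::TensorInfo& outputInfo,
                                        const armnn::ConstTensor& weights,
                                        const armnn::ConstTensor& biases)
    {
        using namespace armnn;

        Convolution3dDescriptor descriptor;
        descriptor.m_StrideX     = 2;
        descriptor.m_StrideY     = 2;
        descriptor.m_StrideZ     = 2;
        descriptor.m_BiasEnabled = true;
        descriptor.m_DataLayout  = DataLayout::NDHWC;

        INetworkPtr network = INetwork::Create();
        IConnectableLayer* input        = network->AddInputLayer(0, "input");
        IConnectableLayer* weightsLayer = network->AddConstantLayer(weights, "weights");
        IConnectableLayer* biasLayer    = network->AddConstantLayer(biases, "bias");
        IConnectableLayer* conv3d       = network->AddConvolution3dLayer(descriptor, "conv3d");
        IConnectableLayer* output       = network->AddOutputLayer(0, "output");

        // Slot 0: data input, slot 1: constant weights, slot 2: constant bias.
        input->GetOutputSlot(0).Connect(conv3d->GetInputSlot(0));
        weightsLayer->GetOutputSlot(0).Connect(conv3d->GetInputSlot(1));
        biasLayer->GetOutputSlot(0).Connect(conv3d->GetInputSlot(2));
        conv3d->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        input->GetOutputSlot(0).SetTensorInfo(inputInfo);
        weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
        biasLayer->GetOutputSlot(0).SetTensorInfo(biases.GetInfo());
        conv3d->GetOutputSlot(0).SetTensorInfo(outputInfo);

        return network;
    }
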
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 27b59ea..2716c82 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1320,7 +1320,12 @@
 {
     const std::string descriptorName{"Convolution3dQueueDescriptor"};
 
-    ValidateNumInputs(workloadInfo,  descriptorName, 1);
+    uint32_t numInputs = 2;
+    if (m_Parameters.m_BiasEnabled)
+    {
+        numInputs = 3;
+    }
+    ValidateNumInputs(workloadInfo,  descriptorName, numInputs);
     ValidateNumOutputs(workloadInfo, descriptorName, 1);
 
     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
@@ -1329,9 +1334,7 @@
     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 5, "input");
     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 5, "output");
 
-    ValidatePointer(m_Weight, descriptorName, "weight");
-
-    const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
+    const TensorInfo& weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
     ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 5, "weight");
 
     ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
@@ -1339,9 +1342,7 @@
     Optional<TensorInfo> optionalBiasTensorInfo;
     if (m_Parameters.m_BiasEnabled)
     {
-        ValidatePointer(m_Bias, descriptorName, "bias");
-
-        optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
+        optionalBiasTensorInfo = MakeOptional<TensorInfo>(workloadInfo.m_InputTensorInfos[2]);
         const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
 
         ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 29d39d1..4e56aaf 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -208,18 +208,9 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
-// Convolution 2D layer workload data.
+// Convolution 3D layer workload data.
 struct Convolution3dQueueDescriptor : QueueDescriptorWithParameters<Convolution3dDescriptor>
 {
-    Convolution3dQueueDescriptor()
-        : m_Weight(nullptr)
-        , m_Bias(nullptr)
-    {
-    }
-
-    const ConstTensorHandle* m_Weight;
-    const ConstTensorHandle* m_Bias;
-
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 3b7f3a0..55ce355 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -250,7 +250,11 @@
             const TensorInfo input  = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                        dataType);
             const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
-            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
+
+            ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
+                             "Convolution3dLayer: Weights should be connected as a Constant Layer.");
+            const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
+                                                        dataType);
 
             const Convolution3dDescriptor& descriptor = cLayer->GetParameters();
 
@@ -258,14 +262,15 @@
             Optional<TensorInfo> biases;
             if (descriptor.m_BiasEnabled)
             {
-                biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
+                biases = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
+                                          GetBiasTypeFromWeightsType(dataType));
             }
 
             result = layerSupportObject.IsConvolution3dSupported(
                                               input,
                                               output,
                                               descriptor,
-                                              OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
+                                              weights,
                                               biases,
                                               reason);
             break;
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index e3221c5..b90407f 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -13,6 +13,7 @@
     ChannelShuffleEndToEndTestImpl.hpp
     ComparisonEndToEndTestImpl.hpp
     CompatibilityTests.cpp
+    Convolution3dEndToEndTestImpl.hpp
     CustomMemoryOptimizerStrategyTests.cpp
     DefaultAsyncExecuteTest.cpp
     DepthToSpaceEndToEndTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp
new file mode 100644
index 0000000..33bf9a1
--- /dev/null
+++ b/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp
@@ -0,0 +1,167 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "EndToEndTestImpl.hpp"
+#include "QuantizeHelper.hpp"
+
+#include <ResolveType.hpp>
+
+#include <backendsCommon/test/CommonTestUtils.hpp>
+#include <backendsCommon/test/DataLayoutUtils.hpp>
+
+#include <map>
+#include <vector>
+
+namespace
+{
+
+armnn::INetworkPtr CreateConvolution3dNetwork(const armnn::Convolution3dDescriptor& descriptor,
+                                              const armnn::TensorInfo& inputInfo,
+                                              const armnn::TensorInfo& weightsInfo,
+                                              const armnn::TensorInfo& biasInfo,
+                                              const armnn::TensorInfo& outputInfo,
+                                              const armnn::ConstTensor& weights,
+                                              const armnn::ConstTensor& biases)
+{
+    using namespace armnn;
+
+    INetworkPtr network(INetwork::Create());
+    IConnectableLayer* input = network->AddInputLayer(0, "input");
+    armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights, "Weights");
+    armnn::IConnectableLayer* biasLayer = network->AddConstantLayer(biases, "Bias");
+    IConnectableLayer* convolution3d = network->AddConvolution3dLayer(descriptor, "convolution3d");
+    IConnectableLayer* output = network->AddOutputLayer(0, "output");
+
+    Connect(input, convolution3d, inputInfo, 0, 0);
+    Connect(weightsLayer, convolution3d, weightsInfo, 0, 1);
+    Connect(biasLayer, convolution3d, biasInfo, 0, 2);
+    Connect(convolution3d, output, outputInfo, 0, 0);
+
+    return network;
+}
+
+} // anonymous namespace
+
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType>
+void Convolution3dEndToEnd(const std::vector<armnn::BackendId>& backends,
+                           armnn::DataLayout dataLayout)
+{
+    using namespace armnn;
+    using T  = ResolveType<ArmnnType>;
+    using BT = ResolveType<ArmnnBType>;
+
+    const float   qScale  = IsQuantizedType<T>() ? 0.25f : 1.0f;
+    const int32_t qOffset = IsQuantizedType<T>() ? 50    : 0;
+
+    TensorInfo inputInfo({ 1, 5, 5, 5, 1 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputInfo({ 1, 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
+    TensorInfo weightsInfo({ 3, 3, 3, 1, 1 }, ArmnnType, qScale, qOffset, true);
+    TensorInfo biasesInfo({ 1 }, ArmnnBType, qScale * qScale, 0, true);
+
+    std::vector<float> inputData =
+    {
+        0.0f,  1.0f,  2.0f,  3.0f,  4.0f,
+        5.0f,  6.0f,  7.0f,  8.0f,  9.0f,
+        10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
+        15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
+
+        20.0f, 21.0f, 22.0f, 23.0f, 24.0f,
+        25.0f, 26.0f, 27.0f, 28.0f, 29.0f,
+        30.0f, 31.0f, 32.0f, 33.0f, 34.0f,
+        35.0f, 36.0f, 37.0f, 38.0f, 39.0f,
+        40.0f, 41.0f, 42.0f, 43.0f, 44.0f,
+
+        45.0f, 46.0f, 47.0f, 48.0f, 49.0f,
+        50.0f, 51.0f, 52.0f, 53.0f, 54.0f,
+        55.0f, 56.0f, 57.0f, 58.0f, 59.0f,
+        60.0f, 61.0f, 62.0f, 63.0f, 64.0f,
+        65.0f, 66.0f, 67.0f, 68.0f, 69.0f,
+
+        70.0f, 71.0f, 72.0f, 73.0f, 74.0f,
+        75.0f, 76.0f, 77.0f, 78.0f, 79.0f,
+        80.0f, 81.0f, 82.0f, 83.0f, 84.0f,
+        85.0f, 86.0f, 87.0f, 88.0f, 89.0f,
+        90.0f, 91.0f, 92.0f, 93.0f, 94.0f,
+        95.0f, 96.0f, 97.0f, 98.0f, 99.0f,
+
+        100.0f, 101.0f, 102.0f, 103.0f, 104.0f,
+        105.0f, 106.0f, 107.0f, 108.0f, 109.0f,
+        110.0f, 111.0f, 112.0f, 113.0f, 114.0f,
+        115.0f, 116.0f, 117.0f, 118.0f, 119.0f,
+        120.0f, 121.0f, 122.0f, 123.0f, 124.0f
+    };
+
+    std::vector<float> weightsData =
+    {
+        1.0f, 1.0f, 1.0f,
+        1.0f, 1.0f, 1.0f,
+        1.0f, 1.0f, 1.0f,
+
+        0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f,
+
+        1.0f, 1.0f, 1.0f,
+        1.0f, 1.0f, 1.0f,
+        1.0f, 1.0f, 1.0f,
+    };
+
+    std::vector<float> biasesData = { 1.f };
+
+    std::vector<float> expectedOutputData =
+    {
+        559.0f, 595.0f,
+
+        739.0f, 775.0f,
+
+        1459.0f, 1495.0f,
+
+        1639.0f, 1675.0f,
+    };
+
+    Convolution3dDescriptor descriptor;
+    descriptor.m_PadLeft     = 0;
+    descriptor.m_PadRight    = 0;
+    descriptor.m_PadTop      = 0;
+    descriptor.m_PadBottom   = 0;
+    descriptor.m_PadFront    = 0;
+    descriptor.m_PadBack     = 0;
+    descriptor.m_StrideX     = 2;
+    descriptor.m_StrideY     = 2;
+    descriptor.m_StrideZ     = 2;
+    descriptor.m_BiasEnabled = true;
+    descriptor.m_DataLayout  = dataLayout;
+
+    // Permute input and output if NCDHW.
+    if (dataLayout == DataLayout::NCDHW)
+    {
+        PermuteTensorNdhwcToNcdhw(inputInfo, inputData);
+        PermuteTensorNdhwcToNcdhw(outputInfo, expectedOutputData);
+    }
+
+    // Quantize data
+    std::vector<T> qInputData          = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+    std::vector<T> qWeightsData        = armnnUtils::QuantizedVector<T>(weightsData, qScale, qOffset);
+    std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
+
+    std::vector<BT> qBiasesData = armnnUtils::QuantizedVector<BT>(biasesData, qScale * qScale, 0);
+
+    ConstTensor weights(weightsInfo, qWeightsData);
+    ConstTensor biases(biasesInfo, qBiasesData);
+
+    INetworkPtr network = CreateConvolution3dNetwork(descriptor,
+                                                     inputInfo,
+                                                     weightsInfo,
+                                                     biasesInfo,
+                                                     outputInfo,
+                                                     weights,
+                                                     biases);
+
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
+                                                { { 0, qInputData } },
+                                                { { 0, qExpectedOutputData } },
+                                                backends);
+}
diff --git a/src/backends/backendsCommon/test/DataLayoutUtils.hpp b/src/backends/backendsCommon/test/DataLayoutUtils.hpp
index 9411212..89b3900 100644
--- a/src/backends/backendsCommon/test/DataLayoutUtils.hpp
+++ b/src/backends/backendsCommon/test/DataLayoutUtils.hpp
@@ -34,3 +34,27 @@
 
     tensorData = tmp;
 }
+
+template<typename T>
+void PermuteTensorNdhwcToNcdhw(armnn::TensorInfo& tensorInfo, std::vector<T>& tensorData)
+{
+    const armnn::PermutationVector ndhwcToNcdhw = { 0, 2, 3, 4, 1 };
+
+    tensorInfo = armnnUtils::Permuted(tensorInfo, ndhwcToNcdhw);
+
+    std::vector<T> tmp(tensorData.size());
+    armnnUtils::Permute(tensorInfo.GetShape(), ndhwcToNcdhw, tensorData.data(), tmp.data(), sizeof(T));
+    tensorData = tmp;
+}
+
+template<typename T>
+void PermuteTensorNcdhwToNdhwc(armnn::TensorInfo& tensorInfo, std::vector<T>& tensorData)
+{
+    const armnn::PermutationVector ncdhwToNdhwc = { 0, 4, 1, 2, 3 };
+
+    tensorInfo = armnnUtils::Permuted(tensorInfo, ncdhwToNdhwc);
+
+    std::vector<T> tmp(tensorData.size());
+    armnnUtils::Permute(tensorInfo.GetShape(), ncdhwToNdhwc, tensorData.data(), tmp.data(), sizeof(T));
+    tensorData = tmp;
+}
diff --git a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
index c3a6aa1..f9bdfde 100644
--- a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
@@ -407,7 +407,7 @@
             }
             catch (const LayerValidationException& exc)
             {
-                CHECK(strcmp(exc.what(), "FullyConnected layer weights not set: Input slot(s) 1 not connected "
+                CHECK(strcmp(exc.what(), "Fully_Connected layer weights not set: Input slot(s) 1 not connected "
                                          "to an output slot on FullyConnected layer \"Fully_Connected\"") == 0);
             }
         }
@@ -434,7 +434,7 @@
             }
             catch (const LayerValidationException& exc)
             {
-                CHECK(strcmp(exc.what(), "FullyConnected layer bias not set: Input slot(s) 2 not connected "
+                CHECK(strcmp(exc.what(), "Fully_Connected layer bias not set: Input slot(s) 2 not connected "
                                          "to an output slot on FullyConnected layer \"Fully_Connected\"") == 0);
             }
         }
@@ -457,7 +457,7 @@
         }
         catch (const LayerValidationException& exc)
         {
-            CHECK(strcmp(exc.what(), "FullyConnected layer weights and bias not set: Input slot(s) 1 & 2 not "
+            CHECK(strcmp(exc.what(), "Fully_Connected layer weights and bias not set: Input slot(s) 1 & 2 not "
                                      "connected to an output slot on FullyConnected layer \"Fully_Connected\"") == 0);
         }
 
diff --git a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
index 259272d..1406ab0 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
@@ -11,6 +11,7 @@
 
 #include <backendsCommon/TensorHandle.hpp>
 
+#include <backendsCommon/test/DataLayoutUtils.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -228,23 +229,20 @@
                         biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset());
     }
 
-    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ScopedTensorHandle weightsTensor(kernelDesc);
-    AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
-
-    armnn::ScopedTensorHandle biasTensor(biasDesc);
-    if (biasEnabled)
+    // Permute input and output if data layout is NCDHW.
+    if (dataLayout == armnn::DataLayout::NCDHW)
     {
-        AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
+        PermuteTensorNdhwcToNcdhw(inputTensorInfo, inputData);
+        PermuteTensorNdhwcToNcdhw(outputTensorInfo, outputData);
     }
 
+    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
+    std::unique_ptr<armnn::ITensorHandle> input0Handle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> input1Handle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
     armnn::Convolution3dQueueDescriptor data;
-    data.m_Weight = &weightsTensor;
-    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - it can be a source of bugs.
     data.m_Parameters.m_StrideX = strideX;
     data.m_Parameters.m_StrideY = strideY;
     data.m_Parameters.m_StrideZ = strideZ;
@@ -261,14 +259,29 @@
     data.m_Parameters.m_BiasEnabled = biasEnabled;
 
     armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddInputToWorkload(data, info, inputTensorInfo, input0Handle.get());
+    AddInputToWorkload(data, info, kernelDesc, input1Handle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
+    std::unique_ptr<armnn::ITensorHandle> input2Handle = nullptr;
+    if (biasEnabled)
+    {
+        input2Handle = tensorHandleFactory.CreateTensorHandle(biasDesc);
+        AddInputToWorkload(data, info, biasDesc, input2Handle.get());
+    }
+
     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution3d(data, info);
-    inputHandle->Allocate();
+    input0Handle->Allocate();
+    input1Handle->Allocate();
     outputHandle->Allocate();
 
-    CopyDataToITensorHandle(inputHandle.get(), inputData.data());
+    CopyDataToITensorHandle(input0Handle.get(), inputData.data());
+    CopyDataToITensorHandle(input1Handle.get(), kernel.data());
+    if (biasEnabled)
+    {
+        input2Handle->Allocate();
+        CopyDataToITensorHandle(input2Handle.get(), bias.data());
+    }
 
     ExecuteWorkload(*workload, memoryManager);
 
@@ -840,40 +853,44 @@
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 LayerTestResult<int8_t, 5> SimpleConvolution3d3x3x3Int8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 LayerTestResult<uint8_t, 5> SimpleConvolution3d3x3x3Uint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 LayerTestResult<int16_t, 5> SimpleConvolution3d3x3x3Int16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 
@@ -881,158 +898,174 @@
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 LayerTestResult<int8_t, 5> Convolution3d2x2x2Strides3x5x5Int8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 LayerTestResult<uint8_t, 5> Convolution3d2x2x2Strides3x5x5Uint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 LayerTestResult<int16_t, 5> Convolution3d2x2x2Strides3x5x5Int16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 LayerTestResult<float, 5> Convolution3d2x2x2Dilation2x2x2Float32Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 LayerTestResult<int8_t, 5> Convolution3d2x2x2Dilation2x2x2Int8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 LayerTestResult<uint8_t, 5> Convolution3d2x2x2Dilation2x2x2Uint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 LayerTestResult<int16_t, 5> Convolution3d2x2x2Dilation2x2x2Int16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 LayerTestResult<float, 5> Convolution3dPaddingSame3x3x3Float32Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 LayerTestResult<int8_t, 5> Convolution3dPaddingSame3x3x3Int8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 LayerTestResult<uint8_t, 5> Convolution3dPaddingSame3x3x3Uint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 LayerTestResult<int16_t, 5> Convolution3dPaddingSame3x3x3Int16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 LayerTestResult<float, 5> Convolution3dStrideDilationPadding3x3x3Float32Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return Convolution3dStrideDilationPadding3x3x3TestCommonFloat32(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 LayerTestResult<float, 5> Convolution3d2x2x2Stride3x3x3SmallFloat32Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return Convolution3d2x2x2Stride3x3x3SmallTestCommonFloat32(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 LayerTestResult<armnn::Half, 5> Convolution3d2x3x3Float16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return Convolution3d2x3x3TestCommonFloat16(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
 
 LayerTestResult<armnn::Half, 5> Convolution3d2x2x2SmallFloat16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled)
+        bool biasEnabled,
+        armnn::DataLayout dataLayout)
 {
     return Convolution3d2x2x2SmallTestCommonFloat16(
-            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+            workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
 }
diff --git a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.hpp
index a07c183..c612e19 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.hpp
@@ -24,118 +24,138 @@
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<int8_t , 5> SimpleConvolution3d3x3x3Int8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<uint8_t, 5> SimpleConvolution3d3x3x3Uint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<int16_t, 5> SimpleConvolution3d3x3x3Int16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<float, 5> Convolution3d2x2x2Strides3x5x5Float32Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<int8_t , 5> Convolution3d2x2x2Strides3x5x5Int8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<uint8_t, 5> Convolution3d2x2x2Strides3x5x5Uint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<int16_t, 5> Convolution3d2x2x2Strides3x5x5Int16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<float, 5> Convolution3d2x2x2Dilation2x2x2Float32Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<int8_t , 5> Convolution3d2x2x2Dilation2x2x2Int8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<uint8_t, 5> Convolution3d2x2x2Dilation2x2x2Uint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<int16_t, 5> Convolution3d2x2x2Dilation2x2x2Int16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<float, 5> Convolution3dPaddingSame3x3x3Float32Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<int8_t , 5> Convolution3dPaddingSame3x3x3Int8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<uint8_t, 5> Convolution3dPaddingSame3x3x3Uint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<int16_t, 5> Convolution3dPaddingSame3x3x3Int16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<float, 5> Convolution3dStrideDilationPadding3x3x3Float32Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<float, 5> Convolution3d2x2x2Stride3x3x3SmallFloat32Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<armnn::Half, 5> Convolution3d2x3x3Float16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);
 
 LayerTestResult<armnn::Half, 5> Convolution3d2x2x2SmallFloat16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::ITensorHandleFactory& tensorHandleFactory,
-        bool biasEnabled);
+        bool biasEnabled,
+        armnn::DataLayout dataLayout);