IVGCVSW-1888 Plumb data layout parameter for Convolution2D

	* Added the DataLayout parameter to the Convolution2dDescriptor
	* Added the DataLayout parameter to the Convolution2dQueueDescriptor
	* Set the DataLayout on the Descriptor in CreateWorkload()
	* Added overloaded factory methods for CreateTensorHandle()
	* Updated BuildArmComputeTensorInfo() to take a DataLayout parameter
	* Updated tensor handles to take a DataLayout parameter
	* Updated (Cl/Neon)Convolution2dWorkloadValidate
	* Updated (Cl/Neon)Convolution2dFloatWorkload
	* Updated (Cl/Neon)Convolution2dUint8Workload
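
A minimal usage sketch of the new overloads (illustrative only, not part
of this patch; "factory" stands for any IWorkloadFactory implementation
and the shape is arbitrary):

    const unsigned int shape[] = {1, 16, 16, 3};
    armnn::TensorInfo info(4, shape, armnn::DataType::Float32);

    // Build an arm_compute::TensorInfo tagged with an explicit layout
    // via the new BuildArmComputeTensorInfo() overload.
    arm_compute::TensorInfo aclInfo =
        armnn::armcomputetensorutils::BuildArmComputeTensorInfo(info, armnn::DataLayout::NHWC);

    // Create a backend tensor handle carrying the same layout via the
    // new CreateTensorHandle() overload.
    std::unique_ptr<armnn::ITensorHandle> handle =
        factory.CreateTensorHandle(info, armnn::DataLayout::NHWC);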

Change-Id: I8410668b3d727ca587bee66755cc4c4c78422f1f
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 8940e0b..dfd532f 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -216,6 +216,7 @@
     , m_StrideX(0)
     , m_StrideY(0)
     , m_BiasEnabled(false)
+    , m_DataLayout(DataLayout::NCHW)
     {};
 
     uint32_t             m_PadLeft;
@@ -225,6 +226,7 @@
     uint32_t             m_StrideX;
     uint32_t             m_StrideY;
     bool                 m_BiasEnabled;
+    DataLayout           m_DataLayout;
 };
 
 struct DepthwiseConvolution2dDescriptor
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index 71f54b8..07d6d7e 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -26,6 +26,9 @@
     Convolution2dQueueDescriptor descriptor;
 
     descriptor.m_Weight = m_Weight.get();
+
+    descriptor.m_DataLayout = GetParameters().m_DataLayout;
+
     if (m_Param.m_BiasEnabled)
     {
         BOOST_ASSERT_MSG(m_Bias != nullptr, "Convolution2dLayer: Bias data should not be null.");
diff --git a/src/backends/ArmComputeTensorUtils.cpp b/src/backends/ArmComputeTensorUtils.cpp
index ba9fb40..e65c4ad 100644
--- a/src/backends/ArmComputeTensorUtils.cpp
+++ b/src/backends/ArmComputeTensorUtils.cpp
@@ -5,6 +5,7 @@
 #include "ArmComputeTensorUtils.hpp"
 #include "ArmComputeUtils.hpp"
 
+#include "armnn/Exceptions.hpp"
 #include <armnn/Descriptors.hpp>
 
 namespace armnn
@@ -66,6 +67,33 @@
     return arm_compute::TensorInfo(aclTensorShape, 1, aclDataType, aclQuantizationInfo);
 }
 
+arm_compute::DataLayout ConvertDataLayout(armnn::DataLayout dataLayout)
+{
+    switch (dataLayout)
+    {
+        case armnn::DataLayout::NHWC: return arm_compute::DataLayout::NHWC;
+
+        case armnn::DataLayout::NCHW: return arm_compute::DataLayout::NCHW;
+
+        default: throw InvalidArgumentException("Unknown armnn::DataLayout: [" +
+                                                std::to_string(static_cast<int>(dataLayout)) + "]");
+    }
+}
+
+arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo,
+                                                  armnn::DataLayout dataLayout)
+{
+    const arm_compute::TensorShape aclTensorShape = BuildArmComputeTensorShape(tensorInfo.GetShape());
+    const arm_compute::DataType aclDataType = GetArmComputeDataType(tensorInfo.GetDataType());
+    const arm_compute::QuantizationInfo aclQuantizationInfo(tensorInfo.GetQuantizationScale(),
+                                                            tensorInfo.GetQuantizationOffset());
+
+    arm_compute::TensorInfo aclTensorInfo(aclTensorShape, 1, aclDataType, aclQuantizationInfo);
+    aclTensorInfo.set_data_layout(ConvertDataLayout(dataLayout));
+
+    return aclTensorInfo;
+}
+
 arm_compute::PoolingLayerInfo BuildArmComputePoolingLayerInfo(const Pooling2dDescriptor& descriptor)
 {
     using arm_compute::PoolingType;
diff --git a/src/backends/ArmComputeTensorUtils.hpp b/src/backends/ArmComputeTensorUtils.hpp
index 572e310..18f41ee 100644
--- a/src/backends/ArmComputeTensorUtils.hpp
+++ b/src/backends/ArmComputeTensorUtils.hpp
@@ -30,6 +30,16 @@
 /// armnn::ITensorInfo.
 arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo);
 
+/// Utility function used to convert an armnn::DataLayout to the corresponding
+/// arm_compute::DataLayout.
+arm_compute::DataLayout ConvertDataLayout(armnn::DataLayout dataLayout);
+
+/// Utility function used to setup an arm_compute::ITensorInfo object whose dimensions are based on the given
+/// armnn::ITensorInfo, and whose data layout is set from the given
+/// armnn::DataLayout.
+arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo,
+                                                  armnn::DataLayout dataLayout);
+
 /// Utility function used to setup an arm_compute::PoolingLayerInfo object from an armnn::Pooling2dDescriptor.
 arm_compute::PoolingLayerInfo BuildArmComputePoolingLayerInfo(const Pooling2dDescriptor& descriptor);
 
@@ -59,6 +69,13 @@
     tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo));
 }
 
+/// Sets up the given ArmCompute tensor's dimensions based on the given ArmNN tensor, using the given data layout.
+template <typename Tensor>
+void BuildArmComputeTensor(Tensor& tensor, const armnn::TensorInfo& tensorInfo, DataLayout dataLayout)
+{
+    tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo, dataLayout));
+}
+
 template <typename Tensor>
 void InitialiseArmComputeTensorEmpty(Tensor& tensor)
 {
diff --git a/src/backends/ClTensorHandle.hpp b/src/backends/ClTensorHandle.hpp
index 9c78192..e3d7b5b 100644
--- a/src/backends/ClTensorHandle.hpp
+++ b/src/backends/ClTensorHandle.hpp
@@ -37,6 +37,11 @@
         armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo);
     }
 
+    ClTensorHandle(const TensorInfo& tensorInfo, DataLayout dataLayout)
+    {
+        armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo, dataLayout);
+    }
+
     arm_compute::CLTensor& GetTensor() override { return m_Tensor; }
     arm_compute::CLTensor const& GetTensor() const override { return m_Tensor; }
     virtual void Allocate() override {armnn::armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_Tensor);}
diff --git a/src/backends/ClWorkloadFactory.cpp b/src/backends/ClWorkloadFactory.cpp
index 591fb85..5f395a2 100644
--- a/src/backends/ClWorkloadFactory.cpp
+++ b/src/backends/ClWorkloadFactory.cpp
@@ -55,6 +55,15 @@
     return tensorHandle;
 }
 
+std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
+                                                                     DataLayout dataLayout) const
+{
+    std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo, dataLayout);
+    tensorHandle->SetMemoryGroup(m_MemoryManager.GetInterLayerMemoryGroup());
+
+    return tensorHandle;
+}
+
 std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorHandle&      parent,
                                                                         TensorShape const&   subTensorShape,
                                                                         unsigned int const* subTensorOrigin) const
@@ -290,6 +299,12 @@
     return nullptr;
 }
 
+std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
+                                                                     DataLayout dataLayout) const
+{
+    return nullptr; // stub used when the CL backend is compiled out
+}
+
 std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorHandle&      parent,
                                                                         TensorShape const&   subTensorShape,
                                                                         unsigned int const* subTensorOrigin) const
diff --git a/src/backends/ClWorkloadFactory.hpp b/src/backends/ClWorkloadFactory.hpp
index 892d564..d0bf416 100644
--- a/src/backends/ClWorkloadFactory.hpp
+++ b/src/backends/ClWorkloadFactory.hpp
@@ -33,6 +33,9 @@
 
     virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override;
 
+    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+                                                              DataLayout dataLayout) const override;
+
     virtual std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
diff --git a/src/backends/ClWorkloads/ClConvolution2dBaseWorkload.cpp b/src/backends/ClWorkloads/ClConvolution2dBaseWorkload.cpp
index 228f17d..110a2da 100644
--- a/src/backends/ClWorkloads/ClConvolution2dBaseWorkload.cpp
+++ b/src/backends/ClWorkloads/ClConvolution2dBaseWorkload.cpp
@@ -21,9 +21,9 @@
     const TensorInfo& weights,
     const boost::optional<TensorInfo>& biases)
 {
-    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
-    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
-    const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights);
+    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
 
     arm_compute::TensorInfo aclBiasesInfo;
     arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;
@@ -32,7 +32,7 @@
     {
         BOOST_ASSERT(biases.is_initialized());
 
-        aclBiasesInfo = BuildArmComputeTensorInfo(biases.get());
+        aclBiasesInfo = BuildArmComputeTensorInfo(biases.get(), descriptor.m_DataLayout);
         optionalAclBiasesInfo = &aclBiasesInfo;
     }
 
diff --git a/src/backends/ClWorkloads/ClConvolution2dFloatWorkload.cpp b/src/backends/ClWorkloads/ClConvolution2dFloatWorkload.cpp
index f0b9a46..3da6fa7 100644
--- a/src/backends/ClWorkloads/ClConvolution2dFloatWorkload.cpp
+++ b/src/backends/ClWorkloads/ClConvolution2dFloatWorkload.cpp
@@ -25,7 +25,7 @@
     const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo();
 
     m_KernelTensor = std::make_unique<arm_compute::CLTensor>();
-    BuildArmComputeTensor(*m_KernelTensor, weightInfo);
+    BuildArmComputeTensor(*m_KernelTensor, weightInfo, descriptor.m_DataLayout);
 
     arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
                                              m_Data.m_Parameters.m_StrideY,
@@ -38,7 +38,7 @@
     if (m_Data.m_Parameters.m_BiasEnabled)
     {
         m_BiasTensor = std::make_unique<arm_compute::CLTensor>();
-        BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo());
+        BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), descriptor.m_DataLayout);
     }
 
     m_Data.ValidateInputsOutputs("ClConvolution2dFloat32Workload", 1, 1);
diff --git a/src/backends/ClWorkloads/ClConvolution2dUint8Workload.cpp b/src/backends/ClWorkloads/ClConvolution2dUint8Workload.cpp
index c9f5eaa..3949a74 100644
--- a/src/backends/ClWorkloads/ClConvolution2dUint8Workload.cpp
+++ b/src/backends/ClWorkloads/ClConvolution2dUint8Workload.cpp
@@ -24,7 +24,7 @@
     const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo();
 
     m_KernelTensor = std::make_unique<arm_compute::CLTensor>();
-    BuildArmComputeTensor(*m_KernelTensor, weightInfo);
+    BuildArmComputeTensor(*m_KernelTensor, weightInfo, descriptor.m_DataLayout);
 
     arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
                                              m_Data.m_Parameters.m_StrideY,
@@ -37,7 +37,7 @@
     if (m_Data.m_Parameters.m_BiasEnabled)
     {
         m_BiasTensor = std::make_unique<arm_compute::CLTensor>();
-        BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo());
+        BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), descriptor.m_DataLayout);
     }
 
     m_Data.ValidateInputsOutputs("ClConvolution2dUint8Workload", 1, 1);
diff --git a/src/backends/NeonTensorHandle.hpp b/src/backends/NeonTensorHandle.hpp
index e385c83..77f3cc1 100644
--- a/src/backends/NeonTensorHandle.hpp
+++ b/src/backends/NeonTensorHandle.hpp
@@ -36,6 +36,11 @@
         armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo);
     }
 
+    NeonTensorHandle(const TensorInfo& tensorInfo, DataLayout dataLayout)
+    {
+        armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo, dataLayout);
+    }
+
     arm_compute::ITensor& GetTensor() override { return m_Tensor; }
     arm_compute::ITensor const& GetTensor() const override { return m_Tensor; }
 
diff --git a/src/backends/NeonWorkloadFactory.cpp b/src/backends/NeonWorkloadFactory.cpp
index 80ce0b9..c989121 100644
--- a/src/backends/NeonWorkloadFactory.cpp
+++ b/src/backends/NeonWorkloadFactory.cpp
@@ -67,6 +67,15 @@
     return tensorHandle;
 }
 
+std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
+                                                                       DataLayout dataLayout) const
+{
+    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo, dataLayout);
+    tensorHandle->SetMemoryGroup(m_MemoryManager.GetInterLayerMemoryGroup());
+
+    return tensorHandle;
+}
+
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
                                                             const WorkloadInfo&        info) const
 {
@@ -289,6 +298,12 @@
     return nullptr;
 }
 
+std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
+                                                                       DataLayout dataLayout) const
+{
+    return nullptr; // stub used when the Neon backend is compiled out
+}
+
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
                                                             const WorkloadInfo&        info) const
 {
diff --git a/src/backends/NeonWorkloadFactory.hpp b/src/backends/NeonWorkloadFactory.hpp
index a981855..45d1c2c 100644
--- a/src/backends/NeonWorkloadFactory.hpp
+++ b/src/backends/NeonWorkloadFactory.hpp
@@ -33,6 +33,9 @@
 
     virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override;
 
+    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+                                                              DataLayout dataLayout) const override;
+
     virtual std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
                                                    const WorkloadInfo&        info) const override;
 
diff --git a/src/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp b/src/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp
index 0e9894c..912e2d5 100644
--- a/src/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp
+++ b/src/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp
@@ -23,9 +23,9 @@
     const TensorInfo& weights,
     const boost::optional<TensorInfo>& biases)
 {
-    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
-    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
-    const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights);
+    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
 
     arm_compute::TensorInfo aclBiasesInfo;
     arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;
@@ -34,7 +34,7 @@
     {
         BOOST_ASSERT(biases.is_initialized());
 
-        aclBiasesInfo = BuildArmComputeTensorInfo(biases.get());
+        aclBiasesInfo = BuildArmComputeTensorInfo(biases.get(), descriptor.m_DataLayout);
         optionalAclBiasesInfo = &aclBiasesInfo;
     }
 
@@ -63,12 +63,12 @@
     arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     m_KernelTensor = std::make_unique<arm_compute::Tensor>();
-    BuildArmComputeTensor(*m_KernelTensor, m_Data.m_Weight->GetTensorInfo());
+    BuildArmComputeTensor(*m_KernelTensor, m_Data.m_Weight->GetTensorInfo(), descriptor.m_DataLayout);
 
     if (m_Data.m_Parameters.m_BiasEnabled)
     {
         m_BiasTensor = std::make_unique<arm_compute::Tensor>();
-        BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo());
+        BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), descriptor.m_DataLayout);
     }
 
     arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
diff --git a/src/backends/OutputHandler.cpp b/src/backends/OutputHandler.cpp
index c1be5b7..4dfa1a6 100644
--- a/src/backends/OutputHandler.cpp
+++ b/src/backends/OutputHandler.cpp
@@ -25,6 +25,11 @@
     m_TensorHandle = factory.CreateTensorHandle(m_TensorInfo);
 }
 
+void OutputHandler::CreateTensorHandles(const IWorkloadFactory& factory, DataLayout dataLayout)
+{
+    m_TensorHandle = factory.CreateTensorHandle(m_TensorInfo, dataLayout);
+}
+
 void OutputHandler::CollectWorkloadOutputs(WorkloadDataCollector& dataCollector) const
 {
     dataCollector.Push(m_TensorHandle.get(), m_TensorInfo);
diff --git a/src/backends/OutputHandler.hpp b/src/backends/OutputHandler.hpp
index dfc0184..97da87d 100644
--- a/src/backends/OutputHandler.hpp
+++ b/src/backends/OutputHandler.hpp
@@ -39,6 +39,11 @@
     /// @param factory - Factory to be used for handler creation.
     void CreateTensorHandles(const IWorkloadFactory& factory);
 
+    /// @brief - Creates tensor handlers used by the intermediate tensors. Does not allocate memory.
+    /// @param factory - Factory to be used for handler creation.
+    /// @param dataLayout - Data layout to be used for handler creation.
+    void CreateTensorHandles(const IWorkloadFactory& factory, DataLayout dataLayout);
+
     /// @brief - Gets the matching TensorInfo for the output.
     /// @return - References to the output TensorInfo.
     const TensorInfo& GetTensorInfo() const { return m_TensorInfo; }
diff --git a/src/backends/RefWorkloadFactory.cpp b/src/backends/RefWorkloadFactory.cpp
index 93b4d94..689adb6 100644
--- a/src/backends/RefWorkloadFactory.cpp
+++ b/src/backends/RefWorkloadFactory.cpp
@@ -36,6 +36,12 @@
     return std::make_unique<ScopedCpuTensorHandle>(tensorInfo);
 }
 
+std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
+                                                                      DataLayout dataLayout) const
+{
+    return std::make_unique<ScopedCpuTensorHandle>(tensorInfo);
+}
+
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
diff --git a/src/backends/RefWorkloadFactory.hpp b/src/backends/RefWorkloadFactory.hpp
index 6b13377..da0ca9b 100644
--- a/src/backends/RefWorkloadFactory.hpp
+++ b/src/backends/RefWorkloadFactory.hpp
@@ -49,6 +49,9 @@
 
     virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override;
 
+    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+                                                              DataLayout dataLayout) const override;
+
     virtual std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
diff --git a/src/backends/WorkloadData.hpp b/src/backends/WorkloadData.hpp
index b5b0402..5da9e8b 100644
--- a/src/backends/WorkloadData.hpp
+++ b/src/backends/WorkloadData.hpp
@@ -142,11 +142,13 @@
     Convolution2dQueueDescriptor()
         : m_Weight(nullptr)
         , m_Bias(nullptr)
+        , m_DataLayout(DataLayout::NCHW)
     {
     }
 
     const ConstCpuTensorHandle* m_Weight;
     const ConstCpuTensorHandle* m_Bias;
+    DataLayout m_DataLayout;
 
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
diff --git a/src/backends/WorkloadFactory.hpp b/src/backends/WorkloadFactory.hpp
index fbc6134..77e810c 100644
--- a/src/backends/WorkloadFactory.hpp
+++ b/src/backends/WorkloadFactory.hpp
@@ -49,6 +49,9 @@
 
     virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const = 0;
 
+    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+                                                              DataLayout dataLayout) const = 0;
+
     virtual std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const = 0;