IVGCVSW-6123 ConstTensorsAsInputs: Conv2d

  * Use new INetwork::AddConvolution2dLayer
    instead of the deprecated version (see the sketch below)
  * Remove duplicated test in SerializerTests
  * Fix some cosmetics
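
  A minimal sketch of the new pattern used throughout this change, assuming
  'network' is an existing INetwork and 'weights'/'biases' are ConstTensors
  (names are illustrative only):

    armnn::Convolution2dDescriptor desc;
    desc.m_BiasEnabled = true;

    // The convolution layer no longer takes weights/biases directly.
    armnn::IConnectableLayer* conv = network->AddConvolution2dLayer(desc, "conv");

    // Weights and biases are fed in as ConstantLayer outputs on input slots 1 and 2.
    armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights, "Weights");
    weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
    weightsLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(1u));

    armnn::IConnectableLayer* biasLayer = network->AddConstantLayer(biases, "Bias");
    biasLayer->GetOutputSlot(0).SetTensorInfo(biases.GetInfo());
    biasLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(2u));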

Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: I3407815bfdc1cdc01ca0a667b8e4d80d8621783f
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index fecc766..f2ba94f 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -90,9 +90,9 @@
 
 ARMNN_NO_DEPRECATE_WARN_BEGIN
 IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
-                                                    const ConstTensor& weights,
-                                                    const Optional<ConstTensor>& biases,
-                                                    const char* name)
+                                                   const ConstTensor& weights,
+                                                   const Optional<ConstTensor>& biases,
+                                                   const char* name)
 {
     return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor,
                                                weights,
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 7756f40..9d98104 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -605,10 +605,6 @@
 {
     armnn::NetworkImpl net;
 
-    unsigned int dims[] = { 10,1,1,1 };
-    std::vector<float> convWeightsData(10);
-    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), convWeightsData);
-
     armnn::Convolution2dDescriptor convDesc2d;
     convDesc2d.m_PadLeft = 2;
     convDesc2d.m_PadRight = 3;
@@ -620,12 +616,7 @@
     convDesc2d.m_DilationY = 3;
     convDesc2d.m_BiasEnabled = false;
     convDesc2d.m_DataLayout = armnn::DataLayout::NCHW;
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    armnn::IConnectableLayer* const convLayer = net.AddConvolution2dLayer(convDesc2d,
-                                                                          weights,
-                                                                          armnn::EmptyOptional(),
-                                                                          "conv layer");
-    ARMNN_NO_DEPRECATE_WARN_END
+    armnn::IConnectableLayer* const convLayer = net.AddConvolution2dLayer(convDesc2d, "conv layer");
     CHECK(convLayer);
 
     const armnn::BaseDescriptor& descriptor = convLayer->GetParameters();
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 3dd5527..b78863d 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -885,11 +885,11 @@
 
     // Define the network
     Graph graph;
-    auto input     = graph.AddLayer<InputLayer>(0, "input");
+    auto input        = graph.AddLayer<InputLayer>(0, "input");
     auto weightsLayer = graph.AddLayer<ConstantLayer>("Weights");
-    auto conv      = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "convolution");
-    auto batchNorm = graph.AddLayer<BatchNormalizationLayer>(batchNormDescriptor, "batchNorm");
-    auto output    = graph.AddLayer<OutputLayer>(0, "output");
+    auto conv         = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "convolution");
+    auto batchNorm    = graph.AddLayer<BatchNormalizationLayer>(batchNormDescriptor, "batchNorm");
+    auto output       = graph.AddLayer<OutputLayer>(0, "output");
 
     // Set layer information
     input->GetOutputSlot().SetTensorInfo(inputInfo);
diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp
index d7465c8..feeea5d 100644
--- a/src/armnn/test/SubgraphViewTests.cpp
+++ b/src/armnn/test/SubgraphViewTests.cpp
@@ -978,8 +978,8 @@
                                                     "m3");
 
     auto x2 = graph.InsertNewLayer<Convolution2dLayer>(m3->GetInputSlot(0),
-                                                                        Convolution2dDescriptor{},
-                                                                        "x2");
+                                                       Convolution2dDescriptor{},
+                                                       "x2");
 
     auto w2 = graph.InsertNewLayer<ConstantLayer>(x2->GetInputSlot(1), "w2");
 
diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp
index 027b103..14c211f 100644
--- a/src/armnn/test/optimizations/FoldPadTests.cpp
+++ b/src/armnn/test/optimizations/FoldPadTests.cpp
@@ -636,13 +636,9 @@
         std::vector<float>    biasVector   = {5, 6, 7, 8};
         TensorInfo            biasInfo({4}, DataType::Float32, 0.0f, 0, true);
         ConstTensor           bias(biasInfo, biasVector);
-        Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias);
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        IConnectableLayer* conv2dLayer = network->AddConvolution2dLayer(convDescriptor,
-                                                                        weights,
-                                                                        optionalBias,
-                                                                        "Conv2D");
-        ARMNN_NO_DEPRECATE_WARN_END
+
+        IConnectableLayer* conv2dLayer = network->AddConvolution2dLayer(convDescriptor, "Conv2D");
+
         TensorInfo outputInfo(4, outputShape, DataType::Float32);
         conv2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
@@ -653,6 +649,14 @@
         padLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(0));
         conv2dLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
 
+        auto weightsLayer = network->AddConstantLayer(weights, "Weights");
+        weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
+        weightsLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(1));
+
+        auto biasLayer = network->AddConstantLayer(bias, "Bias");
+        biasLayer->GetOutputSlot(0).SetTensorInfo(bias.GetInfo());
+        biasLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(2));
+
         // Create ArmNN runtime
         IRuntimePtr          run              = IRuntime::Create(IRuntime::CreationOptions());    // default options
         // Optimise the network
diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
index 4a94f78..54cbbce 100644
--- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp
+++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
@@ -31,9 +31,10 @@
                                              const Optional<ConstTensor> &biases,
                                              const char *name)
     {
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        return network->AddConvolution2dLayer(descriptor, weights, biases, name);
-        ARMNN_NO_DEPRECATE_WARN_END
+        IgnoreUnused(weights);
+        IgnoreUnused(biases);
+
+        return network->AddConvolution2dLayer(descriptor, name);
     }
 
     static std::vector<IConnectableLayer*> AddConstantLayers(INetwork *network,
@@ -41,12 +42,18 @@
                                                              const ConstTensor &weights,
                                                              const Optional<ConstTensor> &biases)
     {
-        IgnoreUnused(network);
-        IgnoreUnused(descriptor);
-        IgnoreUnused(weights);
-        IgnoreUnused(biases);
+        auto weightsLayer = network->AddConstantLayer(weights, "Weights");
+        weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
+        std::vector<IConnectableLayer*> layers = {weightsLayer};
 
-        return {};
+        if (descriptor.m_BiasEnabled)
+        {
+            auto biasLayer = network->AddConstantLayer(biases.value(), "Bias");
+            biasLayer->GetOutputSlot(0).SetTensorInfo(biases.value().GetInfo());
+            layers.emplace_back(biasLayer);
+        }
+
+        return layers;
     }
 };
 
diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp
index 60bd962..63fb603 100644
--- a/src/armnnOnnxParser/OnnxParser.cpp
+++ b/src/armnnOnnxParser/OnnxParser.cpp
@@ -1762,11 +1762,16 @@
         }
     }
 
-    armnn::IConnectableLayer* layer;
+    desc.m_BiasEnabled = (node.input_size() == 3);
+    armnn::IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc, node.name().c_str());
     std::vector<std::string> tensorIndexes= {node.input(0), node.input(1)};
 
     auto weightTensor = CreateConstTensor(node.input(1));
 
+    IConnectableLayer* weightsLayer = m_Network->AddConstantLayer(weightTensor.first);
+    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightTensor.first.GetInfo());
+    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
+
     if (node.input_size() == 3)
     {
         if(!m_TensorsInfo[node.input(2)].isConstant())
@@ -1777,22 +1782,15 @@
                                              CHECK_LOCATION().AsString()));
         }
         desc.m_BiasEnabled = true;
-        tensorIndexes.emplace_back(node.input(2));
         auto biasTensor = CreateConstTensor(node.input(2));
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        layer = m_Network->AddConvolution2dLayer(desc,
-                                                 weightTensor.first,
-                                                 Optional<ConstTensor>(biasTensor.first),
-                                                 node.name().c_str());
+
+        IConnectableLayer* biasLayer = m_Network->AddConstantLayer(biasTensor.first);
+        biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensor.first.GetInfo());
+        biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
+
+        tensorIndexes.emplace_back(node.input(2));
     }
-    else
-    {
-        layer = m_Network->AddConvolution2dLayer(desc,
-                                                 weightTensor.first,
-                                                 EmptyOptional(),
-                                                 node.name().c_str());
-        ARMNN_NO_DEPRECATE_WARN_END
-    }
+
     ARMNN_ASSERT(layer != nullptr);
 
     auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index a042939..43a8aae 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -417,7 +417,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-TEST_CASE("SerializeConvolution2dWithPerAxisParams")
+TEST_CASE("SerializeConvolution2dWithPerAxisParamsTestDeprecatedMethod")
 {
     using namespace armnn;
 
@@ -521,59 +521,6 @@
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     CHECK(deserializedNetwork);
 
-    Convolution2dLayerVerifier verifier(layerName, {inputInfo, weightsInfo, biasesInfo}, {outputInfo}, descriptor);
-
-    deserializedNetwork->ExecuteStrategy(verifier);
-}
-
-TEST_CASE("SerializeConvolution2dWeightsAndBiasesAsConstantLayers")
-{
-    const std::string layerName("convolution2d");
-    const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
-    const armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
-
-    const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true);
-    const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true);
-
-    std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
-    armnn::ConstTensor weights(weightsInfo, weightsData);
-
-    std::vector<float> biasesData = GenerateRandomData<float>(biasesInfo.GetNumElements());
-    armnn::ConstTensor biases(biasesInfo, biasesData);
-
-    armnn::Convolution2dDescriptor descriptor;
-    descriptor.m_PadLeft     = 1;
-    descriptor.m_PadRight    = 1;
-    descriptor.m_PadTop      = 1;
-    descriptor.m_PadBottom   = 1;
-    descriptor.m_StrideX     = 2;
-    descriptor.m_StrideY     = 2;
-    descriptor.m_DilationX   = 2;
-    descriptor.m_DilationY   = 2;
-    descriptor.m_BiasEnabled = true;
-    descriptor.m_DataLayout  = armnn::DataLayout::NHWC;
-
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer* const inputLayer  = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const weightsLayer = network->AddConstantLayer(weights, "Weights");
-    armnn::IConnectableLayer* const biasesLayer = network->AddConstantLayer(biases, "Biases");
-    armnn::IConnectableLayer* const convLayer   = network->AddConvolution2dLayer(descriptor,
-                                           layerName.c_str());
-    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
-
-    inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
-    weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1));
-    biasesLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2));
-    convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-
-    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
-    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
-    biasesLayer->GetOutputSlot(0).SetTensorInfo(biasesInfo);
-    convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    CHECK(deserializedNetwork);
-
     const std::vector<armnn::ConstTensor>& constants {weights, biases};
     LayerVerifierBaseWithDescriptorAndConstants<armnn::Convolution2dDescriptor> verifier(
             layerName, {inputInfo, weightsInfo, biasesInfo}, {outputInfo}, descriptor, constants);
diff --git a/src/armnnTestUtils/CreateWorkload.hpp b/src/armnnTestUtils/CreateWorkload.hpp
index 7700a55..905b8fa 100644
--- a/src/armnnTestUtils/CreateWorkload.hpp
+++ b/src/armnnTestUtils/CreateWorkload.hpp
@@ -2122,9 +2122,15 @@
     convDesc2d.m_BiasEnabled = biasEnabled;
     convDesc2d.m_DataLayout = armnn::DataLayout::NHWC;
 
-    armnn::IConnectableLayer* convLayer = nullptr;
+
     const std::string convLayerName("conv layer");
 
+    armnn::IConnectableLayer* convLayer = net->AddConvolution2dLayer(convDesc2d, convLayerName.c_str());
+
+    IConnectableLayer* weightsLayer = net->AddConstantLayer(weights);
+    weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
+    weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1u));
+
     if (biasEnabled)
     {
         constexpr armnn::DataType biasDataType = ( dataType == armnn::DataType::QAsymmU8) ?
@@ -2139,23 +2145,10 @@
 
         armnn::ConstTensor biases(biasTensorInfo, biasData);
 
-        // Create convolution layer with biases
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        convLayer = net->AddConvolution2dLayer(convDesc2d,
-                                              weights,
-                                              Optional<ConstTensor>(biases),
-                                              convLayerName.c_str());
-        ARMNN_NO_DEPRECATE_WARN_END
-    }
-    else
-    {
-        // Create convolution layer without biases
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        convLayer = net->AddConvolution2dLayer(convDesc2d,
-                                              weights,
-                                              EmptyOptional(),
-                                              convLayerName.c_str());
-        ARMNN_NO_DEPRECATE_WARN_END
+        IConnectableLayer* biasLayer = net->AddConstantLayer(biases);
+
+        biasLayer->GetOutputSlot(0).SetTensorInfo(biases.GetInfo());
+        biasLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2u));
     }
 
     CHECK(convLayer);
diff --git a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
index 45fcf19..6735469 100644
--- a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
@@ -315,7 +315,7 @@
     const TensorInfo inputInfo ({  1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
     const TensorInfo outputInfo({  1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
     TensorInfo weightInfo({ 16,  1,  1, 16 }, DataType::QAsymmU8, 0.9f, 0);
-    TensorInfo biasInfo  ({  1,  1,  1, 16 }, DataType::Signed32,        0.9f, 0);
+    TensorInfo biasInfo({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0);
 
     weightInfo.SetConstant(true);
     biasInfo.SetConstant(true);
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index 8e3b275..bcea061 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -402,14 +402,16 @@
     armnn::INetworkPtr network = armnn::INetwork::Create();
     armnn::IConnectableLayer* const inputLayer  = network->AddInputLayer(0);
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    armnn::IConnectableLayer* const convLayer   =
-            network->AddConvolution2dLayer(descriptor,
-                                           weights,
-                                           armnn::Optional<armnn::ConstTensor>(biases),
-                                           layerName.c_str());
-    ARMNN_NO_DEPRECATE_WARN_END
+    armnn::IConnectableLayer* const convLayer   = network->AddConvolution2dLayer(descriptor, layerName.c_str());
     armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+    armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights);
+    armnn::IConnectableLayer* biasLayer = network->AddConstantLayer(biases);
+
+    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+    weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1u));
+
+    biasLayer->GetOutputSlot(0).SetTensorInfo(biasesInfo);
+    biasLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2u));
 
     inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
     convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 9bfd1fb..20537b3 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -320,14 +320,14 @@
     convDesc2d.m_PadBottom = 1;
     convDesc2d.m_DataLayout = DataLayout::NHWC;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d,
-                                                                          weights,
-                                                                          armnn::EmptyOptional(),
-                                                                          "conv");
-    ARMNN_NO_DEPRECATE_WARN_END
+    armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d, "conv");
+    armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights);
+
     ARMNN_ASSERT(convLayer);
 
+    weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
+    weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1u));
+
     inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
     inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
 
@@ -878,14 +878,14 @@
     convDesc2d.m_PadTop = 1;
     convDesc2d.m_PadBottom = 1;
     convDesc2d.m_DataLayout = DataLayout::NHWC;
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d,
-                                                                          weights,
-                                                                          armnn::EmptyOptional(),
-                                                                          "conv");
-    ARMNN_NO_DEPRECATE_WARN_END
+    armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d, "conv");
     ARMNN_ASSERT(convLayer);
 
+    armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights);
+
+    weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
+    weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1u));
+
     inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
     inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
 
@@ -1098,14 +1098,15 @@
     convDesc2d.m_PadTop = 1;
     convDesc2d.m_PadBottom = 1;
     convDesc2d.m_DataLayout = DataLayout::NHWC;
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d,
-                                                                          weights,
-                                                                          armnn::EmptyOptional(),
-                                                                          "conv");
-    ARMNN_NO_DEPRECATE_WARN_END
+
+    armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d, "conv");
     ARMNN_ASSERT(convLayer);
 
+    armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights);
+
+    weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
+    weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1u));
+
     inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
     inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
 
diff --git a/src/profiling/test/ProfilingTestUtils.cpp b/src/profiling/test/ProfilingTestUtils.cpp
index 58708ca..215ae34 100644
--- a/src/profiling/test/ProfilingTestUtils.cpp
+++ b/src/profiling/test/ProfilingTestUtils.cpp
@@ -424,9 +424,18 @@
     conv2dDesc.m_PadTop = 2;
     conv2dDesc.m_PadBottom = 2;
     conv2dDesc.m_BiasEnabled = true;
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    IConnectableLayer* conv2d = net->AddConvolution2dLayer(conv2dDesc, weights, optionalBiases);
-    ARMNN_NO_DEPRECATE_WARN_END
+
+    IConnectableLayer* conv2d = net->AddConvolution2dLayer(conv2dDesc);
+
+    armnn::IConnectableLayer* weightsLayer = net->AddConstantLayer(weights, "Weights");
+    armnn::IConnectableLayer* biasLayer = net->AddConstantLayer(biases, "Bias");
+
+    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightInfo);
+    weightsLayer->GetOutputSlot(0).Connect(conv2d->GetInputSlot(1u));
+
+    biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
+    biasLayer->GetOutputSlot(0).Connect(conv2d->GetInputSlot(2u));
+
     // Abs layer
     armnn::ElementwiseUnaryDescriptor absDesc;
     armnn::IConnectableLayer* const abs = net->AddElementwiseUnaryLayer(absDesc, "abs");