IVGCVSW-7006 Remove deprecated code due to be removed in 22.08

* AddConv and AddDWConv with weights and bias
* ResizeBilinearDescriptor
* '-b' / '--blacklist' option in accuracy tool

!android-nn-driver:8172

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ibbc04fd18be7f938b11590bf67cd7af103cb4d99
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index c4869fa..3508ee8 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -88,19 +88,6 @@
     return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor, name);
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
-                                                   const ConstTensor& weights,
-                                                   const Optional<ConstTensor>& biases,
-                                                   const char* name)
-{
-    return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor,
-                                               weights,
-                                               armnn::Optional<ConstTensor>(biases),
-                                               name);
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 IConnectableLayer* INetwork::AddConvolution3dLayer(const Convolution3dDescriptor& convolution3dDescriptor,
                                                    const char* name)
 {
@@ -123,18 +110,6 @@
 }
 
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
-    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
-    const ConstTensor& weights,
-    const Optional<ConstTensor>& biases,
-    const char* name)
-{
-    return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
-
 IConnectableLayer* INetwork::AddDequantizeLayer(const char* name)
 {
     return pNetworkImpl->AddDequantizeLayer(name);
diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp
index 5799970..185a672 100644
--- a/src/armnn/layers/Convolution2dLayer.hpp
+++ b/src/armnn/layers/Convolution2dLayer.hpp
@@ -44,7 +44,6 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-
     void ExecuteStrategy(IStrategy& strategy) const override;
 
     void SerializeLayerParameters(ParameterStringifyFunction& fn) const override;
diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp
index 95421c5..eea7ae8 100644
--- a/src/armnn/test/GraphTests.cpp
+++ b/src/armnn/test/GraphTests.cpp
@@ -632,13 +632,13 @@
     TensorInfo weightsInfo = constInfo;
     ConstTensor weights(weightsInfo, weightData);
     DepthwiseConvolution2dDescriptor desc;
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    // GetConstantTensorsByRef() returns {m_Weights, m_Bias} so we need to use the old AddDepthwiseConvolution2dLayer()
-    const auto depthwiseLayer = net->AddDepthwiseConvolution2dLayer(desc, weights, EmptyOptional(), "Depthwise");
-    ARMNN_NO_DEPRECATE_WARN_END
-    const void* resultData = depthwiseLayer->GetConstantTensorsByRef()[0].get()->GetConstTensor<void>();
-    auto resultValue = reinterpret_cast<const uint8_t*>(resultData);
-    CHECK(resultValue[0] == 3);
+
+    const auto weightsLayer = net->AddConstantLayer(weights);
+
+    const void* resultDataWeights = weightsLayer->GetConstantTensorsByRef()[0].get()->GetConstTensor<void>();
+    auto resultValueWeights = reinterpret_cast<const uint8_t*>(resultDataWeights);
+    CHECK(resultValueWeights[0] == 3);
+
 }
 
 }
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index a568bf1..3573a81 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -436,19 +436,19 @@
 
     armnn::INetworkPtr network = armnn::INetwork::Create();
     armnn::IConnectableLayer* const inputLayer  = network->AddInputLayer(0);
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    armnn::IConnectableLayer* const convLayer   =
-            network->AddConvolution2dLayer(descriptor,
-                                           weights,
-                                           armnn::Optional<armnn::ConstTensor>(biases),
-                                           layerName.c_str());
-    ARMNN_NO_DEPRECATE_WARN_END
+    armnn::IConnectableLayer* const weightsLayer = network->AddConstantLayer(weights, "weights");
+    armnn::IConnectableLayer* const biasLayer = network->AddConstantLayer(biases, "bias");
+    armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(descriptor, layerName.c_str());
     armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
 
     inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
+    weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1));
+    biasLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2));
     convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
 
     inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+    biasLayer->GetOutputSlot(0).SetTensorInfo(biasesInfo);
     convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
@@ -458,7 +458,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-TEST_CASE("SerializeConvolution2dWithPerAxisParamsTestDeprecatedMethod")
+TEST_CASE("SerializeConvolution2dWithPerAxisParams")
 {
     using namespace armnn;
 
@@ -491,19 +491,19 @@
 
     armnn::INetworkPtr network = armnn::INetwork::Create();
     armnn::IConnectableLayer* const inputLayer  = network->AddInputLayer(0);
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    armnn::IConnectableLayer* const convLayer   =
-        network->AddConvolution2dLayer(descriptor,
-                                       weights,
-                                       armnn::Optional<armnn::ConstTensor>(biases),
-                                       layerName.c_str());
-    ARMNN_NO_DEPRECATE_WARN_END
+    armnn::IConnectableLayer* const weightsLayer = network->AddConstantLayer(weights, "weights");
+    armnn::IConnectableLayer* const biasLayer = network->AddConstantLayer(biases, "bias");
+    armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(descriptor, layerName.c_str());
     armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
 
     inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
+    weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1));
+    biasLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2));
     convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
 
     inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+    weightsLayer->GetOutputSlot(0).SetTensorInfo(kernelInfo);
+    biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
     convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index f4afbd9..753fe06 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1596,54 +1596,6 @@
     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
 }
 
-
-void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
-{
-    const std::string descriptorName{"ResizeBilinearQueueDescriptor"};
-
-    ValidateNumInputs(workloadInfo,  descriptorName, 1);
-    ValidateNumOutputs(workloadInfo, descriptorName, 1);
-
-    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
-    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
-
-    ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 4, "input");
-    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
-
-    std::vector<DataType> supportedTypes =
-    {
-        DataType::BFloat16,
-        DataType::Float16,
-        DataType::Float32,
-        DataType::QAsymmS8,
-        DataType::QAsymmU8,
-        DataType::QSymmS16
-    };
-
-    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
-    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
-
-    // ResizeBilinear only changes width and height: batch and channel count must match.
-    const unsigned int inputBatchSize  = inputTensorInfo.GetShape()[0];
-    const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
-    if (inputBatchSize != outputBatchSize)
-    {
-        throw InvalidArgumentException(
-            fmt::format("{}: Input batch size ({}) does not match output batch size ({})",
-                        descriptorName, inputBatchSize, outputBatchSize));
-    }
-
-    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
-    const unsigned int inputChannelCount  = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
-    const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
-    if (inputChannelCount != outputChannelCount)
-    {
-        throw InvalidArgumentException(
-            fmt::format("{}: Input channel count ({}) does not match output channel count ({})",
-                        descriptorName, inputChannelCount, outputChannelCount));
-    }
-}
-
 void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
 {
     const std::string descriptorName{"ResizeQueueDescriptor"};