IVGCVSW-4483 Remove boost::polymorphic_pointer_downcast

 * Replaced boost::polymorphic_pointer_downcast with armnn::PolymorphicPointerDowncast (see the sketch below)
 * Replaced/removed the affected includes
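
For reviewers, a minimal, self-contained sketch of the call-site pattern this change relies on. The stub types and the CheckedPointerDowncast helper below are illustrative assumptions, not the actual Arm NN utility (which lives in armnn/utility/PolymorphicDowncast.hpp); the idea mirrors boost::polymorphic_pointer_downcast, i.e. a static_cast whose result is checked against dynamic_cast in debug builds.

    // Illustrative sketch only: stand-in stub types and helper, not the Arm NN implementation.
    #include <cassert>

    struct ITensorHandleStub { virtual ~ITensorHandleStub() = default; };
    struct ClTensorHandleStub : ITensorHandleStub { int value = 42; };

    template <typename Derived, typename Base>
    Derived* CheckedPointerDowncast(Base* base)
    {
        // Debug-only sanity check that the static downcast agrees with dynamic_cast.
        assert(dynamic_cast<Derived*>(base) == static_cast<Derived*>(base));
        return static_cast<Derived*>(base);
    }

    int main()
    {
        ClTensorHandleStub concrete;
        ITensorHandleStub* handle = &concrete;

        // Call-site shape matching the diff: downcast the interface pointer to the
        // backend-specific handle type before dereferencing it.
        ClTensorHandleStub* clHandle = CheckedPointerDowncast<ClTensorHandleStub>(handle);
        return clHandle->value == 42 ? 0 : 1;
    }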

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: I0ef934a3804cf05e4c38dec6c4ec49c76111a302
diff --git a/src/backends/cl/workloads/ClConcatWorkload.cpp b/src/backends/cl/workloads/ClConcatWorkload.cpp
index 5370466..e0aebd3 100644
--- a/src/backends/cl/workloads/ClConcatWorkload.cpp
+++ b/src/backends/cl/workloads/ClConcatWorkload.cpp
@@ -5,6 +5,7 @@
 #include "ClConcatWorkload.hpp"
 #include "ClWorkloadUtils.hpp"
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <cl/ClTensorHandle.hpp>
 #include <cl/ClLayerSupport.hpp>
@@ -12,8 +13,6 @@
 #include <arm_compute/core/Types.h>
 #include <arm_compute/runtime/CL/functions/CLConcatenateLayer.h>
 
-#include <boost/polymorphic_pointer_cast.hpp>
-
 namespace armnn
 {
 using namespace armcomputetensorutils;
@@ -72,11 +71,12 @@
     std::vector<arm_compute::ICLTensor *> aclInputs;
     for (auto input : m_Data.m_Inputs)
     {
-        arm_compute::ICLTensor& aclInput  = boost::polymorphic_pointer_downcast<IClTensorHandle>(input)->GetTensor();
+        arm_compute::ICLTensor& aclInput  = armnn::PolymorphicPointerDowncast<IClTensorHandle>(input)->GetTensor();
         aclInputs.emplace_back(&aclInput);
     }
-    arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast<IClTensorHandle>(
-                                                                         m_Data.m_Outputs[0])->GetTensor();
+
+    arm_compute::ICLTensor& output =
+            armnn::PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
 
     // Create the layer function
     auto layer = std::make_unique<arm_compute::CLConcatenateLayer>();
diff --git a/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp b/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
index 800a984..04885b1 100644
--- a/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
@@ -8,11 +8,11 @@
 #include "ClWorkloadUtils.hpp"
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
 #include <cl/ClTensorHandle.hpp>
 
 #include <boost/numeric/conversion/cast.hpp>
-#include <boost/polymorphic_pointer_cast.hpp>
 
 namespace armnn
 {
@@ -45,13 +45,13 @@
     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
 
     arm_compute::ICLTensor& input =
-        boost::polymorphic_pointer_downcast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
+        PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
     input.info()->set_data_layout(aclDataLayout);
 
     int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
 
     arm_compute::ICLTensor& output =
-        boost::polymorphic_pointer_downcast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
+        PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
     output.info()->set_data_layout(aclDataLayout);
 
     m_Layer.configure(&input, &output, blockSize);
diff --git a/src/backends/cl/workloads/ClDequantizeWorkload.cpp b/src/backends/cl/workloads/ClDequantizeWorkload.cpp
index eca795d..eb63900 100644
--- a/src/backends/cl/workloads/ClDequantizeWorkload.cpp
+++ b/src/backends/cl/workloads/ClDequantizeWorkload.cpp
@@ -7,6 +7,7 @@
 #include "ClWorkloadUtils.hpp"
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 
 #include <arm_compute/core/Types.h>
@@ -14,8 +15,6 @@
 #include <cl/ClLayerSupport.hpp>
 #include <cl/ClTensorHandle.hpp>
 
-#include <boost/polymorphic_pointer_cast.hpp>
-
 namespace armnn
 {
 using namespace armcomputetensorutils;
@@ -34,10 +33,10 @@
 {
     m_Data.ValidateInputsOutputs("ClDequantizeWorkload", 1, 1);
 
-    arm_compute::ICLTensor& input = boost::polymorphic_pointer_downcast<IClTensorHandle>(
+    arm_compute::ICLTensor& input = armnn::PolymorphicPointerDowncast<IClTensorHandle>(
             m_Data.m_Inputs[0])->GetTensor();
 
-    arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast<IClTensorHandle>(
+    arm_compute::ICLTensor& output = armnn::PolymorphicPointerDowncast<IClTensorHandle>(
             m_Data.m_Outputs[0])->GetTensor();
 
     m_Layer.reset(new arm_compute::CLDequantizationLayer());
diff --git a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
index 64da92c..b87658b 100644
--- a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
@@ -9,13 +9,12 @@
 
 #include <aclCommon/ArmComputeUtils.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <cl/ClLayerSupport.hpp>
 #include <cl/ClTensorHandle.hpp>
 #include <cl/ClLayerSupport.hpp>
 
-#include <boost/polymorphic_pointer_cast.hpp>
-
 namespace armnn
 {
 using namespace armcomputetensorutils;
@@ -51,9 +50,9 @@
     m_Data.ValidateInputsOutputs("ClSpaceToBatchNdWorkload", 1, 1);
 
     arm_compute::ICLTensor& input  =
-        boost::polymorphic_pointer_downcast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
+        armnn::PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output =
-        boost::polymorphic_pointer_downcast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
+        armnn::PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
 
     // ArmNN blockShape is [H, W] Cl asks for W, H
     int32_t blockHeight = boost::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[0]);
diff --git a/src/backends/cl/workloads/ClSplitterWorkload.cpp b/src/backends/cl/workloads/ClSplitterWorkload.cpp
index 296e0a3..045fbb7 100644
--- a/src/backends/cl/workloads/ClSplitterWorkload.cpp
+++ b/src/backends/cl/workloads/ClSplitterWorkload.cpp
@@ -10,6 +10,7 @@
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 #include <aclCommon/ArmComputeUtils.hpp>
 #include <arm_compute/runtime/CL/functions/CLSplit.h>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <cl/ClTensorHandle.hpp>
 
@@ -74,13 +75,13 @@
         return;
     }
 
-    arm_compute::ICLTensor& input = boost::polymorphic_pointer_downcast<IClTensorHandle>(
+    arm_compute::ICLTensor& input = armnn::PolymorphicPointerDowncast<IClTensorHandle>(
             m_Data.m_Inputs[0])->GetTensor();
 
     std::vector<arm_compute::ICLTensor *> aclOutputs;
     for (auto output : m_Data.m_Outputs)
     {
-        arm_compute::ICLTensor& aclOutput  = boost::polymorphic_pointer_downcast<IClTensorHandle>(output)->GetTensor();
+        arm_compute::ICLTensor& aclOutput  = armnn::PolymorphicPointerDowncast<IClTensorHandle>(output)->GetTensor();
         aclOutputs.emplace_back(&aclOutput);
     }
 
diff --git a/src/backends/cl/workloads/ClStackWorkload.cpp b/src/backends/cl/workloads/ClStackWorkload.cpp
index 3ba698e..e434f98 100644
--- a/src/backends/cl/workloads/ClStackWorkload.cpp
+++ b/src/backends/cl/workloads/ClStackWorkload.cpp
@@ -5,6 +5,7 @@
 #include "ClStackWorkload.hpp"
 #include "ClWorkloadUtils.hpp"
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <cl/ClTensorHandle.hpp>
 #include <cl/ClLayerSupport.hpp>
@@ -12,7 +13,6 @@
 #include <arm_compute/core/Types.h>
 
 #include <boost/numeric/conversion/cast.hpp>
-#include <boost/polymorphic_pointer_cast.hpp>
 
 namespace armnn
 {
@@ -51,10 +51,10 @@
     std::vector<arm_compute::ICLTensor*> aclInputs;
     for (auto input : m_Data.m_Inputs)
     {
-        arm_compute::ICLTensor& aclInput = boost::polymorphic_pointer_downcast<IClTensorHandle>(input)->GetTensor();
+        arm_compute::ICLTensor& aclInput = armnn::PolymorphicPointerDowncast<IClTensorHandle>(input)->GetTensor();
         aclInputs.emplace_back(&aclInput);
     }
-    arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast<IClTensorHandle>(
+    arm_compute::ICLTensor& output = armnn::PolymorphicPointerDowncast<IClTensorHandle>(
                                                                          m_Data.m_Outputs[0])->GetTensor();
 
     m_Layer.reset(new arm_compute::CLStackLayer());