IVGCVSW-4483 Remove boost::polymorphic_downcast

 * replace boost::polymorphic_downcast with armnn::PolymorphicDowncast (see the sketch below)
 * remove includes that were only needed for boost::polymorphic_downcast
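
The diff below shows the call sites switching from boost::polymorphic_downcast
to PolymorphicDowncast, pulled in via armnn/utility/PolymorphicDowncast.hpp.
For illustration only, a minimal sketch of this kind of checked downcast is
given here; the helper name polymorphic_downcast_sketch and its body are
assumptions, not the actual ArmNN implementation:

    #include <cassert>

    // Sketch of a checked downcast: in debug builds the result is verified
    // against dynamic_cast via assert; in release builds (NDEBUG) the assert
    // disappears and only the static_cast remains. Illustrative only.
    template <typename DestType, typename SourceType>
    DestType polymorphic_downcast_sketch(SourceType* value)
    {
        // DestType is expected to be a pointer type, e.g. IAclTensorHandle*.
        assert(dynamic_cast<DestType>(value) == static_cast<DestType>(value));
        return static_cast<DestType>(value);
    }

    // Hypothetical usage, mirroring the call sites in the diff:
    //   auto* handle = polymorphic_downcast_sketch<IAclTensorHandle*>(tensorHandle);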

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Ie603fb82860fe05fee547dc78073230cc62b2e1f
diff --git a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
index 8b229a1..9ae82ff 100644
--- a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
@@ -10,6 +10,7 @@
 #include <arm_compute/runtime/NEON/functions/NEDequantizationLayer.h>
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <neon/NeonTensorHandle.hpp>
 
@@ -32,8 +33,8 @@
 {
     m_Data.ValidateInputsOutputs("NeonDequantizeWorkload", 1, 1);
 
-    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     std::unique_ptr<arm_compute::NEDequantizationLayer> layer(new arm_compute::NEDequantizationLayer());
     layer->configure(&input, &output);