IVGCVSW-1951 Remove type templating from NeonReshapeWorkload

Replace the NeonReshapeFloatWorkload and NeonReshapeUint8Workload pair
with a single NeonReshapeWorkload derived from BaseWorkload, and update
the workload factory, build files and unit tests accordingly.

Change-Id: I90bf3baaad725d121ca864b7a7bb0c74abb69daf
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 8202afa..0568c81 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -224,7 +224,7 @@
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
     const WorkloadInfo& info) const
 {
-    return MakeWorkload<NeonReshapeFloatWorkload, NeonReshapeUint8Workload>(descriptor, info);
+    return std::make_unique<NeonReshapeWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 709497e..4350297 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -34,8 +34,7 @@
         workloads/NeonPooling2dBaseWorkload.cpp \
         workloads/NeonPooling2dFloatWorkload.cpp \
         workloads/NeonPooling2dUint8Workload.cpp \
-        workloads/NeonReshapeFloatWorkload.cpp \
-        workloads/NeonReshapeUint8Workload.cpp \
+        workloads/NeonReshapeWorkload.cpp \
         workloads/NeonSoftmaxBaseWorkload.cpp \
         workloads/NeonSoftmaxFloatWorkload.cpp \
         workloads/NeonSoftmaxUint8Workload.cpp \
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index ac0451f..3f07ef0 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -370,12 +370,12 @@
     NeonCreatePooling2dWorkloadTest<NeonPooling2dUint8Workload, DataType::QuantisedAsymm8>(DataLayout::NHWC);
 }
 
-template <typename ReshapeWorkloadType, typename armnn::DataType DataType>
+template <typename armnn::DataType DataType>
 static void NeonCreateReshapeWorkloadTest()
 {
     Graph               graph;
     NeonWorkloadFactory factory;
-    auto                workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);
+    auto                workload = CreateReshapeWorkloadTest<NeonReshapeWorkload, DataType>(factory, graph);
 
     // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
     ReshapeQueueDescriptor queueDescriptor = workload->GetData();
@@ -388,18 +388,18 @@
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
 {
-    NeonCreateReshapeWorkloadTest<NeonReshapeFloatWorkload, DataType::Float16>();
+    NeonCreateReshapeWorkloadTest<DataType::Float16>();
 }
 #endif
 
 BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
 {
-    NeonCreateReshapeWorkloadTest<NeonReshapeFloatWorkload, DataType::Float32>();
+    NeonCreateReshapeWorkloadTest<DataType::Float32>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
 {
-    NeonCreateReshapeWorkloadTest<NeonReshapeUint8Workload, DataType::QuantisedAsymm8>();
+    NeonCreateReshapeWorkloadTest<DataType::QuantisedAsymm8>();
 }
 
 template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 06eb504..7c22472 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -53,10 +53,8 @@
     NeonPooling2dFloatWorkload.hpp
     NeonPooling2dUint8Workload.cpp
     NeonPooling2dUint8Workload.hpp
-    NeonReshapeFloatWorkload.cpp
-    NeonReshapeFloatWorkload.hpp
-    NeonReshapeUint8Workload.cpp
-    NeonReshapeUint8Workload.hpp
+    NeonReshapeWorkload.cpp
+    NeonReshapeWorkload.hpp
     NeonSoftmaxBaseWorkload.cpp
     NeonSoftmaxBaseWorkload.hpp
     NeonSoftmaxFloatWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonReshapeFloatWorkload.cpp b/src/backends/neon/workloads/NeonReshapeFloatWorkload.cpp
deleted file mode 100644
index 2dae946..0000000
--- a/src/backends/neon/workloads/NeonReshapeFloatWorkload.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonReshapeFloatWorkload.hpp"
-
-
-
-namespace armnn
-{
-
-NeonReshapeFloatWorkload::NeonReshapeFloatWorkload(const ReshapeQueueDescriptor& descriptor,
-                                                   const WorkloadInfo& info)
-    : FloatWorkload<ReshapeQueueDescriptor>(descriptor, info)
-{
-    m_Data.ValidateInputsOutputs("NeonReshapeFloatWorkload", 1, 1);
-
-    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
-    m_Layer.configure(&input, &output);
-}
-
-void NeonReshapeFloatWorkload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonReshapeFloatWorkload_Execute");
-    m_Layer.run();
-}
-
-} //namespace armnn
-
diff --git a/src/backends/neon/workloads/NeonReshapeFloatWorkload.hpp b/src/backends/neon/workloads/NeonReshapeFloatWorkload.hpp
deleted file mode 100644
index bdef862..0000000
--- a/src/backends/neon/workloads/NeonReshapeFloatWorkload.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backends/neon/workloads/NeonWorkloadUtils.hpp>
-
-namespace armnn
-{
-
-class NeonReshapeFloatWorkload : public FloatWorkload<ReshapeQueueDescriptor>
-{
-public:
-    NeonReshapeFloatWorkload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info);
-
-    virtual void Execute() const override;
-
-private:
-    mutable arm_compute::NEReshapeLayer m_Layer;
-};
-
-} //namespace armnn
-
-
-
-
-
diff --git a/src/backends/neon/workloads/NeonReshapeUint8Workload.cpp b/src/backends/neon/workloads/NeonReshapeUint8Workload.cpp
deleted file mode 100644
index 41aa07f..0000000
--- a/src/backends/neon/workloads/NeonReshapeUint8Workload.cpp
+++ /dev/null
@@ -1,30 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonReshapeUint8Workload.hpp"
-
-
-
-
-namespace armnn
-{
-NeonReshapeUint8Workload::NeonReshapeUint8Workload(const ReshapeQueueDescriptor& descriptor,
-                                                   const WorkloadInfo& info)
-    : Uint8Workload<ReshapeQueueDescriptor>(descriptor, info)
-{
-    m_Data.ValidateInputsOutputs("NeonReshapeUint8Workload", 1, 1);
-
-    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
-    m_Layer.configure(&input, &output);
-}
-
-void NeonReshapeUint8Workload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonReshapeUint8Workload_Execute");
-    m_Layer.run();
-}
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonReshapeWorkload.cpp b/src/backends/neon/workloads/NeonReshapeWorkload.cpp
new file mode 100644
index 0000000..c2dcdd5
--- /dev/null
+++ b/src/backends/neon/workloads/NeonReshapeWorkload.cpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonReshapeWorkload.hpp"
+
+namespace armnn
+{
+
+NeonReshapeWorkload::NeonReshapeWorkload(const ReshapeQueueDescriptor& descriptor,
+                                         const WorkloadInfo& info)
+    : BaseWorkload<ReshapeQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonReshapeWorkload", 1, 1);
+
+    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    m_Layer.configure(&input, &output);
+}
+
+void NeonReshapeWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonReshapeWorkload_Execute");
+    m_Layer.run();
+}
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonReshapeUint8Workload.hpp b/src/backends/neon/workloads/NeonReshapeWorkload.hpp
similarity index 64%
rename from src/backends/neon/workloads/NeonReshapeUint8Workload.hpp
rename to src/backends/neon/workloads/NeonReshapeWorkload.hpp
index 4951873..8fd278b 100644
--- a/src/backends/neon/workloads/NeonReshapeUint8Workload.hpp
+++ b/src/backends/neon/workloads/NeonReshapeWorkload.hpp
@@ -10,10 +10,11 @@
 namespace armnn
 {
 
-class NeonReshapeUint8Workload : public Uint8Workload<ReshapeQueueDescriptor>
+class NeonReshapeWorkload : public BaseWorkload<ReshapeQueueDescriptor>
 {
 public:
-    NeonReshapeUint8Workload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info);
+    NeonReshapeWorkload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info);
+
     virtual void Execute() const override;
 
 private:
@@ -21,7 +22,3 @@
 };
 
 } //namespace armnn
-
-
-
-
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index d5a76b4..c877c94 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -29,8 +29,7 @@
 #include "NeonPooling2dBaseWorkload.hpp"
 #include "NeonPooling2dFloatWorkload.hpp"
 #include "NeonPooling2dUint8Workload.hpp"
-#include "NeonReshapeFloatWorkload.hpp"
-#include "NeonReshapeUint8Workload.hpp"
+#include "NeonReshapeWorkload.hpp"
 #include "NeonSoftmaxFloatWorkload.hpp"
 #include "NeonSoftmaxUint8Workload.hpp"
 #include "NeonSplitterFloatWorkload.hpp"