IVGCVSW-1951 Remove type templating from NeonConstantWorkload

Replace the NeonBaseConstantWorkload class template and its
NeonConstantFloatWorkload/NeonConstantUint8Workload subclasses with a
single NeonConstantWorkload. The new workload derives from
BaseWorkload<ConstantQueueDescriptor> and selects the tensor copy
routine at run time from the ACL output tensor's data type, so the
per-data-type wrapper classes and their separate profiling events are
no longer needed.

Change-Id: Ib831f02ab6b5d96f1a959187d8f3e694e6257ae5
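
The pattern in isolation, as a minimal compilable sketch (ConstantWorkload,
DataType and the copy messages below are simplified stand-ins, not Arm NN
names): one class with a run-time switch on a data-type tag replaces the
per-type template instantiations that the patch deletes.

    // dispatch_sketch.cpp: compile-time templating replaced by a
    // run-time switch on a data-type tag.
    #include <iostream>

    enum class DataType { F16, F32, QAsymmU8 };

    class ConstantWorkload
    {
    public:
        explicit ConstantWorkload(DataType type) : m_Type(type) {}

        // A single Execute() covers every supported data type; the copy
        // routine is chosen when the workload runs, so no Float/Uint8
        // subclasses are required.
        void Execute() const
        {
            switch (m_Type)
            {
                case DataType::F16:      std::cout << "copy as half\n";  break;
                case DataType::F32:      std::cout << "copy as float\n"; break;
                case DataType::QAsymmU8: std::cout << "copy as uint8\n"; break;
            }
        }

    private:
        DataType m_Type;
    };

    int main()
    {
        ConstantWorkload{DataType::F32}.Execute();
    }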
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 1e8ab1a..a437227 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -218,7 +218,7 @@
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
     const WorkloadInfo& info) const
 {
-    return MakeWorkload<NeonConstantFloatWorkload, NeonConstantUint8Workload>(descriptor, info);
+    return std::make_unique<NeonConstantWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index af83fb1..e63baa0 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -16,8 +16,7 @@
         workloads/NeonActivationWorkload.cpp \
         workloads/NeonAdditionFloatWorkload.cpp \
         workloads/NeonBatchNormalizationFloatWorkload.cpp \
-        workloads/NeonConstantFloatWorkload.cpp \
-        workloads/NeonConstantUint8Workload.cpp \
+        workloads/NeonConstantWorkload.cpp \
         workloads/NeonConvertFp16ToFp32Workload.cpp \
         workloads/NeonConvertFp32ToFp16Workload.cpp \
         workloads/NeonConvolution2dBaseWorkload.cpp \
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index a96c27c..0b0b9ed 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -8,13 +8,10 @@
     NeonActivationWorkload.hpp
     NeonAdditionFloatWorkload.cpp
     NeonAdditionFloatWorkload.hpp
-    NeonBaseConstantWorkload.hpp
     NeonBatchNormalizationFloatWorkload.cpp
     NeonBatchNormalizationFloatWorkload.hpp
-    NeonConstantFloatWorkload.cpp
-    NeonConstantFloatWorkload.hpp
-    NeonConstantUint8Workload.cpp
-    NeonConstantUint8Workload.hpp
+    NeonConstantWorkload.cpp
+    NeonConstantWorkload.hpp
     NeonConvertFp16ToFp32Workload.cpp
     NeonConvertFp16ToFp32Workload.hpp
     NeonConvertFp32ToFp16Workload.cpp
diff --git a/src/backends/neon/workloads/NeonBaseConstantWorkload.hpp b/src/backends/neon/workloads/NeonBaseConstantWorkload.hpp
deleted file mode 100644
index 828e476..0000000
--- a/src/backends/neon/workloads/NeonBaseConstantWorkload.hpp
+++ /dev/null
@@ -1,82 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <arm_compute/core/Types.h>
-#include <armnnUtils/Half.hpp>
-#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
-#include <backends/neon/NeonTensorHandle.hpp>
-#include <backends/neon/workloads/NeonWorkloadUtils.hpp>
-#include <backends/CpuTensorHandle.hpp>
-#include <backends/Workload.hpp>
-
-#include <boost/cast.hpp>
-
-namespace armnn
-{
-
-// Base class template providing an implementation of the Constant layer common to all data types.
-template <armnn::DataType... DataFormats>
-class NeonBaseConstantWorkload : public TypedWorkload<ConstantQueueDescriptor, DataFormats...>
-{
-public:
-    NeonBaseConstantWorkload(const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info)
-        : TypedWorkload<ConstantQueueDescriptor, DataFormats...>(descriptor, info)
-        , m_RanOnce(false)
-    {
-    }
-
-    virtual void Execute() const override
-    {
-        using namespace armcomputetensorutils;
-
-        // The intermediate tensor held by the corresponding layer output handler can be initialised with the
-        // given data on the first inference, then reused for subsequent inferences.
-        // The initialisation cannot happen at workload construction time since the ACL kernel for the next layer
-        // may not have been configured at the time.
-        if (!m_RanOnce)
-        {
-            const ConstantQueueDescriptor& data = this->m_Data;
-
-            BOOST_ASSERT(data.m_LayerOutput != nullptr);
-            arm_compute::ITensor& output =
-                boost::polymorphic_downcast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
-            arm_compute::DataType computeDataType =
-                boost::polymorphic_downcast<NeonTensorHandle*>(data.m_Outputs[0])->GetDataType();
-
-            switch (computeDataType)
-            {
-                case arm_compute::DataType::F16:
-                {
-                    CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<Half>(), output);
-                    break;
-                }
-                case arm_compute::DataType::F32:
-                {
-                    CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<float>(), output);
-                    break;
-                }
-                case arm_compute::DataType::QASYMM8:
-                {
-                    CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<uint8_t>(), output);
-                    break;
-                }
-                default:
-                {
-                    BOOST_ASSERT_MSG(false, "Unknown data type");
-                    break;
-                }
-            }
-
-            m_RanOnce = true;
-        }
-    }
-
-private:
-    mutable bool m_RanOnce;
-};
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConstantFloatWorkload.cpp b/src/backends/neon/workloads/NeonConstantFloatWorkload.cpp
deleted file mode 100644
index dbdd057..0000000
--- a/src/backends/neon/workloads/NeonConstantFloatWorkload.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonConstantFloatWorkload.hpp"
-
-namespace armnn
-{
-
-void NeonConstantFloatWorkload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConstantFloatWorkload_Execute");
-    NeonBaseConstantWorkload::Execute();
-}
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConstantFloatWorkload.hpp b/src/backends/neon/workloads/NeonConstantFloatWorkload.hpp
deleted file mode 100644
index c35b5fd..0000000
--- a/src/backends/neon/workloads/NeonConstantFloatWorkload.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "NeonBaseConstantWorkload.hpp"
-
-namespace armnn
-{
-
-class NeonConstantFloatWorkload : public NeonBaseConstantWorkload<DataType::Float16, DataType::Float32>
-{
-public:
-    using NeonBaseConstantWorkload<DataType::Float16, DataType::Float32>::NeonBaseConstantWorkload;
-    virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConstantUint8Workload.cpp b/src/backends/neon/workloads/NeonConstantUint8Workload.cpp
deleted file mode 100644
index c607d86..0000000
--- a/src/backends/neon/workloads/NeonConstantUint8Workload.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonConstantUint8Workload.hpp"
-
-namespace armnn
-{
-
-void NeonConstantUint8Workload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConstantUint8Workload_Execute");
-    NeonBaseConstantWorkload::Execute();
-}
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConstantUint8Workload.hpp b/src/backends/neon/workloads/NeonConstantUint8Workload.hpp
deleted file mode 100644
index 2cb9516..0000000
--- a/src/backends/neon/workloads/NeonConstantUint8Workload.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "NeonBaseConstantWorkload.hpp"
-
-namespace armnn
-{
-
-class NeonConstantUint8Workload : public NeonBaseConstantWorkload<DataType::QuantisedAsymm8>
-{
-public:
-    using NeonBaseConstantWorkload<DataType::QuantisedAsymm8>::NeonBaseConstantWorkload;
-    virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp
new file mode 100644
index 0000000..a348547
--- /dev/null
+++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp
@@ -0,0 +1,75 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonConstantWorkload.hpp"
+
+#include <arm_compute/core/Types.h>
+#include <armnnUtils/Half.hpp>
+#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
+#include <backends/neon/NeonTensorHandle.hpp>
+#include <backends/CpuTensorHandle.hpp>
+#include <backends/Workload.hpp>
+
+#include <boost/cast.hpp>
+
+namespace armnn
+{
+
+NeonConstantWorkload::NeonConstantWorkload(const ConstantQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info)
+    : BaseWorkload<ConstantQueueDescriptor>(descriptor, info)
+    , m_RanOnce(false)
+{
+}
+
+void NeonConstantWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConstantWorkload_Execute");
+
+    using namespace armcomputetensorutils;
+
+    // The intermediate tensor held by the corresponding layer output handler can be initialised with the
+    // given data on the first inference, then reused for subsequent inferences.
+    // The initialisation cannot happen at workload construction time since the ACL kernel for the next layer
+    // may not have been configured at the time.
+    if (!m_RanOnce)
+    {
+        const ConstantQueueDescriptor& data = this->m_Data;
+
+        BOOST_ASSERT(data.m_LayerOutput != nullptr);
+        arm_compute::ITensor& output =
+            boost::polymorphic_downcast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
+        arm_compute::DataType computeDataType =
+            boost::polymorphic_downcast<NeonTensorHandle*>(data.m_Outputs[0])->GetDataType();
+
+        switch (computeDataType)
+        {
+            case arm_compute::DataType::F16:
+            {
+                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<Half>(), output);
+                break;
+            }
+            case arm_compute::DataType::F32:
+            {
+                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<float>(), output);
+                break;
+            }
+            case arm_compute::DataType::QASYMM8:
+            {
+                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<uint8_t>(), output);
+                break;
+            }
+            default:
+            {
+                BOOST_ASSERT_MSG(false, "Unknown data type");
+                break;
+            }
+        }
+
+        m_RanOnce = true;
+    }
+}
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.hpp b/src/backends/neon/workloads/NeonConstantWorkload.hpp
new file mode 100644
index 0000000..7206963
--- /dev/null
+++ b/src/backends/neon/workloads/NeonConstantWorkload.hpp
@@ -0,0 +1,24 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backends/neon/workloads/NeonWorkloadUtils.hpp>
+
+namespace armnn
+{
+
+class NeonConstantWorkload : public BaseWorkload<ConstantQueueDescriptor>
+{
+public:
+    NeonConstantWorkload(const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+    virtual void Execute() const override;
+
+private:
+    mutable bool m_RanOnce;
+};
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 93711b6..702ddb5 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -6,10 +6,8 @@
 #pragma once
 #include "NeonActivationWorkload.hpp"
 #include "NeonAdditionFloatWorkload.hpp"
-#include "NeonBaseConstantWorkload.hpp"
 #include "NeonBatchNormalizationFloatWorkload.hpp"
-#include "NeonConstantFloatWorkload.hpp"
-#include "NeonConstantUint8Workload.hpp"
+#include "NeonConstantWorkload.hpp"
 #include "NeonConvertFp16ToFp32Workload.hpp"
 #include "NeonConvertFp32ToFp16Workload.hpp"
 #include "NeonConvolution2dBaseWorkload.hpp"