IVGCVSW-2967 Support QSymm16 for Constant workloads

* Validate that the output is one of the supported types in WorkloadData
* Validate that the output is one of the supported types in RefLayerSupport (see the validation sketch below)
* Add test for Constant with QuantisedSymm16 in LayerTests
* Add test for creating a Constant workload in RefCreateWorkloadTests
* Add test for Constant with QuantisedSymm16 in RefLayerTests (see the test sketch below)
* Refactor RefConstantWorkload - use BaseWorkload instead of TypedWorkload
* Refactor RefConstantWorkload - remove m_RanOnce, use PostAllocationConfigure() (a header sketch follows the diff)
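
The first two bullets are not part of the diff below. A rough sketch of what they amount to, assuming ArmNN's existing ValidateDataTypes helper in WorkloadData.cpp and the CheckSupportRule/TypeAnyOf rules already used elsewhere in RefLayerSupport.cpp (the exact wording, helper signatures and type lists in the commit may differ):

// WorkloadData.cpp (sketch): ConstantQueueDescriptor::Validate checks that the
// single output tensor holds one of the supported data types.
void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // Pre-existing checks (input/output counts, non-null m_LayerOutput,
    // matching shapes) are unchanged and omitted here.

    std::vector<DataType> supportedTypes =
    {
        DataType::Float32,
        DataType::Signed32,
        DataType::QuantisedAsymm8,
        DataType::QuantisedSymm16
    };

    ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0], supportedTypes, "ConstantQueueDescriptor");
}

// RefLayerSupport.cpp (sketch): IsConstantSupported accepts the same set of
// output types, now including QuantisedSymm16.
bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    std::array<DataType, 4> supportedTypes =
    {
        DataType::Float32,
        DataType::Signed32,
        DataType::QuantisedAsymm8,
        DataType::QuantisedSymm16
    };

    return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                            "Reference constant: output is not a supported type.");
}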
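
The new tests are likewise not shown in this diff. Purely as an illustration of the kind of coverage the test bullets describe (the actual test names and any shared helpers in the commit may differ), a RefLayerTests registration and a standalone RefCreateWorkloadTests case could look like:

// RefLayerTests.cpp (illustrative): register a hypothetical QSymm16 constant
// LayerTests function with the reference backend test suite.
ARMNN_AUTO_TEST_CASE(ConstantInt16, ConstantInt16Test)

// RefCreateWorkloadTests.cpp (illustrative): create a Constant workload with a
// QuantisedSymm16 output through the RefWorkloadFactory; relies on the
// includes and test suite already present in that file.
BOOST_AUTO_TEST_CASE(CreateConstantInt16Workload)
{
    armnn::RefWorkloadFactory factory;

    armnn::TensorShape shape({ 2, 3, 2, 10 });
    armnn::TensorInfo outputInfo(shape, armnn::DataType::QuantisedSymm16, 0.1f, 0);

    armnn::ScopedCpuTensorHandle layerOutput(outputInfo);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &layerOutput;

    armnn::WorkloadInfo info;
    info.m_OutputTensorInfos = { outputInfo };

    auto workload = factory.CreateConstant(descriptor, info);
    BOOST_CHECK(workload != nullptr);
}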

Signed-off-by: Nina Drozd <nina.drozd@arm.com>
Change-Id: Ic30e61319ef4ff9c367689901f7c6d498142a9c5
diff --git a/src/backends/reference/workloads/RefConstantWorkload.cpp b/src/backends/reference/workloads/RefConstantWorkload.cpp
index e074c6f..3506198 100644
--- a/src/backends/reference/workloads/RefConstantWorkload.cpp
+++ b/src/backends/reference/workloads/RefConstantWorkload.cpp
@@ -16,37 +16,26 @@
 namespace armnn
 {
 
-template <armnn::DataType DataType>
-void RefConstantWorkload<DataType>::Execute() const
+RefConstantWorkload::RefConstantWorkload(
+    const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info)
+    : BaseWorkload<ConstantQueueDescriptor>(descriptor, info) {}
+
+void RefConstantWorkload::PostAllocationConfigure()
 {
-    // Considering the reference backend independently, it could be possible to initialise the intermediate tensor
-    // created by the layer output handler at workload construction time, rather than at workload execution time.
-    // However, this is not an option for other backends (e.g. CL). For consistency, we prefer to align all
-    // implementations.
-    // A similar argument can be made about performing the memory copy in the first place (the layer output handler
-    // could have a non-owning reference to the layer output tensor managed by the const input layer); again, this is
-    // not an option for other backends, and the extra complexity required to make this work for the reference backend
-    // may not be worth the effort (skipping a memory copy in the first inference).
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConstantWorkload_Execute");
+    const ConstantQueueDescriptor& data = this->m_Data;
 
-    if (!m_RanOnce)
-    {
-        const ConstantQueueDescriptor& data = this->m_Data;
+    BOOST_ASSERT(data.m_LayerOutput != nullptr);
 
-        BOOST_ASSERT(data.m_LayerOutput != nullptr);
+    const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]);
+    BOOST_ASSERT(data.m_LayerOutput->GetTensorInfo().GetNumBytes() == outputInfo.GetNumBytes());
 
-        const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]);
-        BOOST_ASSERT(data.m_LayerOutput->GetTensorInfo().GetNumBytes() == outputInfo.GetNumBytes());
-
-        memcpy(GetOutputTensorData<void>(0, data), data.m_LayerOutput->GetConstTensor<void>(),
-            outputInfo.GetNumBytes());
-
-        m_RanOnce = true;
-    }
+    memcpy(GetOutputTensorData<void>(0, data), data.m_LayerOutput->GetConstTensor<void>(),
+        outputInfo.GetNumBytes());
 }
 
-template class RefConstantWorkload<DataType::Float32>;
-template class RefConstantWorkload<DataType::QuantisedAsymm8>;
-template class RefConstantWorkload<DataType::Signed32>;
+void RefConstantWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConstantWorkload_Execute");
+}
 
 } //namespace armnn
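
The matching header change is not shown above. A minimal sketch of RefConstantWorkload.hpp after the refactor, inferred from the constructor and overrides in the diff (the real header also carries the copyright banner and may differ in its includes):

// RefConstantWorkload.hpp (sketch): no longer a class template over the data
// type; derives from BaseWorkload<ConstantQueueDescriptor>, drops m_RanOnce,
// and copies the constant data once in PostAllocationConfigure().
#pragma once

#include <backendsCommon/Workload.hpp>
#include <backendsCommon/WorkloadData.hpp>

namespace armnn
{

class RefConstantWorkload : public BaseWorkload<ConstantQueueDescriptor>
{
public:
    RefConstantWorkload(const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info);

    void PostAllocationConfigure() override;

    void Execute() const override;
};

} // namespace armnn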