IVGCVSW-5787 Add/Update Execute() implementations in RefActivationWorkload

 * Added multithreaded StridedSliceEndToEndTest

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I4579db7b5959e0a22256f1bda00238c22e611dec
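
The pattern applied to this workload (and to the other reference workloads touched by this change) is that the existing Execute() and the new ExecuteAsync(WorkingMemDescriptor&) both delegate to a shared Execute(inputs, outputs) overload: the synchronous path passes the tensor handles bound in m_Data, while the asynchronous path passes per-call handles taken from the WorkingMemDescriptor, so several threads can drive a single workload instance with their own tensors. A minimal usage sketch, assuming <thread> is available, a constructed RefArgMinMaxWorkload named workload, and caller-owned ITensorHandle pointers (these names are illustrative, not part of this patch):

    // Each thread supplies its own working memory; the workload object is shared.
    WorkingMemDescriptor memDesc0;
    memDesc0.m_Inputs  = { inputHandle0 };   // ITensorHandle* owned by thread 0
    memDesc0.m_Outputs = { outputHandle0 };

    WorkingMemDescriptor memDesc1;
    memDesc1.m_Inputs  = { inputHandle1 };   // ITensorHandle* owned by thread 1
    memDesc1.m_Outputs = { outputHandle1 };

    std::thread thread0([&]() { workload.ExecuteAsync(memDesc0); });
    std::thread thread1([&]() { workload.ExecuteAsync(memDesc1); });
    thread0.join();
    thread1.join();
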
diff --git a/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp b/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
index bf8649f..77167a8 100644
--- a/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
@@ -18,16 +18,27 @@
         const WorkloadInfo& info)
         : BaseWorkload<ArgMinMaxQueueDescriptor>(descriptor, info) {}
 
+
 void RefArgMinMaxWorkload::Execute() const
 {
+    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
+}
+
+void RefArgMinMaxWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+{
+    Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+}
+
+void RefArgMinMaxWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
+{
     ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefArgMinMaxWorkload_Execute");
 
-    const TensorInfo &inputTensorInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo &inputTensorInfo = GetTensorInfo(inputs[0]);
 
-    std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, m_Data.m_Inputs[0]->Map());
+    std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, inputs[0]->Map());
     Decoder<float> &decoder = *decoderPtr;
 
-    const TensorInfo &outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+    const TensorInfo &outputTensorInfo = GetTensorInfo(outputs[0]);
 
     if (outputTensorInfo.GetDataType() == armnn::DataType::Signed32) {
-        int32_t *output = GetOutputTensorData<int32_t>(0, m_Data);
+        int32_t *output = GetOutputTensorData<int32_t>(outputs[0]);
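
The two new entry points above imply matching declarations in RefArgMinMaxWorkload.hpp. A rough sketch of what that header would need to declare, inferred from this .cpp rather than copied from the actual patch:

    class RefArgMinMaxWorkload : public BaseWorkload<ArgMinMaxQueueDescriptor>
    {
    public:
        RefArgMinMaxWorkload(const ArgMinMaxQueueDescriptor& descriptor, const WorkloadInfo& info);

        void Execute() const override;
        void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;

    private:
        // Shared body used by both the synchronous and the asynchronous path.
        void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
    };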