IVGCVSW-5787 Add/Update Execute() implementations in RefActivationWorkload

 * Added multithreaded StridedSliceEndToEndTest

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I4579db7b5959e0a22256f1bda00238c22e611dec
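
The diff below factors the existing Execute() into a shared overload that both the
synchronous and asynchronous entry points delegate to. A minimal sketch of that shape,
assuming only the BaseWorkload/ITensorHandle/WorkingMemDescriptor interfaces visible in
the diff (RefSampleWorkload and SampleQueueDescriptor are hypothetical names):

    // Illustrative sketch of the refactoring pattern, not a file in this change.
    // Per-call tensor state that PostAllocationConfigure() used to cache on the
    // workload is instead derived from whichever handle set is passed in, so a
    // single const helper serves both entry points.
    class RefSampleWorkload : public BaseWorkload<SampleQueueDescriptor>
    {
    public:
        using BaseWorkload<SampleQueueDescriptor>::BaseWorkload;

        // Synchronous path: use the handles captured in the queue descriptor.
        void Execute() const override
        {
            Execute(m_Data.m_Inputs, m_Data.m_Outputs);
        }

        // Asynchronous path: use per-invocation working memory instead, so
        // concurrent executions do not share mutable workload state.
        void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override
        {
            Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
        }

    private:
        void Execute(std::vector<ITensorHandle*> inputs,
                     std::vector<ITensorHandle*> outputs) const
        {
            // Decoders/encoders are built locally from the handles passed in,
            // rather than from cached members set up in PostAllocationConfigure().
        }
    };
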
diff --git a/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp b/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
index 1b4e8f9..f187e0c 100644
--- a/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
@@ -22,32 +22,31 @@
     : BaseWorkload<LogicalBinaryQueueDescriptor>(desc, info)
 {}
 
-void RefLogicalBinaryWorkload::PostAllocationConfigure()
+void RefLogicalBinaryWorkload::Execute() const
 {
-    const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
-    const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
-    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
-
-    m_Input0 = MakeDecoder<InType>(inputInfo0);
-    m_Input1 = MakeDecoder<InType>(inputInfo1);
-    m_Output = MakeEncoder<OutType>(outputInfo);
+    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
 }
 
-void RefLogicalBinaryWorkload::Execute() const
+void RefLogicalBinaryWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+{
+    Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+}
+
+void RefLogicalBinaryWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
     ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefLogicalBinaryWorkload_Execute");
 
-    const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
-    const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
-    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+    const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
+    const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
+    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
 
     const TensorShape& inShape0 = inputInfo0.GetShape();
     const TensorShape& inShape1 = inputInfo1.GetShape();
     const TensorShape& outShape = outputInfo.GetShape();
 
-    m_Input0->Reset(m_Data.m_Inputs[0]->Map());
-    m_Input1->Reset(m_Data.m_Inputs[1]->Map());
-    m_Output->Reset(m_Data.m_Outputs[0]->Map());
+    std::unique_ptr<Decoder<InType>>  input0 = MakeDecoder<InType>(inputInfo0, inputs[0]->Map());
+    std::unique_ptr<Decoder<InType>>  input1 = MakeDecoder<InType>(inputInfo1, inputs[1]->Map());
+    std::unique_ptr<Encoder<OutType>> output = MakeEncoder<OutType>(outputInfo, outputs[0]->Map());
 
     using AndFunction = LogicalBinaryFunction<std::logical_and<bool>>;
     using OrFunction  = LogicalBinaryFunction<std::logical_or<bool>>;
@@ -56,12 +55,12 @@
     {
         case LogicalBinaryOperation::LogicalAnd:
         {
-            AndFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
+            AndFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
             break;
         }
         case LogicalBinaryOperation::LogicalOr:
         {
-            OrFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
+            OrFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
             break;
         }
         default:
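
Under the same assumptions, a caller driving the new asynchronous path supplies
per-invocation working memory rather than relying on state cached by
PostAllocationConfigure(). A hypothetical call site (handle setup elided):

    // Each thread populates its own WorkingMemDescriptor, so the workload
    // itself stays stateless across concurrent ExecuteAsync() calls.
    WorkingMemDescriptor workingMem;
    workingMem.m_Inputs  = { input0Handle, input1Handle };
    workingMem.m_Outputs = { outputHandle };
    workload.ExecuteAsync(workingMem);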