IVGCVSW-6683-6684 Add ClBaseWorkload and NeonBaseWorkload

* Cl/Neon Activation workloads inherit from ClBaseWorkload/NeonBaseWorkload respectively
* Unit Test for ReplaceTensorHandle functions

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I985e34b93a96405735402a6d3b947957afbe2857
diff --git a/src/armnnTestUtils/CreateWorkload.hpp b/src/armnnTestUtils/CreateWorkload.hpp
index 15de5b5..d01919c 100644
--- a/src/armnnTestUtils/CreateWorkload.hpp
+++ b/src/armnnTestUtils/CreateWorkload.hpp
@@ -68,7 +68,7 @@
 {
     // Creates the layer we're testing.
     ActivationDescriptor layerDesc;
-    layerDesc.m_Function = ActivationFunction::Abs;
+    layerDesc.m_Function = ActivationFunction::ReLu;
     layerDesc.m_A        = 3.5f;
     layerDesc.m_B        = -10.0f;
 
@@ -94,7 +94,7 @@
     CHECK(queueDescriptor.m_Outputs.size() == 1);
     CHECK(queueDescriptor.m_Parameters.m_A == 3.5f);
     CHECK(queueDescriptor.m_Parameters.m_B == -10.0f);
-    CHECK((queueDescriptor.m_Parameters.m_Function == ActivationFunction::Abs));
+    CHECK((queueDescriptor.m_Parameters.m_Function == ActivationFunction::ReLu));
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 34914fc..d8b2d4f 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -1297,4 +1297,36 @@
     ClCreateQuantizedLstmWorkloadTest<ClQuantizedLstmWorkload>();
 }
 
+template <armnn::DataType DataType>
+static void ClCreateActivationWorkloadReplaceFunctionsTest()
+{
+    std::shared_ptr<ClMemoryManager> memoryManager = std::make_shared<ClMemoryManager>(
+            std::make_unique<arm_compute::CLBufferAllocator>());
+
+    Graph graph;
+    ClWorkloadFactory factory = ClWorkloadFactoryHelper::GetFactory(memoryManager);
+    // input and output are created as armnn::TensorInfo tensorInfo({1, 1}, DataType)
+    auto workloadPtr = CreateActivationWorkloadTest<ClActivationWorkload, DataType>(factory, graph);
+
+    // new input and output tensor handles are created and then replaced in the workload
+    const ClTensorHandleFactory tensorHandleFactory(memoryManager);
+    TensorInfo inputInfo({2 , 2}, DataType::Float16);
+    TensorInfo outputInfo({2 , 2}, DataType::Float16);
+    unique_ptr<ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputInfo, true);
+    inputHandle->Manage();
+    inputHandle->Allocate();
+    unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo, true);
+    outputHandle->Manage();
+    outputHandle->Allocate();
+
+    unsigned int slot = 0;
+    CHECK_THROWS_AS(workloadPtr->ReplaceInputTensorHandle(inputHandle.get(), slot), UnimplementedException);
+    CHECK_THROWS_AS(workloadPtr->ReplaceOutputTensorHandle(outputHandle.get(), slot), UnimplementedException);
+}
+
+TEST_CASE("ClReplaceFunctionsfromFloat32toFloat16ActivationWorkload")
+{
+    ClCreateActivationWorkloadReplaceFunctionsTest<armnn::DataType::Float32>();
+}
+
 }
diff --git a/src/backends/cl/workloads/ClActivationWorkload.cpp b/src/backends/cl/workloads/ClActivationWorkload.cpp
index 91a44f4..a92f8fb 100644
--- a/src/backends/cl/workloads/ClActivationWorkload.cpp
+++ b/src/backends/cl/workloads/ClActivationWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -32,7 +32,7 @@
 ClActivationWorkload::ClActivationWorkload(const ActivationQueueDescriptor& descriptor,
                                            const WorkloadInfo& info,
                                            const arm_compute::CLCompileContext& clCompileContext)
-    : BaseWorkload<ActivationQueueDescriptor>(descriptor, info)
+    : ClBaseWorkload<ActivationQueueDescriptor>(descriptor, info)
 {
     // Report Profiling Details
     ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClActivationWorkload_Construct",
diff --git a/src/backends/cl/workloads/ClActivationWorkload.hpp b/src/backends/cl/workloads/ClActivationWorkload.hpp
index 683229e..14835fb 100644
--- a/src/backends/cl/workloads/ClActivationWorkload.hpp
+++ b/src/backends/cl/workloads/ClActivationWorkload.hpp
@@ -1,11 +1,11 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #pragma once
 
-#include <armnn/backends/Workload.hpp>
+#include "ClBaseWorkload.hpp"
 
 #include <arm_compute/runtime/CL/functions/CLActivationLayer.h>
 
@@ -15,7 +15,7 @@
                                                  const TensorInfo& output,
                                                  const ActivationDescriptor& descriptor);
 
-class ClActivationWorkload : public BaseWorkload<ActivationQueueDescriptor>
+class ClActivationWorkload : public ClBaseWorkload<ActivationQueueDescriptor>
 {
 public:
     ClActivationWorkload(const ActivationQueueDescriptor& descriptor,
diff --git a/src/backends/cl/workloads/ClBaseWorkload.hpp b/src/backends/cl/workloads/ClBaseWorkload.hpp
new file mode 100644
index 0000000..e74fc84
--- /dev/null
+++ b/src/backends/cl/workloads/ClBaseWorkload.hpp
@@ -0,0 +1,40 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/backends/Workload.hpp>
+
+namespace armnn
+{
+template <typename QueueDescriptor>
+class ClBaseWorkload : public BaseWorkload<QueueDescriptor>
+{
+public:
+    ClBaseWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info)
+            : BaseWorkload<QueueDescriptor>(descriptor, info)
+    {}
+
+    // Replace input tensor handle with the given TensorHandle and call Reconfigure()
+    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override
+    {
+        this->m_Data.m_Inputs[slot] = tensorHandle;
+        Reconfigure();
+    }
+
+    // Replace output tensor handle with the given TensorHandle and call Reconfigure()
+    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override
+    {
+        this->m_Data.m_Outputs[slot] = tensorHandle;
+        Reconfigure();
+    }
+
+    // Reconfigure the workload configuration. Throw armnn::UnimplementedException by default.
+    virtual void Reconfigure()
+    {
+        throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+    }
+};
+} //namespace armnn
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index c1563fe..66718cc 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -1059,4 +1059,38 @@
     NeonCreateQLstmWorkloadTest<NeonQLstmWorkload>();
 }
 
+template <armnn::DataType DataType>
+static void NeonCreateActivationWorkloadReplaceFunctionsTest()
+{
+    shared_ptr<NeonMemoryManager> memoryManager = make_shared<NeonMemoryManager>();
+
+    Graph graph;
+    NeonWorkloadFactory factory = NeonWorkloadFactoryHelper::GetFactory(memoryManager);
+    // input and output are created as armnn::TensorInfo tensorInfo({1, 1}, DataType)
+    auto workloadPtr = CreateActivationWorkloadTest<NeonActivationWorkload, DataType>(factory, graph);
+
+    // new input and output tensor handles are created and then replaced in the workload
+    const NeonTensorHandleFactory tensorHandleFactory(memoryManager);
+    TensorInfo inputInfo({2 , 2}, DataType::Float16);
+    TensorInfo outputInfo({2 , 2}, DataType::Float16);
+    unique_ptr<ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputInfo);
+    inputHandle->Allocate();
+    unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
+    outputHandle->Allocate();
+
+    unsigned int slot = 0;
+    CHECK_THROWS_AS(workloadPtr->ReplaceInputTensorHandle(inputHandle.get(), slot), UnimplementedException);
+    CHECK_THROWS_AS(workloadPtr->ReplaceOutputTensorHandle(outputHandle.get(), slot), UnimplementedException);
+}
+
+TEST_CASE("NeonReplaceFunctionsfromFloat32toFloat16ActivationWorkload")
+{
+    NeonCreateActivationWorkloadReplaceFunctionsTest<armnn::DataType::Float32>();
+}
+
+TEST_CASE("NeonReplaceFunctionsfromUint8toFloat16ActivationWorkload")
+{
+    NeonCreateActivationWorkloadReplaceFunctionsTest<armnn::DataType::QAsymmU8>();
+}
+
 }
diff --git a/src/backends/neon/workloads/NeonActivationWorkload.cpp b/src/backends/neon/workloads/NeonActivationWorkload.cpp
index dd4c97d..0fadc12 100644
--- a/src/backends/neon/workloads/NeonActivationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonActivationWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -31,7 +31,7 @@
 
 NeonActivationWorkload::NeonActivationWorkload(const ActivationQueueDescriptor& descriptor,
                                                const WorkloadInfo& info)
-    : BaseWorkload<ActivationQueueDescriptor>(descriptor, info)
+    : NeonBaseWorkload<ActivationQueueDescriptor>(descriptor, info)
 {
     // Report Profiling Details
     ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonActivationWorkload_Construct",
diff --git a/src/backends/neon/workloads/NeonActivationWorkload.hpp b/src/backends/neon/workloads/NeonActivationWorkload.hpp
index c3d6cc1..72ad477 100644
--- a/src/backends/neon/workloads/NeonActivationWorkload.hpp
+++ b/src/backends/neon/workloads/NeonActivationWorkload.hpp
@@ -1,23 +1,22 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #pragma once
 
-#include <armnn/backends/Workload.hpp>
+#include "NeonBaseWorkload.hpp"
 
 #include <arm_compute/core/Error.h>
 #include <arm_compute/runtime/IFunction.h>
 
 namespace armnn
 {
-
 arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    const ActivationDescriptor& descriptor);
 
-class NeonActivationWorkload : public BaseWorkload<ActivationQueueDescriptor>
+class NeonActivationWorkload : public NeonBaseWorkload<ActivationQueueDescriptor>
 {
 public:
     NeonActivationWorkload(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info);
diff --git a/src/backends/neon/workloads/NeonBaseWorkload.hpp b/src/backends/neon/workloads/NeonBaseWorkload.hpp
new file mode 100644
index 0000000..a92f35a
--- /dev/null
+++ b/src/backends/neon/workloads/NeonBaseWorkload.hpp
@@ -0,0 +1,40 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/backends/Workload.hpp>
+
+namespace armnn
+{
+template <typename QueueDescriptor>
+class NeonBaseWorkload : public BaseWorkload<QueueDescriptor>
+{
+public:
+    NeonBaseWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info)
+            : BaseWorkload<QueueDescriptor>(descriptor, info)
+    {}
+
+    // Replace input tensor handle with the given TensorHandle and call Reconfigure()
+    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override
+    {
+        this->m_Data.m_Inputs[slot] = tensorHandle;
+        Reconfigure();
+    }
+
+    // Replace output tensor handle with the given TensorHandle and call Reconfigure()
+    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override
+    {
+        this->m_Data.m_Outputs[slot] = tensorHandle;
+        Reconfigure();
+    }
+
+    // Reconfigure the workload configuration. Throw armnn::UnimplementedException by default.
+    virtual void Reconfigure()
+    {
+        throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+    }
+};
+} //namespace armnn