IVGCVSW-1955: Unify backend exceptions (wrap cl::Error)

* Added wrapper function around arm_compute::IFunction::run() that catches
  cl::Error and wraps it into an armnn::RuntimeException
* Added MakeWorkload template overloads inside ClWorkloadFactory that catch
  cl::Error and wrap it into an armnn::RuntimeException
* Replaced cl::Error with armnn::RuntimeException in catch statements inside
  LoadedNetwork

Change-Id: I2340f41ae02b8db1d7ef5157824a50e7410854e3
diff --git a/include/armnn/Exceptions.hpp b/include/armnn/Exceptions.hpp
index 29d874c..008617d 100644
--- a/include/armnn/Exceptions.hpp
+++ b/include/armnn/Exceptions.hpp
@@ -110,6 +110,11 @@
     using Exception::Exception;
 };
 
+class RuntimeException : public Exception
+{
+    using Exception::Exception;
+};
+
 template <typename ExceptionType>
 void ConditionalThrow(bool condition, const std::string& message)
 {
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 4f73bda..f49fa7b 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -11,10 +11,6 @@
 #include "Profiling.hpp"
 #include "HeapProfiling.hpp"
 
-#ifdef ARMCOMPUTECL_ENABLED
-#include <arm_compute/core/CL/OpenCL.h>
-#endif
-
 #include <backends/CpuTensorHandle.hpp>
 
 #include <boost/polymorphic_cast.hpp>
@@ -38,15 +34,6 @@
     return ss.str();
 }
 
-#if ARMCOMPUTECL_ENABLED
-std::string ToErrorMessage(const char * prefix, const cl::Error& error)
-{
-    std::stringstream ss;
-    ss << prefix << " " << error.what() << ".  CL error code is: " << error.err();
-    return ss.str();
-}
-#endif
-
 } // anonymous
 
 std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
@@ -54,30 +41,30 @@
 {
     std::unique_ptr<LoadedNetwork> loadedNetwork;
 
+    auto Fail = [&](const std::exception& error) -> std::unique_ptr<LoadedNetwork>
+    {
+        errorMessage = ToErrorMessage("An error occurred when preparing the network workloads: ", error);
+        BOOST_LOG_TRIVIAL(error) << errorMessage;
+
+        return std::unique_ptr<LoadedNetwork>();
+    };
+
     try
     {
         loadedNetwork.reset(new LoadedNetwork(std::move(net)));
     }
-    catch (const std::runtime_error& error)
+    catch (const armnn::RuntimeException& error)
     {
-        errorMessage = ToErrorMessage("An error occurred when preparing the network workloads: ", error);
-        BOOST_LOG_TRIVIAL(error) << errorMessage;
-        return std::unique_ptr<LoadedNetwork>();
+        return Fail(error);
     }
     catch (const armnn::Exception& error)
     {
-        errorMessage = ToErrorMessage("An error occurred when preparing the network workloads: ", error);
-        BOOST_LOG_TRIVIAL(error) << errorMessage;
-        return std::unique_ptr<LoadedNetwork>();
+        return Fail(error);
     }
-#if ARMCOMPUTECL_ENABLED
-    catch (const cl::Error& error)
+    catch (const std::runtime_error& error)
     {
-        errorMessage = ToErrorMessage("A CL error occurred attempting to prepare a network workload: ", error);
-        BOOST_LOG_TRIVIAL(error) << errorMessage;
-        return std::unique_ptr<LoadedNetwork>();
+        return Fail(error);
     }
-#endif
 
     return loadedNetwork;
 }
@@ -420,6 +407,12 @@
     m_CpuAcc.Acquire();
     m_GpuAcc.Acquire();
 
+    auto Fail = [&](const std::exception& error)
+    {
+        BOOST_LOG_TRIVIAL(error) << "An error occurred attempting to execute a workload: " << error.what();
+        success = false;
+    };
+
     try
     {
         for (size_t i = 0; i < m_WorkloadQueue.size(); ++i)
@@ -427,18 +420,13 @@
             m_WorkloadQueue[i]->Execute();
         }
     }
-#if ARMCOMPUTECL_ENABLED
-    catch (const cl::Error& error)
+    catch (const RuntimeException& error)
     {
-        BOOST_LOG_TRIVIAL(error) << "A CL error occurred attempting to execute a workload: "
-            << error.what() << ". CL error code is: " << error.err();
-        success = false;
+        Fail(error);
     }
-#endif
     catch (const std::runtime_error& error)
     {
-        BOOST_LOG_TRIVIAL(error) << "An error occurred attempting to execute a workload: " << error.what();
-        success = false;
+        Fail(error);
     }
 
     // Informs the memory managers to release memory in it's respective memory group
diff --git a/src/backends/MakeWorkloadHelper.hpp b/src/backends/MakeWorkloadHelper.hpp
index 281a65a..78a9669 100644
--- a/src/backends/MakeWorkloadHelper.hpp
+++ b/src/backends/MakeWorkloadHelper.hpp
@@ -39,7 +39,9 @@
 // Specify type void as the WorkloadType for unsupported DataType/WorkloadType combos.
 template <typename Float16Workload, typename Float32Workload, typename Uint8Workload, typename QueueDescriptorType,
     typename... Args>
-std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor, const WorkloadInfo& info, Args&&... args)
+std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descriptor,
+                                              const WorkloadInfo& info,
+                                              Args&&... args)
 {
     const DataType dataType = !info.m_InputTensorInfos.empty() ?
         info.m_InputTensorInfos[0].GetDataType()
@@ -67,9 +69,11 @@
 // FloatWorkload, Uint8Workload>.
 // Specify type void as the WorkloadType for unsupported DataType/WorkloadType combos.
 template <typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
-std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor, const WorkloadInfo& info, Args&&... args)
+std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descriptor,
+                                              const WorkloadInfo& info,
+                                              Args&&... args)
 {
-    return MakeWorkload<FloatWorkload, FloatWorkload, Uint8Workload>(descriptor, info,
+    return MakeWorkloadHelper<FloatWorkload, FloatWorkload, Uint8Workload>(descriptor, info,
        std::forward<Args>(args)...);
 }
 
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 68d3713..e1d8314 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -16,12 +16,13 @@
 #include <arm_compute/runtime/CL/CLBufferAllocator.h>
 #include <arm_compute/runtime/CL/CLScheduler.h>
 
-#include <backends/cl/workloads/ClWorkloads.hpp>
-
 #include <backends/MemCopyWorkload.hpp>
-#include <backends/cl/ClTensorHandle.hpp>
 
 #include <backends/aclCommon/memory/IPoolManager.hpp>
+
+#include <backends/cl/ClTensorHandle.hpp>
+#include <backends/cl/workloads/ClWorkloads.hpp>
+#include <backends/cl/workloads/ClWorkloadUtils.hpp>
 #endif
 
 #include <backends/MakeWorkloadHelper.hpp>
@@ -42,6 +43,36 @@
 
 #ifdef ARMCOMPUTECL_ENABLED
 
+template <typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
+std::unique_ptr<IWorkload> ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
+                                                           const WorkloadInfo& info,
+                                                           Args&&... args)
+{
+    try
+    {
+        return MakeWorkloadHelper<FloatWorkload, Uint8Workload>(descriptor, info, std::forward<Args>(args)...);
+    }
+    catch (const cl::Error& clError)
+    {
+        throw WrapClError(clError, CHECK_LOCATION());
+    }
+}
+
+template <typename Workload, typename QueueDescriptorType, typename... Args>
+std::unique_ptr<IWorkload> ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
+                                                           const WorkloadInfo& info,
+                                                           Args&&... args)
+{
+    try
+    {
+        return std::make_unique<Workload>(descriptor, info, std::forward<Args>(args)...);
+    }
+    catch (const cl::Error& clError)
+    {
+        throw WrapClError(clError, CHECK_LOCATION());
+    }
+}
+
 ClWorkloadFactory::ClWorkloadFactory()
 : m_MemoryManager(std::make_unique<arm_compute::CLBufferAllocator>())
 {
@@ -100,26 +131,26 @@
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                                const WorkloadInfo&              info) const
 {
-    return std::make_unique<ClActivationWorkload>(descriptor, info);
+    return MakeWorkload<ClActivationWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                             const WorkloadInfo&           info) const
 {
     return MakeWorkload<ClSoftmaxFloatWorkload, ClSoftmaxUint8Workload>(descriptor, info,
-                                                                          m_MemoryManager.GetIntraLayerManager());
+                                                                        m_MemoryManager.GetIntraLayerManager());
 }
 
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                                              const WorkloadInfo&            info) const
 {
-    return std::make_unique<ClSplitterWorkload>(descriptor, info);
+    return MakeWorkload<ClSplitterWorkload>(descriptor, info);
 }
 
 std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
                                                                   const WorkloadInfo&          info) const
 {
-    return std::make_unique<ClMergerWorkload>(descriptor, info);
+    return MakeWorkload<ClMergerWorkload>(descriptor, info);
 }
 
 std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateFullyConnected(
@@ -132,25 +163,25 @@
 std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
                                                                    const WorkloadInfo&           info) const
 {
-    return std::make_unique<ClPermuteWorkload>(descriptor, info);
+    return MakeWorkload<ClPermuteWorkload>(descriptor, info);
 }
 
 std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
                                                                      const WorkloadInfo&           info) const
 {
-    return std::make_unique<ClPooling2dWorkload>(descriptor, info);
+    return MakeWorkload<ClPooling2dWorkload>(descriptor, info);
 }
 
 std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
                                                                          const WorkloadInfo&               info) const
 {
-    return std::make_unique<ClConvolution2dWorkload>(descriptor, info, m_MemoryManager.GetIntraLayerManager());
+    return MakeWorkload<ClConvolution2dWorkload>(descriptor, info, m_MemoryManager.GetIntraLayerManager());
 }
 
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDepthwiseConvolution2d(
     const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return std::make_unique<ClDepthwiseConvolutionWorkload>(descriptor, info);
+    return MakeWorkload<ClDepthwiseConvolutionWorkload>(descriptor, info);
 }
 
 std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
@@ -162,13 +193,13 @@
 std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
                                                                     const WorkloadInfo&            info) const
 {
-    return std::make_unique<ClAdditionWorkload>(descriptor, info);
+    return MakeWorkload<ClAdditionWorkload>(descriptor, info);
 }
 
 std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateMultiplication(
     const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return std::make_unique<ClMultiplicationWorkload>(descriptor, info);
+    return MakeWorkload<ClMultiplicationWorkload>(descriptor, info);
 }
 
 std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateDivision(
@@ -180,7 +211,7 @@
 std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
                                                                        const WorkloadInfo& info) const
 {
-    return std::make_unique<ClSubtractionWorkload>(descriptor, info);
+    return MakeWorkload<ClSubtractionWorkload>(descriptor, info);
 }
 
 std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateBatchNormalization(
@@ -223,13 +254,13 @@
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
     const WorkloadInfo& info) const
 {
-    return std::make_unique<ClConstantWorkload>(descriptor, info);
+    return MakeWorkload<ClConstantWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
     const WorkloadInfo& info) const
 {
-    return std::make_unique<ClReshapeWorkload>(descriptor, info);
+    return MakeWorkload<ClReshapeWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
@@ -248,14 +279,14 @@
     const ConvertFp16ToFp32QueueDescriptor& descriptor,
     const WorkloadInfo& info) const
 {
-    return std::make_unique<ClConvertFp16ToFp32Workload>(descriptor, info);
+    return MakeWorkload<ClConvertFp16ToFp32Workload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvertFp32ToFp16(
     const ConvertFp32ToFp16QueueDescriptor& descriptor,
     const WorkloadInfo& info) const
 {
-    return std::make_unique<ClConvertFp32ToFp16Workload>(descriptor, info);
+    return MakeWorkload<ClConvertFp32ToFp16Workload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
@@ -267,7 +298,7 @@
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
 {
-    return std::make_unique<ClPadWorkload>(descriptor, info);
+    return MakeWorkload<ClPadWorkload>(descriptor, info);
 }
 
 void ClWorkloadFactory::Finalize()
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 9f8ec62..66de3a5 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -132,6 +132,16 @@
 private:
 
 #ifdef ARMCOMPUTECL_ENABLED
+    template<typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
+    static std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor,
+                                                   const WorkloadInfo& info,
+                                                   Args&&... args);
+
+    template <typename Workload, typename QueueDescriptorType, typename... Args>
+    static std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor,
+                                                   const WorkloadInfo& info,
+                                                   Args&&... args);
+
     mutable ClMemoryManager m_MemoryManager;
 #endif
 };
diff --git a/src/backends/cl/workloads/ClActivationWorkload.cpp b/src/backends/cl/workloads/ClActivationWorkload.cpp
index 426af9f..188ad32 100644
--- a/src/backends/cl/workloads/ClActivationWorkload.cpp
+++ b/src/backends/cl/workloads/ClActivationWorkload.cpp
@@ -53,7 +53,7 @@
 void ClActivationWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClActivationWorkload_Execute");
-    m_ActivationLayer.run();
+    RunClFunction(m_ActivationLayer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClAdditionWorkload.cpp b/src/backends/cl/workloads/ClAdditionWorkload.cpp
index c9ac958..6ec207a 100644
--- a/src/backends/cl/workloads/ClAdditionWorkload.cpp
+++ b/src/backends/cl/workloads/ClAdditionWorkload.cpp
@@ -32,7 +32,7 @@
 void ClAdditionWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClAdditionWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 arm_compute::Status ClAdditionValidate(const TensorInfo& input0,
diff --git a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
index 24be7cd..1f3f9b5 100644
--- a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
@@ -94,7 +94,7 @@
 void ClBatchNormalizationFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClBatchNormalizationFloatWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 void ClBatchNormalizationFloatWorkload::FreeUnusedTensors()
diff --git a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
index 2c9a0e1..b489ced 100644
--- a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
@@ -29,7 +29,7 @@
 void ClConvertFp16ToFp32Workload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvertFp16ToFp32Workload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
diff --git a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
index 6758180..781607f 100644
--- a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
@@ -29,7 +29,7 @@
 void ClConvertFp32ToFp16Workload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvertFp32ToFp16Workload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 301859e..7c876ab 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -106,8 +106,7 @@
 void ClConvolution2dWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvolution2dWorkload_Execute");
-
-    m_ConvolutionLayer.run();
+    RunClFunction(m_ConvolutionLayer, CHECK_LOCATION());
 }
 
 void ClConvolution2dWorkload::FreeUnusedTensors()
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 6fa9ddc..6b159f1 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -137,7 +137,7 @@
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClDepthwiseConvolutionWorkload_Execute");
     BOOST_ASSERT(m_DepthwiseConvolutionLayer);
 
-    m_DepthwiseConvolutionLayer->run();
+    RunClFunction(*m_DepthwiseConvolutionLayer, CHECK_LOCATION());
 }
 
 } // namespace armnn
diff --git a/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp b/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp
index a2d8534..324d8bd 100644
--- a/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp
@@ -40,9 +40,7 @@
 void ClDivisionFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClDivisionFloatWorkload_Execute");
-
-    // Executes the layer.
-    m_ArithmeticDivision.run();
+    RunClFunction(m_ArithmeticDivision, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClFloorFloatWorkload.cpp b/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
index 0a60fc3..457d19e 100644
--- a/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
@@ -25,7 +25,7 @@
 void ClFloorFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClFloorFloatWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index b3a97f3..7b2ecf0 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -84,7 +84,7 @@
 void ClFullyConnectedWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClFullyConnectedWorkload_Execute");
-    m_FullyConnectedLayer.run();
+    RunClFunction(m_FullyConnectedLayer, CHECK_LOCATION());
 }
 
 void ClFullyConnectedWorkload::FreeUnusedTensors()
diff --git a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
index f848016..0dd0603 100644
--- a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
@@ -48,7 +48,7 @@
 void ClL2NormalizationFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClL2NormalizationFloatWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
index aa7110c..177368b 100644
--- a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
@@ -217,7 +217,7 @@
 void ClLstmFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClLstmFloatWorkload_Execute");
-    m_LstmLayer.run();
+    RunClFunction(m_LstmLayer, CHECK_LOCATION());
 }
 
 arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo& input, const TensorInfo& outputStateIn,
diff --git a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
index 9d23caa..c0bcdbc 100644
--- a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
+++ b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
@@ -52,9 +52,7 @@
 void ClMultiplicationWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClMultiplicationWorkload_Execute");
-
-    // Executes the layer.
-    m_PixelWiseMultiplication.run();
+    RunClFunction(m_PixelWiseMultiplication, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
index f6c07e1..f3cc6ec 100644
--- a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
@@ -49,7 +49,7 @@
 void ClNormalizationFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClNormalizationFloatWorkload_Execute");
-    m_NormalizationLayer.run();
+    RunClFunction(m_NormalizationLayer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClPadWorkload.cpp b/src/backends/cl/workloads/ClPadWorkload.cpp
index 3e63d5c..44c0eea 100644
--- a/src/backends/cl/workloads/ClPadWorkload.cpp
+++ b/src/backends/cl/workloads/ClPadWorkload.cpp
@@ -37,7 +37,7 @@
 void ClPadWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClPadWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 arm_compute::Status ClPadValidate(const TensorInfo& input,
diff --git a/src/backends/cl/workloads/ClPermuteWorkload.cpp b/src/backends/cl/workloads/ClPermuteWorkload.cpp
index 5dacc83..39fa56f 100644
--- a/src/backends/cl/workloads/ClPermuteWorkload.cpp
+++ b/src/backends/cl/workloads/ClPermuteWorkload.cpp
@@ -45,7 +45,7 @@
 void ClPermuteWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL( GetName() + "_Execute");
-    m_PermuteFunction.run();
+    RunClFunction(m_PermuteFunction, CHECK_LOCATION());
 }
 
 } // namespace armnn
diff --git a/src/backends/cl/workloads/ClPooling2dWorkload.cpp b/src/backends/cl/workloads/ClPooling2dWorkload.cpp
index 68512ff..b54afd2 100644
--- a/src/backends/cl/workloads/ClPooling2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClPooling2dWorkload.cpp
@@ -51,7 +51,7 @@
 void ClPooling2dWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClPooling2dWorkload_Execute");
-    m_PoolingLayer.run();
+    RunClFunction(m_PoolingLayer, CHECK_LOCATION());
 }
 
 }
diff --git a/src/backends/cl/workloads/ClReshapeWorkload.cpp b/src/backends/cl/workloads/ClReshapeWorkload.cpp
index 43a53cb..47cea94 100644
--- a/src/backends/cl/workloads/ClReshapeWorkload.cpp
+++ b/src/backends/cl/workloads/ClReshapeWorkload.cpp
@@ -26,7 +26,7 @@
 void ClReshapeWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClReshapeWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
index 4ee6d5e..c4f0a04 100644
--- a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
@@ -38,8 +38,7 @@
 void ClResizeBilinearFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClResizeBilinearFloatWorkload_Execute");
-    m_ResizeBilinearLayer.run();
+    RunClFunction(m_ResizeBilinearLayer, CHECK_LOCATION());
 }
 
-
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp b/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
index 6060056..ed012cc 100644
--- a/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
@@ -27,7 +27,7 @@
 void ClSoftmaxFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClSoftmaxFloatWorkload_Execute");
-    m_SoftmaxLayer.run();
+    RunClFunction(m_SoftmaxLayer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
index 7e0589e..d06306e 100644
--- a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
@@ -36,8 +36,7 @@
 void ClSoftmaxUint8Workload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClSoftmaxUint8Workload_Execute");
-
-    m_SoftmaxLayer.run();
+    RunClFunction(m_SoftmaxLayer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClSubtractionWorkload.cpp b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
index 1967fae..e23dab0 100644
--- a/src/backends/cl/workloads/ClSubtractionWorkload.cpp
+++ b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
@@ -32,7 +32,7 @@
 void ClSubtractionWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClSubtractionWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 arm_compute::Status ClSubtractionValidate(const TensorInfo& input0,
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index c765c63..ca0de8d 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -10,6 +10,10 @@
 #include <backends/cl/OpenClTimer.hpp>
 #include <backends/CpuTensorHandle.hpp>
 
+#include <arm_compute/runtime/CL/CLFunctions.h>
+
+#include <sstream>
+
 #define ARMNN_SCOPED_PROFILING_EVENT_CL(name) \
     ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
                                                   name, \
@@ -60,4 +64,24 @@
     }
 };
 
+inline RuntimeException WrapClError(const cl::Error& clError, const CheckLocation& location)
+{
+    std::stringstream message;
+    message << "CL error: " << clError.what() << ". Error code: " << clError.err();
+
+    return RuntimeException(message.str(), location);
+}
+
+inline void RunClFunction(arm_compute::IFunction& function, const CheckLocation& location)
+{
+    try
+    {
+        function.run();
+    }
+    catch (cl::Error& error)
+    {
+        throw WrapClError(error, location);
+    }
+}
+
 } //namespace armnn
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index b3e1dd9..0e069a2 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -79,13 +79,13 @@
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
                                                             const WorkloadInfo&        info) const
 {
-    return MakeWorkload<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
+    return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
                                                              const WorkloadInfo&        info) const
 {
-    return MakeWorkload<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
+    return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
@@ -97,8 +97,8 @@
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                               const WorkloadInfo&           info) const
 {
-    return MakeWorkload<NeonSoftmaxFloatWorkload, NeonSoftmaxUint8Workload>(descriptor, info,
-                                                                              m_MemoryManager.GetIntraLayerManager());
+    return MakeWorkloadHelper<NeonSoftmaxFloatWorkload, NeonSoftmaxUint8Workload>(descriptor, info,
+        m_MemoryManager.GetIntraLayerManager());
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
@@ -116,8 +116,8 @@
 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateFullyConnected(
     const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return MakeWorkload<NeonFullyConnectedWorkload, NeonFullyConnectedWorkload>(descriptor, info,
-                                                                                m_MemoryManager.GetIntraLayerManager());
+    return MakeWorkloadHelper<NeonFullyConnectedWorkload, NeonFullyConnectedWorkload>(descriptor, info,
+        m_MemoryManager.GetIntraLayerManager());
 }
 
 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
@@ -148,38 +148,38 @@
 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateNormalization(
     const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return MakeWorkload<NeonNormalizationFloatWorkload, NullWorkload>(descriptor, info,
-                                                                        m_MemoryManager.GetIntraLayerManager());
+    return MakeWorkloadHelper<NeonNormalizationFloatWorkload, NullWorkload>(descriptor, info,
+        m_MemoryManager.GetIntraLayerManager());
 }
 
 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
                                                                       const WorkloadInfo&            info) const
 {
-    return MakeWorkload<NeonAdditionFloatWorkload, NullWorkload>(descriptor, info);
+    return MakeWorkloadHelper<NeonAdditionFloatWorkload, NullWorkload>(descriptor, info);
 }
 
 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMultiplication(
     const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return MakeWorkload<NeonMultiplicationFloatWorkload, NullWorkload>(descriptor, info);
+    return MakeWorkloadHelper<NeonMultiplicationFloatWorkload, NullWorkload>(descriptor, info);
 }
 
 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateDivision(
     const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
 }
 
 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateSubtraction(
     const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return MakeWorkload<NeonSubtractionFloatWorkload, NullWorkload>(descriptor, info);
+    return MakeWorkloadHelper<NeonSubtractionFloatWorkload, NullWorkload>(descriptor, info);
 }
 
 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateBatchNormalization(
     const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return MakeWorkload<NeonBatchNormalizationFloatWorkload, NullWorkload>(descriptor, info);
+    return MakeWorkloadHelper<NeonBatchNormalizationFloatWorkload, NullWorkload>(descriptor, info);
 }
 
 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
@@ -190,7 +190,7 @@
         throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload");
     }
 
-    return MakeWorkload<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
+    return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
 }
 
 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateResizeBilinear(
@@ -210,8 +210,8 @@
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
     const WorkloadInfo& info) const
 {
-    return MakeWorkload<NeonL2NormalizationFloatWorkload, NullWorkload>(descriptor, info,
-                                                                          m_MemoryManager.GetIntraLayerManager());
+    return MakeWorkloadHelper<NeonL2NormalizationFloatWorkload, NullWorkload>(descriptor, info,
+        m_MemoryManager.GetIntraLayerManager());
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
@@ -229,13 +229,13 @@
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
     const WorkloadInfo& info) const
 {
-    return MakeWorkload<NeonFloorFloatWorkload, NullWorkload>(descriptor, info);
+    return MakeWorkloadHelper<NeonFloorFloatWorkload, NullWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
     const WorkloadInfo& info) const
 {
-    return MakeWorkload<NeonLstmFloatWorkload, NullWorkload>(descriptor, info);
+    return MakeWorkloadHelper<NeonLstmFloatWorkload, NullWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp16ToFp32(
@@ -255,13 +255,13 @@
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
-    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
 {
-    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
 }
 
 void NeonWorkloadFactory::Finalize()
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index b1f9d6c..048f6cd 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -18,7 +18,7 @@
 std::unique_ptr<IWorkload> RefWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
     const WorkloadInfo& info) const
 {
-    return armnn::MakeWorkload<NullWorkload, F32Workload, U8Workload>(descriptor, info);
+    return armnn::MakeWorkloadHelper<NullWorkload, F32Workload, U8Workload>(descriptor, info);
 }
 
 RefWorkloadFactory::RefWorkloadFactory()
@@ -114,7 +114,7 @@
 std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
                                                                     const WorkloadInfo&           info) const
 {
-    return armnn::MakeWorkload<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteUint8Workload>
+    return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteUint8Workload>
         (descriptor, info);
 }