IVGCVSW-1955: Unify backend exceptions (wrap cl::Error)

* Added a RunClFunction() wrapper around arm_compute::IFunction::run() that
  catches cl::Error and rethrows it as an armnn::RuntimeException
* Added a MakeWorkload template inside ClWorkloadFactory that catches
  cl::Error and rethrows it as an armnn::RuntimeException (sketch below)
* Replaced cl::Error with armnn::RuntimeException in the catch statements
  inside LoadedNetwork

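Note: the ClWorkloadFactory change is not included in this excerpt. As a
rough sketch only (assuming the existing MakeWorkloadHelper<> template and
the WrapClError() helper added in ClWorkloadUtils.hpp below), the wrapped
factory method might look like:

    template <typename FloatWorkload, typename Uint8Workload,
              typename QueueDescriptorType, typename... Args>
    std::unique_ptr<IWorkload> ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                               const WorkloadInfo& info,
                                                               Args&&... args)
    {
        try
        {
            // Delegate to the usual float/uint8 workload selection helper.
            return MakeWorkloadHelper<FloatWorkload, Uint8Workload>(descriptor, info,
                                                                    std::forward<Args>(args)...);
        }
        catch (const cl::Error& clError)
        {
            // Unify on armnn::RuntimeException, mirroring RunClFunction().
            throw WrapClError(clError, CHECK_LOCATION());
        }
    }
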
Change-Id: I2340f41ae02b8db1d7ef5157824a50e7410854e3
diff --git a/src/backends/cl/workloads/ClActivationWorkload.cpp b/src/backends/cl/workloads/ClActivationWorkload.cpp
index 426af9f..188ad32 100644
--- a/src/backends/cl/workloads/ClActivationWorkload.cpp
+++ b/src/backends/cl/workloads/ClActivationWorkload.cpp
@@ -53,7 +53,7 @@
 void ClActivationWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClActivationWorkload_Execute");
-    m_ActivationLayer.run();
+    RunClFunction(m_ActivationLayer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClAdditionWorkload.cpp b/src/backends/cl/workloads/ClAdditionWorkload.cpp
index c9ac958..6ec207a 100644
--- a/src/backends/cl/workloads/ClAdditionWorkload.cpp
+++ b/src/backends/cl/workloads/ClAdditionWorkload.cpp
@@ -32,7 +32,7 @@
 void ClAdditionWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClAdditionWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 arm_compute::Status ClAdditionValidate(const TensorInfo& input0,
diff --git a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
index 24be7cd..1f3f9b5 100644
--- a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
@@ -94,7 +94,7 @@
 void ClBatchNormalizationFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClBatchNormalizationFloatWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 void ClBatchNormalizationFloatWorkload::FreeUnusedTensors()
diff --git a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
index 2c9a0e1..b489ced 100644
--- a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
@@ -29,7 +29,7 @@
 void ClConvertFp16ToFp32Workload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvertFp16ToFp32Workload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
diff --git a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
index 6758180..781607f 100644
--- a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
@@ -29,7 +29,7 @@
 void ClConvertFp32ToFp16Workload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvertFp32ToFp16Workload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 301859e..7c876ab 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -106,8 +106,7 @@
 void ClConvolution2dWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvolution2dWorkload_Execute");
-
-    m_ConvolutionLayer.run();
+    RunClFunction(m_ConvolutionLayer, CHECK_LOCATION());
 }
 
 void ClConvolution2dWorkload::FreeUnusedTensors()
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 6fa9ddc..6b159f1 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -137,7 +137,7 @@
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClDepthwiseConvolutionWorkload_Execute");
     BOOST_ASSERT(m_DepthwiseConvolutionLayer);
 
-    m_DepthwiseConvolutionLayer->run();
+    RunClFunction(*m_DepthwiseConvolutionLayer, CHECK_LOCATION());
 }
 
 } // namespace armnn
diff --git a/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp b/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp
index a2d8534..324d8bd 100644
--- a/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp
@@ -40,9 +40,7 @@
 void ClDivisionFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClDivisionFloatWorkload_Execute");
-
-    // Executes the layer.
-    m_ArithmeticDivision.run();
+    RunClFunction(m_ArithmeticDivision, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClFloorFloatWorkload.cpp b/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
index 0a60fc3..457d19e 100644
--- a/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
@@ -25,7 +25,7 @@
 void ClFloorFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClFloorFloatWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index b3a97f3..7b2ecf0 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -84,7 +84,7 @@
 void ClFullyConnectedWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClFullyConnectedWorkload_Execute");
-    m_FullyConnectedLayer.run();
+    RunClFunction(m_FullyConnectedLayer, CHECK_LOCATION());
 }
 
 void ClFullyConnectedWorkload::FreeUnusedTensors()
diff --git a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
index f848016..0dd0603 100644
--- a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
@@ -48,7 +48,7 @@
 void ClL2NormalizationFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClL2NormalizationFloatWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
index aa7110c..177368b 100644
--- a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
@@ -217,7 +217,7 @@
 void ClLstmFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClLstmFloatWorkload_Execute");
-    m_LstmLayer.run();
+    RunClFunction(m_LstmLayer, CHECK_LOCATION());
 }
 
 arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo& input, const TensorInfo& outputStateIn,
diff --git a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
index 9d23caa..c0bcdbc 100644
--- a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
+++ b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
@@ -52,9 +52,7 @@
 void ClMultiplicationWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClMultiplicationWorkload_Execute");
-
-    // Executes the layer.
-    m_PixelWiseMultiplication.run();
+    RunClFunction(m_PixelWiseMultiplication, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
index f6c07e1..f3cc6ec 100644
--- a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
@@ -49,7 +49,7 @@
 void ClNormalizationFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClNormalizationFloatWorkload_Execute");
-    m_NormalizationLayer.run();
+    RunClFunction(m_NormalizationLayer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClPadWorkload.cpp b/src/backends/cl/workloads/ClPadWorkload.cpp
index 3e63d5c..44c0eea 100644
--- a/src/backends/cl/workloads/ClPadWorkload.cpp
+++ b/src/backends/cl/workloads/ClPadWorkload.cpp
@@ -37,7 +37,7 @@
 void ClPadWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClPadWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 arm_compute::Status ClPadValidate(const TensorInfo& input,
diff --git a/src/backends/cl/workloads/ClPermuteWorkload.cpp b/src/backends/cl/workloads/ClPermuteWorkload.cpp
index 5dacc83..39fa56f 100644
--- a/src/backends/cl/workloads/ClPermuteWorkload.cpp
+++ b/src/backends/cl/workloads/ClPermuteWorkload.cpp
@@ -45,7 +45,7 @@
 void ClPermuteWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL( GetName() + "_Execute");
-    m_PermuteFunction.run();
+    RunClFunction(m_PermuteFunction, CHECK_LOCATION());
 }
 
 } // namespace armnn
diff --git a/src/backends/cl/workloads/ClPooling2dWorkload.cpp b/src/backends/cl/workloads/ClPooling2dWorkload.cpp
index 68512ff..b54afd2 100644
--- a/src/backends/cl/workloads/ClPooling2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClPooling2dWorkload.cpp
@@ -51,7 +51,7 @@
 void ClPooling2dWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClPooling2dWorkload_Execute");
-    m_PoolingLayer.run();
+    RunClFunction(m_PoolingLayer, CHECK_LOCATION());
 }
 
 }
diff --git a/src/backends/cl/workloads/ClReshapeWorkload.cpp b/src/backends/cl/workloads/ClReshapeWorkload.cpp
index 43a53cb..47cea94 100644
--- a/src/backends/cl/workloads/ClReshapeWorkload.cpp
+++ b/src/backends/cl/workloads/ClReshapeWorkload.cpp
@@ -26,7 +26,7 @@
 void ClReshapeWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClReshapeWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
index 4ee6d5e..c4f0a04 100644
--- a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
@@ -38,8 +38,7 @@
 void ClResizeBilinearFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClResizeBilinearFloatWorkload_Execute");
-    m_ResizeBilinearLayer.run();
+    RunClFunction(m_ResizeBilinearLayer, CHECK_LOCATION());
 }
 
-
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp b/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
index 6060056..ed012cc 100644
--- a/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
@@ -27,7 +27,7 @@
 void ClSoftmaxFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClSoftmaxFloatWorkload_Execute");
-    m_SoftmaxLayer.run();
+    RunClFunction(m_SoftmaxLayer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
index 7e0589e..d06306e 100644
--- a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
@@ -36,8 +36,7 @@
 void ClSoftmaxUint8Workload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClSoftmaxUint8Workload_Execute");
-
-    m_SoftmaxLayer.run();
+    RunClFunction(m_SoftmaxLayer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClSubtractionWorkload.cpp b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
index 1967fae..e23dab0 100644
--- a/src/backends/cl/workloads/ClSubtractionWorkload.cpp
+++ b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
@@ -32,7 +32,7 @@
 void ClSubtractionWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClSubtractionWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 arm_compute::Status ClSubtractionValidate(const TensorInfo& input0,
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index c765c63..ca0de8d 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -10,6 +10,10 @@
 #include <backends/cl/OpenClTimer.hpp>
 #include <backends/CpuTensorHandle.hpp>
 
+#include <arm_compute/runtime/CL/CLFunctions.h>
+
+#include <sstream>
+
 #define ARMNN_SCOPED_PROFILING_EVENT_CL(name) \
     ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
                                                   name, \
@@ -60,4 +64,24 @@
     }
 };
 
+inline RuntimeException WrapClError(const cl::Error& clError, const CheckLocation& location)
+{
+    std::stringstream message;
+    message << "CL error: " << clError.what() << ". Error code: " << clError.err();
+
+    return RuntimeException(message.str(), location);
+}
+
+inline void RunClFunction(arm_compute::IFunction& function, const CheckLocation& location)
+{
+    try
+    {
+        function.run();
+    }
+    catch (cl::Error& error)
+    {
+        throw WrapClError(error, location);
+    }
+}
+
 } //namespace armnn
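
The LoadedNetwork change referenced in the commit message is likewise not part
of this excerpt. A minimal, illustrative sketch of an updated catch site
follows; the function shape and error-reporting body here are hypothetical,
the point is only that the handler now catches armnn::RuntimeException rather
than cl::Error:

    std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
                                                                    std::string& errorMessage)
    {
        std::unique_ptr<LoadedNetwork> loadedNetwork;
        try
        {
            loadedNetwork.reset(new LoadedNetwork(std::move(net)));
        }
        catch (const armnn::RuntimeException& error) // previously: catch (cl::Error& error)
        {
            // Hypothetical reporting path: record the message and fail the load.
            errorMessage = error.what();
            loadedNetwork.reset();
        }
        return loadedNetwork;
    }

Because every CL workload now routes arm_compute::IFunction::run() through
RunClFunction(), callers such as LoadedNetwork no longer need to know about
cl::Error at all; they handle a single backend-agnostic exception type.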