IVGCVSW-1985: Replace std::string* reasonIfUnsupported with Optional<std::string&> in IsLayerSupported implementations

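The new calling convention is illustrated by the minimal sketch below (not part of
the patch). It assumes only armnn::Optional<T&> and armnn::EmptyOptional from
armnn/Optional.hpp, exactly as used in the diff; the IsExampleSupported function
and its message are hypothetical. Callees test the Optional and write the reason
through value() instead of dereferencing a raw pointer; callers that want the
reason wrap a local std::string, and callers that do not simply rely on the
EmptyOptional() default.

    #include <armnn/Optional.hpp>

    #include <iostream>
    #include <string>

    namespace
    {

    // Hypothetical IsLayerSupported-style query, shown only to demonstrate the
    // new signature introduced by this change.
    bool IsExampleSupported(armnn::Optional<std::string&> reasonIfUnsupported = armnn::EmptyOptional())
    {
        if (reasonIfUnsupported) // true only when the caller supplied a string to fill in
        {
            reasonIfUnsupported.value() = "Example layer is not supported";
        }
        return false;
    }

    } // anonymous namespace

    int main()
    {
        // A caller interested in the reason binds a local string to the Optional.
        std::string reason;
        IsExampleSupported(armnn::Optional<std::string&>(reason));
        std::cout << reason << std::endl;

        // A caller that ignores the reason takes the EmptyOptional() default.
        IsExampleSupported();
        return 0;
    }
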
Change-Id: I2f054f0dcff9bdc86ee90c55b3e94c6b4ae25085
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 3758ed4..8bad89f 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -3,6 +3,7 @@
 // SPDX-License-Identifier: MIT
 //
 #include <armnn/LayerSupport.hpp>
+#include <armnn/Optional.hpp>
 
 #include <backends/reference/RefLayerSupport.hpp>
 #include <backends/neon/NeonLayerSupport.hpp>
@@ -36,16 +37,16 @@
     switch(compute) \
     { \
         case Compute::CpuRef: \
-            isSupported = func##Ref(__VA_ARGS__, &reasonIfUnsupportedFull); \
+            isSupported = func##Ref(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
             break; \
         case Compute::CpuAcc: \
-            isSupported = func##Neon(__VA_ARGS__, &reasonIfUnsupportedFull); \
+            isSupported = func##Neon(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
             break; \
         case Compute::GpuAcc: \
-            isSupported = func##Cl(__VA_ARGS__, &reasonIfUnsupportedFull); \
+            isSupported = func##Cl(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
             break; \
         default: \
-            isSupported = func##Ref(__VA_ARGS__, &reasonIfUnsupportedFull); \
+            isSupported = func##Ref(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
             break; \
     } \
     CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
diff --git a/src/armnn/LayerSupportCommon.hpp b/src/armnn/LayerSupportCommon.hpp
index e351cf4..d6dda4f 100644
--- a/src/armnn/LayerSupportCommon.hpp
+++ b/src/armnn/LayerSupportCommon.hpp
@@ -12,7 +12,7 @@
 {
 
 template<typename Float16Func, typename Float32Func, typename Uint8Func, typename ... Params>
-bool IsSupportedForDataTypeGeneric(std::string* reasonIfUnsupported,
+bool IsSupportedForDataTypeGeneric(Optional<std::string&> reasonIfUnsupported,
                                    DataType dataType,
                                    Float16Func float16FuncPtr,
                                    Float32Func float32FuncPtr,
@@ -33,83 +33,83 @@
 }
 
 template<typename ... Params>
-bool TrueFunc(std::string* reasonIfUnsupported, Params&&... params)
+bool TrueFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
     return true;
 }
 
 template<typename ... Params>
-bool FalseFunc(std::string* reasonIfUnsupported, Params&&... params)
+bool FalseFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
     return false;
 }
 
 template<typename ... Params>
-bool FalseFuncF16(std::string* reasonIfUnsupported, Params&&... params)
+bool FalseFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
     if (reasonIfUnsupported)
     {
-        *reasonIfUnsupported = "Layer is not supported with float16 data type";
+        reasonIfUnsupported.value() = "Layer is not supported with float16 data type";
     }
     return false;
 }
 
 template<typename ... Params>
-bool FalseFuncF32(std::string* reasonIfUnsupported, Params&&... params)
+bool FalseFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
     if (reasonIfUnsupported)
     {
-        *reasonIfUnsupported = "Layer is not supported with float32 data type";
+        reasonIfUnsupported.value() = "Layer is not supported with float32 data type";
     }
     return false;
 }
 
 template<typename ... Params>
-bool FalseFuncU8(std::string* reasonIfUnsupported, Params&&... params)
+bool FalseFuncU8(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
     if (reasonIfUnsupported)
     {
-        *reasonIfUnsupported = "Layer is not supported with 8-bit data type";
+        reasonIfUnsupported.value() = "Layer is not supported with 8-bit data type";
     }
     return false;
 }
 
 template<typename ... Params>
-bool FalseInputFuncF32(std::string* reasonIfUnsupported, Params&&... params)
+bool FalseInputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
     if (reasonIfUnsupported)
     {
-        *reasonIfUnsupported = "Layer is not supported with float32 data type input";
+        reasonIfUnsupported.value() = "Layer is not supported with float32 data type input";
     }
     return false;
 }
 
 template<typename ... Params>
-bool FalseInputFuncF16(std::string* reasonIfUnsupported, Params&&... params)
+bool FalseInputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
     if (reasonIfUnsupported)
     {
-        *reasonIfUnsupported = "Layer is not supported with float16 data type input";
+        reasonIfUnsupported.value() = "Layer is not supported with float16 data type input";
     }
     return false;
 }
 
 template<typename ... Params>
-bool FalseOutputFuncF32(std::string* reasonIfUnsupported, Params&&... params)
+bool FalseOutputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
     if (reasonIfUnsupported)
     {
-        *reasonIfUnsupported = "Layer is not supported with float32 data type output";
+        reasonIfUnsupported.value() = "Layer is not supported with float32 data type output";
     }
     return false;
 }
 
 template<typename ... Params>
-bool FalseOutputFuncF16(std::string* reasonIfUnsupported, Params&&... params)
+bool FalseOutputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
     if (reasonIfUnsupported)
     {
-        *reasonIfUnsupported = "Layer is not supported with float16 data type output";
+        reasonIfUnsupported.value() = "Layer is not supported with float16 data type output";
     }
     return false;
 }
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 434b069..494b339 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -22,16 +22,16 @@
 #include "workloads/ClConvolution2dWorkload.hpp"
 #include "workloads/ClDepthwiseConvolutionWorkload.hpp"
 #include "workloads/ClDivisionFloatWorkload.hpp"
-#include "workloads/ClL2NormalizationFloatWorkload.hpp"
-#include "workloads/ClMultiplicationWorkload.hpp"
 #include "workloads/ClFullyConnectedWorkload.hpp"
-#include "workloads/ClPadWorkload.hpp"
-#include "workloads/ClPooling2dBaseWorkload.hpp"
-#include "workloads/ClPermuteWorkload.hpp"
+#include "workloads/ClL2NormalizationFloatWorkload.hpp"
+#include "workloads/ClLstmFloatWorkload.hpp"
+#include "workloads/ClMultiplicationWorkload.hpp"
 #include "workloads/ClNormalizationFloatWorkload.hpp"
+#include "workloads/ClPadWorkload.hpp"
+#include "workloads/ClPermuteWorkload.hpp"
+#include "workloads/ClPooling2dBaseWorkload.hpp"
 #include "workloads/ClSoftmaxBaseWorkload.hpp"
 #include "workloads/ClSubtractionWorkload.hpp"
-#include "workloads/ClLstmFloatWorkload.hpp"
 #endif
 
 using namespace boost;
@@ -59,14 +59,14 @@
     return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
 };
 
-bool IsClBackendSupported(std::string* reasonIfUnsupported)
+bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported)
 {
 #if ARMCOMPUTECL_ENABLED
     return true;
 #else
-    if (reasonIfUnsupported != nullptr)
+    if (reasonIfUnsupported)
     {
-        *reasonIfUnsupported = "The armnn library has been built without CL support";
+        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
     }
     return false;
 #endif
@@ -80,13 +80,13 @@
 
 #if ARMCOMPUTECL_ENABLED
 template<class FuncType, class... Args>
-inline bool IsWorkloadSupported(FuncType&& func, std::string* reasonIfUnsupported, Args&&... args)
+inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
 {
     arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
     const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
     if (!supported && reasonIfUnsupported)
     {
-        *reasonIfUnsupported = aclStatus.error_description();
+        reasonIfUnsupported.value() = aclStatus.error_description();
     }
     return supported;
 }
@@ -101,7 +101,7 @@
 } //namespace
 
 template<typename FloatFunc, typename Uint8Func, typename ... Params>
-bool IsSupportedForDataTypeCl(std::string* reasonIfUnsupported,
+bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
                               DataType dataType,
                               FloatFunc floatFuncPtr,
                               Uint8Func uint8FuncPtr,
@@ -119,7 +119,7 @@
 bool IsActivationSupportedCl(const TensorInfo& input,
                              const TensorInfo& output,
                              const ActivationDescriptor& descriptor,
-                             std::string* reasonIfUnsupported)
+                             Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
                                    reasonIfUnsupported,
@@ -131,12 +131,13 @@
 bool IsAdditionSupportedCl(const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
-                           std::string* reasonIfUnsupported)
+                           Optional<std::string&> reasonIfUnsupported)
 {
-    return FORWARD_CL_LAYER_SUPPORT_FUNC(ClAdditionValidate(input0,
-        input1,
-        output,
-        reasonIfUnsupported));
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
+                                   reasonIfUnsupported,
+                                   input0,
+                                   input1,
+                                   output);
 }
 
 bool IsBatchNormalizationSupportedCl(const TensorInfo& input,
@@ -146,7 +147,7 @@
                                      const TensorInfo& beta,
                                      const TensorInfo& gamma,
                                      const BatchNormalizationDescriptor& descriptor,
-                                     std::string* reasonIfUnsupported)
+                                     Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
                                    reasonIfUnsupported,
@@ -160,7 +161,7 @@
 }
 
 bool IsConstantSupportedCl(const TensorInfo& output,
-                           std::string* reasonIfUnsupported)
+                           Optional<std::string&> reasonIfUnsupported)
 {
     return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                     output.GetDataType(),
@@ -201,10 +202,11 @@
     return isSupported;
 }
 
-bool IsDirectConvolution2dParamsSupportedCl(std::string* reasonIfUnsupported,
+bool IsDirectConvolution2dParamsSupportedCl(Optional<std::string&> reasonIfUnsupported,
                                             const Convolution2dDescriptor& parameters,
                                             const TensorInfo& weightInfo)
 {
+    ignore_unused(reasonIfUnsupported);
     return IsClDirectConvolution2dSupported(weightInfo, parameters);
 }
 
@@ -213,7 +215,7 @@
                                 const Convolution2dDescriptor& descriptor,
                                 const TensorInfo& weights,
                                 const Optional<TensorInfo>& biases,
-                                std::string* reasonIfUnsupported)
+                                Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
                                    reasonIfUnsupported,
@@ -229,7 +231,7 @@
                                        const DepthwiseConvolution2dDescriptor& descriptor,
                                        const TensorInfo& weights,
                                        const Optional<TensorInfo>& biases,
-                                       std::string* reasonIfUnsupported)
+                                       Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                    reasonIfUnsupported,
@@ -243,7 +245,7 @@
 bool IsDivisionSupportedCl(const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
-                           std::string* reasonIfUnsupported)
+                           Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
                                    reasonIfUnsupported,
@@ -255,12 +257,13 @@
 bool IsSubtractionSupportedCl(const TensorInfo& input0,
                               const TensorInfo& input1,
                               const TensorInfo& output,
-                              std::string* reasonIfUnsupported)
+                              Optional<std::string&> reasonIfUnsupported)
 {
-    return FORWARD_CL_LAYER_SUPPORT_FUNC(ClSubtractionValidate(input0,
-                                         input1,
-                                         output,
-                                         reasonIfUnsupported));
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
+                                   reasonIfUnsupported,
+                                   input0,
+                                   input1,
+                                   output);
 }
 
 bool IsFullyConnectedSupportedCl(const TensorInfo& input,
@@ -268,7 +272,7 @@
                                  const TensorInfo& weights,
                                  const TensorInfo& biases,
                                  const FullyConnectedDescriptor& descriptor,
-                                 std::string* reasonIfUnsupported)
+                                 Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
                                    reasonIfUnsupported,
@@ -280,7 +284,7 @@
 }
 
 bool IsInputSupportedCl(const TensorInfo& input,
-    std::string* reasonIfUnsupported)
+                        Optional<std::string&> reasonIfUnsupported)
 {
     return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                     input.GetDataType(),
@@ -291,14 +295,14 @@
 bool IsL2NormalizationSupportedCl(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const L2NormalizationDescriptor& descriptor,
-                                  std::string* reasonIfUnsupported)
+                                  Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
 bool IsMergerSupportedCl(const std::vector<const TensorInfo*> inputs,
                          const OriginsDescriptor& descriptor,
-                         std::string* reasonIfUnsupported)
+                         Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(descriptor);
     return IsSupportedForDataTypeCl(reasonIfUnsupported,
@@ -310,7 +314,7 @@
 bool IsMultiplicationSupportedCl(const TensorInfo& input0,
                                  const TensorInfo& input1,
                                  const TensorInfo& output,
-                                 std::string* reasonIfUnsupported)
+                                 Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                    reasonIfUnsupported,
@@ -322,13 +326,13 @@
 bool IsNormalizationSupportedCl(const TensorInfo& input,
                                 const TensorInfo& output,
                                 const NormalizationDescriptor& descriptor,
-                                std::string* reasonIfUnsupported)
+                                Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
 bool IsOutputSupportedCl(const TensorInfo& output,
-                         std::string* reasonIfUnsupported)
+                         Optional<std::string&> reasonIfUnsupported)
 {
     return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                     output.GetDataType(),
@@ -336,18 +340,10 @@
                                     &TrueFunc<>);
 }
 
-bool IsPadSupportedCl(const TensorInfo& input,
-                      const TensorInfo& output,
-                      const PadDescriptor& descriptor,
-                      std::string* reasonIfUnsupported)
-{
-    return FORWARD_CL_LAYER_SUPPORT_FUNC(ClPadValidate(input, output, descriptor, reasonIfUnsupported));
-}
-
 bool IsPermuteSupportedCl(const TensorInfo& input,
                           const TensorInfo& output,
                           const PermuteDescriptor& descriptor,
-                          std::string* reasonIfUnsupported)
+                          Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(input);
     ignore_unused(output);
@@ -357,13 +353,13 @@
 bool IsPooling2dSupportedCl(const TensorInfo& input,
                             const TensorInfo& output,
                             const Pooling2dDescriptor& descriptor,
-                            std::string* reasonIfUnsupported)
+                            Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
 bool IsResizeBilinearSupportedCl(const TensorInfo& input,
-                                 std::string* reasonIfUnsupported)
+                                 Optional<std::string&> reasonIfUnsupported)
 {
     return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                     input.GetDataType(),
@@ -374,7 +370,7 @@
 bool IsSoftmaxSupportedCl(const TensorInfo& input,
                           const TensorInfo& output,
                           const SoftmaxDescriptor& descriptor,
-                          std::string* reasonIfUnsupported)
+                          Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(descriptor);
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output);
@@ -382,7 +378,7 @@
 
 bool IsSplitterSupportedCl(const TensorInfo& input,
                            const ViewsDescriptor& descriptor,
-                           std::string* reasonIfUnsupported)
+                           Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(descriptor);
     return IsSupportedForDataTypeCl(reasonIfUnsupported,
@@ -393,23 +389,25 @@
 
 bool IsFakeQuantizationSupportedCl(const TensorInfo& input,
                                    const FakeQuantizationDescriptor& descriptor,
-                                   std::string* reasonIfUnsupported)
+                                   Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(input);
     ignore_unused(descriptor);
+    ignore_unused(reasonIfUnsupported);
     return false;
 }
 
 bool IsReshapeSupportedCl(const TensorInfo& input,
-                          std::string* reasonIfUnsupported)
+                          Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(input);
+    ignore_unused(reasonIfUnsupported);
     return true;
 }
 
 bool IsFloorSupportedCl(const TensorInfo& input,
                         const TensorInfo& output,
-                        std::string* reasonIfUnsupported)
+                        Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(output);
     return IsClBackendSupported(reasonIfUnsupported) &&
@@ -420,59 +418,104 @@
                                          &FalseFuncU8<>);
 }
 
-bool IsLstmSupportedCl(const TensorInfo& input, const TensorInfo& outputStateIn,
-                       const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
-                       const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
-                       const TensorInfo& output, const LstmDescriptor& descriptor,
-                       const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
-                       const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
-                       const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
-                       const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
-                       const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
-                       const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
-                       const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
-                       const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
-                       const TensorInfo* cellToOutputWeights, std::string* reasonIfUnsupported)
+bool IsLstmSupportedCl(const TensorInfo& input,
+                       const TensorInfo& outputStateIn,
+                       const TensorInfo& cellStateIn,
+                       const TensorInfo& scratchBuffer,
+                       const TensorInfo& outputStateOut,
+                       const TensorInfo& cellStateOut,
+                       const TensorInfo& output,
+                       const LstmDescriptor& descriptor,
+                       const TensorInfo& inputToForgetWeights,
+                       const TensorInfo& inputToCellWeights,
+                       const TensorInfo& inputToOutputWeights,
+                       const TensorInfo& recurrentToForgetWeights,
+                       const TensorInfo& recurrentToCellWeights,
+                       const TensorInfo& recurrentToOutputWeights,
+                       const TensorInfo& forgetGateBias,
+                       const TensorInfo& cellBias,
+                       const TensorInfo& outputGateBias,
+                       const TensorInfo* inputToInputWeights,
+                       const TensorInfo* recurrentToInputWeights,
+                       const TensorInfo* cellToInputWeights,
+                       const TensorInfo* inputGateBias,
+                       const TensorInfo* projectionWeights,
+                       const TensorInfo* projectionBias,
+                       const TensorInfo* cellToForgetWeights,
+                       const TensorInfo* cellToOutputWeights,
+                       Optional<std::string&> reasonIfUnsupported)
 {
-    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate, reasonIfUnsupported,
-                                   input, outputStateIn, cellStateIn, scratchBuffer, outputStateOut, cellStateOut,
-                                   output, descriptor, inputToForgetWeights, inputToCellWeights,
-                                   inputToOutputWeights, recurrentToForgetWeights,
-                                   recurrentToCellWeights, recurrentToOutputWeights,
-                                   forgetGateBias, cellBias, outputGateBias,
-                                   inputToInputWeights, recurrentToInputWeights,
-                                   cellToInputWeights, inputGateBias, projectionWeights,
-                                   projectionBias, cellToForgetWeights, cellToOutputWeights);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   outputStateIn,
+                                   cellStateIn,
+                                   scratchBuffer,
+                                   outputStateOut,
+                                   cellStateOut,
+                                   output,
+                                   descriptor,
+                                   inputToForgetWeights,
+                                   inputToCellWeights,
+                                   inputToOutputWeights,
+                                   recurrentToForgetWeights,
+                                   recurrentToCellWeights,
+                                   recurrentToOutputWeights,
+                                   forgetGateBias,
+                                   cellBias,
+                                   outputGateBias,
+                                   inputToInputWeights,
+                                   recurrentToInputWeights,
+                                   cellToInputWeights,
+                                   inputGateBias,
+                                   projectionWeights,
+                                   projectionBias,
+                                   cellToForgetWeights,
+                                   cellToOutputWeights);
 }
 
 bool IsConvertFp16ToFp32SupportedCl(const TensorInfo& input,
                                     const TensorInfo& output,
-                                    std::string* reasonIfUnsupported)
+                                    Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
                                    reasonIfUnsupported,
                                    input,
-                                   output,
-                                   reasonIfUnsupported);
+                                   output);
 }
 
 bool IsConvertFp32ToFp16SupportedCl(const TensorInfo& input,
                                     const TensorInfo& output,
-                                    std::string* reasonIfUnsupported)
+                                    Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
                                    reasonIfUnsupported,
                                    input,
-                                   output,
-                                   reasonIfUnsupported);
+                                   output);
 }
 
 bool IsMeanSupportedCl(const TensorInfo& input,
                        const TensorInfo& output,
                        const MeanDescriptor& descriptor,
-                       std::string* reasonIfUnsupported)
+                       Optional<std::string&> reasonIfUnsupported)
 {
+    ignore_unused(input);
+    ignore_unused(output);
+    ignore_unused(descriptor);
+    ignore_unused(reasonIfUnsupported);
     return false;
 }
 
+bool IsPadSupportedCl(const TensorInfo& input,
+                      const TensorInfo& output,
+                      const PadDescriptor& descriptor,
+                      Optional<std::string&> reasonIfUnsupported)
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor);
+}
+
 }
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 314ac4c..5cd756b 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -5,6 +5,7 @@
 #pragma once
 
 #include <armnn/DescriptorsFwd.hpp>
+#include <armnn/Optional.hpp>
 #include <armnn/Types.hpp>
 #include <armnn/Tensor.hpp>
 #include <armnn/ArmNN.hpp>
@@ -18,19 +19,19 @@
 };
 
 bool IsClDirectConvolution2dSupported(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc);
-bool IsClDepthwiseConvolution2dDescParamsSupported(std::string* reasonIfUnsupported,
+bool IsClDepthwiseConvolution2dDescParamsSupported(Optional<std::string&> reasonIfUnsupported,
                                                    const DepthwiseConvolution2dDescriptor& parameters,
                                                    const TensorInfo& weights);
 
 bool IsActivationSupportedCl(const TensorInfo& input,
                              const TensorInfo& output,
                              const ActivationDescriptor& descriptor,
-                             std::string* reasonIfUnsupported = nullptr);
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsAdditionSupportedCl(const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
-                           std::string* reasonIfUnsupported = nullptr);
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsBatchNormalizationSupportedCl(const TensorInfo& input,
                                      const TensorInfo& output,
@@ -39,130 +40,143 @@
                                      const TensorInfo& beta,
                                      const TensorInfo& gamma,
                                      const BatchNormalizationDescriptor& descriptor,
-                                     std::string* reasonIfUnsupported = nullptr);
+                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsConstantSupportedCl(const TensorInfo& output,
-                           std::string* reasonIfUnsupported = nullptr);
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsConvolution2dSupportedCl(const TensorInfo& input,
                                 const TensorInfo& output,
                                 const Convolution2dDescriptor& descriptor,
                                 const TensorInfo& weights,
                                 const Optional<TensorInfo>& biases,
-                                std::string* reasonIfUnsupported = nullptr);
+                                Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const DepthwiseConvolution2dDescriptor& descriptor,
                                        const TensorInfo& weights,
                                        const Optional<TensorInfo>& biases,
-                                       std::string* reasonIfUnsupported = nullptr);
+                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsDivisionSupportedCl(const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
-                           std::string* reasonIfUnsupported = nullptr);
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsSubtractionSupportedCl(const TensorInfo& input0,
                               const TensorInfo& input1,
                               const TensorInfo& output,
-                              std::string* reasonIfUnsupported = nullptr);
+                              Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsFullyConnectedSupportedCl(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const TensorInfo& weights,
                                  const TensorInfo& biases,
                                  const FullyConnectedDescriptor& descriptor,
-                                 std::string* reasonIfUnsupported = nullptr);
+                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsInputSupportedCl(const TensorInfo& input,
-                        std::string* reasonIfUnsupported = nullptr);
+                        Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsL2NormalizationSupportedCl(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const L2NormalizationDescriptor& descriptor,
-                                  std::string* reasonIfUnsupported = nullptr);
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
-bool IsLstmSupportedCl(const TensorInfo& input, const TensorInfo& outputStateIn,
-                       const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
-                       const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
-                       const TensorInfo& output, const LstmDescriptor& descriptor,
-                       const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
-                       const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
-                       const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
-                       const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
-                       const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
-                       const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
-                       const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
-                       const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
-                       const TensorInfo* cellToOutputWeights, std::string* reasonIfUnsupported = nullptr);
+bool IsLstmSupportedCl(const TensorInfo& input,
+                       const TensorInfo& outputStateIn,
+                       const TensorInfo& cellStateIn,
+                       const TensorInfo& scratchBuffer,
+                       const TensorInfo& outputStateOut,
+                       const TensorInfo& cellStateOut,
+                       const TensorInfo& output,
+                       const LstmDescriptor& descriptor,
+                       const TensorInfo& inputToForgetWeights,
+                       const TensorInfo& inputToCellWeights,
+                       const TensorInfo& inputToOutputWeights,
+                       const TensorInfo& recurrentToForgetWeights,
+                       const TensorInfo& recurrentToCellWeights,
+                       const TensorInfo& recurrentToOutputWeights,
+                       const TensorInfo& forgetGateBias,
+                       const TensorInfo& cellBias,
+                       const TensorInfo& outputGateBias,
+                       const TensorInfo* inputToInputWeights,
+                       const TensorInfo* recurrentToInputWeights,
+                       const TensorInfo* cellToInputWeights,
+                       const TensorInfo* inputGateBias,
+                       const TensorInfo* projectionWeights,
+                       const TensorInfo* projectionBias,
+                       const TensorInfo* cellToForgetWeights,
+                       const TensorInfo* cellToOutputWeights,
+                       Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsMergerSupportedCl(const std::vector<const TensorInfo*> inputs,
                          const OriginsDescriptor& descriptor,
-                         std::string* reasonIfUnsupported = nullptr);
+                         Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsMultiplicationSupportedCl(const TensorInfo& input0,
                                  const TensorInfo& input1,
                                  const TensorInfo& output,
-                                 std::string* reasonIfUnsupported = nullptr);
+                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsNormalizationSupportedCl(const TensorInfo& input,
                                 const TensorInfo& output,
                                 const NormalizationDescriptor& descriptor,
-                                std::string* reasonIfUnsupported = nullptr);
+                                Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsOutputSupportedCl(const TensorInfo& output,
-                         std::string* reasonIfUnsupported = nullptr);
-
-bool IsPadSupportedCl(const TensorInfo& input,
-                      const TensorInfo& output,
-                      const PadDescriptor& descriptor,
-                      std::string* reasonIfUnsupported = nullptr);
+                         Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsPermuteSupportedCl(const TensorInfo& input,
                           const TensorInfo& output,
                           const PermuteDescriptor& descriptor,
-                          std::string* reasonIfUnsupported = nullptr);
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsPooling2dSupportedCl(const TensorInfo& input,
                             const TensorInfo& output,
                             const Pooling2dDescriptor& descriptor,
-                            std::string* reasonIfUnsupported = nullptr);
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsResizeBilinearSupportedCl(const TensorInfo& input,
-                                 std::string* reasonIfUnsupported = nullptr);
+                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsSoftmaxSupportedCl(const TensorInfo& input,
                           const TensorInfo& output,
                           const SoftmaxDescriptor& descriptor,
-                          std::string* reasonIfUnsupported = nullptr);
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsSplitterSupportedCl(const TensorInfo& input,
                            const ViewsDescriptor& descriptor,
-                           std::string* reasonIfUnsupported = nullptr);
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsFakeQuantizationSupportedCl(const TensorInfo& input,
                                    const FakeQuantizationDescriptor& descriptor,
-                                   std::string* reasonIfUnsupported = nullptr);
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsReshapeSupportedCl(const TensorInfo& input,
-                          std::string* reasonIfUnsupported = nullptr);
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsFloorSupportedCl(const TensorInfo& input,
                         const TensorInfo& output,
-                        std::string* reasonIfUnsupported = nullptr);
+                        Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+bool IsConvertFp16ToFp32SupportedCl(const TensorInfo& input,
+                                    const TensorInfo& output,
+                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+bool IsConvertFp32ToFp16SupportedCl(const TensorInfo& input,
+                                    const TensorInfo& output,
+                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsMeanSupportedCl(const TensorInfo& input,
                        const TensorInfo& output,
                        const MeanDescriptor& descriptor,
-                       std::string* reasonIfUnsupported = nullptr);
+                       Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
-bool IsConvertFp16ToFp32SupportedCl(const TensorInfo& input,
-                                    const TensorInfo& output,
-                                    std::string* reasonIfUnsupported = nullptr);
-
-bool IsConvertFp32ToFp16SupportedCl(const TensorInfo& input,
-                                    const TensorInfo& output,
-                                    std::string* reasonIfUnsupported = nullptr);
+bool IsPadSupportedCl(const TensorInfo& input,
+                      const TensorInfo& output,
+                      const PadDescriptor& descriptor,
+                      Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 }
diff --git a/src/backends/cl/workloads/ClAdditionWorkload.cpp b/src/backends/cl/workloads/ClAdditionWorkload.cpp
index aa032e8..ec0dd30 100644
--- a/src/backends/cl/workloads/ClAdditionWorkload.cpp
+++ b/src/backends/cl/workloads/ClAdditionWorkload.cpp
@@ -37,10 +37,9 @@
     m_Layer.run();
 }
 
-bool ClAdditionValidate(const TensorInfo& input0,
-                        const TensorInfo& input1,
-                        const TensorInfo& output,
-                        std::string* reasonIfUnsupported)
+arm_compute::Status ClAdditionValidate(const TensorInfo& input0,
+                                       const TensorInfo& input1,
+                                       const TensorInfo& output)
 {
     const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0);
     const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1);
@@ -51,13 +50,7 @@
                                                                                       &aclOutputInfo,
                                                                                       g_AclConvertPolicy);
 
-    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
-    if (!supported && reasonIfUnsupported)
-    {
-        *reasonIfUnsupported = aclStatus.error_description();
-    }
-
-    return supported;
+    return aclStatus;
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClAdditionWorkload.hpp b/src/backends/cl/workloads/ClAdditionWorkload.hpp
index 3e4ee26..c5e6aff 100644
--- a/src/backends/cl/workloads/ClAdditionWorkload.hpp
+++ b/src/backends/cl/workloads/ClAdditionWorkload.hpp
@@ -24,8 +24,7 @@
     mutable arm_compute::CLArithmeticAddition m_Layer;
 };
 
-bool ClAdditionValidate(const TensorInfo& input0,
-                        const TensorInfo& input1,
-                        const TensorInfo& output,
-                        std::string* reasonIfUnsupported);
+arm_compute::Status ClAdditionValidate(const TensorInfo& input0,
+                                       const TensorInfo& input1,
+                                       const TensorInfo& output);
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
index e7663b4..2c9a0e1 100644
--- a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
@@ -32,19 +32,15 @@
     m_Layer.run();
 }
 
-arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input,
-                                                        const TensorInfo& output,
-                                                        std::string* reasonIfUnsupported)
+arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
 {
     if (input.GetDataType() != DataType::Float16)
     {
-        *reasonIfUnsupported = "Input should be Float16";
-        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, *reasonIfUnsupported);
+        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, "Input should be Float16");
     }
     if (output.GetDataType() != DataType::Float32)
     {
-        *reasonIfUnsupported = "Output should be Float32";
-        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, *reasonIfUnsupported);
+        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, "Output should be Float32");
     }
 
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
@@ -53,12 +49,6 @@
     const arm_compute::Status aclStatus = arm_compute::CLDepthConvertLayer::validate(
         &aclInputInfo, &aclOutputInfo, g_AclConvertPolicy, 0);
 
-    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
-    if (!supported && reasonIfUnsupported)
-    {
-        *reasonIfUnsupported = aclStatus.error_description();
-    }
-
     return aclStatus;
 }
 
diff --git a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp
index b644748..f5f230d 100644
--- a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp
+++ b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp
@@ -23,8 +23,6 @@
     mutable arm_compute::CLDepthConvertLayer m_Layer;
 };
 
-arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input,
-                                                        const TensorInfo& output,
-                                                        std::string* reasonIfUnsupported);
+arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input, const TensorInfo& output);
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
index 2ae4adc..6758180 100644
--- a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
@@ -32,19 +32,15 @@
     m_Layer.run();
 }
 
-arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input,
-                                                        const TensorInfo& output,
-                                                        std::string* reasonIfUnsupported)
+arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
 {
     if (input.GetDataType() != DataType::Float32)
     {
-        *reasonIfUnsupported = "Input should be Float32";
-        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, *reasonIfUnsupported);
+        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, "Input should be Float32");
     }
     if (output.GetDataType() != DataType::Float16)
     {
-        *reasonIfUnsupported = "Output should be Float16";
-        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, *reasonIfUnsupported);
+        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, "Output should be Float16");
     }
 
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
@@ -53,12 +49,6 @@
     const arm_compute::Status aclStatus = arm_compute::CLDepthConvertLayer::validate(
         &aclInputInfo, &aclOutputInfo, g_AclConvertPolicy, 0);
 
-    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
-    if (!supported && reasonIfUnsupported)
-    {
-        *reasonIfUnsupported = aclStatus.error_description();
-    }
-
     return aclStatus;
 }
 
diff --git a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.hpp b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.hpp
index 95d1990..28d0bfa 100644
--- a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.hpp
+++ b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.hpp
@@ -23,8 +23,6 @@
     mutable arm_compute::CLDepthConvertLayer m_Layer;
 };
 
-arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input,
-                                                        const TensorInfo& output,
-                                                        std::string* reasonIfUnsupported);
+arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input, const TensorInfo& output);
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClPadWorkload.cpp b/src/backends/cl/workloads/ClPadWorkload.cpp
index 45dc5e8..89b0d8f 100644
--- a/src/backends/cl/workloads/ClPadWorkload.cpp
+++ b/src/backends/cl/workloads/ClPadWorkload.cpp
@@ -35,10 +35,9 @@
     m_Layer.run();
 }
 
-bool ClPadValidate(const TensorInfo& input,
-                   const TensorInfo& output,
-                   const PadDescriptor& descriptor,
-                   std::string* reasonIfUnsupported)
+arm_compute::Status ClPadValidate(const TensorInfo& input,
+                                  const TensorInfo& output,
+                                  const PadDescriptor& descriptor)
 {
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
@@ -48,13 +47,7 @@
                                                                             &aclOutputInfo,
                                                                             padList);
 
-    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
-    if (!supported && reasonIfUnsupported)
-    {
-        *reasonIfUnsupported = aclStatus.error_description();
-    }
-
-    return supported;
+    return aclStatus;
 }
 
 } // namespace armnn
diff --git a/src/backends/cl/workloads/ClPadWorkload.hpp b/src/backends/cl/workloads/ClPadWorkload.hpp
index a7ad667..97f57fd 100644
--- a/src/backends/cl/workloads/ClPadWorkload.hpp
+++ b/src/backends/cl/workloads/ClPadWorkload.hpp
@@ -23,10 +23,9 @@
     mutable arm_compute::CLPadLayer m_Layer;
 };
 
-bool ClPadValidate(const TensorInfo& input,
-                   const TensorInfo& output,
-                   const PadDescriptor& descriptor,
-                   std::string* reasonIfUnsupported);
+arm_compute::Status ClPadValidate(const TensorInfo& input,
+                                  const TensorInfo& output,
+                                  const PadDescriptor& descriptor);
 
 } //namespace armnn
 
diff --git a/src/backends/cl/workloads/ClSubtractionWorkload.cpp b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
index 8efed94..1967fae 100644
--- a/src/backends/cl/workloads/ClSubtractionWorkload.cpp
+++ b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
@@ -35,10 +35,9 @@
     m_Layer.run();
 }
 
-bool ClSubtractionValidate(const TensorInfo& input0,
-                           const TensorInfo& input1,
-                           const TensorInfo& output,
-                           std::string* reasonIfUnsupported)
+arm_compute::Status ClSubtractionValidate(const TensorInfo& input0,
+                                          const TensorInfo& input1,
+                                          const TensorInfo& output)
 {
     const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0);
     const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1);
@@ -49,13 +48,7 @@
                                                                                          &aclOutputInfo,
                                                                                          g_AclConvertPolicy);
 
-    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
-    if (!supported && reasonIfUnsupported)
-    {
-        *reasonIfUnsupported = aclStatus.error_description();
-    }
-
-    return supported;
+    return aclStatus;
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClSubtractionWorkload.hpp b/src/backends/cl/workloads/ClSubtractionWorkload.hpp
index 7dd608b..3a4210d 100644
--- a/src/backends/cl/workloads/ClSubtractionWorkload.hpp
+++ b/src/backends/cl/workloads/ClSubtractionWorkload.hpp
@@ -23,8 +23,7 @@
     mutable arm_compute::CLArithmeticSubtraction m_Layer;
 };
 
-bool ClSubtractionValidate(const TensorInfo& input0,
-                           const TensorInfo& input1,
-                           const TensorInfo& output,
-                           std::string* reasonIfUnsupported);
+arm_compute::Status ClSubtractionValidate(const TensorInfo& input0,
+                                          const TensorInfo& input1,
+                                          const TensorInfo& output);
 } //namespace armnn
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index ef70fbd..b6d5e48 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -69,13 +69,14 @@
     return preferDirectConvolution;
 }
 
-bool IsNeonNormalizationDescParamsSupported(std::string* reasonIfUnsupported, const NormalizationDescriptor& parameters)
+bool IsNeonNormalizationDescParamsSupported(Optional<std::string&> reasonIfUnsupported,
+                                            const NormalizationDescriptor& parameters)
 {
     if (parameters.m_NormMethodType != NormalizationAlgorithmMethod::LocalBrightness)
     {
         if (reasonIfUnsupported)
         {
-            *reasonIfUnsupported = "Unsupported normalisation method type, only LocalBrightness is supported";
+            reasonIfUnsupported.value() = "Unsupported normalisation method type, only LocalBrightness is supported";
         }
         return false;
     }
@@ -83,7 +84,7 @@
     {
         if (reasonIfUnsupported)
         {
-            *reasonIfUnsupported = "Normalization size must be an odd number.";
+            reasonIfUnsupported.value() = "Normalization size must be an odd number.";
         }
         return false;
     }
@@ -91,21 +92,21 @@
     return true;
 }
 
-bool IsNeonBackendSupported(std::string* reasonIfUnsupported)
+bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported)
 {
 #if ARMCOMPUTENEON_ENABLED
     return true;
 #else
-    if (reasonIfUnsupported != nullptr)
+    if (reasonIfUnsupported)
     {
-        *reasonIfUnsupported = "The armnn library has been built without NEON support";
+        reasonIfUnsupported.value() = "The armnn library has been built without NEON support";
     }
     return false;
 #endif
 }
 
 template<typename FloatFunc, typename Uint8Func, typename ... Params>
-bool IsSupportedForDataTypeNeon(std::string* reasonIfUnsupported,
+bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
                                 DataType dataType,
                                 FloatFunc floatFuncPtr,
                                 Uint8Func uint8FuncPtr,
@@ -122,13 +123,13 @@
 
 #if ARMCOMPUTENEON_ENABLED
 template<class FuncType, class... Args>
-inline bool IsWorkloadSupported(FuncType& func, std::string* reasonIfUnsupported, Args&&... args)
+inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
 {
     arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
     const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
     if (!supported && reasonIfUnsupported)
     {
-        *reasonIfUnsupported = aclStatus.error_description();
+        reasonIfUnsupported.value() = aclStatus.error_description();
     }
     return supported;
 }
@@ -143,7 +144,7 @@
 bool IsActivationSupportedNeon(const TensorInfo& input,
                                const TensorInfo& output,
                                const ActivationDescriptor& descriptor,
-                               std::string* reasonIfUnsupported)
+                               Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(descriptor);
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
@@ -156,7 +157,7 @@
 bool IsAdditionSupportedNeon(const TensorInfo& input0,
                              const TensorInfo& input1,
                              const TensorInfo& output,
-                             std::string* reasonIfUnsupported)
+                             Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
                                    reasonIfUnsupported,
@@ -172,7 +173,7 @@
                                        const TensorInfo& beta,
                                        const TensorInfo& gamma,
                                        const BatchNormalizationDescriptor& descriptor,
-                                       std::string* reasonIfUnsupported)
+                                       Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
                                    reasonIfUnsupported,
@@ -186,7 +187,7 @@
 }
 
 bool IsConstantSupportedNeon(const TensorInfo& output,
-                             std::string* reasonIfUnsupported)
+                             Optional<std::string&> reasonIfUnsupported)
 {
     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                       output.GetDataType(),
@@ -199,7 +200,7 @@
                                   const Convolution2dDescriptor& descriptor,
                                   const TensorInfo& weights,
                                   const Optional<TensorInfo>& biases,
-                                  std::string* reasonIfUnsupported)
+                                  Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
                                    reasonIfUnsupported,
@@ -215,7 +216,7 @@
                                          const DepthwiseConvolution2dDescriptor& descriptor,
                                          const TensorInfo& weights,
                                          const Optional<TensorInfo>& biases,
-                                         std::string* reasonIfUnsupported)
+                                         Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                    reasonIfUnsupported,
@@ -229,16 +230,20 @@
 bool IsDivisionSupportedNeon(const TensorInfo& input0,
                              const TensorInfo& input1,
                              const TensorInfo& output,
-                             std::string* reasonIfUnsupported)
+                             Optional<std::string&> reasonIfUnsupported)
 {
     // At the moment division is not supported
+    ignore_unused(input0);
+    ignore_unused(input1);
+    ignore_unused(output);
+    ignore_unused(reasonIfUnsupported);
     return false;
 }
 
 bool IsSubtractionSupportedNeon(const TensorInfo& input0,
                                 const TensorInfo& input1,
                                 const TensorInfo& output,
-                                std::string* reasonIfUnsupported)
+                                Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
                                    reasonIfUnsupported,
@@ -252,7 +257,7 @@
                                    const TensorInfo& weights,
                                    const TensorInfo& biases,
                                    const FullyConnectedDescriptor& descriptor,
-                                   std::string* reasonIfUnsupported)
+                                   Optional<std::string&> reasonIfUnsupported)
 {
     // At the moment U8 is unsupported
     if (input.GetDataType() == DataType::QuantisedAsymm8)
@@ -269,7 +274,7 @@
 }
 
 bool IsInputSupportedNeon(const TensorInfo& input,
-                          std::string* reasonIfUnsupported)
+                          Optional<std::string&> reasonIfUnsupported)
 {
     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                       input.GetDataType(),
@@ -280,14 +285,14 @@
 bool IsL2NormalizationSupportedNeon(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const L2NormalizationDescriptor& descriptor,
-                                    std::string* reasonIfUnsupported)
+                                    Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
 bool IsMergerSupportedNeon(const std::vector<const TensorInfo*> inputs,
                            const OriginsDescriptor& descriptor,
-                           std::string* reasonIfUnsupported)
+                           Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(descriptor);
     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
@@ -299,7 +304,7 @@
 bool IsMultiplicationSupportedNeon(const TensorInfo& input0,
                                    const TensorInfo& input1,
                                    const TensorInfo& output,
-                                   std::string* reasonIfUnsupported)
+                                   Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
                                    reasonIfUnsupported,
@@ -311,13 +316,13 @@
 bool IsNormalizationSupportedNeon(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const NormalizationDescriptor& descriptor,
-                                  std::string* reasonIfUnsupported)
+                                  Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
 bool IsOutputSupportedNeon(const TensorInfo& output,
-                           std::string* reasonIfUnsupported)
+                           Optional<std::string&> reasonIfUnsupported)
 {
     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                       output.GetDataType(),
@@ -328,7 +333,7 @@
 bool IsPermuteSupportedNeon(const TensorInfo& input,
                             const TensorInfo& output,
                             const PermuteDescriptor& descriptor,
-                            std::string* reasonIfUnsupported)
+                            Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
@@ -336,29 +341,30 @@
 bool IsPooling2dSupportedNeon(const TensorInfo& input,
                               const TensorInfo& output,
                               const Pooling2dDescriptor& descriptor,
-                              std::string* reasonIfUnsupported)
+                              Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
 bool IsResizeBilinearSupportedNeon(const TensorInfo& input,
-                                   std::string* reasonIfUnsupported)
+                                   Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(input);
+    ignore_unused(reasonIfUnsupported);
     return false;
 }
 
 bool IsSoftmaxSupportedNeon(const TensorInfo& input,
                             const TensorInfo& output,
                             const SoftmaxDescriptor& descriptor,
-                            std::string* reasonIfUnsupported)
+                            Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
 bool IsSplitterSupportedNeon(const TensorInfo& input,
                              const ViewsDescriptor& descriptor,
-                             std::string* reasonIfUnsupported)
+                             Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(descriptor);
     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
@@ -369,15 +375,16 @@
 
 bool IsFakeQuantizationSupportedNeon(const TensorInfo& input,
                                      const FakeQuantizationDescriptor& descriptor,
-                                     std::string* reasonIfUnsupported)
+                                     Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(input);
     ignore_unused(descriptor);
+    ignore_unused(reasonIfUnsupported);
     return false;
 }
 
 bool IsReshapeSupportedNeon(const TensorInfo& input,
-                            std::string* reasonIfUnsupported)
+                            Optional<std::string&> reasonIfUnsupported)
 {
     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                       input.GetDataType(),
@@ -387,7 +394,7 @@
 
 bool IsFloorSupportedNeon(const TensorInfo& input,
                           const TensorInfo& output,
-                          std::string* reasonIfUnsupported)
+                          Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(output);
     return IsNeonBackendSupported(reasonIfUnsupported) &&
@@ -398,19 +405,32 @@
                                          &FalseFuncU8<>);
 }
 
-bool IsLstmSupportedNeon(const TensorInfo& input, const TensorInfo& outputStateIn,
-                         const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
-                         const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
-                         const TensorInfo& output, const LstmDescriptor& descriptor,
-                         const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
-                         const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
-                         const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
-                         const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
-                         const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
-                         const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
-                         const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
-                         const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
-                         const TensorInfo* cellToOutputWeights, std::string* reasonIfUnsupported)
+bool IsLstmSupportedNeon(const TensorInfo& input,
+                         const TensorInfo& outputStateIn,
+                         const TensorInfo& cellStateIn,
+                         const TensorInfo& scratchBuffer,
+                         const TensorInfo& outputStateOut,
+                         const TensorInfo& cellStateOut,
+                         const TensorInfo& output,
+                         const LstmDescriptor& descriptor,
+                         const TensorInfo& inputToForgetWeights,
+                         const TensorInfo& inputToCellWeights,
+                         const TensorInfo& inputToOutputWeights,
+                         const TensorInfo& recurrentToForgetWeights,
+                         const TensorInfo& recurrentToCellWeights,
+                         const TensorInfo& recurrentToOutputWeights,
+                         const TensorInfo& forgetGateBias,
+                         const TensorInfo& cellBias,
+                         const TensorInfo& outputGateBias,
+                         const TensorInfo* inputToInputWeights,
+                         const TensorInfo* recurrentToInputWeights,
+                         const TensorInfo* cellToInputWeights,
+                         const TensorInfo* inputGateBias,
+                         const TensorInfo* projectionWeights,
+                         const TensorInfo* projectionBias,
+                         const TensorInfo* cellToForgetWeights,
+                         const TensorInfo* cellToOutputWeights,
+                         Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(input);
     ignore_unused(outputStateIn);
@@ -437,40 +457,51 @@
     ignore_unused(projectionBias);
     ignore_unused(cellToForgetWeights);
     ignore_unused(cellToOutputWeights);
+    ignore_unused(reasonIfUnsupported);
     return false;
 }
 
 bool IsConvertFp16ToFp32SupportedNeon(const TensorInfo& input,
                                       const TensorInfo& output,
-                                      std::string* reasonIfUnsupported)
+                                      Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(input);
     ignore_unused(output);
+    ignore_unused(reasonIfUnsupported);
     return true;
 }
 
 bool IsConvertFp32ToFp16SupportedNeon(const TensorInfo& input,
                                       const TensorInfo& output,
-                                      std::string* reasonIfUnsupported)
+                                      Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(input);
     ignore_unused(output);
+    ignore_unused(reasonIfUnsupported);
     return true;
 }
 
 bool IsMeanSupportedNeon(const TensorInfo& input,
                          const TensorInfo& output,
                          const MeanDescriptor& descriptor,
-                         std::string* reasonIfUnsupported)
+                         Optional<std::string&> reasonIfUnsupported)
 {
+    ignore_unused(input);
+    ignore_unused(output);
+    ignore_unused(descriptor);
+    ignore_unused(reasonIfUnsupported);
     return false;
 }
 
 bool IsPadSupportedNeon(const TensorInfo& input,
                         const TensorInfo& output,
                         const PadDescriptor& descriptor,
-                        std::string* reasonIfUnsupported)
+                        Optional<std::string&> reasonIfUnsupported)
 {
+    ignore_unused(input);
+    ignore_unused(output);
+    ignore_unused(descriptor);
+    ignore_unused(reasonIfUnsupported);
     return false;
 }
 
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 8b674c6..468cf58 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -5,6 +5,7 @@
 #pragma once
 
 #include <armnn/DescriptorsFwd.hpp>
+#include <armnn/Optional.hpp>
 #include <armnn/Types.hpp>
 #include <armnn/Tensor.hpp>
 
@@ -18,22 +19,22 @@
 
 bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc);
 
-bool IsNeonNormalizationDescParamsSupported(std::string* reasonIfUnsupported,
+bool IsNeonNormalizationDescParamsSupported(Optional<std::string&> reasonIfUnsupported,
                                             const NormalizationDescriptor& parameters);
 
 bool IsActivationSupportedNeon(const TensorInfo& input,
                                const TensorInfo& output,
                                const ActivationDescriptor& descriptor,
-                               std::string* reasonIfUnsupported);
+                               Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
-bool IsNeonDepthwiseConvolution2dDescParamsSupported(std::string* reasonIfUnsupported,
+bool IsNeonDepthwiseConvolution2dDescParamsSupported(Optional<std::string&> reasonIfUnsupported,
                                                      const DepthwiseConvolution2dDescriptor& parameters,
                                                      const TensorInfo& weights);
 
 bool IsAdditionSupportedNeon(const TensorInfo& input0,
                              const TensorInfo& input1,
                              const TensorInfo& output,
-                             std::string* reasonIfUnsupported);
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsBatchNormalizationSupportedNeon(const TensorInfo& input,
                                        const TensorInfo& output,
@@ -42,17 +43,17 @@
                                        const TensorInfo& beta,
                                        const TensorInfo& gamma,
                                        const BatchNormalizationDescriptor& descriptor,
-                                       std::string* reasonIfUnsupported = nullptr);
+                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsConstantSupportedNeon(const TensorInfo& output,
-                             std::string* reasonIfUnsupported = nullptr);
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsConvolution2dSupportedNeon(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const Convolution2dDescriptor& descriptor,
                                   const TensorInfo& weights,
                                   const Optional<TensorInfo>& biases,
-                                  std::string* reasonIfUnsupported = nullptr);
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 
 bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input,
@@ -60,113 +61,126 @@
                                          const DepthwiseConvolution2dDescriptor& descriptor,
                                          const TensorInfo& weights,
                                          const Optional<TensorInfo>& biases,
-                                         std::string* reasonIfUnsupported = nullptr);
+                                         Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsDivisionSupportedNeon(const TensorInfo& input0,
                              const TensorInfo& input1,
                              const TensorInfo& output,
-                             std::string* reasonIfUnsupported = nullptr);
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsSubtractionSupportedNeon(const TensorInfo& input0,
                                 const TensorInfo& input1,
                                 const TensorInfo& output,
-                                std::string* reasonIfUnsupported = nullptr);
+                                Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsFullyConnectedSupportedNeon(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const TensorInfo& weights,
                                    const TensorInfo& biases,
                                    const FullyConnectedDescriptor& descriptor,
-                                   std::string* reasonIfUnsupported = nullptr);
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsInputSupportedNeon(const TensorInfo& input,
-                          std::string* reasonIfUnsupported = nullptr);
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsL2NormalizationSupportedNeon(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const L2NormalizationDescriptor& descriptor,
-                                    std::string* reasonIfUnsupported = nullptr);
+                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsMergerSupportedNeon(const std::vector<const TensorInfo*> inputs,
                            const OriginsDescriptor& descriptor,
-                           std::string* reasonIfUnsupported = nullptr);
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsMultiplicationSupportedNeon(const TensorInfo& input0,
                                    const TensorInfo& input1,
                                    const TensorInfo& output,
-                                   std::string* reasonIfUnsupported = nullptr);
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsNormalizationSupportedNeon(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const NormalizationDescriptor& descriptor,
-                                  std::string* reasonIfUnsupported = nullptr);
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsOutputSupportedNeon(const TensorInfo& output,
-                           std::string* reasonIfUnsupported = nullptr);
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsPermuteSupportedNeon(const TensorInfo& input,
                             const TensorInfo& output,
                             const PermuteDescriptor& descriptor,
-                            std::string* reasonIfUnsupported = nullptr);
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsPooling2dSupportedNeon(const TensorInfo& input,
                               const TensorInfo& output,
                               const Pooling2dDescriptor& descriptor,
-                              std::string* reasonIfUnsupported = nullptr);
+                              Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsResizeBilinearSupportedNeon(const TensorInfo& input,
-                                   std::string* reasonIfUnsupported = nullptr);
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsSoftmaxSupportedNeon(const TensorInfo& input,
                             const TensorInfo& output,
                             const SoftmaxDescriptor& descriptor,
-                            std::string* reasonIfUnsupported = nullptr);
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsSplitterSupportedNeon(const TensorInfo& input,
                              const ViewsDescriptor& descriptor,
-                             std::string* reasonIfUnsupported = nullptr);
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsFakeQuantizationSupportedNeon(const TensorInfo& input,
                                      const FakeQuantizationDescriptor& descriptor,
-                                     std::string* reasonIfUnsupported = nullptr);
+                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsReshapeSupportedNeon(const TensorInfo& input,
-                            std::string* reasonIfUnsupported = nullptr);
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsFloorSupportedNeon(const TensorInfo& input,
                           const TensorInfo& output,
-                          std::string* reasonIfUnsupported = nullptr);
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
-bool IsLstmSupportedNeon(const TensorInfo& input, const TensorInfo& outputStateIn,
-                         const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
-                         const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
-                         const TensorInfo& output, const LstmDescriptor& descriptor,
-                         const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
-                         const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
-                         const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
-                         const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
-                         const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
-                         const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
-                         const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
-                         const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
-                         const TensorInfo* cellToOutputWeights, std::string* reasonIfUnsupported = nullptr);
+bool IsLstmSupportedNeon(const TensorInfo& input,
+                         const TensorInfo& outputStateIn,
+                         const TensorInfo& cellStateIn,
+                         const TensorInfo& scratchBuffer,
+                         const TensorInfo& outputStateOut,
+                         const TensorInfo& cellStateOut,
+                         const TensorInfo& output,
+                         const LstmDescriptor& descriptor,
+                         const TensorInfo& inputToForgetWeights,
+                         const TensorInfo& inputToCellWeights,
+                         const TensorInfo& inputToOutputWeights,
+                         const TensorInfo& recurrentToForgetWeights,
+                         const TensorInfo& recurrentToCellWeights,
+                         const TensorInfo& recurrentToOutputWeights,
+                         const TensorInfo& forgetGateBias,
+                         const TensorInfo& cellBias,
+                         const TensorInfo& outputGateBias,
+                         const TensorInfo* inputToInputWeights,
+                         const TensorInfo* recurrentToInputWeights,
+                         const TensorInfo* cellToInputWeights,
+                         const TensorInfo* inputGateBias,
+                         const TensorInfo* projectionWeights,
+                         const TensorInfo* projectionBias,
+                         const TensorInfo* cellToForgetWeights,
+                         const TensorInfo* cellToOutputWeights,
+                         Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsConvertFp16ToFp32SupportedNeon(const TensorInfo& input,
                                       const TensorInfo& output,
-                                      std::string* reasonIfUnsupported = nullptr);
+                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsConvertFp32ToFp16SupportedNeon(const TensorInfo& input,
                                       const TensorInfo& output,
-                                      std::string* reasonIfUnsupported = nullptr);
+                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsMeanSupportedNeon(const TensorInfo& input,
                          const TensorInfo& output,
                          const MeanDescriptor& descriptor,
-                         std::string* reasonIfUnsupported = nullptr);
+                         Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsPadSupportedNeon(const TensorInfo& input,
                         const TensorInfo& output,
                         const PadDescriptor& descriptor,
-                        std::string* reasonIfUnsupported = nullptr);
+                        Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 }
diff --git a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
index 7019c82..0deff79 100644
--- a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
@@ -33,7 +33,7 @@
 {
     m_Data.ValidateInputsOutputs("NeonNormalizationFloatWorkload", 1, 1);
     std::string reasonIfUnsupported;
-    if (!IsNeonNormalizationDescParamsSupported(&reasonIfUnsupported, m_Data.m_Parameters))
+    if (!IsNeonNormalizationDescParamsSupported(Optional<std::string&>(reasonIfUnsupported), m_Data.m_Parameters))
     {
         throw UnimplementedException(reasonIfUnsupported);
     }
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index e6b1442..2ee942c 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -17,22 +17,12 @@
 namespace armnn
 {
 
-namespace
-{
-
-std::string* GetReasonIfUnsupportedPtr(const Optional<std::string&>& reasonIfUnsupported)
-{
-    return reasonIfUnsupported ? &reasonIfUnsupported.value() : nullptr;
-}
-
-} // anonymous namespace
-
 bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const ActivationDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsActivationSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsActivationSupportedRef(input, output, descriptor, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0,
@@ -40,10 +30,7 @@
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsAdditionSupportedRef(input0,
-                                         input1,
-                                         output,
-                                         GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsAdditionSupportedRef(input0, input1, output, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
@@ -62,27 +49,27 @@
                                                    beta,
                                                    gamma,
                                                    descriptor,
-                                                   GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+                                                   reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsConstantSupportedRef(output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsConstantSupportedRef(output, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsConvertFp16ToFp32SupportedRef(input, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsConvertFp16ToFp32SupportedRef(input, output, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsConvertFp32ToFp16SupportedRef(input, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsConvertFp32ToFp16SupportedRef(input, output, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
@@ -97,7 +84,7 @@
                                               descriptor,
                                               weights,
                                               biases,
-                                              GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+                                              reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
@@ -112,7 +99,7 @@
                                                      descriptor,
                                                      weights,
                                                      biases,
-                                                     GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+                                                     reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
@@ -120,21 +107,21 @@
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsDivisionSupportedRef(input0, input1, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsDivisionSupportedRef(input0, input1, output, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
                                                   const FakeQuantizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsFakeQuantizationSupportedRef(input, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsFakeQuantizationSupportedRef(input, descriptor, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsFloorSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsFloorSupportedRef(input, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsFloorSupportedRef(input, output, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
@@ -149,13 +136,13 @@
                                                weights,
                                                biases,
                                                descriptor,
-                                               GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+                                               reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsInputSupported(const TensorInfo& input,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsInputSupportedRef(input, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsInputSupportedRef(input, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
@@ -163,10 +150,7 @@
                                                  const L2NormalizationDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsL2NormalizationSupportedRef(input,
-                                                output,
-                                                descriptor,
-                                                GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsL2NormalizationSupportedRef(input, output, descriptor, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsLstmSupported(const TensorInfo& input,
@@ -221,7 +205,7 @@
                                      projectionBias,
                                      cellToForgetWeights,
                                      cellToOutputWeights,
-                                     GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+                                     reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
@@ -229,14 +213,14 @@
                                       const MeanDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsMeanSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsMeanSupportedRef(input, output, descriptor, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                         const OriginsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsMergerSupportedRef(inputs, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsMergerSupportedRef(inputs, descriptor, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
@@ -244,7 +228,7 @@
                                                 const TensorInfo& output,
                                                 Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsMultiplicationSupportedRef(input0, input1, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsMultiplicationSupportedRef(input0, input1, output, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
@@ -255,13 +239,13 @@
     return armnn::IsNormalizationSupportedRef(input,
                                               output,
                                               descriptor,
-                                              GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+                                              reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsOutputSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsOutputSupportedRef(output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsOutputSupportedRef(output, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsPadSupported(const TensorInfo& input,
@@ -269,7 +253,7 @@
                                      const PadDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsPadSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsPadSupportedRef(input, output, descriptor, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input,
@@ -277,7 +261,7 @@
                                          const PermuteDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsPermuteSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsPermuteSupportedRef(input, output, descriptor, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsPooling2dSupported(const TensorInfo& input,
@@ -285,19 +269,19 @@
                                            const Pooling2dDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsPooling2dSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsPooling2dSupportedRef(input, output, descriptor, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsReshapeSupportedRef(input, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsReshapeSupportedRef(input, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                                 Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsResizeBilinearSupportedRef(input, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsResizeBilinearSupportedRef(input, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
@@ -305,14 +289,14 @@
                                          const SoftmaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsSoftmaxSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsSoftmaxSupportedRef(input, output, descriptor, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                           const ViewsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsSplitterSupportedRef(input, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsSplitterSupportedRef(input, descriptor, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
@@ -320,7 +304,7 @@
                                              const TensorInfo& output,
                                              Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsSubtractionSupportedRef(input0, input1, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+    return armnn::IsSubtractionSupportedRef(input0, input1, output, reasonIfUnsupported);
 }
 
 //
@@ -329,7 +313,7 @@
 // TODO: Functions kept for backward compatibility. Remove once transition to pluggable backends is complete!
 
 template<typename Float32Func, typename Uint8Func, typename ... Params>
-bool IsSupportedForDataTypeRef(std::string* reasonIfUnsupported,
+bool IsSupportedForDataTypeRef(Optional<std::string&> reasonIfUnsupported,
                                DataType dataType,
                                Float32Func floatFuncPtr,
                                Uint8Func uint8FuncPtr,
@@ -346,7 +330,7 @@
 bool IsActivationSupportedRef(const TensorInfo& input,
                               const TensorInfo& output,
                               const ActivationDescriptor& descriptor,
-                              std::string* reasonIfUnsupported)
+                              Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(output);
     ignore_unused(descriptor);
@@ -359,7 +343,7 @@
 bool IsAdditionSupportedRef(const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output,
-                            std::string* reasonIfUnsupported)
+                            Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(input1);
     ignore_unused(output);
@@ -376,7 +360,7 @@
                                       const TensorInfo& beta,
                                       const TensorInfo& gamma,
                                       const BatchNormalizationDescriptor& descriptor,
-                                      std::string* reasonIfUnsupported)
+                                      Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(descriptor);
     return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -386,7 +370,7 @@
 }
 
 bool IsConstantSupportedRef(const TensorInfo& output,
-                            std::string* reasonIfUnsupported)
+                            Optional<std::string&> reasonIfUnsupported)
 {
     return IsSupportedForDataTypeRef(reasonIfUnsupported,
                                      output.GetDataType(),
@@ -399,7 +383,7 @@
                                  const Convolution2dDescriptor& descriptor,
                                  const TensorInfo& weights,
                                  const Optional<TensorInfo>& biases,
-                                 std::string* reasonIfUnsupported)
+                                 Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(descriptor);
     ignore_unused(output);
@@ -416,7 +400,7 @@
                                         const DepthwiseConvolution2dDescriptor& descriptor,
                                         const TensorInfo& weights,
                                         const Optional<TensorInfo>& biases,
-                                        std::string* reasonIfUnsupported)
+                                        Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(output);
     ignore_unused(descriptor);
@@ -431,7 +415,7 @@
 bool IsDivisionSupportedRef(const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output,
-                            std::string* reasonIfUnsupported)
+                            Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(input1);
     ignore_unused(output);
@@ -444,7 +428,7 @@
 bool IsSubtractionSupportedRef(const TensorInfo& input0,
                                const TensorInfo& input1,
                                const TensorInfo& output,
-                               std::string* reasonIfUnsupported)
+                               Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(input1);
     ignore_unused(output);
@@ -459,7 +443,7 @@
                                   const TensorInfo& weights,
                                   const TensorInfo& biases,
                                   const FullyConnectedDescriptor& descriptor,
-                                  std::string* reasonIfUnsupported)
+                                  Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(output);
     ignore_unused(descriptor);
@@ -472,7 +456,7 @@
 }
 
 bool IsInputSupportedRef(const TensorInfo& input,
-                         std::string* reasonIfUnsupported)
+                         Optional<std::string&> reasonIfUnsupported)
 {
     return IsSupportedForDataTypeRef(reasonIfUnsupported,
                                      input.GetDataType(),
@@ -483,7 +467,7 @@
 bool IsL2NormalizationSupportedRef(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const L2NormalizationDescriptor& descriptor,
-                                   std::string* reasonIfUnsupported)
+                                   Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(output);
     ignore_unused(descriptor);
@@ -495,7 +479,7 @@
 
 bool IsMergerSupportedRef(const std::vector<const TensorInfo*> inputs,
                           const OriginsDescriptor& descriptor,
-                          std::string* reasonIfUnsupported)
+                          Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(descriptor);
     return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -507,7 +491,7 @@
 bool IsMultiplicationSupportedRef(const TensorInfo& input0,
                                   const TensorInfo& input1,
                                   const TensorInfo& output,
-                                  std::string* reasonIfUnsupported)
+                                  Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(input1);
     ignore_unused(output);
@@ -520,7 +504,7 @@
 bool IsNormalizationSupportedRef(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const NormalizationDescriptor& descriptor,
-                                 std::string* reasonIfUnsupported)
+                                 Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(descriptor);
     return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -530,7 +514,7 @@
 }
 
 bool IsOutputSupportedRef(const TensorInfo& output,
-                          std::string* reasonIfUnsupported)
+                          Optional<std::string&> reasonIfUnsupported)
 {
     return IsSupportedForDataTypeRef(reasonIfUnsupported,
                                      output.GetDataType(),
@@ -541,7 +525,7 @@
 bool IsPermuteSupportedRef(const TensorInfo& input,
                            const TensorInfo& output,
                            const PermuteDescriptor& descriptor,
-                           std::string* reasonIfUnsupported)
+                           Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(descriptor);
     return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -553,7 +537,7 @@
 bool IsPooling2dSupportedRef(const TensorInfo& input,
                              const TensorInfo& output,
                              const Pooling2dDescriptor& descriptor,
-                             std::string* reasonIfUnsupported)
+                             Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(descriptor);
     return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -563,7 +547,7 @@
 }
 
 bool IsResizeBilinearSupportedRef(const TensorInfo& input,
-                                  std::string* reasonIfUnsupported)
+                                  Optional<std::string&> reasonIfUnsupported)
 {
     return IsSupportedForDataTypeRef(reasonIfUnsupported,
                                      input.GetDataType(),
@@ -574,7 +558,7 @@
 bool IsSoftmaxSupportedRef(const TensorInfo& input,
                            const TensorInfo& output,
                            const SoftmaxDescriptor& descriptor,
-                           std::string* reasonIfUnsupported)
+                           Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(output);
     ignore_unused(descriptor);
@@ -586,7 +570,7 @@
 
 bool IsSplitterSupportedRef(const TensorInfo& input,
                             const ViewsDescriptor& descriptor,
-                            std::string* reasonIfUnsupported)
+                            Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(descriptor);
     return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -597,7 +581,7 @@
 
 bool IsFakeQuantizationSupportedRef(const TensorInfo& input,
                                     const FakeQuantizationDescriptor& descriptor,
-                                    std::string* reasonIfUnsupported)
+                                    Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(descriptor);
     return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -607,7 +591,7 @@
 }
 
 bool IsReshapeSupportedRef(const TensorInfo& input,
-                           std::string* reasonIfUnsupported)
+                           Optional<std::string&> reasonIfUnsupported)
 {
     return IsSupportedForDataTypeRef(reasonIfUnsupported,
                                      input.GetDataType(),
@@ -617,7 +601,7 @@
 
 bool IsFloorSupportedRef(const TensorInfo& input,
                          const TensorInfo& output,
-                         std::string* reasonIfUnsupported)
+                         Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(output);
     return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -626,19 +610,32 @@
                                      &FalseFuncU8<>);
 }
 
-bool IsLstmSupportedRef(const TensorInfo& input, const TensorInfo& outputStateIn,
-                        const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
-                        const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
-                        const TensorInfo& output, const LstmDescriptor& descriptor,
-                        const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
-                        const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
-                        const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
-                        const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
-                        const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
-                        const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
-                        const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
-                        const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
-                        const TensorInfo* cellToOutputWeights, std::string* reasonIfUnsupported)
+bool IsLstmSupportedRef(const TensorInfo& input,
+                        const TensorInfo& outputStateIn,
+                        const TensorInfo& cellStateIn,
+                        const TensorInfo& scratchBuffer,
+                        const TensorInfo& outputStateOut,
+                        const TensorInfo& cellStateOut,
+                        const TensorInfo& output,
+                        const LstmDescriptor& descriptor,
+                        const TensorInfo& inputToForgetWeights,
+                        const TensorInfo& inputToCellWeights,
+                        const TensorInfo& inputToOutputWeights,
+                        const TensorInfo& recurrentToForgetWeights,
+                        const TensorInfo& recurrentToCellWeights,
+                        const TensorInfo& recurrentToOutputWeights,
+                        const TensorInfo& forgetGateBias,
+                        const TensorInfo& cellBias,
+                        const TensorInfo& outputGateBias,
+                        const TensorInfo* inputToInputWeights,
+                        const TensorInfo* recurrentToInputWeights,
+                        const TensorInfo* cellToInputWeights,
+                        const TensorInfo* inputGateBias,
+                        const TensorInfo* projectionWeights,
+                        const TensorInfo* projectionBias,
+                        const TensorInfo* cellToForgetWeights,
+                        const TensorInfo* cellToOutputWeights,
+                        Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(input);
     ignore_unused(outputStateIn);
@@ -665,12 +662,13 @@
     ignore_unused(projectionBias);
     ignore_unused(cellToForgetWeights);
     ignore_unused(cellToOutputWeights);
+    ignore_unused(reasonIfUnsupported);
     return false;
 }
 
 bool IsConvertFp16ToFp32SupportedRef(const TensorInfo& input,
                                      const TensorInfo& output,
-                                     std::string* reasonIfUnsupported)
+                                     Optional<std::string&> reasonIfUnsupported)
 {
     return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                           input.GetDataType(),
@@ -686,7 +684,7 @@
 
 bool IsConvertFp32ToFp16SupportedRef(const TensorInfo& input,
                                      const TensorInfo& output,
-                                     std::string* reasonIfUnsupported)
+                                     Optional<std::string&> reasonIfUnsupported)
 {
     return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                           input.GetDataType(),
@@ -703,7 +701,7 @@
 bool IsMeanSupportedRef(const TensorInfo& input,
                         const TensorInfo& output,
                         const MeanDescriptor& descriptor,
-                        std::string* reasonIfUnsupported)
+                        Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(output);
     ignore_unused(descriptor);
@@ -716,7 +714,7 @@
 bool IsPadSupportedRef(const TensorInfo& input,
                        const TensorInfo& output,
                        const PadDescriptor& descriptor,
-                       std::string* reasonIfUnsupported)
+                       Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(output);
     ignore_unused(descriptor);
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 25501fe..1d0edf6 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -175,12 +175,12 @@
 bool IsActivationSupportedRef(const TensorInfo& input,
                               const TensorInfo& output,
                               const ActivationDescriptor& descriptor,
-                              std::string* reasonIfUnsupported = nullptr);
+                              Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsAdditionSupportedRef(const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output,
-                            std::string* reasonIfUnsupported = nullptr);
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsBatchNormalizationSupportedRef(const TensorInfo& input,
                                       const TensorInfo& output,
@@ -189,130 +189,143 @@
                                       const TensorInfo& beta,
                                       const TensorInfo& gamma,
                                       const BatchNormalizationDescriptor& descriptor,
-                                      std::string* reasonIfUnsupported = nullptr);
+                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsConstantSupportedRef(const TensorInfo& output,
-                            std::string* reasonIfUnsupported = nullptr);
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsConvolution2dSupportedRef(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const Convolution2dDescriptor& descriptor,
                                  const TensorInfo& weights,
                                  const Optional<TensorInfo>& biases,
-                                 std::string* reasonIfUnsupported = nullptr);
+                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsDepthwiseConvolutionSupportedRef(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const DepthwiseConvolution2dDescriptor& descriptor,
                                         const TensorInfo& weights,
                                         const Optional<TensorInfo>& biases,
-                                        std::string* reasonIfUnsupported = nullptr);
+                                        Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsDivisionSupportedRef(const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output,
-                            std::string* reasonIfUnsupported = nullptr);
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsSubtractionSupportedRef(const TensorInfo& input0,
                                const TensorInfo& input1,
                                const TensorInfo& output,
-                               std::string* reasonIfUnsupported = nullptr);
+                               Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsFullyConnectedSupportedRef(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const TensorInfo& weights,
                                   const TensorInfo& biases,
                                   const FullyConnectedDescriptor& descriptor,
-                                  std::string* reasonIfUnsupported = nullptr);
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsInputSupportedRef(const TensorInfo& input,
-                         std::string* reasonIfUnsupported = nullptr);
+                         Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsL2NormalizationSupportedRef(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const L2NormalizationDescriptor& descriptor,
-                                   std::string* reasonIfUnsupported = nullptr);
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
-bool IsLstmSupportedRef(const TensorInfo& input, const TensorInfo& outputStateIn,
-                        const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
-                        const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
-                        const TensorInfo& output, const LstmDescriptor& descriptor,
-                        const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
-                        const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
-                        const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
-                        const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
-                        const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
-                        const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
-                        const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
-                        const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
-                        const TensorInfo* cellToOutputWeights, std::string* reasonIfUnsupported = nullptr);
+bool IsLstmSupportedRef(const TensorInfo& input,
+                        const TensorInfo& outputStateIn,
+                        const TensorInfo& cellStateIn,
+                        const TensorInfo& scratchBuffer,
+                        const TensorInfo& outputStateOut,
+                        const TensorInfo& cellStateOut,
+                        const TensorInfo& output,
+                        const LstmDescriptor& descriptor,
+                        const TensorInfo& inputToForgetWeights,
+                        const TensorInfo& inputToCellWeights,
+                        const TensorInfo& inputToOutputWeights,
+                        const TensorInfo& recurrentToForgetWeights,
+                        const TensorInfo& recurrentToCellWeights,
+                        const TensorInfo& recurrentToOutputWeights,
+                        const TensorInfo& forgetGateBias,
+                        const TensorInfo& cellBias,
+                        const TensorInfo& outputGateBias,
+                        const TensorInfo* inputToInputWeights,
+                        const TensorInfo* recurrentToInputWeights,
+                        const TensorInfo* cellToInputWeights,
+                        const TensorInfo* inputGateBias,
+                        const TensorInfo* projectionWeights,
+                        const TensorInfo* projectionBias,
+                        const TensorInfo* cellToForgetWeights,
+                        const TensorInfo* cellToOutputWeights,
+                        Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsMergerSupportedRef(const std::vector<const TensorInfo*> inputs,
                           const OriginsDescriptor& descriptor,
-                          std::string* reasonIfUnsupported = nullptr);
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsMultiplicationSupportedRef(const TensorInfo& input0,
                                   const TensorInfo& input1,
                                   const TensorInfo& output,
-                                  std::string* reasonIfUnsupported = nullptr);
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsNormalizationSupportedRef(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const NormalizationDescriptor& descriptor,
-                                 std::string* reasonIfUnsupported = nullptr);
+                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsOutputSupportedRef(const TensorInfo& output,
-                          std::string* reasonIfUnsupported = nullptr);
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsPermuteSupportedRef(const TensorInfo& input,
                            const TensorInfo& output,
                            const PermuteDescriptor& descriptor,
-                           std::string* reasonIfUnsupported = nullptr);
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsPooling2dSupportedRef(const TensorInfo& input,
                              const TensorInfo& output,
                              const Pooling2dDescriptor& descriptor,
-                             std::string* reasonIfUnsupported = nullptr);
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsResizeBilinearSupportedRef(const TensorInfo& input,
-                                  std::string* reasonIfUnsupported = nullptr);
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsSoftmaxSupportedRef(const TensorInfo& input,
                            const TensorInfo& output,
                            const SoftmaxDescriptor& descriptor,
-                           std::string* reasonIfUnsupported = nullptr);
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsSplitterSupportedRef(const TensorInfo& input,
                             const ViewsDescriptor& descriptor,
-                            std::string* reasonIfUnsupported = nullptr);
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsFakeQuantizationSupportedRef(const TensorInfo& input,
                                     const FakeQuantizationDescriptor& descriptor,
-                                    std::string* reasonIfUnsupported = nullptr);
+                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsReshapeSupportedRef(const TensorInfo& input,
-                           std::string* reasonIfUnsupported = nullptr);
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsFloorSupportedRef(const TensorInfo& input,
                          const TensorInfo& output,
-                         std::string* reasonIfUnsupported = nullptr);
+                         Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsConvertFp16ToFp32SupportedRef(const TensorInfo& input,
                                      const TensorInfo& output,
-                                     std::string* reasonIfUnsupported = nullptr);
+                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsConvertFp32ToFp16SupportedRef(const TensorInfo& input,
                                      const TensorInfo& output,
-                                     std::string* reasonIfUnsupported = nullptr);
+                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsMeanSupportedRef(const TensorInfo& input,
                         const TensorInfo& output,
                         const MeanDescriptor& descriptor,
-                        std::string* reasonIfUnsupported = nullptr);
+                        Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsPadSupportedRef(const TensorInfo& input,
                        const TensorInfo& output,
                        const PadDescriptor& descriptor,
-                       std::string* reasonIfUnsupported = nullptr);
+                       Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 }
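
Editor's note, not part of the patch: a caller-side sketch of the revised interface. IsReshapeSupportedRef is declared in the header above; CheckReshapeSupport and the include path inside the ArmNN source tree are assumptions for illustration. A caller that does not need a reason relies on the defaulted EmptyOptional(); a caller that does wraps a local std::string.

    #include <backends/reference/RefLayerSupport.hpp> // in-tree path, as used by LayerSupport.cpp
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <iostream>
    #include <string>

    // Hypothetical caller of the converted Ref support function.
    void CheckReshapeSupport(const armnn::TensorInfo& input)
    {
        // No reason requested: the defaulted EmptyOptional() is used.
        bool supported = armnn::IsReshapeSupportedRef(input);

        // Reason requested: wrap a local string so the callee can assign into it.
        std::string reason;
        supported = armnn::IsReshapeSupportedRef(input, armnn::Optional<std::string&>(reason));
        if (!supported)
        {
            std::cout << "Reshape not supported: " << reason << std::endl;
        }
    }
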