IVGCVSW-1998 : replace Compute enum in LayerSupport free functions

!android-nn-driver:153490

Change-Id: I1c2a5f942e3a1c3626e093c90545ca27c64ba5e8
diff --git a/Android.mk b/Android.mk
index 25ed834..7493374 100644
--- a/Android.mk
+++ b/Android.mk
@@ -204,13 +204,14 @@
 
 LOCAL_STATIC_LIBRARIES := \
 	libneuralnetworks_common \
-	libarmnn \
 	libboost_log \
 	libboost_system \
 	libboost_unit_test_framework \
 	libboost_thread \
 	armnn-arm_compute
 
+LOCAL_WHOLE_STATIC_LIBRARIES := libarmnn
+
 LOCAL_SHARED_LIBRARIES := \
 	libbase \
 	libhidlbase \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 22eeedd..8182c22 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -284,20 +284,22 @@
         ${GATOR_ROOT}/annotate/streamline_annotate.c)
 endif()
 
-add_library_ex(armnn SHARED ${armnn_sources})
-
 # the backends under src/backends extend the list of
-# static libs armnn to link against
-list(APPEND armnnLibraries armnnUtils)
+# object libs armnn to include in the build
 include(src/backends/backends.cmake)
+foreach(lib ${armnnLibraries})
+    message("Adding object library dependency to armnn: ${lib}")
+    list(APPEND armnn_sources $<TARGET_OBJECTS:${lib}>)
+endforeach()
+
+add_library_ex(armnn SHARED ${armnn_sources})
 
 target_include_directories(armnn PRIVATE src)
 target_include_directories(armnn PRIVATE src/armnn)
 target_include_directories(armnn PRIVATE src/armnnUtils)
-foreach(lib ${armnnLibraries})
-    target_link_libraries(armnn ${lib})
-    message("Adding library dependency to armnn: ${lib}")
-endforeach()
+
+target_link_libraries(armnn armnnUtils)
+
 target_link_libraries(armnn ${CMAKE_DL_LIBS})
 
 install(TARGETS armnn DESTINATION ${CMAKE_INSTALL_PREFIX}/lib)
@@ -458,7 +460,7 @@
     endif()
 
     foreach(lib ${armnnUnitTestLibraries})
-        message("Adding library dependency to UnitTests: ${lib}")
+        message("Adding object library dependency to UnitTests: ${lib}")
         list(APPEND unittest_sources $<TARGET_OBJECTS:${lib}>)
     endforeach()
 
diff --git a/include/armnn/Exceptions.hpp b/include/armnn/Exceptions.hpp
index 89b6f2c..29d874c 100644
--- a/include/armnn/Exceptions.hpp
+++ b/include/armnn/Exceptions.hpp
@@ -48,6 +48,16 @@
 public:
     explicit Exception(const std::string& message);
 
+    // exception with the source-code location appended to the message
+    explicit Exception(const std::string& message,
+                       const CheckLocation& location);
+
+    // preserving previous exception context
+    // and adding local context information
+    explicit Exception(const Exception& other,
+                       const std::string& message,
+                       const CheckLocation& location);
+
     virtual const char* what() const noexcept override;
 
 private:
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index 31874fe..8af8240 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -12,21 +12,21 @@
 namespace armnn
 {
 
-bool IsActivationSupported(Compute compute,
+bool IsActivationSupported(const BackendId& backend,
                            const TensorInfo& input,
                            const TensorInfo& output,
                            const ActivationDescriptor& descriptor,
                            char* reasonIfUnsupported = nullptr,
                            size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsAdditionSupported(Compute compute,
+bool IsAdditionSupported(const BackendId& backend,
                          const TensorInfo& input0,
                          const TensorInfo& input1,
                          const TensorInfo& output,
                          char* reasonIfUnsupported = nullptr,
                          size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsBatchNormalizationSupported(Compute compute,
+bool IsBatchNormalizationSupported(const BackendId& backend,
                                    const TensorInfo& input,
                                    const TensorInfo& output,
                                    const TensorInfo& mean,
@@ -37,24 +37,24 @@
                                    char* reasonIfUnsupported = nullptr,
                                    size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsConstantSupported(Compute compute,
+bool IsConstantSupported(const BackendId& backend,
                          const TensorInfo& output,
                          char* reasonIfUnsupported = nullptr,
                          size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsConvertFp16ToFp32Supported(Compute compute,
+bool IsConvertFp16ToFp32Supported(const BackendId& backend,
                                   const TensorInfo& input,
                                   const TensorInfo& output,
                                   char* reasonIfUnsupported = nullptr,
                                   size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsConvertFp32ToFp16Supported(Compute compute,
+bool IsConvertFp32ToFp16Supported(const BackendId& backend,
                                   const TensorInfo& input,
                                   const TensorInfo& output,
                                   char* reasonIfUnsupported = nullptr,
                                   size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsConvolution2dSupported(Compute compute,
+bool IsConvolution2dSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const Convolution2dDescriptor& descriptor,
@@ -63,7 +63,7 @@
                               char* reasonIfUnsupported = nullptr,
                               size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsDepthwiseConvolutionSupported(Compute compute,
+bool IsDepthwiseConvolutionSupported(const BackendId& backend,
                                      const TensorInfo& input,
                                      const TensorInfo& output,
                                      const DepthwiseConvolution2dDescriptor& descriptor,
@@ -72,26 +72,26 @@
                                      char* reasonIfUnsupported = nullptr,
                                      size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsDivisionSupported(Compute compute,
+bool IsDivisionSupported(const BackendId& backend,
                          const TensorInfo& input0,
                          const TensorInfo& input1,
                          const TensorInfo& output,
                          char* reasonIfUnsupported = nullptr,
                          size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsSubtractionSupported(Compute compute,
+bool IsSubtractionSupported(const BackendId& backend,
                             const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output,
                             char* reasonIfUnsupported = nullptr,
                             size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsInputSupported(Compute compute,
+bool IsInputSupported(const BackendId& backend,
                       const TensorInfo& input,
                       char* reasonIfUnsupported = nullptr,
                       size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsFullyConnectedSupported(Compute compute,
+bool IsFullyConnectedSupported(const BackendId& backend,
                                const TensorInfo& input,
                                const TensorInfo& output,
                                const TensorInfo& weights,
@@ -100,14 +100,14 @@
                                char* reasonIfUnsupported = nullptr,
                                size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsL2NormalizationSupported(Compute compute,
+bool IsL2NormalizationSupported(const BackendId& backend,
                                 const TensorInfo& input,
                                 const TensorInfo& output,
                                 const L2NormalizationDescriptor& descriptor,
                                 char* reasonIfUnsupported = nullptr,
                                 size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsLstmSupported(Compute compute, const TensorInfo& input, const TensorInfo& outputStateIn,
+bool IsLstmSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& outputStateIn,
                      const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
                      const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
                      const TensorInfo& output, const LstmDescriptor& descriptor,
@@ -122,88 +122,88 @@
                      const TensorInfo* cellToOutputWeights, char* reasonIfUnsupported = nullptr,
                      size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsMergerSupported(Compute compute,
+bool IsMergerSupported(const BackendId& backend,
                        const std::vector<const TensorInfo*> inputs,
                        const OriginsDescriptor& descriptor,
                        char* reasonIfUnsupported = nullptr,
                        size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsMultiplicationSupported(Compute compute,
+bool IsMultiplicationSupported(const BackendId& backend,
                                const TensorInfo& input0,
                                const TensorInfo& input1,
                                const TensorInfo& output,
                                char* reasonIfUnsupported = nullptr,
                                size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsNormalizationSupported(Compute compute,
+bool IsNormalizationSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const NormalizationDescriptor& descriptor,
                               char* reasonIfUnsupported = nullptr,
                               size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsOutputSupported(Compute compute,
+bool IsOutputSupported(const BackendId& backend,
                        const TensorInfo& output,
                        char* reasonIfUnsupported = nullptr,
                        size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsPermuteSupported(Compute compute,
+bool IsPermuteSupported(const BackendId& backend,
                         const TensorInfo& input,
                         const TensorInfo& output,
                         const PermuteDescriptor& descriptor,
                         char* reasonIfUnsupported = nullptr,
                         size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsPooling2dSupported(Compute compute,
+bool IsPooling2dSupported(const BackendId& backend,
                           const TensorInfo& input,
                           const TensorInfo& output,
                           const Pooling2dDescriptor& descriptor,
                           char* reasonIfUnsupported = nullptr,
                           size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsResizeBilinearSupported(Compute compute,
+bool IsResizeBilinearSupported(const BackendId& backend,
                                const TensorInfo& input,
                                char* reasonIfUnsupported = nullptr,
                                size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsSoftmaxSupported(Compute compute,
+bool IsSoftmaxSupported(const BackendId& backend,
                         const TensorInfo& input,
                         const TensorInfo& output,
                         const SoftmaxDescriptor& descriptor,
                         char* reasonIfUnsupported = nullptr,
                         size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsSplitterSupported(Compute compute,
+bool IsSplitterSupported(const BackendId& backend,
                          const TensorInfo& input,
                          const ViewsDescriptor& descriptor,
                          char* reasonIfUnsupported = nullptr,
                          size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsFakeQuantizationSupported(Compute compute,
+bool IsFakeQuantizationSupported(const BackendId& backend,
                                  const TensorInfo& input,
                                  const FakeQuantizationDescriptor& descriptor,
                                  char* reasonIfUnsupported = nullptr,
                                  size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsReshapeSupported(Compute compute,
+bool IsReshapeSupported(const BackendId& backend,
                         const TensorInfo& input,
                         char* reasonIfUnsupported = nullptr,
                         size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsFloorSupported(Compute compute,
+bool IsFloorSupported(const BackendId& backend,
                       const TensorInfo& input,
                       const TensorInfo& output,
                       char* reasonIfUnsupported = nullptr,
                       size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsMeanSupported(Compute compute,
+bool IsMeanSupported(const BackendId& backend,
                      const TensorInfo& input,
                      const TensorInfo& output,
                      const MeanDescriptor& descriptor,
                      char* reasonIfUnsupported = nullptr,
                      size_t reasonIfUnsupportedMaxLength = 1024);
 
-bool IsPadSupported(Compute compute,
+bool IsPadSupported(const BackendId& backend,
                      const TensorInfo& input,
                      const TensorInfo& output,
                      const PadDescriptor& descriptor,
diff --git a/src/armnn/Exceptions.cpp b/src/armnn/Exceptions.cpp
index 1c4ebb6..52b28e9 100644
--- a/src/armnn/Exceptions.cpp
+++ b/src/armnn/Exceptions.cpp
@@ -2,7 +2,7 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#include "armnn/Exceptions.hpp"
+#include <armnn/Exceptions.hpp>
 
 #include <string>
 
@@ -10,10 +10,25 @@
 {
 
 Exception::Exception(const std::string& message)
-: m_Message(message)
+: m_Message{message}
 {
 }
 
+Exception::Exception(const std::string& message,
+                     const CheckLocation& location)
+: m_Message{message}
+{
+    m_Message += location.AsString();
+}
+
+Exception::Exception(const Exception& other,
+                     const std::string& message,
+                     const CheckLocation& location)
+: m_Message{other.m_Message}
+{
+    m_Message += "\n" + message + location.AsString();
+}
+
 const char* Exception::what() const noexcept
 {
     return m_Message.c_str();
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 8bad89f..2494c74 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -5,18 +5,19 @@
 #include <armnn/LayerSupport.hpp>
 #include <armnn/Optional.hpp>
 
-#include <backends/reference/RefLayerSupport.hpp>
-#include <backends/neon/NeonLayerSupport.hpp>
-#include <backends/cl/ClLayerSupport.hpp>
+#include <backends/BackendRegistry.hpp>
 
 #include <boost/assert.hpp>
 
 #include <cstring>
 #include <algorithm>
+#include <unordered_map>
 
 namespace armnn
 {
 
+namespace
+{
 /// Helper function to copy a full string to a truncated version.
 void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength)
 {
@@ -29,27 +30,39 @@
     }
 }
 
+IBackend& GetBackend(const BackendId& id)
+{
+    static std::unordered_map<BackendId, IBackendUniquePtr> cachedBackends;
+    auto it = cachedBackends.find(id);
+    if (it == cachedBackends.end())
+    {
+        auto factoryFunc = BackendRegistry::Instance().GetFactory(id);
+        auto emplaceResult =
+            cachedBackends.emplace(
+                std::make_pair(id, factoryFunc())
+            );
+        BOOST_ASSERT(emplaceResult.second);
+        it = emplaceResult.first;
+    }
+
+    return *(it->second.get());
+}
+
+}
+
 // Helper macro to avoid code duplication.
-// Forwards function func to funcRef, funcNeon or funcCl, depending on the value of compute.
+// Forwards function func to the LayerSupport object of the backend identified by backend.
-#define FORWARD_LAYER_SUPPORT_FUNC(compute, func, ...) \
+#define FORWARD_LAYER_SUPPORT_FUNC(backend, func, ...) \
     std::string reasonIfUnsupportedFull; \
     bool isSupported; \
-    switch(compute) \
-    { \
-        case Compute::CpuRef: \
-            isSupported = func##Ref(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
-            break; \
-        case Compute::CpuAcc: \
-            isSupported = func##Neon(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
-            break; \
-        case Compute::GpuAcc: \
-            isSupported = func##Cl(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
-            break; \
-        default: \
-            isSupported = func##Ref(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
-            break; \
+    try { \
+        auto const& layerSupportObject = GetBackend(backend).GetLayerSupport(); \
+        isSupported = layerSupportObject.func(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
+        CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
+    } catch (const InvalidArgumentException& e) { \
+        /* re-throwing with more context information */ \
+        throw InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
     } \
-    CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
     return isSupported;
 
 bool CheckTensorDataTypesEqual(const TensorInfo& input0, const TensorInfo& input1)
@@ -57,17 +70,17 @@
     return input0.GetDataType() == input1.GetDataType();
 }
 
-bool IsActivationSupported(Compute compute,
+bool IsActivationSupported(const BackendId& backend,
                            const TensorInfo& input,
                            const TensorInfo& output,
                            const ActivationDescriptor& descriptor,
                            char* reasonIfUnsupported,
                            size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsActivationSupported, input, output, descriptor);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsActivationSupported, input, output, descriptor);
 }
 
-bool IsAdditionSupported(Compute compute,
+bool IsAdditionSupported(const BackendId& backend,
                          const TensorInfo& input0,
                          const TensorInfo& input1,
                          const TensorInfo& output,
@@ -79,10 +92,10 @@
         return false;
     }
 
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsAdditionSupported, input0, input1, output);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsAdditionSupported, input0, input1, output);
 }
 
-bool IsBatchNormalizationSupported(Compute compute,
+bool IsBatchNormalizationSupported(const BackendId& backend,
                                    const TensorInfo& input,
                                    const TensorInfo& output,
                                    const TensorInfo& mean,
@@ -93,7 +106,7 @@
                                    char* reasonIfUnsupported,
                                    size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute,
+    FORWARD_LAYER_SUPPORT_FUNC(backend,
                                IsBatchNormalizationSupported,
                                input,
                                output,
@@ -104,33 +117,33 @@
                                descriptor);
 }
 
-bool IsConstantSupported(Compute compute,
+bool IsConstantSupported(const BackendId& backend,
                          const TensorInfo& output,
                          char* reasonIfUnsupported,
                          size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsConstantSupported, output);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConstantSupported, output);
 }
 
-bool IsConvertFp16ToFp32Supported(Compute compute,
+bool IsConvertFp16ToFp32Supported(const BackendId& backend,
                                   const TensorInfo& input,
                                   const TensorInfo& output,
                                   char* reasonIfUnsupported,
                                   size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsConvertFp16ToFp32Supported, input, output);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp16ToFp32Supported, input, output);
 }
 
-bool IsConvertFp32ToFp16Supported(Compute compute,
+bool IsConvertFp32ToFp16Supported(const BackendId& backend,
                                   const TensorInfo& input,
                                   const TensorInfo& output,
                                   char* reasonIfUnsupported,
                                   size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsConvertFp32ToFp16Supported, input, output);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp32ToFp16Supported, input, output);
 }
 
-bool IsConvolution2dSupported(Compute compute,
+bool IsConvolution2dSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const Convolution2dDescriptor& descriptor,
@@ -139,30 +152,30 @@
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsConvolution2dSupported, input, output, descriptor, weights, biases);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvolution2dSupported, input, output, descriptor, weights, biases);
 }
 
-bool IsDivisionSupported(Compute compute,
+bool IsDivisionSupported(const BackendId& backend,
                          const TensorInfo& input0,
                          const TensorInfo& input1,
                          const TensorInfo& output,
                          char* reasonIfUnsupported,
                          size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsDivisionSupported, input0, input1, output);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsDivisionSupported, input0, input1, output);
 }
 
-bool IsSubtractionSupported(Compute compute,
+bool IsSubtractionSupported(const BackendId& backend,
                             const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output,
                             char* reasonIfUnsupported,
                             size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsSubtractionSupported, input0, input1, output);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output);
 }
 
-bool IsDepthwiseConvolutionSupported(Compute compute,
+bool IsDepthwiseConvolutionSupported(const BackendId& backend,
                                      const TensorInfo& input,
                                      const TensorInfo& output,
                                      const DepthwiseConvolution2dDescriptor& descriptor,
@@ -171,18 +184,18 @@
                                      char* reasonIfUnsupported,
                                      size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
 }
 
-bool IsInputSupported(Compute compute,
+bool IsInputSupported(const BackendId& backend,
                       const TensorInfo& input,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsInputSupported, input);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input);
 }
 
-bool IsFullyConnectedSupported(Compute compute,
+bool IsFullyConnectedSupported(const BackendId& backend,
                                const TensorInfo& input,
                                const TensorInfo& output,
                                const TensorInfo& weights,
@@ -191,20 +204,20 @@
                                char* reasonIfUnsupported,
                                size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
 }
 
-bool IsL2NormalizationSupported(Compute compute,
+bool IsL2NormalizationSupported(const BackendId& backend,
                                 const TensorInfo& input,
                                 const TensorInfo& output,
                                 const L2NormalizationDescriptor& descriptor,
                                 char* reasonIfUnsupported,
                                 size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsL2NormalizationSupported, input, output, descriptor);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsL2NormalizationSupported, input, output, descriptor);
 }
 
-bool IsLstmSupported(Compute compute, const TensorInfo& input, const TensorInfo& outputStateIn,
+bool IsLstmSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& outputStateIn,
                      const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
                      const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
                      const TensorInfo& output, const LstmDescriptor& descriptor,
@@ -220,7 +233,7 @@
                      size_t reasonIfUnsupportedMaxLength)
 
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsLstmSupported, input, outputStateIn, cellStateIn,
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsLstmSupported, input, outputStateIn, cellStateIn,
                                scratchBuffer, outputStateOut, cellStateOut,
                                output, descriptor, inputToForgetWeights, inputToCellWeights,
                                inputToOutputWeights, recurrentToForgetWeights,
@@ -230,109 +243,109 @@
                                cellToInputWeights, inputGateBias, projectionWeights,
                                projectionBias, cellToForgetWeights, cellToOutputWeights);
 }
-bool IsMergerSupported(Compute compute,
+bool IsMergerSupported(const BackendId& backend,
                        std::vector<const TensorInfo*> inputs,
                        const OriginsDescriptor& descriptor,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
 {
     BOOST_ASSERT(inputs.size() > 0);
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsMergerSupported, inputs, descriptor);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, descriptor);
 }
 
-bool IsMultiplicationSupported(Compute compute,
+bool IsMultiplicationSupported(const BackendId& backend,
                                const TensorInfo& input0,
                                const TensorInfo& input1,
                                const TensorInfo& output,
                                char* reasonIfUnsupported,
                                size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsMultiplicationSupported, input0, input1, output);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMultiplicationSupported, input0, input1, output);
 }
 
-bool IsNormalizationSupported(Compute compute,
+bool IsNormalizationSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const NormalizationDescriptor& descriptor,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsNormalizationSupported, input, output, descriptor);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsNormalizationSupported, input, output, descriptor);
 }
 
-bool IsOutputSupported(Compute compute,
+bool IsOutputSupported(const BackendId& backend,
                        const TensorInfo& output,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsOutputSupported, output);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsOutputSupported, output);
 }
 
-bool IsPermuteSupported(Compute compute,
+bool IsPermuteSupported(const BackendId& backend,
                         const TensorInfo& input,
                         const TensorInfo& output,
                         const PermuteDescriptor& descriptor,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsPermuteSupported, input, output, descriptor);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPermuteSupported, input, output, descriptor);
 }
 
-bool IsPooling2dSupported(Compute compute,
+bool IsPooling2dSupported(const BackendId& backend,
                           const TensorInfo& input,
                           const TensorInfo& output,
                           const Pooling2dDescriptor& descriptor,
                           char* reasonIfUnsupported,
                           size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsPooling2dSupported, input, output, descriptor);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPooling2dSupported, input, output, descriptor);
 }
 
-bool IsResizeBilinearSupported(Compute compute,
+bool IsResizeBilinearSupported(const BackendId& backend,
                                const TensorInfo& input,
                                char* reasonIfUnsupported,
                                size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsResizeBilinearSupported, input);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeBilinearSupported, input);
 }
 
-bool IsSoftmaxSupported(Compute compute,
+bool IsSoftmaxSupported(const BackendId& backend,
                         const TensorInfo& input,
                         const TensorInfo& output,
                         const SoftmaxDescriptor& descriptor,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsSoftmaxSupported, input, output, descriptor);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSoftmaxSupported, input, output, descriptor);
 }
 
-bool IsSplitterSupported(Compute compute,
+bool IsSplitterSupported(const BackendId& backend,
                          const TensorInfo& input,
                          const ViewsDescriptor& descriptor,
                          char* reasonIfUnsupported,
                          size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsSplitterSupported, input, descriptor);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, descriptor);
 }
 
-bool IsFakeQuantizationSupported(Compute compute,
+bool IsFakeQuantizationSupported(const BackendId& backend,
                                  const TensorInfo& input,
                                  const FakeQuantizationDescriptor& descriptor,
                                  char* reasonIfUnsupported,
                                  size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsFakeQuantizationSupported, input, descriptor);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsFakeQuantizationSupported, input, descriptor);
 }
 
-bool IsReshapeSupported(Compute compute,
+bool IsReshapeSupported(const BackendId& backend,
                         const TensorInfo& input,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsReshapeSupported, input);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input);
 }
 
-bool IsFloorSupported(Compute compute,
+bool IsFloorSupported(const BackendId& backend,
                       const TensorInfo& input,
                       const TensorInfo& output,
                       char* reasonIfUnsupported,
@@ -344,20 +357,20 @@
         return false;
     }
 
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsFloorSupported, input, output);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsFloorSupported, input, output);
 }
 
-bool IsMeanSupported(Compute compute,
+bool IsMeanSupported(const BackendId& backend,
                      const TensorInfo& input,
                      const TensorInfo& output,
                      const MeanDescriptor& descriptor,
                      char* reasonIfUnsupported,
                      size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsMeanSupported, input, output, descriptor);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMeanSupported, input, output, descriptor);
 }
 
-bool IsPadSupported(Compute compute,
+bool IsPadSupported(const BackendId& backend,
                     const TensorInfo& input,
                     const TensorInfo& output,
                     const PadDescriptor& descriptor,
@@ -365,7 +378,7 @@
                     size_t reasonIfUnsupportedMaxLength)
 {
 
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsPadSupported, input, output, descriptor);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
 }
 
 }
diff --git a/src/backends/BackendRegistry.cpp b/src/backends/BackendRegistry.cpp
index a5e9f0e..1360168 100644
--- a/src/backends/BackendRegistry.cpp
+++ b/src/backends/BackendRegistry.cpp
@@ -19,7 +19,8 @@
 {
     if (m_BackendFactories.count(id) > 0)
     {
-        throw InvalidArgumentException(std::string(id) + " already registered as backend");
+        throw InvalidArgumentException(std::string(id) + " already registered as backend",
+                                       CHECK_LOCATION());
     }
 
     m_BackendFactories[id] = factory;
@@ -30,7 +31,8 @@
     auto it = m_BackendFactories.find(id);
     if (it == m_BackendFactories.end())
     {
-        throw InvalidArgumentException(std::string(id) + " has no backend factory registered");
+        throw InvalidArgumentException(std::string(id) + " has no backend factory registered",
+                                       CHECK_LOCATION());
     }
 
     return it->second;
diff --git a/src/backends/CMakeLists.txt b/src/backends/CMakeLists.txt
index 0bc6888..3079447 100644
--- a/src/backends/CMakeLists.txt
+++ b/src/backends/CMakeLists.txt
@@ -30,7 +30,7 @@
     WorkloadUtils.hpp
 )
 
-add_library(armnnBackendsCommon STATIC ${armnnBackendsCommon_sources})
+add_library(armnnBackendsCommon OBJECT ${armnnBackendsCommon_sources})
 target_include_directories(armnnBackendsCommon PRIVATE ${PROJECT_SOURCE_DIR}/src)
 target_include_directories(armnnBackendsCommon PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
 target_include_directories(armnnBackendsCommon PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/ILayerSupport.cpp b/src/backends/ILayerSupport.cpp
index beefa87..34168c5 100644
--- a/src/backends/ILayerSupport.cpp
+++ b/src/backends/ILayerSupport.cpp
@@ -4,6 +4,7 @@
 //
 
 #include <armnn/ILayerSupport.hpp>
+#include <armnn/Exceptions.hpp>
 
 namespace armnn
 {
diff --git a/src/backends/README.md b/src/backends/README.md
index 09b9e81..670d6cf 100644
--- a/src/backends/README.md
+++ b/src/backends/README.md
@@ -13,7 +13,7 @@
 
 The ```backend.cmake``` has two main purposes:
 
-1. It makes sure the artifact (typically a static library) is linked into the ArmNN shared library.
+1. It makes sure the artifact (a CMake OBJECT library) is linked into the ArmNN shared library.
 2. It makes sure that the subdirectory where backend sources reside gets included in the build.
 
 To achieve this there are two requirements for the ```backend.cmake``` file
@@ -28,7 +28,7 @@
 add_subdirectory(${PROJECT_SOURCE_DIR}/src/backends/reference)
 
 #
-# Add the static libraries built by the reference backend to the
+# Add the CMake OBJECT libraries built by the reference backend to the
 # list of libraries linked against the ArmNN shared library.
 #
 list(APPEND armnnLibraries armnnRefBackend armnnRefBackendWorkloads)
diff --git a/src/backends/aclCommon/CMakeLists.txt b/src/backends/aclCommon/CMakeLists.txt
index d99b90b..2bfd024 100644
--- a/src/backends/aclCommon/CMakeLists.txt
+++ b/src/backends/aclCommon/CMakeLists.txt
@@ -25,7 +25,7 @@
 
 add_subdirectory(test)
 
-add_library(armnnAclCommon STATIC ${armnnAclCommon_sources})
+add_library(armnnAclCommon OBJECT ${armnnAclCommon_sources})
 target_include_directories(armnnAclCommon PRIVATE ${PROJECT_SOURCE_DIR}/src)
 target_include_directories(armnnAclCommon PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
 target_include_directories(armnnAclCommon PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/backends.cmake b/src/backends/backends.cmake
index f6f69bd..57f5a00 100644
--- a/src/backends/backends.cmake
+++ b/src/backends/backends.cmake
@@ -11,8 +11,14 @@
 FILE(GLOB commonIncludes ${PROJECT_SOURCE_DIR}/src/backends/*/common.cmake)
 FILE(GLOB backendIncludes ${PROJECT_SOURCE_DIR}/src/backends/*/backend.cmake)
 
-# prefer to include common code first so backends can depend on them
-foreach(includeFile ${commonIncludes} ${backendIncludes})
+# prefer to include common code first
+foreach(includeFile ${commonIncludes})
+    message("Including backend common library into the build: ${includeFile}")
+    include(${includeFile})
+endforeach()
+
+# now backends can depend on common code included first
+foreach(includeFile ${backendIncludes})
     message("Including backend into the build: ${includeFile}")
     include(${includeFile})
 endforeach()
diff --git a/src/backends/cl/CMakeLists.txt b/src/backends/cl/CMakeLists.txt
index 2f32081..5704e0e 100644
--- a/src/backends/cl/CMakeLists.txt
+++ b/src/backends/cl/CMakeLists.txt
@@ -24,7 +24,7 @@
     add_subdirectory(test)
 endif()
 
-add_library(armnnClBackend STATIC ${armnnClBackend_sources})
+add_library(armnnClBackend OBJECT ${armnnClBackend_sources})
 target_include_directories(armnnClBackend PRIVATE ${PROJECT_SOURCE_DIR}/src)
 target_include_directories(armnnClBackend PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
 target_include_directories(armnnClBackend PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index 95acf00..29d1b3a 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -15,10 +15,9 @@
 
 namespace
 {
-static const BackendId s_Id{"GpuAcc"};
 
 static BackendRegistry::Helper g_RegisterHelper{
-    s_Id,
+    ClBackend::GetIdStatic(),
     []()
     {
         return IBackendUniquePtr(new ClBackend, &ClBackend::Destroy);
@@ -27,8 +26,9 @@
 
 }
 
-const BackendId& ClBackend::GetId() const
+const BackendId& ClBackend::GetIdStatic()
 {
+    static const BackendId s_Id{"GpuAcc"};
     return s_Id;
 }
 
diff --git a/src/backends/cl/ClBackend.hpp b/src/backends/cl/ClBackend.hpp
index b927db4..1a99b76 100644
--- a/src/backends/cl/ClBackend.hpp
+++ b/src/backends/cl/ClBackend.hpp
@@ -16,7 +16,8 @@
     ClBackend()  = default;
     ~ClBackend() = default;
 
-    const BackendId& GetId() const override;
+    static const BackendId& GetIdStatic();
+    const BackendId& GetId() const override { return GetIdStatic(); }
 
     const ILayerSupport& GetLayerSupport() const override;
 
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 9088da8..7c66348 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -5,8 +5,10 @@
 
 #include "ClLayerSupport.hpp"
 
-#include "InternalTypes.hpp"
-#include "LayerSupportCommon.hpp"
+#include <InternalTypes.hpp>
+#include <LayerSupportCommon.hpp>
+
+#include <armnn/Descriptors.hpp>
 
 #include <boost/core/ignore_unused.hpp>
 
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 75e90e0..2d57d10 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -4,7 +4,7 @@
 //
 #pragma once
 
-#include <armnn/ArmNN.hpp>
+#include <armnn/ILayerSupport.hpp>
 
 namespace armnn
 {
diff --git a/src/backends/cl/test/CMakeLists.txt b/src/backends/cl/test/CMakeLists.txt
index 4936a78..262e23a 100644
--- a/src/backends/cl/test/CMakeLists.txt
+++ b/src/backends/cl/test/CMakeLists.txt
@@ -15,4 +15,4 @@
 add_library(armnnClBackendUnitTests OBJECT ${armnnClBackendUnitTests_sources})
 target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src)
 target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
-target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
\ No newline at end of file
+target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index 5bd2172..59a45fa 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -58,7 +58,7 @@
     ClWorkloadUtils.hpp
 )
 
-add_library(armnnClBackendWorkloads STATIC ${armnnClBackendWorkloads_sources})
+add_library(armnnClBackendWorkloads OBJECT ${armnnClBackendWorkloads_sources})
 target_include_directories(armnnClBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src)
 target_include_directories(armnnClBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
 target_include_directories(armnnClBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/neon/CMakeLists.txt b/src/backends/neon/CMakeLists.txt
index 152955a..c44dcc1 100644
--- a/src/backends/neon/CMakeLists.txt
+++ b/src/backends/neon/CMakeLists.txt
@@ -29,7 +29,7 @@
     )
 endif()
 
-add_library(armnnNeonBackend STATIC ${armnnNeonBackend_sources})
+add_library(armnnNeonBackend OBJECT ${armnnNeonBackend_sources})
 target_include_directories(armnnNeonBackend PRIVATE ${PROJECT_SOURCE_DIR}/src)
 target_include_directories(armnnNeonBackend PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
 target_include_directories(armnnNeonBackend PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index 2e235b6..3c12f77 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -16,10 +16,8 @@
 namespace
 {
 
-static const BackendId s_Id{"CpuAcc"};
-
 static BackendRegistry::Helper g_RegisterHelper{
-    s_Id,
+    NeonBackend::GetIdStatic(),
     []()
     {
         return IBackendUniquePtr(new NeonBackend, &NeonBackend::Destroy);
@@ -28,8 +26,9 @@
 
 }
 
-const BackendId& NeonBackend::GetId() const
+const BackendId& NeonBackend::GetIdStatic()
 {
+    static const BackendId s_Id{"CpuAcc"};
     return s_Id;
 }
 
diff --git a/src/backends/neon/NeonBackend.hpp b/src/backends/neon/NeonBackend.hpp
index fa2cad1..c7f7f6e 100644
--- a/src/backends/neon/NeonBackend.hpp
+++ b/src/backends/neon/NeonBackend.hpp
@@ -16,7 +16,8 @@
     NeonBackend()  = default;
     ~NeonBackend() = default;
 
-    const BackendId& GetId() const override;
+    static const BackendId& GetIdStatic();
+    const BackendId& GetId() const override { return GetIdStatic(); }
 
     const ILayerSupport& GetLayerSupport() const override;
 
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 91be981..1223ba8 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -4,10 +4,7 @@
 //
 #pragma once
 
-#include <armnn/DescriptorsFwd.hpp>
-#include <armnn/Optional.hpp>
-#include <armnn/Types.hpp>
-#include <armnn/Tensor.hpp>
+#include <armnn/ILayerSupport.hpp>
 
 namespace armnn
 {
diff --git a/src/backends/neon/test/CMakeLists.txt b/src/backends/neon/test/CMakeLists.txt
index 4a3380c..384a5e1 100644
--- a/src/backends/neon/test/CMakeLists.txt
+++ b/src/backends/neon/test/CMakeLists.txt
@@ -14,4 +14,4 @@
 add_library(armnnNeonBackendUnitTests OBJECT ${armnnNeonBackendUnitTests_sources})
 target_include_directories(armnnNeonBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src)
 target_include_directories(armnnNeonBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
-target_include_directories(armnnNeonBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
\ No newline at end of file
+target_include_directories(armnnNeonBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 0b0b9ed..fddbcb5 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -61,7 +61,7 @@
     NeonWorkloadUtils.hpp
 )
 
-add_library(armnnNeonBackendWorkloads STATIC ${armnnNeonBackendWorkloads_sources})
+add_library(armnnNeonBackendWorkloads OBJECT ${armnnNeonBackendWorkloads_sources})
 target_include_directories(armnnNeonBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src)
 target_include_directories(armnnNeonBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
 target_include_directories(armnnNeonBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/reference/CMakeLists.txt b/src/backends/reference/CMakeLists.txt
index 5aa3fc2..05ef7d5 100644
--- a/src/backends/reference/CMakeLists.txt
+++ b/src/backends/reference/CMakeLists.txt
@@ -12,7 +12,7 @@
     RefWorkloadFactory.hpp
 )
 
-add_library(armnnRefBackend STATIC ${armnnRefBackend_sources})
+add_library(armnnRefBackend OBJECT ${armnnRefBackend_sources})
 target_include_directories(armnnRefBackend PRIVATE ${PROJECT_SOURCE_DIR}/src)
 target_include_directories(armnnRefBackend PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
 target_include_directories(armnnRefBackend PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/reference/RefBackend.cpp b/src/backends/reference/RefBackend.cpp
index ef52a5e..1f08d82 100644
--- a/src/backends/reference/RefBackend.cpp
+++ b/src/backends/reference/RefBackend.cpp
@@ -15,10 +15,9 @@
 
 namespace
 {
-const BackendId s_Id{"CpuRef"};
 
 static BackendRegistry::Helper s_RegisterHelper{
-    s_Id,
+    RefBackend::GetIdStatic(),
     []()
     {
         return IBackendUniquePtr(new RefBackend, &RefBackend::Destroy);
@@ -27,8 +26,9 @@
 
 }
 
-const BackendId& RefBackend::GetId() const
+const BackendId& RefBackend::GetIdStatic()
 {
+    static const BackendId s_Id{"CpuRef"};
     return s_Id;
 }
 
diff --git a/src/backends/reference/RefBackend.hpp b/src/backends/reference/RefBackend.hpp
index dcc9741..c206dbd 100644
--- a/src/backends/reference/RefBackend.hpp
+++ b/src/backends/reference/RefBackend.hpp
@@ -16,7 +16,8 @@
     RefBackend()  = default;
     ~RefBackend() = default;
 
-    const BackendId& GetId() const override;
+    static const BackendId& GetIdStatic();
+    const BackendId& GetId() const override { return GetIdStatic(); }
 
     const ILayerSupport& GetLayerSupport() const override;
 
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 2ee942c..3a250a6 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -3,8 +3,10 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include "LayerSupportCommon.hpp"
 #include "RefLayerSupport.hpp"
+
+#include <LayerSupportCommon.hpp>
+
 #include <armnn/Descriptors.hpp>
 #include <armnn/Types.hpp>
 #include <armnn/Tensor.hpp>
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 1d0edf6..40bca7f 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -4,10 +4,7 @@
 //
 #pragma once
 
-#include <armnn/DescriptorsFwd.hpp>
-#include <armnn/Types.hpp>
-#include <armnn/Tensor.hpp>
-#include <layers/LstmLayer.hpp>
+#include <armnn/ILayerSupport.hpp>
 
 namespace armnn
 {
diff --git a/src/backends/reference/test/CMakeLists.txt b/src/backends/reference/test/CMakeLists.txt
index 511d747..deee364 100644
--- a/src/backends/reference/test/CMakeLists.txt
+++ b/src/backends/reference/test/CMakeLists.txt
@@ -12,4 +12,4 @@
 add_library(armnnRefBackendUnitTests OBJECT ${armnnRefBackendUnitTests_sources})
 target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src)
 target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
-target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
\ No newline at end of file
+target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 5a756e4..be71a85 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -102,7 +102,7 @@
     RefMeanUint8Workload.hpp
 )
 
-add_library(armnnRefBackendWorkloads STATIC ${armnnRefBackendWorkloads_sources})
+add_library(armnnRefBackendWorkloads OBJECT ${armnnRefBackendWorkloads_sources})
 target_include_directories(armnnRefBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src)
 target_include_directories(armnnRefBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
 target_include_directories(armnnRefBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)