Refactor: Don't include all ComputeLibrary function definitions everywhere.

Include only the function definition that each workload specifically needs.
Also, tighten the scope in which Compute Library functions are visible.

Knocks about 30 seconds off a 4m30s single-threaded compile of the Neon workloads.
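
The pattern behind the change, as a minimal sketch (names here are
illustrative, not taken from this patch): the header holds the layer only
through the abstract arm_compute::IFunction, so it needs just the cheap
IFunction.h and <memory> includes, while the one .cpp that constructs the
layer includes the single concrete function header.

    // SomeNeonWorkload.hpp -- cheap includes only.
    #include <arm_compute/runtime/IFunction.h>
    #include <memory>

    class SomeNeonWorkload
    {
        // Concrete ACL function type is hidden behind the interface.
        std::unique_ptr<arm_compute::IFunction> m_Layer;
    };

    // SomeNeonWorkload.cpp -- the only TU that pays for the full definition.
    #include <arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h>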

Change-Id: Idac438f3bc77ff978295fbc9505cb42447def145
diff --git a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.hpp b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.hpp
index 52e4db7..3619ea0 100644
--- a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.hpp
+++ b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.hpp
@@ -5,7 +5,12 @@
 
 #pragma once
 
-#include <neon/workloads/NeonWorkloadUtils.hpp>
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/IFunction.h>
+#include <arm_compute/runtime/Tensor.h>
+
+#include <memory>
 
 namespace armnn
 {
@@ -26,7 +31,7 @@
     virtual void Execute() const override;
 
 private:
-    mutable arm_compute::NEBatchNormalizationLayer m_Layer;
+    std::unique_ptr<arm_compute::IFunction> m_Layer;
 
     std::unique_ptr<arm_compute::Tensor> m_Mean;
     std::unique_ptr<arm_compute::Tensor> m_Variance;
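
The matching source-file change (not part of this hunk) constructs the
concrete layer and stores it through the IFunction pointer. A minimal
sketch, assuming ACL's usual configure() idiom; the exact argument list,
and the m_Beta/m_Gamma members, are assumptions, not taken from this patch:

    // NeonBatchNormalizationWorkload.cpp
    #include <arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h>

    // input/output are the ACL tensors behind the workload's tensor handles;
    // m_Beta/m_Gamma are assumed to be members alongside m_Mean/m_Variance.
    auto layer = std::make_unique<arm_compute::NEBatchNormalizationLayer>();
    layer->configure(&input, &output, m_Mean.get(), m_Variance.get(),
                     m_Beta.get(), m_Gamma.get(), m_Data.m_Parameters.m_Eps);
    // Erase the concrete type so the header never has to name it.
    m_Layer.reset(layer.release());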