IVGCVSW-1927 Add Neon 8-bit FullyConnected support

Change-Id: Idf4cc7a9a7d3261b9eceb653b999257506cdae76
diff --git a/src/backends/NeonWorkloads/NeonFullyConnectedWorkload.hpp b/src/backends/NeonWorkloads/NeonFullyConnectedWorkload.hpp
new file mode 100644
index 0000000..11991f8
--- /dev/null
+++ b/src/backends/NeonWorkloads/NeonFullyConnectedWorkload.hpp
@@ -0,0 +1,45 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backends/NeonWorkloadUtils.hpp>
+
+#include "arm_compute/runtime/MemoryManagerOnDemand.h"
+#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+
+#include <memory>
+
+namespace armnn
+{
+
+arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo& input,
+                                                       const TensorInfo& output,
+                                                       const TensorInfo& weights,
+                                                       const TensorInfo& biases,
+                                                       const FullyConnectedDescriptor& descriptor);
+
+class NeonFullyConnectedWorkload : public BaseWorkload<FullyConnectedQueueDescriptor>
+{
+public:
+    NeonFullyConnectedWorkload(const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info,
+                               std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
+    virtual void Execute() const override;
+
+private:
+    // Mutable because Execute() is const while arm_compute::IFunction::run() is not.
+    mutable arm_compute::NEFullyConnectedLayer m_FullyConnectedLayer;
+
+    // Staged copies of the constant weights and biases passed to the ACL layer.
+    std::unique_ptr<arm_compute::Tensor> m_WeightsTensor;
+    std::unique_ptr<arm_compute::Tensor> m_BiasesTensor;
+
+    // Releases the staged tensors once the configured ACL layer no longer needs them.
+    void FreeUnusedTensors();
+};
+
+} //namespace armnn
+
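
Usage sketch (not part of this patch): how the validate function declared above
is typically consumed by a backend support check before a workload is created.
Everything here other than NeonFullyConnectedWorkloadValidate and the
arm_compute types (the helper name, parameter names, include path usage) is an
illustrative assumption, not code from this change.

    #include <backends/NeonWorkloads/NeonFullyConnectedWorkload.hpp>

    // Hypothetical helper: returns true when ACL accepts this FullyConnected
    // configuration on the Neon backend.
    bool IsNeonFullyConnectedSupported(const armnn::TensorInfo& input,
                                       const armnn::TensorInfo& output,
                                       const armnn::TensorInfo& weights,
                                       const armnn::TensorInfo& biases,
                                       const armnn::FullyConnectedDescriptor& descriptor)
    {
        const arm_compute::Status aclStatus =
            armnn::NeonFullyConnectedWorkloadValidate(input, output, weights, biases, descriptor);

        // arm_compute::Status carries an error code plus a human-readable
        // description of why a configuration was rejected.
        return aclStatus.error_code() == arm_compute::ErrorCode::OK;
    }

Execute() on the workload itself is expected to do little more than call run()
on the configured NEFullyConnectedLayer once the weights and biases have been
staged into the member tensors.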