IVGCVSW-4390 Refactor QUANTIZE to make use of Decoder/Encoder types

 * Add no-ops for CL/NEON Uint8
 * Refactor Quantize workload to Decoder/Encoder types

Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: I80b09de528299b925e2ac38acd9a5019b8d3e4ac
diff --git a/src/backends/reference/workloads/RefQuantizeWorkload.hpp b/src/backends/reference/workloads/RefQuantizeWorkload.hpp
index 6a43b84..9ae1076 100644
--- a/src/backends/reference/workloads/RefQuantizeWorkload.hpp
+++ b/src/backends/reference/workloads/RefQuantizeWorkload.hpp
@@ -7,6 +7,8 @@
 
 #include <backendsCommon/Workload.hpp>
 #include <backendsCommon/WorkloadData.hpp>
+#include "Decoders.hpp"
+#include "Encoders.hpp"
 
 namespace armnn {
 
@@ -14,13 +16,15 @@
 {
 public:
     RefQuantizeWorkload(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo &info);
+    void PostAllocationConfigure() override;
     void Execute() const override;
 
 private:
+
+    std::unique_ptr<Decoder<float>> m_InputDecoder;
+    std::unique_ptr<Encoder<float>> m_OutputEncoder;
+
     size_t m_NumElements;
-    armnn::DataType m_TargetType;
-    float m_Scale;
-    int m_Offset;
 };
 
 } //namespace armnn
\ No newline at end of file
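
Note: the matching RefQuantizeWorkload.cpp hunk is not included in this excerpt. Below is a rough sketch of how the new m_InputDecoder/m_OutputEncoder members could be wired up in PostAllocationConfigure() and Execute(). It assumes the reference backend's MakeDecoder<float>/MakeEncoder<float> helpers, GetTensorInfo() from RefWorkloadUtils, ITensorHandle::Map(), and the iterator-style Get()/Set()/operator++ interface of Decoder<float>/Encoder<float>; it is an illustration of the pattern, not the code from this change.

    void RefQuantizeWorkload::PostAllocationConfigure()
    {
        // Sketch (assumed helpers): build a float decoder over the float input and a
        // float encoder over the quantized output. The quantized encoder applies the
        // output tensor's scale/offset inside Set(), which is what removes the need
        // for the old m_TargetType/m_Scale/m_Offset members.
        m_InputDecoder  = MakeDecoder<float>(GetTensorInfo(m_Data.m_Inputs[0]),
                                             m_Data.m_Inputs[0]->Map());
        m_OutputEncoder = MakeEncoder<float>(GetTensorInfo(m_Data.m_Outputs[0]),
                                             m_Data.m_Outputs[0]->Map());
    }

    void RefQuantizeWorkload::Execute() const
    {
        // Element-wise copy through the decoder/encoder pair: the decoder yields
        // floats, the encoder re-quantizes them to the output data type, so no
        // per-target-type switch is required in the workload itself.
        for (size_t i = 0; i < m_NumElements; ++i)
        {
            m_OutputEncoder->Set(m_InputDecoder->Get());
            ++(*m_InputDecoder);
            ++(*m_OutputEncoder);
        }
    }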