Add Quantize Support to TOSA Ref Backend

* Add a one-to-many TOSA mapping for Quantize
* Add tests

* Resolves IVGCVSW-7175


Signed-off-by: John Mcloughlin <john.mcloughlin@arm.com>
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ia0852fefb618b4a29c2601b9de8b6b2731229801
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index ed95bcf..264381d 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -48,6 +48,7 @@
     PreluEndToEndTestImpl.hpp
     QLstmEndToEndTestImpl.cpp
     QLstmEndToEndTestImpl.hpp
+    QuantizationEndToEndTestImpl.hpp
     QuantizedLstmEndToEndTestImpl.cpp
     QuantizedLstmEndToEndTestImpl.hpp
     RankEndToEndTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
index 82fceb8..a3c3a82 100644
--- a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019,2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019,2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
diff --git a/src/backends/backendsCommon/test/QuantizationEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/QuantizationEndToEndTestImpl.hpp
new file mode 100644
index 0000000..f5c2eea
--- /dev/null
+++ b/src/backends/backendsCommon/test/QuantizationEndToEndTestImpl.hpp
@@ -0,0 +1,108 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <CommonTestUtils.hpp>
+
+#include <armnn/INetwork.hpp>
+
+#include <ResolveType.hpp>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+armnn::INetworkPtr CreateQuantizationNetwork(const armnn::TensorInfo& inputInfo,
+                                             const armnn::TensorInfo& outputInfo)
+{
+    using namespace armnn;
+
+    INetworkPtr network(INetwork::Create());
+
+    IConnectableLayer* input        = network->AddInputLayer(0, "input");
+    IConnectableLayer* quantization = network->AddQuantizeLayer("quantization");
+    IConnectableLayer* output       = network->AddOutputLayer(0, "output");
+
+    Connect(input, quantization, inputInfo, 0, 0);
+    Connect(quantization, output, outputInfo, 0, 0);
+
+    return network;
+}
+
+template<armnn::DataType ArmnnIType, armnn::DataType ArmnnOType,
+        typename Tin = armnn::ResolveType<ArmnnIType>, typename Tout = armnn::ResolveType<ArmnnOType>>
+void QuantizeEndToEndLayerTestImpl(const std::vector<armnn::BackendId>& backends,
+                                   const armnn::TensorShape& tensorShape,
+                                   const std::vector<Tin>& input,
+                                   const std::vector<Tout>& expectedOutput,
+                                   float scale,
+                                   int32_t offset)
+{
+    using namespace armnn;
+
+    TensorInfo inputInfo(tensorShape, ArmnnIType);
+    TensorInfo outputInfo(tensorShape, ArmnnOType, scale, offset);
+
+    inputInfo.SetConstant(true);
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateQuantizationNetwork(inputInfo, outputInfo);
+
+    CHECK(net);
+
+    const std::map<int, std::vector<Tin>> inputTensorData = { { 0, input } };
+    const std::map<int, std::vector<Tout>> expectedOutputData = { { 0, expectedOutput } };
+
+    EndToEndLayerTestImpl<ArmnnIType, ArmnnOType>(std::move(net), inputTensorData, expectedOutputData, backends);
+}
+
+template<armnn::DataType ArmnnOType, typename Tout = armnn::ResolveType<ArmnnOType>>
+void QuantizationEndToEndFloat32(const std::vector<armnn::BackendId>& backends)
+{
+    using namespace armnn;
+
+    const TensorShape tensorShape({ 1, 1, 1, 5 });
+
+    std::vector<float> inputData = { 63.5f, 49.5f, 14.0f, 0.0f, 50.0f };
+
+    float qScale = 0.5f;
+    int32_t qOffset = 127;
+    std::vector<Tout> expectedOutputData = armnnUtils::QuantizedVector<Tout>(inputData, qScale, qOffset);
+
+    QuantizeEndToEndLayerTestImpl<DataType::Float32, ArmnnOType>(backends,
+                                                                 tensorShape,
+                                                                 inputData,
+                                                                 expectedOutputData,
+                                                                 qScale,
+                                                                 qOffset);
+}
+
+template<armnn::DataType ArmnnOType, typename Tout = armnn::ResolveType<ArmnnOType>>
+void QuantizationEndToEndFloat16(const std::vector<armnn::BackendId>& backends)
+{
+    using namespace armnn;
+    using namespace half_float::literal;
+    using Half = half_float::half;
+
+    const TensorShape tensorShape({ 1, 1, 1, 5 });
+
+    std::vector<float> floatInputData = { 63.f, 49.f, 14.f, 0.f, 50.f };
+    std::vector<Half> inputData = { 63._h, 49._h, 14._h, 0._h, 50._h };
+
+    float qScale = 0.25f;
+    int32_t qOffset = 1;
+    std::vector<Tout> expectedOutputData = armnnUtils::QuantizedVector<Tout>(floatInputData, qScale, qOffset);
+
+    QuantizeEndToEndLayerTestImpl<DataType::Float16, ArmnnOType>(backends,
+                                                                 tensorShape,
+                                                                 inputData,
+                                                                 expectedOutputData,
+                                                                 qScale,
+                                                                 qOffset);
+}
+
+}
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 8de763e..143d85a 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -29,6 +29,7 @@
 #include <backendsCommon/test/LogSoftmaxEndToEndTestImpl.hpp>
 #include <backendsCommon/test/PreluEndToEndTestImpl.hpp>
 #include <backendsCommon/test/QLstmEndToEndTestImpl.hpp>
+#include <backendsCommon/test/QuantizationEndToEndTestImpl.hpp>
 #include <backendsCommon/test/RankEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ReduceEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ReshapeEndToEndTestImpl.hpp>
@@ -792,42 +793,42 @@
 }
 
 // DepthToSpace
-TEST_CASE("DephtToSpaceEndToEndNchwFloat32")
+TEST_CASE("DepthToSpaceEndToEndNchwFloat32")
 {
     DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-TEST_CASE("DephtToSpaceEndToEndNchwFloat16")
+TEST_CASE("DepthToSpaceEndToEndNchwFloat16")
 {
     DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-TEST_CASE("DephtToSpaceEndToEndNchwUint8")
+TEST_CASE("DepthToSpaceEndToEndNchwUint8")
 {
     DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-TEST_CASE("DephtToSpaceEndToEndNchwInt16")
+TEST_CASE("DepthToSpaceEndToEndNchwInt16")
 {
     DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-TEST_CASE("DephtToSpaceEndToEndNhwcFloat32")
+TEST_CASE("DepthToSpaceEndToEndNhwcFloat32")
 {
     DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-TEST_CASE("DephtToSpaceEndToEndNhwcFloat16")
+TEST_CASE("DepthToSpaceEndToEndNhwcFloat16")
 {
     DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-TEST_CASE("DephtToSpaceEndToEndNhwcUint8")
+TEST_CASE("DepthToSpaceEndToEndNhwcUint8")
 {
     DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-TEST_CASE("DephtToSpaceEndToEndNhwcInt16")
+TEST_CASE("DepthToSpaceEndToEndNhwcInt16")
 {
     DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
 }
@@ -1064,6 +1065,38 @@
     PreluEndToEndPositiveTest<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
+// Quantization
+TEST_CASE("QuantizationEndToEndFloat32_U8Test")
+{
+    QuantizationEndToEndFloat32<armnn::DataType::QAsymmU8>(defaultBackends);
+}
+
+TEST_CASE("QuantizationEndToEndFloat32_I8Test")
+{
+    QuantizationEndToEndFloat32<armnn::DataType::QAsymmS8>(defaultBackends);
+}
+
+TEST_CASE("QuantizationEndToEndFloat32_S16Test")
+{
+    QuantizationEndToEndFloat32<armnn::DataType::QSymmS16>(defaultBackends);
+}
+
+TEST_CASE("QuantizationEndToEndFloat16_U8Test")
+{
+    QuantizationEndToEndFloat16<armnn::DataType::QAsymmU8>(defaultBackends);
+}
+
+TEST_CASE("QuantizationEndToEndFloat16_I8Test")
+{
+    QuantizationEndToEndFloat16<armnn::DataType::QAsymmS8>(defaultBackends);
+}
+
+TEST_CASE("QuantizationEndToEndFloat16_S16Test")
+{
+    QuantizationEndToEndFloat16<armnn::DataType::QSymmS16>(defaultBackends);
+}
+
+// SpaceToDepth
 TEST_CASE("RefSpaceToDepthNhwcEndToEndTest1")
 {
     SpaceToDepthNhwcEndToEndTest1(defaultBackends);
diff --git a/src/backends/tosaCommon/TosaMappings.cpp b/src/backends/tosaCommon/TosaMappings.cpp
index dff266d..6c6bff4 100644
--- a/src/backends/tosaCommon/TosaMappings.cpp
+++ b/src/backends/tosaCommon/TosaMappings.cpp
@@ -74,6 +74,10 @@
                 return ConvertPooling2DToTosaOperator(layer, inputs, outputs, poolDesc);
             }
         }
+        case LayerType::Quantize:
+        {
+            return ConvertQuantizeToTosaOperator(layer, inputs, outputs);
+        }
         case LayerType::Reshape:
         {
             auto reshapeDesc = PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
index 279b193..b694634 100644
--- a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
+++ b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
@@ -18,6 +18,8 @@
         ElementwiseUnaryOperator.hpp
         Pooling2DOperator.hpp
         Pooling2DOperator.cpp
+        QuantizeOperator.hpp
+        QuantizeOperator.cpp
         ReshapeOperator.hpp
         ReshapeOperator.cpp
         ResizeOperator.hpp
diff --git a/src/backends/tosaCommon/operatorMappings/QuantizeOperator.cpp b/src/backends/tosaCommon/operatorMappings/QuantizeOperator.cpp
new file mode 100644
index 0000000..1107add
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/QuantizeOperator.cpp
@@ -0,0 +1,139 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+// Copyright © 2020 The TensorFlow Authors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "QuantizeOperator.hpp"
+
+// This function is paraphrased from:
+// tensorflow/compiler/mlir/tosa/transforms/legalize_common.cc from function convertQuantizeOp
+TosaSerializationBasicBlock* ConvertQuantizeToTosaOperator(const Layer* layer,
+                                                           const std::vector<const TensorInfo*>& inputs,
+                                                           const std::vector<const TensorInfo*>& outputs)
+{
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE( inputs.size() == 1,
+                                         "ConvertQuantizeToTosaOperator: Quantize must have only one input" );
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE( outputs.size() == 1,
+                                         "ConvertQuantizeToTosaOperator: Quantize must have only one output" );
+
+    std::string inputName           = std::string("input0_");
+    std::string outputNameZeroPoint = std::string("intermediate0_") + GetUniqueTosaMappingID();
+    std::string outputNameScale     = std::string("intermediate1_") + GetUniqueTosaMappingID();
+    std::string outputNameMul       = std::string("intermediate2_") + GetUniqueTosaMappingID();
+    std::string outputNameAdd       = std::string("intermediate3_") + GetUniqueTosaMappingID();
+    std::string outputName          = std::string("output0_");
+    std::string blockName           = std::string("Op_QUANTIZE_block_") + GetUniqueTosaMappingID();
+
+    // If a layer is present then the block will be used for execution, so input and output names need to be determined
+    // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
+    if(layer != nullptr)
+    {
+        // Get the layers connected to the input slots and determine unique tensor names.
+        Layer& connectedLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
+        inputName = GenerateUniqueName(connectedLayer, 0);
+
+        // Determine unique output tensor name.
+        outputName = GenerateUniqueOutputName(*layer, 0);
+    }
+
+    const TensorInfo inputInfo = *inputs[0];
+    const TensorInfo outputInfo = *outputs[0];
+
+    // Extract quantization detail from Tensor
+    float zeroPoint = static_cast<float>(outputInfo.GetQuantizationOffset());
+    // No per axis support in Tensorflow TOSA code
+    float scale = outputInfo.GetQuantizationScale();
+
+    // As per the Tensorflow quantization specification
+    // Tensorflow TOSA code calculates quantization using multiplication by scale
+    // Armnn code calculates quantization using division by scale
+    // Invert scale factor passed from Armnn for tf TOSA code
+    scale = (scale != 0) ? (1.0f / scale) : scale;
+
+    std::vector<TosaSerializationTensor*> tensors;
+
+    // Input shape/dtype are always needed: the constant and intermediate tensors below reuse them.
+    std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputInfo.GetShape());
+    DType inputDType0 = ArmNNToDType(inputInfo.GetDataType());
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE( inputDType0 == DType::DType_FP16 || inputDType0 == DType::DType_FP32,
+                                         "ConvertQuantizeToTosaOperator: Quantize input must be of type Float" );
+
+    // Only add the input tensor if the connected layer is an input layer.
+    // As intermediate or constant tensors will be created separately.
+    // There also can't be duplicate tensors.
+    if(inputName.find("input0_") != std::string::npos)
+    {
+        tensors.push_back(new TosaSerializationTensor(inputName, inputShape0, inputDType0, {}));
+    }
+
+    std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputInfo.GetShape());
+    DType outputDType0 = ArmNNToDType(outputInfo.GetDataType());
+
+    // quantize:
+    // const_zeroPoint = constant(zeroPoint)
+    // const_scale = constant(scale)
+    // out_mul = mul(input, const_scale)
+    // out_add = add(out_mul, const_zeroPoint)
+    // output = cast<output_type>(out_add)
+
+    // const_zeroPoint
+    TosaSerializationOperator* zeroPointOp = nullptr;
+    TosaSerializationTensor* zeroPointTensor = nullptr;
+    CreateConstTosaOperator<float>(outputNameZeroPoint,
+                                   zeroPoint,
+                                   inputDType0,
+                                   inputShape0,
+                                   zeroPointOp,
+                                   zeroPointTensor);
+    tensors.push_back(zeroPointTensor);
+
+    // const_scale
+    TosaSerializationOperator* scaleOp = nullptr;
+    TosaSerializationTensor* scaleTensor = nullptr;
+    CreateConstTosaOperator<float>(outputNameScale,
+                                   scale,
+                                   inputDType0,
+                                   inputShape0,
+                                   scaleOp,
+                                   scaleTensor);
+    tensors.push_back(scaleTensor);
+
+    // mul
+    int32_t shift = 0;
+    TosaMulAttribute mulAttribute(shift);
+    TosaSerializationOperator* mulOp = new TosaSerializationOperator(Op_MUL,
+                                                                     Attribute_MulAttribute,
+                                                                     &mulAttribute,
+                                                                     {inputName, outputNameScale},
+                                                                     {outputNameMul});
+    tensors.push_back(new TosaSerializationTensor(outputNameMul, inputShape0, inputDType0, {}));
+
+    // add
+    TosaSerializationOperator* addOp = new TosaSerializationOperator(Op_ADD,
+                                                                     Attribute_NONE,
+                                                                     nullptr,
+                                                                     {outputNameMul, outputNameZeroPoint},
+                                                                     {outputNameAdd});
+    tensors.push_back(new TosaSerializationTensor(outputNameAdd, inputShape0, inputDType0, {}));
+
+    // cast
+    TosaSerializationOperator* castOp = new TosaSerializationOperator(Op_CAST,
+                                                                      Attribute_NONE,
+                                                                      nullptr,
+                                                                      {outputNameAdd},
+                                                                      {outputName});
+
+    tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
+
+    // operatorInputNames/operatorOutputNames ends up being the same as
+    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings
+    return new TosaSerializationBasicBlock(blockName,                                       // name
+                                           mainName,                                        // region name
+                                           {zeroPointOp, scaleOp, mulOp, addOp, castOp},    // operators
+                                           tensors,                                         // tensors
+                                           {inputName},                                     // inputs
+                                           {outputName});                                   // outputs
+}
diff --git a/src/backends/tosaCommon/operatorMappings/QuantizeOperator.hpp b/src/backends/tosaCommon/operatorMappings/QuantizeOperator.hpp
new file mode 100644
index 0000000..895c091
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/QuantizeOperator.hpp
@@ -0,0 +1,16 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TosaOperatorUtils.hpp"
+
+using namespace armnn;
+using namespace tosa;
+
+// One-to-many mapping of an ArmNN Quantize layer to TOSA: CONST, CONST, MUL, ADD, CAST.
+TosaSerializationBasicBlock* ConvertQuantizeToTosaOperator(const Layer* layer,
+                                                           const std::vector<const TensorInfo*>& inputs,
+                                                           const std::vector<const TensorInfo*>& outputs);
diff --git a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
index 749e876..3d88b65 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
@@ -12,6 +12,7 @@
 #include "ElementwiseBinaryOperator.hpp"
 #include "ElementwiseUnaryOperator.hpp"
 #include "Pooling2DOperator.hpp"
+#include "QuantizeOperator.hpp"
 #include "ReshapeOperator.hpp"
 #include "ResizeOperator.hpp"
 #include "SliceOperator.hpp"
diff --git a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
index 3e106e1..e43f6ca 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -354,3 +354,105 @@
     tensorHandle->Unmap();
     return uint8Data;
 }
+
+// Builds the raw uint8 data buffer for a TOSA CONST tensor by replicating a
+// single scalar value across every element of the given shape.
+inline std::vector<uint8_t> CreateConstTosaData(const void* value,
+                                                DType dtype,
+                                                const std::vector<int32_t>& shape)
+{
+    std::vector<uint8_t> uint8Data;
+    tosa_err_t error = tosa_err_t::TOSA_OK;
+
+    unsigned int numElements = 1;
+    for (auto s : shape)
+    {
+        if (s < 0)
+        {
+            throw armnn::Exception("CreateConstTosaData: negative shape elements unhandled.");
+        }
+        numElements = numElements * static_cast<unsigned int>(s);
+    }
+
+    switch (dtype)
+    {
+        case DType::DType_FP32:
+        {
+            std::vector<float> data(numElements, *static_cast<const float*>(value));
+            error = TosaSerializationHandler::ConvertF32toU8(data, uint8Data);
+            break;
+        }
+        case DType::DType_FP16:
+        {
+            std::vector<float> data(numElements, *static_cast<const float*>(value));
+            error = TosaSerializationHandler::ConvertF16toU8(data, uint8Data);
+            break;
+        }
+        case DType::DType_INT48:
+        {
+            std::vector<int64_t> data(numElements, *static_cast<const int64_t*>(value));
+            error = TosaSerializationHandler::ConvertI48toU8(data, uint8Data);
+            break;
+        }
+        case DType::DType_INT32:
+        {
+            std::vector<int32_t> data(numElements, *static_cast<const int32_t*>(value));
+            error = TosaSerializationHandler::ConvertI32toU8(data, uint8Data);
+            break;
+        }
+        case DType::DType_INT16:
+        {
+            std::vector<int16_t> data(numElements, *static_cast<const int16_t*>(value));
+            error = TosaSerializationHandler::ConvertI16toU8(data, uint8Data);
+            break;
+        }
+        case DType::DType_INT8:
+        {
+            std::vector<int8_t> data(numElements, *static_cast<const int8_t*>(value));
+            error = TosaSerializationHandler::ConvertI8toU8(data, uint8Data);
+            break;
+        }
+        case DType::DType_INT4:
+        {
+            std::vector<int8_t> data(numElements, *static_cast<const int8_t*>(value));
+            error = TosaSerializationHandler::ConvertI4toU8(data, uint8Data);
+            break;
+        }
+        case DType::DType_BOOL:
+        {
+            std::vector<bool> data(numElements, *static_cast<const bool*>(value));
+            error = TosaSerializationHandler::ConvertBooltoU8(data, uint8Data);
+            break;
+        }
+        default:
+        {
+            throw armnn::Exception("CreateConstTosaData: An unsupported data type was encountered.");
+        }
+    }
+
+    if(error != tosa_err_t::TOSA_OK)
+    {
+        throw armnn::Exception("CreateConstTosaData: An error occurred when converting constant data");
+    }
+
+    return uint8Data;
+}
+
+// Creates a CONST TOSA operator and the tensor holding its replicated scalar
+// value; the caller takes ownership of both out-parameters.
+template<typename T>
+inline void CreateConstTosaOperator(const std::string& outputName,
+                                    const T value,
+                                    DType dtype,
+                                    const std::vector<int32_t>& shape,
+                                    TosaSerializationOperator*& op,
+                                    TosaSerializationTensor*& tensor)
+{
+    std::vector<uint8_t> uint8Data = CreateConstTosaData(static_cast<const void *>(&value), dtype, shape);
+
+    op = new TosaSerializationOperator(Op_CONST, Attribute_NONE, nullptr, {}, {outputName});
+    ARMNN_THROW_MSG_IF_FALSE(op, armnn::Exception, "CreateConstTosaOperator: failed to create operator");
+
+    tensor = new TosaSerializationTensor(outputName, shape, dtype, uint8Data);
+    ARMNN_THROW_MSG_IF_FALSE(tensor, armnn::Exception, "CreateConstTosaOperator: failed to create tensor");
+}
diff --git a/src/backends/tosaCommon/test/OneToManyMappingTests.cpp b/src/backends/tosaCommon/test/OneToManyMappingTests.cpp
index 94dd537..f439b04 100644
--- a/src/backends/tosaCommon/test/OneToManyMappingTests.cpp
+++ b/src/backends/tosaCommon/test/OneToManyMappingTests.cpp
@@ -4,7 +4,9 @@
 //
 
 #include "AvgPool2DIgnoreValueChecker.hpp"
+#include "QuantizeChecker.hpp"
 #include "SplitChecker.hpp"
+
 #include <armnn/IRuntime.hpp>
 
 using namespace armnn;
@@ -84,6 +86,47 @@
                               descriptor);
 }
 
+TEST_CASE("GetTosaMapping_QuantizeLayer")
+{
+    NullDescriptor descriptor;
+    DataType outputDataType = DataType::Signed32;
+
+    TensorInfo inputTensorInfo({ 1, 3, 3, 1 }, DataType::Float32);
+    TensorInfo outputTensorInfo({ 1, 3, 3, 1 }, outputDataType);
+    std::vector<int32_t> shape = { 1, 3, 3, 1 };
+
+    TosaSerializationBasicBlock* basicBlock =
+            GetTosaMapping(nullptr, LayerType::Quantize, {&inputTensorInfo}, {&outputTensorInfo}, descriptor);
+    VerifyQuantize(basicBlock, shape, ArmNNToDType(DataType::Float32), ArmNNToDType(outputDataType));
+}
+
+TEST_CASE("GetTosaMappingFromLayer_QuantizeLayer")
+{
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+    // Builds up the structure of the network.
+    INetworkPtr net(INetwork::Create());
+    NullDescriptor descriptor;
+    DataType outputDataType = DataType::Signed32;
+
+    IConnectableLayer* input0   = net->AddInputLayer(0, "input0");
+    IConnectableLayer* quantize = net->AddQuantizeLayer("quantize");
+    IConnectableLayer* output   = net->AddOutputLayer(0, "output");
+
+    input0->GetOutputSlot(0).Connect(quantize->GetInputSlot(0));
+    quantize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    armnn::TensorInfo inputTensorInfo({ 1, 3, 3, 1 }, DataType::Float32);
+    armnn::TensorInfo outputTensorInfo({ 1, 3, 3, 1 }, outputDataType);
+    std::vector<int32_t> shape = { 1, 3, 3, 1 };
+
+    input0->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+    quantize->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    TosaSerializationBasicBlock* basicBlock = GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(quantize));
+    VerifyQuantize(basicBlock, shape, ArmNNToDType(DataType::Float32), ArmNNToDType(outputDataType));
+}
+
 TEST_CASE("GetTosaMapping_SplitLayer")
 {
     const unsigned int numViews = 3;
diff --git a/src/backends/tosaCommon/test/QuantizeChecker.hpp b/src/backends/tosaCommon/test/QuantizeChecker.hpp
new file mode 100644
index 0000000..1a35903
--- /dev/null
+++ b/src/backends/tosaCommon/test/QuantizeChecker.hpp
@@ -0,0 +1,105 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TosaTestUtils.hpp"
+
+using namespace armnn;
+using namespace tosa;
+
+// Validates the structure of the TOSA basic block emitted for an ArmNN Quantize
+// layer: CONST (zero point), CONST (scale), MUL, ADD, CAST.
+inline void VerifyQuantize(TosaSerializationBasicBlock* quantizeBlock,
+                           std::vector<int32_t> shape,
+                           DType inputDataType = DType_FP32,
+                           DType outputDataType = DType_FP32)
+{
+    std::string blockStr = "Op_QUANTIZE_block_";
+    CHECK(quantizeBlock->GetName().find(blockStr) != std::string::npos);
+    CHECK(quantizeBlock->GetInputs().size() == 1);
+    CHECK(quantizeBlock->GetOutputs().size() == 1);
+    CHECK(quantizeBlock->GetOperators().size() == 5); // CONST, CONST, MUL, ADD, CAST
+    CHECK(quantizeBlock->GetTensors().size() == 6);
+
+    std::basic_string<char> blockInputName = quantizeBlock->GetInputs()[0];
+    std::basic_string<char> blockOutputName = quantizeBlock->GetOutputs()[0];
+
+    //
+    // Verify Constants
+    //
+    TosaSerializationOperator* constZeroPointOp = quantizeBlock->GetOperators().at(0);
+    CHECK(constZeroPointOp->GetAttributeType() == Attribute_NONE);
+    CHECK(constZeroPointOp->GetOp() == tosa::Op_CONST);
+
+    TosaSerializationOperator* constScaleOp = quantizeBlock->GetOperators().at(1);
+    CHECK(constScaleOp->GetAttributeType() == Attribute_NONE);
+    CHECK(constScaleOp->GetOp() == tosa::Op_CONST);
+
+    //
+    // Verify Multiplication
+    //
+    TosaSerializationOperator* mulOp = quantizeBlock->GetOperators().at(2);
+    CHECK(mulOp->GetAttributeType() == tosa::Attribute_MulAttribute);
+    CHECK(mulOp->GetOp() == tosa::Op_MUL);
+
+    CHECK(mulOp->GetInputTensorNames().size() == 2);
+    std::basic_string<char> mulInputName0 = mulOp->GetInputTensorNames()[0];
+    std::basic_string<char> mulInputName1 = mulOp->GetInputTensorNames()[1];
+
+    CHECK(blockInputName == mulInputName0);
+
+    TosaSerializationTensor* mulInputTensor0 = quantizeBlock->GetTensorByName(mulInputName0);
+    CHECK(mulInputTensor0->GetDtype() == inputDataType);
+    CHECK(mulInputTensor0->GetData().size() == 0);
+    CHECK(mulInputTensor0->GetShape() == shape);
+
+    TosaSerializationTensor* mulInputTensor1 = quantizeBlock->GetTensorByName(mulInputName1);
+    CHECK(mulInputTensor1->GetShape() == shape);
+
+    //
+    // Verify Addition
+    //
+    TosaSerializationOperator* addOp = quantizeBlock->GetOperators().at(3);
+    CHECK(addOp->GetAttributeType() == Attribute_NONE);
+    CHECK(addOp->GetOp() == tosa::Op_ADD);
+
+    CHECK(addOp->GetInputTensorNames().size() == 2);
+    std::basic_string<char> addInputName0 = addOp->GetInputTensorNames()[0];
+    std::basic_string<char> addInputName1 = addOp->GetInputTensorNames()[1];
+
+    TosaSerializationTensor* addInputTensor0 = quantizeBlock->GetTensorByName(addInputName0);
+    CHECK(addInputTensor0->GetDtype() == inputDataType);
+    CHECK(addInputTensor0->GetData().size() == 0);
+    CHECK(addInputTensor0->GetShape() == shape);
+
+    TosaSerializationTensor* addInputTensor1 = quantizeBlock->GetTensorByName(addInputName1);
+    CHECK(addInputTensor1->GetShape() == shape);
+
+    //
+    // Verify Cast
+    //
+    TosaSerializationOperator* castOp = quantizeBlock->GetOperators().at(4);
+    CHECK(castOp->GetAttributeType() == Attribute_NONE);
+    CHECK(castOp->GetOp() == tosa::Op_CAST);
+
+    CHECK(castOp->GetInputTensorNames().size() == 1);
+    CHECK(castOp->GetOutputTensorNames().size() == 1);
+
+    std::basic_string<char> castInputName = castOp->GetInputTensorNames()[0];
+    std::basic_string<char> castOutputName = castOp->GetOutputTensorNames()[0];
+
+    TosaSerializationTensor* castInputTensor = quantizeBlock->GetTensorByName(castInputName);
+    CHECK(castInputTensor->GetDtype() == inputDataType);
+    CHECK(castInputTensor->GetData().size() == 0);
+    CHECK(castInputTensor->GetShape() == shape);
+
+    TosaSerializationTensor* castOutputTensor = quantizeBlock->GetTensorByName(castOutputName);
+    CHECK(castOutputTensor->GetDtype() == outputDataType);
+    CHECK(castOutputTensor->GetData().size() == 0);
+    CHECK(castOutputTensor->GetShape() == shape);
+
+    CHECK(blockOutputName == castOutputName);
+}
diff --git a/src/backends/tosaReference/TosaRefLayerSupport.cpp b/src/backends/tosaReference/TosaRefLayerSupport.cpp
index 60d0f7c..a38c431 100644
--- a/src/backends/tosaReference/TosaRefLayerSupport.cpp
+++ b/src/backends/tosaReference/TosaRefLayerSupport.cpp
@@ -71,6 +71,7 @@
         }
         case LayerType::ElementwiseUnary:
         case LayerType::Pooling2d:
+        case LayerType::Quantize:
         case LayerType::Reshape:
         case LayerType::Resize:
         case LayerType::Slice:
diff --git a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
index 05d4114..914df76 100644
--- a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
+++ b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
@@ -12,6 +12,7 @@
 #include "backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp"
 #include "backendsCommon/test/MultiplicationEndToEndTestImpl.hpp"
 #include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp"
+#include "backendsCommon/test/QuantizationEndToEndTestImpl.hpp"
 #include "backendsCommon/test/ReshapeEndToEndTestImpl.hpp"
 #include "backendsCommon/test/ResizeEndToEndTestImpl.hpp"
 #include "backendsCommon/test/SliceEndToEndTestImpl.hpp"
@@ -133,6 +134,37 @@
     MaxPool2dEndToEnd<DataType::Float32>(tosaDefaultBackends, PaddingMethod::IgnoreValue);
 }
 
+// Quantization
+TEST_CASE("TosaRefQuantizeFromFloat32ToInt8")
+{
+    QuantizationEndToEndFloat32<DataType::QAsymmS8>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefQuantizeFromFloat32ToInt16")
+{
+    QuantizationEndToEndFloat32<DataType::QSymmS16>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefQuantizeFromFloat32ToInt32")
+{
+    QuantizationEndToEndFloat32<DataType::Signed32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefQuantizeFromFloat16ToInt8")
+{
+    QuantizationEndToEndFloat16<DataType::QAsymmS8>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefQuantizeFromFloat16ToInt16")
+{
+    QuantizationEndToEndFloat16<DataType::QSymmS16>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefQuantizeFromFloat16ToInt32")
+{
+    QuantizationEndToEndFloat16<DataType::Signed32>(tosaDefaultBackends);
+}
+
 // Reshape
 TEST_CASE("TosaRefReshapeEndtoEndTestFloat32")
 {