GitHub #543 Problem Parsing Mixed-Precision Model

 * Fixed a bug when converting constant tensors with per-axis quantization
 * Moved the conversion logic into armnnUtils::ToFloatArray and added
   support for Signed32 and Signed64 constants
 * Added an explicit Dequantize<int64_t> specialization
 * Added unit tests for ToFloatArray
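
   A minimal sketch of the new helper in use (illustrative only; the shape,
   scales and values below are made up):

       // 2x2 per-axis quantized QSymmS8 weights, one scale per row (axis 0).
       armnn::TensorInfo info({ 2, 2 }, armnn::DataType::QSymmS8,
                              std::vector<float>{ 0.1f, 0.2f }, 0);
       std::vector<uint8_t> raw { 10, 20, 30, 40 }; // int8 values as raw bytes
       std::unique_ptr<float[]> floats = armnnUtils::ToFloatArray(raw, info);
       // floats now holds { 1.0f, 2.0f, 6.0f, 8.0f }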

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ifbea23e60483746ec987da491dae96e74cb33af4
diff --git a/include/armnnUtils/TensorUtils.hpp b/include/armnnUtils/TensorUtils.hpp
index f7f20bd..2d6ec2f 100644
--- a/include/armnnUtils/TensorUtils.hpp
+++ b/include/armnnUtils/TensorUtils.hpp
@@ -55,4 +55,9 @@
 
 std::pair<unsigned int, std::vector<float>> GetPerAxisParams(const armnn::TensorInfo& info);
 
+template<typename PrimitiveType>
+std::unique_ptr<float[]> ToFloatArray(const std::vector<PrimitiveType>& data, const armnn::TensorInfo& tensorInfo);
+
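+/// Overload for raw byte buffers: the bytes are interpreted according to
+/// tensorInfo's data type and dequantized to a float array.
+/// Throws InvalidArgumentException for unsupported data types.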
+std::unique_ptr<float[]> ToFloatArray(const std::vector<uint8_t>& data, const armnn::TensorInfo& tensorInfo);
+
 } // namespace armnnUtils
diff --git a/src/armnn/TypesUtils.cpp b/src/armnn/TypesUtils.cpp
index 4ba9ed1..74ac231 100644
--- a/src/armnn/TypesUtils.cpp
+++ b/src/armnn/TypesUtils.cpp
@@ -81,4 +81,8 @@
 
 /// Explicit specialization of Dequantize for int32_t
 template
-float armnn::Dequantize<int32_t>(int32_t value, float scale, int32_t offset);
\ No newline at end of file
+float armnn::Dequantize<int32_t>(int32_t value, float scale, int32_t offset);
+
+/// Explicit specialization of Dequantize for int64_t
+template
+float armnn::Dequantize<int64_t>(int64_t value, float scale, int32_t offset);
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 0484c6f..191cfd2 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -316,6 +316,14 @@
         ::memcpy(uint64Buffer.data(), bufferPtr->data.data(), bufferPtr->data.size());
         buffer.assign(std::begin(uint64Buffer), std::end(uint64Buffer));
     }
+    else
+    {
+        CheckLocation location = CHECK_LOCATION();
+        throw ParseException(
+                fmt::format("Unsupported data type {} for uint buffer; only Signed32 and Signed64 are supported. {}",
+                            GetDataTypeName(info.GetDataType()),
+                            location.AsString()));
+    }
     return buffer;
 }
 
@@ -911,42 +919,16 @@
     return std::move(m_Network);
 }
 
-std::unique_ptr<float[]> AsFloatArray(TfLiteParserImpl::BufferRawPtr bufferPtr,
-                                      const TensorInfo& tensorInfo)
+bool TfLiteParserImpl::ShouldConstantTensorBeConverted(TfLiteParserImpl::TensorRawPtr tensorPtr,
+                                                       armnn::DataType inputDataType,
+                                                       armnn::DataType tensorDataType)
 {
-    if (tensorInfo.GetDataType() == DataType::QAsymmS8 || tensorInfo.GetDataType() == DataType::QSymmS8 ||
-        tensorInfo.GetDataType() == DataType::QAsymmU8)
-    {
-        std::unique_ptr<float[]> buffer(new float[tensorInfo.GetNumElements()]);
-
-        if (tensorInfo.HasPerAxisQuantization())
-        {
-            unsigned int axis = tensorInfo.GetQuantizationDim().value();
-            auto axisDimensionality = tensorInfo.GetShape()[axis];
-            auto axisFactor = armnnUtils::GetNumElementsAfter(tensorInfo.GetShape(), axis);
-
-            for (unsigned int i = 0; i < tensorInfo.GetNumDimensions(); ++i)
-            {
-                unsigned int axisIndex = (i / axisFactor) % axisDimensionality;
-                buffer[i] = Dequantize<int8_t>(bufferPtr->data[i], tensorInfo.GetQuantizationScales()[axisIndex],
-                                               tensorInfo.GetQuantizationOffset());
-            }
-        }
-        else
-        {
-            for (unsigned int i = 0; i < tensorInfo.GetNumElements(); ++i)
-            {
-                buffer[i] = Dequantize<int8_t>(bufferPtr->data[i], tensorInfo.GetQuantizationScale(),
-                                               tensorInfo.GetQuantizationOffset());
-            }
-        }
-        return buffer;
-    }
-    throw ParseException(
-            fmt::format("Unsupported input/weights combination:  Input {} not supported with Weights {}",
-                        GetDataTypeName(DataType::Float32),
-                        GetDataTypeName(tensorInfo.GetDataType()),
-                        CHECK_LOCATION().AsString()));
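+    // Only constant tensors feeding a Float32 input are converted, and only
+    // when their own data is stored in a quantized or plain integer format.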
+    return (TfLiteParserImpl::IsConstTensor(tensorPtr) && inputDataType == DataType::Float32 &&
+            (tensorDataType == DataType::QAsymmU8 ||
+             tensorDataType == DataType::QAsymmS8 ||
+             tensorDataType == DataType::QSymmS8 ||
+             tensorDataType == DataType::Signed32 ||
+             tensorDataType == DataType::Signed64));
 }
 
 void TfLiteParserImpl::RegisterProducerOfTensor(size_t subgraphIndex,
@@ -1136,9 +1118,7 @@
     auto layerName = fmt::format("Conv2D:{}:{}", subgraphIndex, operatorIndex);
     armnn::IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc, layerName.c_str());
 
-    if (IsConstTensor(inputs[1]) && inputTensorInfo.GetDataType() == DataType::Float32 &&
-        (filterTensorInfo.GetDataType() == DataType::QAsymmU8 ||
-            filterTensorInfo.GetDataType() == DataType::QAsymmS8))
+    if (ShouldConstantTensorBeConverted(inputs[1], inputTensorInfo.GetDataType(), filterTensorInfo.GetDataType()))
     {
         m_ConstantsToDequantize.emplace_back(inputs[1]->buffer);
     }
@@ -1150,9 +1130,7 @@
         // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
         tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
 
-        if (IsConstTensor(inputs[2]) && inputTensorInfo.GetDataType() == DataType::Float32 &&
-            (filterTensorInfo.GetDataType() == DataType::QAsymmU8 ||
-                filterTensorInfo.GetDataType() == DataType::QAsymmS8))
+        if (ShouldConstantTensorBeConverted(inputs[2], inputTensorInfo.GetDataType(), biasTensorInfo.GetDataType()))
         {
             m_ConstantsToDequantize.emplace_back(inputs[2]->buffer);
         }
@@ -3112,9 +3090,7 @@
     // Add the weights input to the registration list, constant layers will be added by SetupConstantLayers if constant.
     tensorIndexesToRegister.emplace_back(inputTensorIndexes[1]);
 
-    if (desc.m_ConstantWeights && inputTensorInfo.GetDataType() == DataType::Float32 &&
-        (filterTensorInfo.GetDataType() == DataType::QAsymmU8 ||
-         filterTensorInfo.GetDataType() == DataType::QAsymmS8))
+    if (ShouldConstantTensorBeConverted(inputs[1], inputTensorInfo.GetDataType(), filterTensorInfo.GetDataType()))
     {
         m_ConstantsToDequantize.emplace_back(inputs[1]->buffer);
     }
@@ -3127,9 +3103,7 @@
         // Add the biases input to the registration list, constant layer will be added by SetupConstantLayers.
         tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
 
-        if (desc.m_ConstantWeights && inputTensorInfo.GetDataType() == DataType::Float32 &&
-            (biasTensorInfo.GetDataType() == DataType::QAsymmU8 ||
-             biasTensorInfo.GetDataType() == DataType::QAsymmS8))
+        if (ShouldConstantTensorBeConverted(inputs[2], inputTensorInfo.GetDataType(), biasTensorInfo.GetDataType()))
         {
             m_ConstantsToDequantize.emplace_back(inputs[2]->buffer);
         }
@@ -4925,11 +4899,22 @@
     // Make sure isConstant flag is set.
     tensorInfo.SetConstant();
 
-    if (inputDataType == DataType::Float32 &&  tensorInfo.GetDataType() != DataType::Float32)
+    if (inputDataType == DataType::Float32 && tensorInfo.GetDataType() != DataType::Float32)
     {
-        TensorInfo constTensorInfo(tensorInfo.GetShape(), DataType::Float32, 0.0f, 0, true);
-        std::unique_ptr<float[]> data = AsFloatArray(bufferPtr, tensorInfo);
-        return std::make_pair(ConstTensor(constTensorInfo, data.get()), std::move(data));
+        try
+        {
+            TensorInfo constTensorInfo(tensorInfo.GetShape(), DataType::Float32, 0.0f, 0, true);
+            std::unique_ptr<float[]> data = armnnUtils::ToFloatArray(bufferPtr->data, tensorInfo);
+            return std::make_pair(ConstTensor(constTensorInfo, data.get()), std::move(data));
+        }
+        catch (const armnn::InvalidArgumentException&)
+        {
+            throw ParseException(
+                    fmt::format("Unsupported input/weights combination: Input {} not supported with Weights {}. {}",
+                                GetDataTypeName(DataType::Float32),
+                                GetDataTypeName(tensorInfo.GetDataType()),
+                                CHECK_LOCATION().AsString()));
+        }
     }
     else
     {
@@ -4950,9 +4935,20 @@
 
     if (inputTensorInfo.GetDataType() == DataType::Float32 && tensorInfo.GetDataType() != DataType::Float32)
     {
-        TensorInfo constTensorInfo(tensorInfo.GetShape(), DataType::Float32, 0.0f, 0, true);
-        std::unique_ptr<float[]> data = AsFloatArray(bufferPtr, tensorInfo);
-        return std::make_pair(new ConstTensor(constTensorInfo, data.get()), std::move(data));
+        try
+        {
+            TensorInfo constTensorInfo(tensorInfo.GetShape(), DataType::Float32, 0.0f, 0, true);
+            std::unique_ptr<float[]> data = armnnUtils::ToFloatArray(bufferPtr->data, tensorInfo);
+            return std::make_pair(new ConstTensor(constTensorInfo, data.get()), std::move(data));
+        }
+        catch (const armnn::InvalidArgumentException&)
+        {
+            throw ParseException(
+                    fmt::format("Unsupported input/weights combination: Input {} not supported with Weights {}. {}",
+                                GetDataTypeName(DataType::Float32),
+                                GetDataTypeName(tensorInfo.GetDataType()),
+                                CHECK_LOCATION().AsString()));
+        }
     }
     else
     {
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index f8ddc55..7eb6c48 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -242,7 +242,13 @@
     };
 
     bool ShouldConstantTensorBeCreated(unsigned int tensorIndex);
+
     bool IsConstTensor(TensorRawPtr tensorPtr);
+
+    bool ShouldConstantTensorBeConverted(TfLiteParserImpl::TensorRawPtr tensorPtr,
+                                         armnn::DataType inputDataType,
+                                         armnn::DataType tensorDataType);
+
     armnn::ConstTensor CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
                                                     armnn::TensorInfo& tensorInfo);
 
@@ -250,6 +256,7 @@
     CreateConstTensorPermuted(TensorRawPtr tensorPtr,
                               armnn::TensorInfo& tensorInfo,
                               armnn::Optional<armnn::PermutationVector&> permutationVector);
+
     std::pair<armnn::ConstTensor, std::unique_ptr<float[]>>
     CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
                                  armnn::TensorInfo& tensorInfo,
@@ -261,6 +268,7 @@
                                   TfLiteParserImpl::TensorRawPtr tensorPtr,
                                   armnn::TensorInfo& tensorInfo,
                                   armnn::Optional<armnn::PermutationVector&> permutationVector);
+
     std::pair<armnn::ConstTensor*, std::unique_ptr<float[]>>
     CreateConstTensorPtr(TensorRawPtr tensorPtr,
                          armnn::TensorInfo& inputTensorInfo);
diff --git a/src/armnnTfLiteParser/test/Conv2D.cpp b/src/armnnTfLiteParser/test/Conv2D.cpp
index 45c4a43..334c102 100644
--- a/src/armnnTfLiteParser/test/Conv2D.cpp
+++ b/src/armnnTfLiteParser/test/Conv2D.cpp
@@ -673,7 +673,7 @@
                                       "[ 1, 2, 2, 1 ]",    // filterShape
                                       "[ 2,1, 0,6 ]",      // filterData
                                       "[ 1 ]",             // biasShape
-                                      "[ 10, 0, 0, 0 ]",   // biasData
+                                      "[ 10 ]",            // biasData
                                       "1",                 // stride w and h
                                       "NONE",              // activation
                                       "1.0",               // filterScale
diff --git a/src/armnnUtils/TensorUtils.cpp b/src/armnnUtils/TensorUtils.cpp
index d77f5d7..9e3d719 100644
--- a/src/armnnUtils/TensorUtils.cpp
+++ b/src/armnnUtils/TensorUtils.cpp
@@ -128,12 +128,11 @@
     }
     outputShape.insert(outputShape.begin() + axis, 1);
 
-    return TensorShape(outputDim, outputShape.data());
+    return { outputDim, outputShape.data() };
 }
 
 std::vector<unsigned int> SqueezeDims(const TensorShape& tensorShape)
 {
-    unsigned int outputDimSize = 0;
     std::vector<unsigned int> squeezedDims;
 
     for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); ++i)
@@ -141,7 +140,6 @@
         if (tensorShape[i] != 1)
         {
             squeezedDims.push_back(tensorShape[i]);
-            ++outputDimSize;
         }
     }
     return squeezedDims;
@@ -201,4 +199,91 @@
     return { axisFactor, scales };
 }
 
+template<typename PrimitiveType>
+void CheckSizes(const std::vector<PrimitiveType>& data, const armnn::TensorInfo& tensorInfo, unsigned int size = 1)
+{
+    if (data.size() / size != tensorInfo.GetNumElements())
+    {
+        throw InvalidArgumentException(
+                fmt::format("The data does not contain the expected number of elements {} != {}. {}",
+                            data.size(), tensorInfo.GetNumElements(), CHECK_LOCATION().AsString()));
+    }
+}
+
+template<typename PrimitiveType>
+std::unique_ptr<float[]> ToFloatArray(const std::vector<PrimitiveType>& data, const armnn::TensorInfo& tensorInfo)
+{
+    CheckSizes(data, tensorInfo);
+
+    std::unique_ptr<float[]> returnBuffer(new float[tensorInfo.GetNumElements()]);
+
+    if (tensorInfo.HasPerAxisQuantization())
+    {
+        unsigned int axis = tensorInfo.GetQuantizationDim().value();
+        auto axisDimensionality = tensorInfo.GetShape()[axis];
+        auto axisFactor = armnnUtils::GetNumElementsAfter(tensorInfo.GetShape(), axis);
+
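+        // Elements are laid out so that consecutive runs of 'axisFactor'
+        // elements share a quantization scale; the scale index cycles
+        // along the quantization axis.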
+        for (unsigned int i = 0; i < tensorInfo.GetNumElements(); ++i)
+        {
+            unsigned int axisIndex = (i / axisFactor) % axisDimensionality;
+            returnBuffer[i] = Dequantize<PrimitiveType>(data[i],
+                                                        tensorInfo.GetQuantizationScales()[axisIndex],
+                                                        tensorInfo.GetQuantizationOffset());
+        }
+    }
+    else
+    {
+        for (unsigned int i = 0; i < tensorInfo.GetNumElements(); ++i)
+        {
+            returnBuffer[i] = Dequantize<PrimitiveType>(data[i],
+                                                        tensorInfo.GetQuantizationScale(),
+                                                        tensorInfo.GetQuantizationOffset());
+        }
+    }
+    return returnBuffer;
+}
+
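+// Overload for raw byte buffers (e.g. as read from a TfLite flat buffer):
+// the bytes are reinterpreted according to tensorInfo's data type (the third
+// argument to CheckSizes is the element width in bytes) and dequantized via
+// the typed overload above.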
+std::unique_ptr<float[]> ToFloatArray(const std::vector<uint8_t>& data, const armnn::TensorInfo& tensorInfo)
+{
+    if (tensorInfo.GetDataType() == DataType::QAsymmS8 || tensorInfo.GetDataType() == DataType::QSymmS8)
+    {
+        CheckSizes(data, tensorInfo);
+        std::vector<int8_t> buffer(tensorInfo.GetNumElements());
+        ::memcpy(buffer.data(), data.data(), data.size());
+        return ToFloatArray<int8_t>(buffer, tensorInfo);
+    }
+    else if (tensorInfo.GetDataType() == DataType::QAsymmU8)
+    {
+        CheckSizes(data, tensorInfo);
+        return ToFloatArray<uint8_t>(data, tensorInfo);
+    }
+    else if (tensorInfo.GetDataType() == DataType::Signed32)
+    {
+        CheckSizes(data, tensorInfo, 4);
+        std::vector<int32_t> buffer(tensorInfo.GetNumElements());
+        ::memcpy(buffer.data(), data.data(), data.size());
+        return ToFloatArray<int32_t>(buffer, tensorInfo);
+    }
+    else if (tensorInfo.GetDataType() == DataType::Signed64)
+    {
+        CheckSizes(data, tensorInfo, 8);
+        std::vector<int64_t> buffer(tensorInfo.GetNumElements());
+        ::memcpy(buffer.data(), data.data(), data.size());
+        return ToFloatArray<int64_t>(buffer, tensorInfo);
+    }
+    throw InvalidArgumentException(
+            fmt::format("Unsupported data type {}. {}",
+                        GetDataTypeName(tensorInfo.GetDataType()),
+                        CHECK_LOCATION().AsString()));
+}
+
 } // namespace armnnUtils
diff --git a/src/armnnUtils/test/TensorUtilsTest.cpp b/src/armnnUtils/test/TensorUtilsTest.cpp
index 6d5f719..16349c5 100644
--- a/src/armnnUtils/test/TensorUtilsTest.cpp
+++ b/src/armnnUtils/test/TensorUtilsTest.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019,2021-2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -134,4 +134,175 @@
     CHECK_THROWS_AS(ExpandDims(inputShape, -5), armnn::InvalidArgumentException);
 }
 
+TEST_CASE("ToFloatArrayInvalidDataType")
+{
+    armnn::TensorInfo info({ 2, 3, 4 }, armnn::DataType::BFloat16);
+    std::vector<uint8_t> data { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+
+    // BFloat16 is not a supported source type, so conversion should throw.
+    CHECK_THROWS_AS(ToFloatArray(data, info), armnn::InvalidArgumentException);
+}
+
+TEST_CASE("ToFloatArrayQSymmS8PerAxis")
+{
+    std::vector<float> quantizationScales { 0.1f, 0.2f, 0.3f, 0.4f };
+    unsigned int quantizationDim = 1;
+
+    armnn::TensorInfo info({ 3, 4 }, armnn::DataType::QSymmS8, quantizationScales, quantizationDim);
+    std::vector<uint8_t> data { 100, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220 };
+    float expected[] { 10.0f, 24.0f, -37.8f, -46.4f, -10.6f, -19.2f, -25.8f, -30.4f, -6.6f, -11.2f, -13.8f, -14.4f };
+
+    std::unique_ptr<float[]> result = ToFloatArray(data, info);
+
+    for (unsigned int i = 0; i < info.GetNumElements(); ++i)
+    {
+        CHECK_EQ(result[i], doctest::Approx(expected[i]));
+    }
+}
+
+TEST_CASE("ToFloatArrayQSymmS8")
+{
+    armnn::TensorInfo info({ 3, 4 }, armnn::DataType::QSymmS8, 0.1f);
+    std::vector<uint8_t> data { 100, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220 };
+    float expected[] { 10.0f, 12.0f, -12.6f, -11.6f, -10.6f, -9.6f, -8.6f, -7.6f, -6.6f, -5.6f, -4.6f, -3.6f };
+
+    std::unique_ptr<float[]> result = ToFloatArray(data, info);
+
+    for (unsigned int i = 0; i < info.GetNumElements(); ++i)
+    {
+        CHECK_EQ(result[i], doctest::Approx(expected[i]));
+    }
+}
+
+TEST_CASE("ToFloatArrayQAsymmS8PerAxis")
+{
+    std::vector<float> quantizationScales { 0.1f, 0.2f, 0.3f, 0.4f };
+    unsigned int quantizationDim = 1;
+
+    armnn::TensorInfo info({ 3, 4 }, armnn::DataType::QAsymmS8, quantizationScales, quantizationDim);
+    std::vector<uint8_t> data { 100, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220 };
+    float expected[] { 10.0f, 24.0f, -37.8f, -46.4f, -10.6f, -19.2f, -25.8f, -30.4f, -6.6f, -11.2f, -13.8f, -14.4f };
+
+    std::unique_ptr<float[]> result = ToFloatArray(data, info);
+
+    for (unsigned int i = 0; i < info.GetNumElements(); ++i)
+    {
+        CHECK_EQ(result[i], doctest::Approx(expected[i]));
+    }
+}
+
+TEST_CASE("ToFloatArrayQAsymmS8")
+{
+    armnn::TensorInfo info({ 3, 4 }, armnn::DataType::QAsymmS8, 0.1f);
+    std::vector<uint8_t> data { 100, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220 };
+    float expected[] { 10.0f, 12.0f, -12.6f, -11.6f, -10.6f, -9.6f, -8.6f, -7.6f, -6.6f, -5.6f, -4.6f, -3.6f };
+
+    std::unique_ptr<float[]> result = ToFloatArray(data, info);
+
+    for (unsigned int i = 0; i < info.GetNumElements(); ++i)
+    {
+        CHECK_EQ(result[i], doctest::Approx(expected[i]));
+    }
+}
+
+TEST_CASE("ToFloatArrayQAsymmU8PerAxis")
+{
+    std::vector<float> quantizationScales { 0.1f, 0.2f, 0.3f, 0.4f };
+    unsigned int quantizationDim = 1;
+
+    armnn::TensorInfo info({ 3, 4 }, armnn::DataType::QAsymmU8, quantizationScales, quantizationDim);
+    std::vector<uint8_t> data { 100, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220 };
+    float expected[] { 10.0f, 24.0f, 39.0f, 56.0f, 15.0f, 32.0f, 51.0f, 72.0f, 19.0f, 40.0f, 63.0f, 88.0f };
+
+    std::unique_ptr<float[]> result = ToFloatArray(data, info);
+
+    for (unsigned int i = 0; i < info.GetNumElements(); ++i)
+    {
+        CHECK_EQ(result[i], doctest::Approx(expected[i]));
+    }
+}
+
+TEST_CASE("ToFloatArrayQAsymmU8")
+{
+    armnn::TensorInfo info({ 3, 4 }, armnn::DataType::QAsymmU8, 0.1f);
+    std::vector<uint8_t> data { 100, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220 };
+    float expected[] { 10.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f };
+
+    std::unique_ptr<float[]> result = ToFloatArray(data, info);
+
+    for (unsigned int i = 0; i < info.GetNumElements(); ++i)
+    {
+        CHECK_EQ(result[i], doctest::Approx(expected[i]));
+    }
+}
+
+TEST_CASE("ToFloatArraySigned32PerAxis")
+{
+    std::vector<float> quantizationScales { 0.1f, 0.2f, 0.3f, 0.4f };
+    unsigned int quantizationDim = 1;
+
+    armnn::TensorInfo info({ 3, 4 }, armnn::DataType::Signed32, quantizationScales, quantizationDim);
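+    // Each Signed32 element is encoded as 4 raw little-endian bytes.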
+    std::vector<uint8_t> data { 100, 0, 0, 0, 120, 0, 0, 0, 130, 0, 0, 0, 140, 0, 0, 0, 150, 0, 0, 0, 160, 0, 0, 0,
+                                170, 0, 0, 0, 180, 0, 0, 0, 190, 0, 0, 0, 200, 0, 0, 0, 210, 0, 0, 0, 220, 0, 0, 0 };
+    float expected[] { 10.0f, 24.0f, 39.0f, 56.0f, 15.0f, 32.0f, 51.0f, 72.0f, 19.0f, 40.0f, 63.0f, 88.0f };
+
+    std::unique_ptr<float[]> result = ToFloatArray(data, info);
+
+    for (unsigned int i = 0; i < info.GetNumElements(); ++i)
+    {
+        CHECK_EQ(result[i], doctest::Approx(expected[i]));
+    }
+}
+
+TEST_CASE("ToFloatArraySigned32")
+{
+    armnn::TensorInfo info({ 3, 4 }, armnn::DataType::Signed32, 0.1f);
+    std::vector<uint8_t> data { 100, 0, 0, 0, 120, 0, 0, 0, 130, 0, 0, 0, 140, 0, 0, 0, 150, 0, 0, 0, 160, 0, 0, 0,
+                                170, 0, 0, 0, 180, 0, 0, 0, 190, 0, 0, 0, 200, 0, 0, 0, 210, 0, 0, 0, 220, 0, 0, 0 };
+    float expected[] { 10.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f };
+
+    std::unique_ptr<float[]> result = ToFloatArray(data, info);
+
+    for (unsigned int i = 0; i < info.GetNumElements(); ++i)
+    {
+        CHECK_EQ(result[i], doctest::Approx(expected[i]));
+    }
+}
+
+TEST_CASE("ToFloatArraySigned64PerAxis")
+{
+    std::vector<float> quantizationScales { 0.1f, 0.2f, 0.3f, 0.4f };
+    unsigned int quantizationDim = 1;
+
+    armnn::TensorInfo info({ 3, 4 }, armnn::DataType::Signed64, quantizationScales, quantizationDim);
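+    // Each Signed64 element is encoded as 8 raw little-endian bytes.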
+    std::vector<uint8_t> data { 100, 0, 0, 0, 0, 0, 0, 0, 120, 0, 0, 0, 0, 0, 0, 0, 130, 0, 0, 0, 0, 0, 0, 0,
+                                140, 0, 0, 0, 0, 0, 0, 0, 150, 0, 0, 0, 0, 0, 0, 0, 160, 0, 0, 0, 0, 0, 0, 0,
+                                170, 0, 0, 0, 0, 0, 0, 0, 180, 0, 0, 0, 0, 0, 0, 0, 190, 0, 0, 0, 0, 0, 0, 0,
+                                200, 0, 0, 0, 0, 0, 0, 0, 210, 0, 0, 0, 0, 0, 0, 0, 220, 0, 0, 0, 0, 0, 0, 0 };
+    float expected[] { 10.0f, 24.0f, 39.0f, 56.0f, 15.0f, 32.0f, 51.0f, 72.0f, 19.0f, 40.0f, 63.0f, 88.0f };
+
+    std::unique_ptr<float[]> result = ToFloatArray(data, info);
+
+    for (unsigned int i = 0; i < info.GetNumElements(); ++i)
+    {
+        CHECK_EQ(result[i], doctest::Approx(expected[i]));
+    }
+}
+
+TEST_CASE("ToFloatArraySigned64")
+{
+    armnn::TensorInfo info({ 3, 4 }, armnn::DataType::Signed64, 0.1f);
+    std::vector<uint8_t> data { 100, 0, 0, 0, 0, 0, 0, 0, 120, 0, 0, 0, 0, 0, 0, 0, 130, 0, 0, 0, 0, 0, 0, 0,
+                                140, 0, 0, 0, 0, 0, 0, 0, 150, 0, 0, 0, 0, 0, 0, 0, 160, 0, 0, 0, 0, 0, 0, 0,
+                                170, 0, 0, 0, 0, 0, 0, 0, 180, 0, 0, 0, 0, 0, 0, 0, 190, 0, 0, 0, 0, 0, 0, 0,
+                                200, 0, 0, 0, 0, 0, 0, 0, 210, 0, 0, 0, 0, 0, 0, 0, 220, 0, 0, 0, 0, 0, 0, 0 };
+    float expected[] { 10.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f };
+
+    std::unique_ptr<float[]> result = ToFloatArray(data, info);
+
+    for (unsigned int i = 0; i < info.GetNumElements(); ++i)
+    {
+        CHECK_EQ(result[i], doctest::Approx(expected[i]));
+    }
+}
 }