IVGCVSW-7889 Add support for Signed64 data type in Cast layer

* Remove the unused "isDepthwise" parameter name from DecodeTensor overrides, along with the IgnoreUnused call it required
* Improve error messages in the MakeEncoder/MakeDecoder default cases and replace the ARMNN_ASSERT_MSG failures with InvalidArgumentException throws

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I8ce30b5075e1e47d54abc12390265ba8e9ee1405
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 167639a..2be227a 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -847,7 +847,7 @@
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
 {
-    std::array<DataType, 9> supportedInputTypes =
+    std::array<DataType, 10> supportedInputTypes =
         {
                 DataType::Float32,
                 DataType::Float16,
@@ -855,7 +855,8 @@
                 DataType::QAsymmS8,
                 DataType::QAsymmU8,
                 DataType::QSymmS16,
-                DataType::Signed32
+                DataType::Signed32,
+                DataType::Signed64
         };
 
     bool supported = true;
diff --git a/src/backends/reference/test/RefPerAxisIteratorTests.cpp b/src/backends/reference/test/RefPerAxisIteratorTests.cpp
index 92b828e..d437f4d 100644
--- a/src/backends/reference/test/RefPerAxisIteratorTests.cpp
+++ b/src/backends/reference/test/RefPerAxisIteratorTests.cpp
@@ -5,6 +5,8 @@
 
 #include <reference/workloads/Decoders.hpp>
 
+#include <armnn/utility/IgnoreUnused.hpp>
+
 #include <fmt/format.h>
 
 #include <doctest/doctest.h>
diff --git a/src/backends/reference/workloads/ArgMinMax.cpp b/src/backends/reference/workloads/ArgMinMax.cpp
index 3bf2853..abc0512 100644
--- a/src/backends/reference/workloads/ArgMinMax.cpp
+++ b/src/backends/reference/workloads/ArgMinMax.cpp
@@ -7,6 +7,7 @@
 
 #include <armnnUtils/TensorUtils.hpp>
 
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/utility/NumericCast.hpp>
 
 namespace armnn
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index 1665c1f..694c229 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -7,7 +7,6 @@
 
 #include <armnn/TypesUtils.hpp>
 #include <armnn/utility/Assert.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/utility/NumericCast.hpp>
 #include <armnnUtils/FloatingPointConverter.hpp>
 #include <armnnUtils/TensorUtils.hpp>
@@ -45,9 +44,7 @@
 
     virtual IType Get() const = 0;
 
-    virtual std::vector<float>
-    DecodeTensor(const TensorShape &tensorShape,
-                 bool isDepthwise = false) = 0;
+    virtual std::vector<float> DecodeTensor(const TensorShape &tensorShape, bool isDepthwise = false) = 0;
 };
 
 template<typename IType>
@@ -125,11 +122,8 @@
     {
         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
     }
-    std::vector<float> DecodeTensor (const TensorShape& tensorShape,
-                                     const bool isDepthwise) override
+    std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
     {
-        IgnoreUnused(isDepthwise);
-
         const unsigned int size = tensorShape.GetNumElements();
         std::vector<float> decodedTensor;
         decodedTensor.reserve(size);
@@ -162,11 +156,8 @@
     {
         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
     }
-    std::vector<float> DecodeTensor (const TensorShape& tensorShape,
-                                     const bool isDepthwise) override
+    std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
     {
-        IgnoreUnused(isDepthwise);
-
         const unsigned int size = tensorShape.GetNumElements();
         std::vector<float> decodedTensor;
         decodedTensor.reserve(size);
@@ -199,11 +190,8 @@
     {
         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
     }
-    std::vector<float> DecodeTensor (const TensorShape& tensorShape,
-                                     const bool isDepthwise) override
+    std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
     {
-        IgnoreUnused(isDepthwise);
-
         const unsigned int size = tensorShape.GetNumElements();
         std::vector<float> decodedTensor;
         decodedTensor.reserve(size);
@@ -236,11 +224,8 @@
     {
         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
     }
-    std::vector<float> DecodeTensor (const TensorShape& tensorShape,
-                                     const bool isDepthwise) override
+    std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
     {
-        IgnoreUnused(isDepthwise);
-
         const unsigned int size = tensorShape.GetNumElements();
         std::vector<float> decodedTensor;
         decodedTensor.reserve(size);
@@ -275,11 +260,8 @@
         armnnUtils::FloatingPointConverter::ConvertFloat16To32(m_Iterator, 1, &val);
         return val;
     }
-    std::vector<float> DecodeTensor (const TensorShape& tensorShape,
-                                     const bool isDepthwise) override
+    std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool ) override
     {
-        IgnoreUnused(isDepthwise);
-
         const unsigned int size = tensorShape.GetNumElements();
         std::vector<float> decodedTensor;
         decodedTensor.reserve(size);
@@ -311,10 +293,8 @@
     {
         return *m_Iterator;
     }
-    std::vector<float> DecodeTensor (const TensorShape& tensorShape,
-                                     const bool isDepthwise) override
+    std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
     {
-        IgnoreUnused(isDepthwise);
         const unsigned int size = tensorShape.GetNumElements();
         std::vector<float> decodedTensor;
 
@@ -338,11 +318,8 @@
     {
         return static_cast<float>(*m_Iterator) * m_Scale;
     }
-    std::vector<float> DecodeTensor (const TensorShape& tensorShape,
-                                     const bool isDepthwise) override
+    std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
     {
-        IgnoreUnused(isDepthwise);
-
         const unsigned int size = tensorShape.GetNumElements();
         std::vector<float> decodedTensor;
         decodedTensor.reserve(size);
@@ -374,11 +351,8 @@
     {
         return static_cast<float>(*m_Iterator);
     }
-    std::vector<float> DecodeTensor (const TensorShape& tensorShape,
-                                     const bool isDepthwise) override
+    std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
     {
-        IgnoreUnused(isDepthwise);
-
         const unsigned int size = tensorShape.GetNumElements();
         std::vector<float> decodedTensor;
         decodedTensor.reserve(size);
@@ -406,11 +380,37 @@
     {
         return *m_Iterator;
     }
-    std::vector<float> DecodeTensor (const TensorShape& tensorShape,
-                                     const bool isDepthwise) override
+    std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
     {
-        IgnoreUnused(isDepthwise);
+        const unsigned int size = tensorShape.GetNumElements();
+        std::vector<float> decodedTensor;
+        decodedTensor.reserve(size);
 
+        for (uint32_t i = 0; i < size; ++i)
+        {
+            this->operator[](i);
+            decodedTensor.emplace_back(static_cast<float>(*m_Iterator));
+        }
+
+        return decodedTensor;
+    }
+};
+
+class Int64Decoder : public TypedIterator<const int64_t, Decoder<double_t>>
+{
+public:
+    Int64Decoder(const int64_t* data)
+            : TypedIterator(data) {}
+
+    Int64Decoder()
+            : Int64Decoder(nullptr) {}
+
+    double_t Get() const override
+    {
+        return static_cast<double_t>(*m_Iterator);
+    }
+    std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
+    {
         const unsigned int size = tensorShape.GetNumElements();
         std::vector<float> decodedTensor;
         decodedTensor.reserve(size);
@@ -438,11 +438,8 @@
     {
         return *m_Iterator;
     }
-    std::vector<float> DecodeTensor (const TensorShape& tensorShape,
-                                     const bool isDepthwise) override
+    std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
     {
-        IgnoreUnused(isDepthwise);
-
         const unsigned int size = tensorShape.GetNumElements();
         std::vector<float> decodedTensor;
         decodedTensor.reserve(size);
@@ -471,11 +468,8 @@
         return *m_Iterator;
     }
 
-    std::vector<float> DecodeTensor(const TensorShape& tensorShape,
-                                    const bool isDepthwise) override
+    std::vector<float> DecodeTensor(const TensorShape& tensorShape, const bool) override
     {
-        IgnoreUnused(isDepthwise);
-
         const unsigned int size = tensorShape.GetNumElements();
         std::vector<float> decodedTensor;
         decodedTensor.reserve(size);
@@ -668,6 +662,26 @@
     }
 };
 
+class Int64Encoder : public TypedIterator<int64_t, Encoder<double>>
+{
+public:
+    Int64Encoder(int64_t* data)
+            : TypedIterator(data) {}
+
+    Int64Encoder()
+            : Int64Encoder(nullptr) {}
+
+    void Set(double right) override
+    {
+        *m_Iterator = static_cast<int64_t>(right);
+    }
+
+    double_t Get() const override
+    {
+        return static_cast<double>(*m_Iterator);
+    }
+};
+
 class BooleanEncoder : public TypedIterator<uint8_t, Encoder<bool>>
 {
 public:
@@ -797,11 +811,8 @@
         return m_Scales[m_AxisIndex];
     }
 
-    std::vector<float> DecodeTensor(const TensorShape &tensorShape,
-                                    bool isDepthwise) override
+    std::vector<float> DecodeTensor(const TensorShape &tensorShape, const bool) override
     {
-        IgnoreUnused(isDepthwise);
-
         const unsigned int size = tensorShape.GetNumElements();
         std::vector<float> decodedTensor;
         decodedTensor.reserve(size);
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index 54e7008..3bf3db7 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -10,8 +10,6 @@
 #include <armnnUtils/FloatingPointConverter.hpp>
 #include <armnnUtils/TensorUtils.hpp>
 
-#include <armnn/utility/Assert.hpp>
-
 namespace armnn
 {
 
@@ -121,7 +119,25 @@
         }
         default:
         {
-            ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
+            throw InvalidArgumentException("Unsupported target Data Type!");
+            break;
+        }
+    }
+    return nullptr;
+}
+
+template<>
+inline std::unique_ptr<Decoder<double_t>> MakeDecoder(const TensorInfo& info, const void* data)
+{
+    switch(info.GetDataType())
+    {
+        case DataType::Signed64:
+        {
+            return std::make_unique<Int64Decoder>(static_cast<const int64_t*>(data));
+        }
+        default:
+        {
+            throw InvalidArgumentException("Cannot decode to double. Unsupported origin Data Type!");
             break;
         }
     }
@@ -139,7 +155,7 @@
         }
         default:
         {
-            ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
+            throw InvalidArgumentException("Cannot decode to bool. Unsupported origin Data Type!");
             break;
         }
     }
@@ -157,7 +173,7 @@
         }
         default:
         {
-            ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
+            throw InvalidArgumentException("Cannot decode to int32. Unsupported origin Data Type!");
             break;
         }
     }
diff --git a/src/backends/reference/workloads/DetectionPostProcess.cpp b/src/backends/reference/workloads/DetectionPostProcess.cpp
index 2108efe..c5ab327 100644
--- a/src/backends/reference/workloads/DetectionPostProcess.cpp
+++ b/src/backends/reference/workloads/DetectionPostProcess.cpp
@@ -6,6 +6,7 @@
 #include "DetectionPostProcess.hpp"
 
 #include <armnn/utility/Assert.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/utility/NumericCast.hpp>
 
 #include <algorithm>
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index 8a70237..5de3615 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -9,8 +9,6 @@
 
 #include <armnnUtils/TensorUtils.hpp>
 
-#include <armnn/utility/Assert.hpp>
-
 namespace armnn
 {
 
@@ -89,7 +87,25 @@
         }
         default:
         {
-            ARMNN_ASSERT_MSG(false, "Unsupported target Data Type!");
+            throw InvalidArgumentException("Unsupported target Data Type!");
+            break;
+        }
+    }
+    return nullptr;
+}
+
+template<>
+inline std::unique_ptr<Encoder<double_t>> MakeEncoder(const TensorInfo& info, void* data)
+{
+    switch(info.GetDataType())
+    {
+        case armnn::DataType::Signed64:
+        {
+            return std::make_unique<Int64Encoder>(static_cast<int64_t*>(data));
+        }
+        default:
+        {
+            throw InvalidArgumentException("Cannot encode from double. Unsupported target Data Type!");
             break;
         }
     }
@@ -107,7 +123,7 @@
         }
         default:
         {
-            ARMNN_ASSERT_MSG(false, "Cannot encode from boolean. Not supported target Data Type!");
+            throw InvalidArgumentException("Cannot encode from boolean. Unsupported target Data Type!");
             break;
         }
     }
@@ -125,7 +141,7 @@
         }
         default:
         {
-            ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
+            throw InvalidArgumentException("Cannot encode from int32. Unsupported Data Type!");
             break;
         }
     }