MLCE-328 Serializer/Deserializer does not support Signed64

 * Added support for Signed64 to the FlatBuffers schema & updated source tree
 * Added support for Signed64 to TFLite Delegate
 * Added support for Signed64 to Serializer
 * Added support for Signed64 to Deserializer
 * Added unit test for ArgMinMax to Deserializer
 * Deprecated m_Output_Type from the ArgMinMaxDescriptor: the output type
   is solely determined by the DataType of the output Tensor
 * Fixed issue where RefArgMinMaxWorkload could output data using
   the wrong DataType
 * Added Signed64 to RefLayerSupport::IsArgMinMaxSupported as a supported
   type

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: Ib622c052a1f8aa3e658262f8bde5a6881a8cbe10
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 049a4f1..8ccb270 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -922,6 +922,7 @@
             src/armnnDeserializer/test/DeserializeAbs.cpp
             src/armnnDeserializer/test/DeserializeActivation.cpp
             src/armnnDeserializer/test/DeserializeAdd.cpp
+            src/armnnDeserializer/test/DeserializeArgMinMax.cpp
             src/armnnDeserializer/test/DeserializeBatchToSpaceNd.cpp
             src/armnnDeserializer/test/DeserializeBatchNormalization.cpp
             src/armnnDeserializer/test/DeserializeComparison.cpp
diff --git a/delegate/src/ArgMinMax.hpp b/delegate/src/ArgMinMax.hpp
index 090d18e..54994df 100644
--- a/delegate/src/ArgMinMax.hpp
+++ b/delegate/src/ArgMinMax.hpp
@@ -67,40 +67,26 @@
     {
         desc.m_Function = armnn::ArgMinMaxFunction::Max;
         auto* argMaxParameters = reinterpret_cast<TfLiteArgMaxParams*>(tfLiteNode->builtin_data);
-        switch (argMaxParameters->output_type)
+        if (argMaxParameters->output_type != kTfLiteInt32 && argMaxParameters->output_type != kTfLiteInt64)
         {
-            case kTfLiteInt32:
-                desc.m_Output_Type = armnn::DataType::Signed32;
-                break;
-            case kTfLiteInt64:
-                desc.m_Output_Type = armnn::DataType::Signed64;
-                break;
-            default:
-                TF_LITE_MAYBE_KERNEL_LOG(
-                    tfLiteContext,
-                    "TfLiteArmnnDelegate: output_type data type is not supported in operator #%d node #%d: ",
-                    argMinMaxOperatorCode, nodeIndex);
-                return kTfLiteError;
+            TF_LITE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnDelegate: output_type data type is not supported in operator #%d node #%d: ",
+                argMinMaxOperatorCode, nodeIndex);
+            return kTfLiteError;
         }
     }
     else
     {
         desc.m_Function = armnn::ArgMinMaxFunction::Min;
         auto* argMinParameters = reinterpret_cast<TfLiteArgMinParams*>(tfLiteNode->builtin_data);
-        switch (argMinParameters->output_type)
+        if (argMinParameters->output_type != kTfLiteInt32 && argMinParameters->output_type != kTfLiteInt64)
         {
-            case kTfLiteInt32:
-                desc.m_Output_Type = armnn::DataType::Signed32;
-                break;
-            case kTfLiteInt64:
-                desc.m_Output_Type = armnn::DataType::Signed64;
-                break;
-            default:
-                TF_LITE_MAYBE_KERNEL_LOG(
+            TF_LITE_MAYBE_KERNEL_LOG(
                     tfLiteContext,
                     "TfLiteArmnnDelegate: output_type data type is not supported in operator #%d node #%d: ",
                     argMinMaxOperatorCode, nodeIndex);
-                return kTfLiteError;
+            return kTfLiteError;
         }
     }
 
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 1b5f1e3..1e5782e 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -391,6 +391,8 @@
             return armnn::DataType::QSymmS16;
         case kTfLiteInt32:
             return armnn::DataType::Signed32;
+        case kTfLiteInt64:
+            return armnn::DataType::Signed64;
         default:
             throw armnn::Exception(&"TfLiteArmnnDelegate: Unsupported data type: " [ tfLiteTensor.type]);
     }
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 278c61f..683ef7a 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -70,7 +70,7 @@
     ArgMinMaxFunction m_Function;
     /// Axis to reduce across the input tensor.
     int m_Axis;
-    // Tensor data type and this could be int32 or int64. Default type is int64.
+    /// Deprecated and will be removed in future release.
     armnn::DataType m_Output_Type;
 };
 
diff --git a/src/armnn/ResolveType.hpp b/src/armnn/ResolveType.hpp
index e1bea42..5355091 100644
--- a/src/armnn/ResolveType.hpp
+++ b/src/armnn/ResolveType.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -58,6 +58,12 @@
 };
 
 template<>
+struct ResolveTypeImpl<DataType::Signed64>
+{
+    using Type = int64_t;
+};
+
+template<>
 struct ResolveTypeImpl<DataType::Boolean>
 {
     using Type = uint8_t;
diff --git a/src/armnnCaffeParser/CaffeParser.cpp b/src/armnnCaffeParser/CaffeParser.cpp
index 463f3eb..6a744f7 100644
--- a/src/armnnCaffeParser/CaffeParser.cpp
+++ b/src/armnnCaffeParser/CaffeParser.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "CaffeParser.hpp"
@@ -1418,7 +1418,6 @@
 
     ArgMinMaxDescriptor desc;
     desc.m_Axis = axis;
-    desc.m_Output_Type = armnn::DataType::Signed32;
     desc.m_Function = ArgMinMaxFunction::Max;
 
     armnn::IConnectableLayer* argmaxLayer = m_Network->AddArgMinMaxLayer(desc,
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 9f68e71..89a42b6 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -589,6 +589,9 @@
         case DataType_Signed32:
             type = armnn::DataType::Signed32;
             break;
+        case DataType_Signed64:
+            type = armnn::DataType::Signed64;
+            break;
         case DataType_Float32:
             type = armnn::DataType::Float32;
             break;
diff --git a/src/armnnDeserializer/test/DeserializeArgMinMax.cpp b/src/armnnDeserializer/test/DeserializeArgMinMax.cpp
new file mode 100644
index 0000000..6358b53
--- /dev/null
+++ b/src/armnnDeserializer/test/DeserializeArgMinMax.cpp
@@ -0,0 +1,134 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../Deserializer.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(DeserializeParser)
+
+struct ArgMinMaxFixture : public ParserFlatbuffersSerializeFixture
+{
+    explicit ArgMinMaxFixture(const std::string& inputShape,
+                              const std::string& outputShape,
+                              const std::string& axis,
+                              const std::string& argMinMaxFunction)
+    {
+        m_JsonString = R"(
+        {
+          layers: [
+            {
+              layer_type: "InputLayer",
+              layer: {
+                base: {
+                  base: {
+                    layerName: "InputLayer",
+                    layerType: "Input",
+                    inputSlots: [
+
+                    ],
+                    outputSlots: [
+                      {
+                        tensorInfo: {
+                          dimensions: )" + inputShape + R"(,
+                          dataType: "Float32",
+                          quantizationScale: 0.0
+                        }
+                      }
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              layer_type: "ArgMinMaxLayer",
+              layer: {
+                base: {
+                  index: 1,
+                  layerName: "ArgMinMaxLayer",
+                  layerType: "ArgMinMax",
+                  inputSlots: [
+                    {
+                      connection: {
+                        sourceLayerIndex: 0,
+                        outputSlotIndex: 0
+                      }
+                    }
+                  ],
+                  outputSlots: [
+                    {
+                      tensorInfo: {
+                        dimensions: )" + outputShape + R"(,
+                        dataType: "Signed64",
+                        quantizationScale: 0.0
+                      }
+                    }
+                  ]
+                },
+                descriptor: {
+                  axis: )" + axis + R"(,
+                  argMinMaxFunction: )" + argMinMaxFunction + R"(
+                }
+              }
+            },
+            {
+              layer_type: "OutputLayer",
+              layer: {
+                base: {
+                  base: {
+                    index: 2,
+                    layerName: "OutputLayer",
+                    layerType: "Output",
+                    inputSlots: [
+                      {
+                        connection: {
+                          sourceLayerIndex: 1,
+                          outputSlotIndex: 0
+                        }
+                      }
+                    ],
+                    outputSlots: [
+
+                    ]
+                  }
+                }
+              }
+            }
+          ],
+          inputIds: [
+            0
+          ],
+          outputIds: [
+            0
+          ],
+          featureVersions: {
+            bindingIdsScheme: 1
+          }
+        }
+    )";
+        Setup();
+    }
+};
+
+struct SimpleArgMinMaxFixture : public ArgMinMaxFixture
+{
+    SimpleArgMinMaxFixture() : ArgMinMaxFixture("[ 1, 1, 1, 5 ]",
+                                                "[ 1, 1, 1 ]",
+                                                "-1",
+                                                "Max") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ArgMinMax, SimpleArgMinMaxFixture)
+{
+    RunTest<3, armnn::DataType::Float32, armnn::DataType::Signed64>(
+            0,
+            {{"InputLayer", { 6.0f, 2.0f, 8.0f, 10.0f, 9.0f}}},
+            {{"OutputLayer",{ 3l }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index 88d66f7..438ea83 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -39,7 +39,8 @@
     QAsymmU8 = 6,
     QSymmS16 = 7,
     QAsymmS8 = 8,
-    QSymmS8 = 9
+    QSymmS8 = 9,
+    Signed64 = 10
 }
 
 enum DataLayout : byte {
diff --git a/src/armnnSerializer/ArmnnSchema_generated.h b/src/armnnSerializer/ArmnnSchema_generated.h
index 99ab0dc..2cd88e2 100644
--- a/src/armnnSerializer/ArmnnSchema_generated.h
+++ b/src/armnnSerializer/ArmnnSchema_generated.h
@@ -4,7 +4,6 @@
 //
 // automatically generated by the FlatBuffers compiler, do not modify
 
-
 #ifndef FLATBUFFERS_GENERATED_ARMNNSCHEMA_ARMNNSERIALIZER_H_
 #define FLATBUFFERS_GENERATED_ARMNNSCHEMA_ARMNNSERIALIZER_H_
 
@@ -466,11 +465,12 @@
   DataType_QSymmS16 = 7,
   DataType_QAsymmS8 = 8,
   DataType_QSymmS8 = 9,
+  DataType_Signed64 = 10,
   DataType_MIN = DataType_Float16,
-  DataType_MAX = DataType_QSymmS8
+  DataType_MAX = DataType_Signed64
 };
 
-inline const DataType (&EnumValuesDataType())[10] {
+inline const DataType (&EnumValuesDataType())[11] {
   static const DataType values[] = {
     DataType_Float16,
     DataType_Float32,
@@ -481,13 +481,14 @@
     DataType_QAsymmU8,
     DataType_QSymmS16,
     DataType_QAsymmS8,
-    DataType_QSymmS8
+    DataType_QSymmS8,
+    DataType_Signed64
   };
   return values;
 }
 
 inline const char * const *EnumNamesDataType() {
-  static const char * const names[11] = {
+  static const char * const names[12] = {
     "Float16",
     "Float32",
     "QuantisedAsymm8",
@@ -498,13 +499,14 @@
     "QSymmS16",
     "QAsymmS8",
     "QSymmS8",
+    "Signed64",
     nullptr
   };
   return names;
 }
 
 inline const char *EnumNameDataType(DataType e) {
-  if (flatbuffers::IsOutRange(e, DataType_Float16, DataType_QSymmS8)) return "";
+  if (flatbuffers::IsOutRange(e, DataType_Float16, DataType_Signed64)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesDataType()[index];
 }
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index ae9ddf2..15ae78c 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -1718,6 +1718,15 @@
 
     switch (tensorInfo.GetDataType())
     {
+        case armnn::DataType::Signed64:
+        {
+            auto fbVector = CreateDataVector<int64_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
+            flatbuffers::Offset<serializer::LongData> flatBuffersData = serializer::CreateLongData(
+                    m_flatBufferBuilder,
+                    fbVector);
+            fbPayload = flatBuffersData.o;
+            break;
+        }
         case armnn::DataType::Float32:
         case armnn::DataType::Signed32:
         {
diff --git a/src/armnnSerializer/SerializerUtils.cpp b/src/armnnSerializer/SerializerUtils.cpp
index 929bf92..936fb53 100644
--- a/src/armnnSerializer/SerializerUtils.cpp
+++ b/src/armnnSerializer/SerializerUtils.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -57,6 +57,8 @@
         case armnn::DataType::QSymmS8:
         case armnn::DataType::Boolean:
             return armnnSerializer::ConstTensorData::ConstTensorData_ByteData;
+        case armnn::DataType::Signed64:
+            return armnnSerializer::ConstTensorData::ConstTensorData_LongData;
         default:
             return armnnSerializer::ConstTensorData::ConstTensorData_NONE;
     }
@@ -72,6 +74,8 @@
             return armnnSerializer::DataType::DataType_Float16;
         case armnn::DataType::Signed32:
             return armnnSerializer::DataType::DataType_Signed32;
+        case armnn::DataType::Signed64:
+            return armnnSerializer::DataType::DataType_Signed64;
         case armnn::DataType::QSymmS16:
             return armnnSerializer::DataType::DataType_QSymmS16;
         case armnn::DataType::QAsymmS8:
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index d7c10cb..9d44354 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -76,11 +76,11 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeArgMinMax)
+void SerializeArgMinMaxTest(armnn::DataType dataType)
 {
     const std::string layerName("argminmax");
     const armnn::TensorInfo inputInfo({1, 2, 3}, armnn::DataType::Float32);
-    const armnn::TensorInfo outputInfo({1, 3}, armnn::DataType::Signed32);
+    const armnn::TensorInfo outputInfo({1, 3}, dataType);
 
     armnn::ArgMinMaxDescriptor descriptor;
     descriptor.m_Function = armnn::ArgMinMaxFunction::Max;
@@ -107,6 +107,16 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
+BOOST_AUTO_TEST_CASE(SerializeArgMinMaxSigned32)
+{
+    SerializeArgMinMaxTest(armnn::DataType::Signed32);
+}
+
+BOOST_AUTO_TEST_CASE(SerializeArgMinMaxSigned64)
+{
+    SerializeArgMinMaxTest(armnn::DataType::Signed64);
+}
+
 BOOST_AUTO_TEST_CASE(SerializeBatchNormalization)
 {
     const std::string layerName("batchNormalization");
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 8286007..c4d2942 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -2941,9 +2941,6 @@
 
 void TfLiteParserImpl::ParseArgMax(size_t subgraphIndex, size_t operatorIndex)
 {
-    const auto &operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
-    const auto *options = operatorPtr->builtin_options.AsArgMaxOptions();
-
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
     auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(inputs.size(), 2);
@@ -2961,14 +2958,20 @@
 
     ArgMinMaxDescriptor desc;
     desc.m_Axis = axisBufferPtr->data.data()[0];
-    // If output_type is int32 then set Signed32 else Signed64. Default type is Signed64.
-    desc.m_Output_Type = options->output_type == 3 ? armnn::DataType::Signed32 : armnn::DataType::Signed64;
     desc.m_Function = ArgMinMaxFunction::Max;
 
     // Register a ArgMax layer.
     IConnectableLayer *layer = m_Network->AddArgMinMaxLayer(desc, layerName.c_str());
 
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
+        outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
+    {
+        throw ParseException(
+                fmt::format(
+                        "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
+                                CHECK_LOCATION().AsString()));
+    }
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     // Register input tensor to the layer.
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 992ae71..2e0a8f2 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -179,7 +179,7 @@
 {
     IgnoreUnused(descriptor);
 
-    std::array<DataType, 7> supportedTypes =
+    std::array<DataType, 8> supportedInputTypes =
     {
         DataType::BFloat16,
         DataType::Float16,
@@ -187,14 +187,20 @@
         DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16,
-        DataType::Signed32
+        DataType::Signed32,
+        DataType::Signed64
+    };
+
+    std::array<DataType,2> supportedOutputTypes = {
+        DataType::Signed32,
+        DataType::Signed64
     };
 
     bool supported = true;
 
-    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
                                   "Reference ArgMinMax: input is not a supported type.");
-    supported &= CheckSupportRule(TypeIs(output, DataType::Signed32), reasonIfUnsupported,
+    supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
                                   "Reference ArgMinMax: output type not supported");
 
     return supported;
diff --git a/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp b/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
index b7246d5..bf8649f 100644
--- a/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -29,7 +29,7 @@
 
     const TensorInfo &outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]);
 
-    if (m_Data.m_Parameters.m_Output_Type == armnn::DataType::Signed32) {
+    if (outputTensorInfo.GetDataType() == armnn::DataType::Signed32) {
         int32_t *output = GetOutputTensorData<int32_t>(0, m_Data);
         ArgMinMax(decoder, output, inputTensorInfo, outputTensorInfo, m_Data.m_Parameters.m_Function,
                   m_Data.m_Parameters.m_Axis);