IVGCVSW-3697 Add Support for ANEURALNETWORKS_ARG[MAX|MIN] to HAL 1.2 Driver

!armnn:2313

Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Change-Id: I543136e4e2ef9aece1378d2642064cc585246645
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index c8e2968..e6f8acb 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -88,6 +88,10 @@
             return ConvertAbs(operation, model, data);
         case V1_2::OperationType::ADD:
             return ConvertAdd(operation, model, data);
+        case V1_2::OperationType::ARGMAX:
+            return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Max);
+        case V1_2::OperationType::ARGMIN:
+            return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Min);
         case V1_2::OperationType::AVERAGE_POOL_2D:
             return ConvertAveragePool2d(operation, model, data);
         case V1_2::OperationType::BATCH_TO_SPACE_ND:
@@ -210,6 +214,15 @@
     return ::ConvertAdd<hal_1_2::HalPolicy>(operation, model, data);
 }
 
+bool HalPolicy::ConvertArgMinMax(const V1_2::Operation& operation,
+                                 const V1_2::Model& model,
+                                 ConversionData& data,
+                                 armnn::ArgMinMaxFunction argMinMaxFunction)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertArgMinMax()");
+    return ::ConvertArgMinMax<hal_1_2::HalPolicy>(operation, model, data, argMinMaxFunction);
+}
+
 bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_2::HalPolicy::ConvertAveragePool2d()");
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index d611102..1f0b54d 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -35,6 +35,11 @@
 
     static bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertArgMinMax(const Operation& operation,
+                                 const Model& model,
+                                 ConversionData& data,
+                                 armnn::ArgMinMaxFunction argMinMaxFunction);
+
     static bool ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 0637c2b..a284a50 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1564,6 +1564,82 @@
 template<typename HalPolicy,
          typename Operation = typename HalPolicy::Operation,
          typename Model     = typename HalPolicy::Model>
+bool ConvertArgMinMax(const Operation& operation,
+                      const Model& model,
+                      ConversionData& data,
+                      armnn::ArgMinMaxFunction argMinMaxFunction)
+{
+    ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
+
+    using HalOperand = typename HalPolicy::Operand;
+    using HalOperandType = typename HalPolicy::OperandType;
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+
+    if (!input0.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    int32_t axis;
+    if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
+    int rank = static_cast<int>(inputInfo.GetNumDimensions());
+
+    if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
+    {
+        // A square bracket denotes an inclusive bound, a parenthesis an exclusive one
+        // E.g. a rank 4 tensor can have an axis in the half-open range [-4, 4)
+        // where negative values wrap: -1 == 3, -2 == 2, -3 == 1, -4 == 0
+        return Fail("%s: Axis must be in range [-n, n)", __func__);
+    }
+
+    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    armnn::ArgMinMaxDescriptor descriptor;
+    descriptor.m_Function = argMinMaxFunction;
+    descriptor.m_Axis     = axis;
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsArgMinMaxSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo0,
+                               outputInfo,
+                               descriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
+    assert(layer != nullptr);
+
+    input0.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
 bool ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
 {
     using HalOperand = typename HalPolicy::Operand;