IVGCVSW-3583 Fix Skipped Batch_To_Space Hal 1.2 Tests

 * Move ConvertBatchToSpaceNd into ConversionUtils.hpp as a shared template
 * Call the shared implementation from the hal_1_1 and hal_1_2 HalPolicies
 * Add BATCH_TO_SPACE_ND to the hal_1_2 operation switch
 * Read the optional data layout operand for 1.2 operands

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I3f2928ba86a9d306a7eb400db3a420e42cf3fa7e
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index 93ee70e..2fab474 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -605,77 +605,7 @@
 bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_1::HalPolicy::ConvertBatchToSpaceNd()");
-
-    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicTensor(outputInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
-
-    const Operand* blockOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
-    if (!blockOperand)
-    {
-        return Fail("%s: Could not read input 1", __func__);
-    }
-
-    // Convert the block operand to int32
-    std::vector<int32_t> block;
-    if (!GetTensorInt32Values<hal_1_1::HalPolicy>(*blockOperand, block, model, data))
-    {
-        return Fail("%s: Input 1 has invalid values", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
-
-    unsigned int rank = inputInfo.GetNumDimensions();
-    if (rank != 4)
-    {
-        Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
-    }
-
-    if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
-    {
-        return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
-                    " greater than or equal to 1", __func__);
-    }
-
-    armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
-    batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
-    batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
-
-    // Setting crops to 0,0 0,0 as it is not supported in Android NN API
-    batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsBatchToSpaceNdSupported,
-                               data.m_Backends,
-                               isSupported,
-                               inputInfo,
-                               outputInfo,
-                               batchToSpaceNdDesc);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
-    assert(layer != nullptr);
-    input.Connect(layer->GetInputSlot(0));
-
-    return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
+    return ::ConvertBatchToSpaceNd<hal_1_1::HalPolicy>(operation, model, data);
 }
 
 } // namespace hal_1_1
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index fe571df..4372c16 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -55,7 +55,6 @@
     }
     switch (static_cast<V1_1::OperationType>(operationType))
     {
-        case V1_1::OperationType::BATCH_TO_SPACE_ND:
         case V1_1::OperationType::DIV:
         case V1_1::OperationType::MEAN:
         case V1_1::OperationType::SPACE_TO_BATCH_ND:
@@ -128,6 +127,8 @@
     {
         case V1_2::OperationType::AVERAGE_POOL_2D:
             return ConvertAveragePool2d(operation, model, data);
+        case V1_2::OperationType::BATCH_TO_SPACE_ND:
+            return ConvertBatchToSpaceNd(operation, model, data);
         case V1_2::OperationType::CONV_2D:
             return ConvertConv2d(operation, model, data);
         case V1_2::OperationType::DEPTHWISE_CONV_2D:
@@ -182,6 +183,12 @@
     return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
 }
 
+bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertBatchToSpaceNd()");
+    return ::ConvertBatchToSpaceNd<hal_1_2::HalPolicy>(operation, model, data);
+}
+
 bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_2::HalPolicy::ConvertConv2d()");
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index 5d6158a..e4719e8 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -33,6 +33,8 @@
 private:
     static bool ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index fc6d365..946bc95 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1800,5 +1800,91 @@
     return Fail("%s: ProcessActivation failed", __func__);
 }
 
+template<typename HalPolicy,
+         typename HalOperation   = typename HalPolicy::Operation,
+         typename HalModel       = typename HalPolicy::Model>
+bool ConvertBatchToSpaceNd(const HalOperation& operation,
+                           const HalModel& model,
+                           ConversionData& data)
+{
+    using HalOperand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
+    if (!blockOperand)
+    {
+        return Fail("%s: Could not read input 1", __func__);
+    }
+
+    // Convert the block operand to int32
+    std::vector<int32_t> block;
+    if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
+    {
+        return Fail("%s: Input 1 has invalid values", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank != 4)
+    {
+        return Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
+    }
+
+    if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
+    {
+        return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
+                    " greater than or equal to 1", __func__);
+    }
+
+    armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
+    batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
+    batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
+
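+    // HAL 1.2 models may supply an optional data layout operand; default to NHWC when it is absent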
+    if (Is12Operand(*output))
+    {
+        batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
+    }
+    // Crops are not exposed by the Android NN API, so set them to zero for both spatial dimensions
+    batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
+
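+    // Query the backends to check that at least one supports this BatchToSpaceNd configuration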
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsBatchToSpaceNdSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               batchToSpaceNdDesc);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
 
 } // namespace armnn_driver