IVGCVSW-1806: Refactor Android-NN-Driver ModelToINetworkConverter

* Moved conversion logic into new V1_0 and V1_1 HalPolicy classes
* Extracted common helper functions into ConversionUtils class

Change-Id: I1ab50edc266dd528c0cb22a5cd1aa65e103674d9
diff --git a/1.1/ArmnnDriver.hpp b/1.1/ArmnnDriver.hpp
index 3824805..ef8bca8 100644
--- a/1.1/ArmnnDriver.hpp
+++ b/1.1/ArmnnDriver.hpp
@@ -9,114 +9,109 @@
 
 #include "ArmnnDevice.hpp"
 #include "ArmnnDriverImpl.hpp"
+#include "HalPolicy.hpp"
+
 #include "../ArmnnDriverImpl.hpp"
 #include "../1.0/ArmnnDriverImpl.hpp"
+#include "../1.0/HalPolicy.hpp"
 
 #include <log/log.h>
 
 namespace armnn_driver
 {
-namespace V1_1
+namespace hal_1_1
 {
 
-class ArmnnDriver : public ArmnnDevice, public ::android::hardware::neuralnetworks::V1_1::IDevice
+class ArmnnDriver : public ArmnnDevice, public V1_1::IDevice
 {
 public:
     ArmnnDriver(DriverOptions options)
         : ArmnnDevice(std::move(options))
     {
-        ALOGV("V1_1::ArmnnDriver::ArmnnDriver()");
+        ALOGV("hal_1_1::ArmnnDriver::ArmnnDriver()");
     }
     ~ArmnnDriver() {}
 
 public:
-    Return<void> getCapabilities(
-            ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb cb) override
+    Return<void> getCapabilities(V1_0::IDevice::getCapabilities_cb cb) override
     {
-        ALOGV("V1_1::ArmnnDriver::getCapabilities()");
+        ALOGV("hal_1_1::ArmnnDriver::getCapabilities()");
 
-        return V1_0::ArmnnDriverImpl::getCapabilities(m_Runtime,
-                                                      cb);
+        return hal_1_0::ArmnnDriverImpl::getCapabilities(m_Runtime, cb);
     }
 
-    Return<void> getSupportedOperations(
-            const ::android::hardware::neuralnetworks::V1_0::Model& model,
-            ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb cb) override
+    Return<void> getSupportedOperations(const V1_0::Model& model,
+                                        V1_0::IDevice::getSupportedOperations_cb cb) override
     {
-        ALOGV("V1_1::ArmnnDriver::getSupportedOperations()");
+        ALOGV("hal_1_1::ArmnnDriver::getSupportedOperations()");
 
-        return armnn_driver::ArmnnDriverImpl<HalVersion_1_0>::getSupportedOperations(m_Runtime,
-                                                                                     m_Options,
-                                                                                     model,
-                                                                                     cb);
+        return armnn_driver::ArmnnDriverImpl<hal_1_0::HalPolicy>::getSupportedOperations(m_Runtime,
+                                                                                         m_Options,
+                                                                                         model,
+                                                                                         cb);
     }
 
-    Return<ErrorStatus> prepareModel(
-            const ::android::hardware::neuralnetworks::V1_0::Model& model,
-            const android::sp<IPreparedModelCallback>& cb) override
+    Return<ErrorStatus> prepareModel(const V1_0::Model& model,
+                                     const android::sp<IPreparedModelCallback>& cb) override
     {
-        ALOGV("V1_1::ArmnnDriver::prepareModel()");
+        ALOGV("hal_1_1::ArmnnDriver::prepareModel()");
 
-        return armnn_driver::ArmnnDriverImpl<HalVersion_1_0>::prepareModel(m_Runtime,
-                                                                           m_ClTunedParameters,
-                                                                           m_Options,
-                                                                           model,
-                                                                           cb);
+        return armnn_driver::ArmnnDriverImpl<hal_1_0::HalPolicy>::prepareModel(m_Runtime,
+                                                                               m_ClTunedParameters,
+                                                                               m_Options,
+                                                                               model,
+                                                                               cb);
     }
 
-    Return<void> getCapabilities_1_1(
-            ::android::hardware::neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb cb) override
+    Return<void> getCapabilities_1_1(V1_1::IDevice::getCapabilities_1_1_cb cb) override
     {
-        ALOGV("V1_1::ArmnnDriver::getCapabilities_1_1()");
+        ALOGV("hal_1_1::ArmnnDriver::getCapabilities_1_1()");
 
-        return V1_1::ArmnnDriverImpl::getCapabilities_1_1(m_Runtime,
-                                                          cb);
+        return hal_1_1::ArmnnDriverImpl::getCapabilities_1_1(m_Runtime, cb);
     }
 
-    Return<void> getSupportedOperations_1_1(
-            const ::android::hardware::neuralnetworks::V1_1::Model& model,
-            ::android::hardware::neuralnetworks::V1_1::IDevice::getSupportedOperations_1_1_cb cb) override
+    Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
+                                            V1_1::IDevice::getSupportedOperations_1_1_cb cb) override
     {
-        ALOGV("V1_1::ArmnnDriver::getSupportedOperations_1_1()");
+        ALOGV("hal_1_1::ArmnnDriver::getSupportedOperations_1_1()");
 
-        return armnn_driver::ArmnnDriverImpl<HalVersion_1_1>::getSupportedOperations(m_Runtime,
-                                                                                     m_Options,
-                                                                                     model,
-                                                                                     cb);
+        return armnn_driver::ArmnnDriverImpl<hal_1_1::HalPolicy>::getSupportedOperations(m_Runtime,
+                                                                                         m_Options,
+                                                                                         model,
+                                                                                         cb);
     }
 
-    Return<ErrorStatus> prepareModel_1_1(
-            const ::android::hardware::neuralnetworks::V1_1::Model& model,
-            ::android::hardware::neuralnetworks::V1_1::ExecutionPreference preference,
-            const android::sp<IPreparedModelCallback>& cb) override
+    Return<ErrorStatus> prepareModel_1_1(const V1_1::Model& model,
+                                         V1_1::ExecutionPreference preference,
+                                         const android::sp<IPreparedModelCallback>& cb) override
     {
-        ALOGV("V1_1::ArmnnDriver::prepareModel_1_1()");
+        ALOGV("hal_1_1::ArmnnDriver::prepareModel_1_1()");
 
         if (!(preference == ExecutionPreference::LOW_POWER ||
               preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
               preference == ExecutionPreference::SUSTAINED_SPEED))
         {
-            ALOGV("V1_1::ArmnnDriver::prepareModel_1_1: Invalid execution preference");
+            ALOGV("hal_1_1::ArmnnDriver::prepareModel_1_1: Invalid execution preference");
             cb->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
             return ErrorStatus::INVALID_ARGUMENT;
         }
 
-        return armnn_driver::ArmnnDriverImpl<HalVersion_1_1>::prepareModel(m_Runtime,
-                                                                           m_ClTunedParameters,
-                                                                           m_Options,
-                                                                           model,
-                                                                           cb,
-                                                                           model.relaxComputationFloat32toFloat16
-                                                                           && m_Options.GetFp16Enabled());
+        return armnn_driver::ArmnnDriverImpl<hal_1_1::HalPolicy>::prepareModel(m_Runtime,
+                                                                               m_ClTunedParameters,
+                                                                               m_Options,
+                                                                               model,
+                                                                               cb,
+                                                                               model.relaxComputationFloat32toFloat16
+                                                                               && m_Options.GetFp16Enabled());
     }
 
     Return<DeviceStatus> getStatus() override
     {
-        ALOGV("V1_1::ArmnnDriver::getStatus()");
+        ALOGV("hal_1_1::ArmnnDriver::getStatus()");
 
-        return armnn_driver::ArmnnDriverImpl<HalVersion_1_1>::getStatus();
+        return armnn_driver::ArmnnDriverImpl<hal_1_1::HalPolicy>::getStatus();
     }
 };
 
-} // armnn_driver::namespace V1_1
-} // namespace armnn_driver
+} // namespace hal_1_1
+} // namespace armnn_driver
\ No newline at end of file
diff --git a/1.1/ArmnnDriverImpl.cpp b/1.1/ArmnnDriverImpl.cpp
index 0a68953..d8939a0 100644
--- a/1.1/ArmnnDriverImpl.cpp
+++ b/1.1/ArmnnDriverImpl.cpp
@@ -8,34 +8,28 @@
 
 #include <log/log.h>
 
-using namespace std;
-using namespace android;
-using namespace android::nn;
-using namespace android::hardware;
-
 namespace
 {
 
-const char *g_Float32PerformanceExecTimeName = "ArmNN.float32Performance.execTime";
-const char *g_Float32PerformancePowerUsageName = "ArmNN.float32Performance.powerUsage";
-const char *g_Quantized8PerformanceExecTimeName = "ArmNN.quantized8Performance.execTime";
-const char *g_Quantized8PerformancePowerUsageName = "ArmNN.quantized8Performance.powerUsage";
+const char *g_Float32PerformanceExecTimeName             = "ArmNN.float32Performance.execTime";
+const char *g_Float32PerformancePowerUsageName           = "ArmNN.float32Performance.powerUsage";
+const char *g_Quantized8PerformanceExecTimeName          = "ArmNN.quantized8Performance.execTime";
+const char *g_Quantized8PerformancePowerUsageName        = "ArmNN.quantized8Performance.powerUsage";
 const char *g_RelaxedFloat32toFloat16PerformanceExecTime = "ArmNN.relaxedFloat32toFloat16Performance.execTime";
 
 } // anonymous namespace
 
 namespace armnn_driver
 {
-namespace V1_1
+namespace hal_1_1
 {
 
-Return<void> ArmnnDriverImpl::getCapabilities_1_1(
-        const armnn::IRuntimePtr& runtime,
-        neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb cb)
+Return<void> ArmnnDriverImpl::getCapabilities_1_1(const armnn::IRuntimePtr& runtime,
+                                                  V1_1::IDevice::getCapabilities_1_1_cb cb)
 {
-    ALOGV("V1_1::ArmnnDriverImpl::getCapabilities()");
+    ALOGV("hal_1_1::ArmnnDriverImpl::getCapabilities()");
 
-    neuralnetworks::V1_1::Capabilities capabilities;
+    V1_1::Capabilities capabilities;
     if (runtime)
     {
         capabilities.float32Performance.execTime =
@@ -57,10 +51,10 @@
     }
     else
     {
-        capabilities.float32Performance.execTime = 0;
-        capabilities.float32Performance.powerUsage = 0;
-        capabilities.quantized8Performance.execTime = 0;
-        capabilities.quantized8Performance.powerUsage = 0;
+        capabilities.float32Performance.execTime                 = 0;
+        capabilities.float32Performance.powerUsage               = 0;
+        capabilities.quantized8Performance.execTime              = 0;
+        capabilities.quantized8Performance.powerUsage            = 0;
         capabilities.relaxedFloat32toFloat16Performance.execTime = 0;
 
         cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
@@ -69,5 +63,5 @@
     return Void();
 }
 
-} // namespace armnn_driver::V1_1
-} // namespace armnn_driver
+} // namespace hal_1_1
+} // namespace armnn_driver
\ No newline at end of file
diff --git a/1.1/ArmnnDriverImpl.hpp b/1.1/ArmnnDriverImpl.hpp
index bdb2585..4308bac 100644
--- a/1.1/ArmnnDriverImpl.hpp
+++ b/1.1/ArmnnDriverImpl.hpp
@@ -13,16 +13,15 @@
 
 namespace armnn_driver
 {
-namespace V1_1
+namespace hal_1_1
 {
 
 class ArmnnDriverImpl
 {
 public:
-    static Return<void> getCapabilities_1_1(
-            const armnn::IRuntimePtr& runtime,
-            ::android::hardware::neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb cb);
+    static Return<void> getCapabilities_1_1(const armnn::IRuntimePtr& runtime,
+                                            V1_1::IDevice::getCapabilities_1_1_cb cb);
 };
 
-} // namespace armnn_driver::V1_1
+} // namespace hal_1_1
 } // namespace armnn_driver
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
new file mode 100644
index 0000000..0e66943
--- /dev/null
+++ b/1.1/HalPolicy.cpp
@@ -0,0 +1,89 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "HalPolicy.hpp"
+
+#include "../1.0/HalPolicy.hpp"
+
+namespace armnn_driver
+{
+namespace hal_1_1
+{
+
+bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
+{
+    if (compliantWithV1_0(operation))
+    {
+        hal_1_0::HalPolicy::Operation v10Operation = convertToV1_0(operation);
+        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
+
+        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
+    }
+    else
+    {
+        switch (operation.type)
+        {
+            case V1_1::OperationType::DIV:
+                return ConvertDiv(operation, model, data);
+            default:
+                return Fail("%s: Operation type %s not supported in ArmnnDriver",
+                            __func__, toString(operation.type).c_str());
+        }
+    }
+}
+
+bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The fused activation function is provided (optionally) at input index 2;
+    // absence of the operand must not be treated as an error
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return false;
+    }
+
+    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
+
+    if (!IsLayerSupported(__func__,
+                          armnn::IsDivisionSupported,
+                          data.m_Compute,
+                          input0.GetTensorInfo(),
+                          input1.GetTensorInfo(),
+                          outInfo))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
+    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
+
+    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
+    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
+
+    if (endLayer)
+    {
+        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
+        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
+    }
+
+    return Fail("%s: ProcessActivation failed", __func__);
+}
+
+} // namespace hal_1_1
+} // namespace armnn_driver
\ No newline at end of file
diff --git a/1.1/HalPolicy.hpp b/1.1/HalPolicy.hpp
new file mode 100644
index 0000000..3722d49
--- /dev/null
+++ b/1.1/HalPolicy.hpp
@@ -0,0 +1,31 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "ConversionUtils.hpp"
+
+#include <HalInterfaces.h>
+
+namespace armnn_driver
+{
+namespace hal_1_1
+{
+
+class HalPolicy
+{
+public:
+    using Model                     = V1_1::Model;
+    using Operation                 = V1_1::Operation;
+    using getSupportedOperations_cb = V1_1::IDevice::getSupportedOperations_1_1_cb;
+
+    static bool ConvertOperation(const Operation& operation, const Model& model, ConversionData& data);
+
+private:
+    static bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data);
+};
+
+} // namespace hal_1_1
+} // namespace armnn_driver