IVGCVSW-7722 Add ArmNNSettings to Opaque Delegate

* Fix option-reading order so the backend option is read first, regardless of the order in which options are given

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ia87b5920c7cd79b3e66bb6e5779e2355b21a7ec6
diff --git a/delegate/classic/src/test/ArmnnClassicDelegateTest.cpp b/delegate/classic/src/test/ArmnnClassicDelegateTest.cpp
index e83f47f..bdde301 100644
--- a/delegate/classic/src/test/ArmnnClassicDelegateTest.cpp
+++ b/delegate/classic/src/test/ArmnnClassicDelegateTest.cpp
@@ -18,7 +18,7 @@
 TEST_SUITE("ArmnnDelegate")
 {
 
-TEST_CASE ("ArmnnDelegate Registered")
+TEST_CASE ("ArmnnDelegate_Registered")
 {
     using namespace tflite;
     auto tfLiteInterpreter = std::make_unique<Interpreter>();
@@ -60,7 +60,7 @@
     CHECK(tfLiteInterpreter != nullptr);
 }
 
-TEST_CASE ("ArmnnDelegateOptimizerOptionsRegistered")
+TEST_CASE ("ArmnnDelegate_OptimizerOptionsRegistered")
 {
     using namespace tflite;
     auto tfLiteInterpreter = std::make_unique<Interpreter>();
diff --git a/delegate/common/src/DelegateOptions.cpp b/delegate/common/src/DelegateOptions.cpp
index 4596159..3b83997 100644
--- a/delegate/common/src/DelegateOptions.cpp
+++ b/delegate/common/src/DelegateOptions.cpp
@@ -146,38 +146,47 @@
     bool internalProfilingState = false;
     armnn::ProfilingDetailsMethod internalProfilingDetail = armnn::ProfilingDetailsMethod::DetailsWithEvents;
 
+    // Process backends
     bool GpuAccFound = false;
     bool CpuAccFound = false;
-
     for (size_t i = 0; i < num_options; ++i)
     {
-        // Process backends
         if (std::string(options_keys[i]) == std::string("backends"))
         {
             // The backend option is a comma separated string of backendIDs that needs to be split
             std::vector<armnn::BackendId> backends;
-            char* dup = strdup(options_values[i]);
-            char* pch = std::strtok(dup, ",");
+            char *dup = strdup(options_values[i]);
+            char *pch = std::strtok(dup, ",");
             while (pch != NULL)
             {
                 backends.push_back(pch);
-                pch = strtok (NULL, ",");
+                pch = strtok(NULL, ",");
             }
             SetBackends(backends);
             GpuAccFound = std::count(GetBackends().begin(), GetBackends().end(), "GpuAcc");
             CpuAccFound = std::count(GetBackends().begin(), GetBackends().end(), "CpuAcc");
+            break;
         }
-            // Process dynamic-backends-path
+    }
+
+    // Rest of options after knowing the backend
+    for (size_t i = 0; i < num_options; ++i)
+    {
+        if (std::string(options_keys[i]) == std::string("backends"))
+        {
+            continue;
+        }
+        // Process dynamic-backends-path
         else if (std::string(options_keys[i]) == std::string("dynamic-backends-path"))
         {
             runtimeOptions.m_DynamicBackendsPath = std::string(options_values[i]);
         }
-            // Process logging level
+        // Process logging level
         else if (std::string(options_keys[i]) == std::string("logging-severity"))
         {
             SetLoggingSeverity(options_values[i]);
         }
-            // Process GPU backend options
+        // Process GPU backend options
         else if (std::string(options_keys[i]) == std::string("gpu-tuning-level"))
         {
             if (GpuAccFound)
@@ -266,7 +275,7 @@
                 "WARNING: CachedNetworkFilePath is enabled, but no backends that accept this option are set.";
             }
         }
-            // Process GPU & CPU backend options
+        // Process GPU & CPU backend options
         else if (std::string(options_keys[i]) == std::string("enable-fast-math"))
         {
             if (GpuAccFound)
@@ -287,7 +296,7 @@
                 "WARNING: Fastmath is enabled, but no backends that accept this option are set.";
             }
         }
-            // Process CPU backend options
+        // Process CPU backend options
         else if (std::string(options_keys[i]) == std::string("number-of-threads"))
         {
             if (CpuAccFound)
@@ -303,17 +312,17 @@
                 "WARNING: NumberOfThreads is enabled, but no backends that accept this option are set.";
             }
         }
-            // Process reduce-fp32-to-fp16 option
+        // Process reduce-fp32-to-fp16 option
         else if (std::string(options_keys[i]) == std::string("reduce-fp32-to-fp16"))
         {
             optimizerOptions.SetReduceFp32ToFp16(armnn::stringUtils::StringToBool(options_values[i]));
         }
-            // Process debug-data
+        // Process debug-data
         else if (std::string(options_keys[i]) == std::string("debug-data"))
         {
             optimizerOptions.SetDebugEnabled(armnn::stringUtils::StringToBool(options_values[i]));
         }
-            // Infer output-shape
+        // Infer output-shape
         else if (std::string(options_keys[i]) == std::string("infer-output-shape"))
         {
             if (armnn::stringUtils::StringToBool(options_values[i]))
@@ -325,23 +334,23 @@
                 optimizerOptions.SetShapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly);
             }
         }
-            // Allow expanded dims
+        // Allow expanded dims
         else if (std::string(options_keys[i]) == std::string("allow-expanded-dims"))
         {
             optimizerOptions.SetAllowExpandedDims(armnn::stringUtils::StringToBool(options_values[i]));
         }
-            // Process memory-import
+        // Process memory-import
         else if (std::string(options_keys[i]) == std::string("memory-import"))
         {
             optimizerOptions.SetImportEnabled(armnn::stringUtils::StringToBool(options_values[i]));
         }
-            // Process enable-internal-profiling
+        // Process enable-internal-profiling
         else if (std::string(options_keys[i]) == std::string("enable-internal-profiling"))
         {
             internalProfilingState = *options_values[i] != '0';
             optimizerOptions.SetProfilingEnabled(internalProfilingState);
         }
-            // Process internal-profiling-detail
+        // Process internal-profiling-detail
         else if (std::string(options_keys[i]) == std::string("internal-profiling-detail"))
         {
             uint32_t detailLevel = static_cast<uint32_t>(std::stoul(options_values[i]));
@@ -358,7 +367,7 @@
                     break;
             }
         }
-            // Process enable-external-profiling
+        // Process enable-external-profiling
         else if (std::string(options_keys[i]) == std::string("enable-external-profiling"))
         {
             runtimeOptions.m_ProfilingOptions.m_EnableProfiling = armnn::stringUtils::StringToBool(options_values[i]);
@@ -398,7 +407,6 @@
         {
             SetSerializeToDot(options_values[i]);
         }
-
         // Process disable-tflite-runtime-fallback
         else if (std::string(options_keys[i]) == std::string("disable-tflite-runtime-fallback"))
         {
diff --git a/delegate/opaque/include/armnn_delegate.hpp b/delegate/opaque/include/armnn_delegate.hpp
index b07d96f..ae85556 100644
--- a/delegate/opaque/include/armnn_delegate.hpp
+++ b/delegate/opaque/include/armnn_delegate.hpp
@@ -36,7 +36,7 @@
 /// Forward declaration for functions initializing the ArmNN Delegate
 ::armnnDelegate::DelegateOptions TfLiteArmnnDelegateOptionsDefault();
 
-TfLiteOpaqueDelegate* TfLiteArmnnOpaqueDelegateCreate(const void* settings);
+TfLiteOpaqueDelegate* TfLiteArmnnOpaqueDelegateCreate(armnnDelegate::DelegateOptions options);
 
 void TfLiteArmnnOpaqueDelegateDelete(TfLiteOpaqueDelegate* tfLiteDelegate);
 
@@ -96,16 +96,15 @@
 class ArmnnDelegatePlugin : public DelegatePluginInterface
 {
 public:
-    static std::unique_ptr<ArmnnDelegatePlugin> New(const tflite::TFLiteSettings& tflite_settings)
+    static std::unique_ptr<ArmnnDelegatePlugin> New(const tflite::TFLiteSettings& tfliteSettings)
     {
-        return std::make_unique<ArmnnDelegatePlugin>(tflite_settings);
+        return std::make_unique<ArmnnDelegatePlugin>(tfliteSettings);
     }
 
     tflite::delegates::TfLiteDelegatePtr Create() override
     {
-        // Use default settings until options have been enabled.
-        return tflite::delegates::TfLiteDelegatePtr(
-            TfLiteArmnnOpaqueDelegateCreate(nullptr), TfLiteArmnnOpaqueDelegateDelete);
+        return tflite::delegates::TfLiteDelegatePtr(TfLiteArmnnOpaqueDelegateCreate(m_delegateOptions),
+                                                    TfLiteArmnnOpaqueDelegateDelete);
     }
 
     int GetDelegateErrno(TfLiteOpaqueDelegate* from_delegate) override
@@ -114,9 +113,11 @@
     }
 
     explicit ArmnnDelegatePlugin(const tflite::TFLiteSettings& tfliteSettings)
-    {
-        // Use default settings until options have been enabled.
-    }
+            : m_delegateOptions(ParseArmNNSettings(&tfliteSettings))
+    {}
+
+private:
+    armnnDelegate::DelegateOptions m_delegateOptions;
 };
 
 /// ArmnnSubgraph class where parsing the nodes to ArmNN format and creating the ArmNN Graph
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index 8e3597d..129bc43 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -258,12 +258,8 @@
     return status;
 }
 
-TfLiteOpaqueDelegate* TfLiteArmnnOpaqueDelegateCreate(const void* settings)
+TfLiteOpaqueDelegate* TfLiteArmnnOpaqueDelegateCreate(armnnDelegate::DelegateOptions options)
 {
-    // This method will always create Opaque Delegate with default settings until
-    // we have a DelegateOptions Constructor which can parse the void* settings
-    armnn::IgnoreUnused(settings);
-    auto options = TfLiteArmnnDelegateOptionsDefault();
     auto* armnnDelegate = new ::armnnOpaqueDelegate::ArmnnOpaqueDelegate(options);
     return TfLiteOpaqueDelegateCreate(armnnDelegate->GetDelegateBuilder());
 }
diff --git a/delegate/opaque/src/armnn_external_delegate.cpp b/delegate/opaque/src/armnn_external_delegate.cpp
index 6cc29f3..aa1f335 100644
--- a/delegate/opaque/src/armnn_external_delegate.cpp
+++ b/delegate/opaque/src/armnn_external_delegate.cpp
@@ -9,7 +9,10 @@
 
     TfLiteOpaqueDelegate* ArmNNDelegateCreateFunc(const void* tflite_settings)
     {
-        auto delegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(tflite_settings);
+        armnnDelegate::DelegateOptions opt = armnnOpaqueDelegate::ParseArmNNSettings(
+                static_cast<const tflite::TFLiteSettings*>(tflite_settings));
+
+        auto delegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(opt);
         return delegate;
     }
 
diff --git a/delegate/opaque/src/test/ArmnnOpaqueDelegateTest.cpp b/delegate/opaque/src/test/ArmnnOpaqueDelegateTest.cpp
index 091dcef..1562c9f 100644
--- a/delegate/opaque/src/test/ArmnnOpaqueDelegateTest.cpp
+++ b/delegate/opaque/src/test/ArmnnOpaqueDelegateTest.cpp
@@ -8,12 +8,95 @@
 
 #include <opaque/include/armnn_delegate.hpp>
 
+#include <tensorflow/lite/kernels/builtin_op_kernels.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include "tensorflow/lite/core/c/builtin_op_data.h"
+
 namespace armnnOpaqueDelegate
 {
 
 TEST_SUITE("ArmnnOpaqueDelegate")
 {
 
+TEST_CASE ("ArmnnOpaqueDelegate_Registered")
+{
+    using namespace tflite;
+    auto tfLiteInterpreter = std::make_unique<Interpreter>();
+
+    tfLiteInterpreter->AddTensors(3);
+    tfLiteInterpreter->SetInputs({0, 1});
+    tfLiteInterpreter->SetOutputs({2});
+
+    tfLiteInterpreter->SetTensorParametersReadWrite(0, kTfLiteFloat32, "input1", {1,2,2,1}, TfLiteQuantization());
+    tfLiteInterpreter->SetTensorParametersReadWrite(1, kTfLiteFloat32, "input2", {1,2,2,1}, TfLiteQuantization());
+    tfLiteInterpreter->SetTensorParametersReadWrite(2, kTfLiteFloat32, "output", {1,2,2,1}, TfLiteQuantization());
+
+    TfLiteAddParams* addParams = reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
+    addParams->activation = kTfLiteActNone;
+    addParams->pot_scale_int16 = false;
+
+    tflite::ops::builtin::BuiltinOpResolver opResolver;
+    const TfLiteRegistration* opRegister = opResolver.FindOp(BuiltinOperator_ADD, 1);
+    tfLiteInterpreter->AddNodeWithParameters({0, 1}, {2}, "", 0, addParams, opRegister);
+
+    // Create the Armnn Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendOptions> backendOptions;
+    backendOptions.emplace_back(
+            armnn::BackendOptions{ "BackendName",
+                                   {
+                                           { "Option1", 42 },
+                                           { "Option2", true }
+                                   }}
+    );
+
+    armnnDelegate::DelegateOptions delegateOptions(backends, backendOptions);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateDelete)>
+            theArmnnDelegate(armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(delegateOptions),
+                             armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateDelete);
+
+    auto status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
+    CHECK(status == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+}
+
+TEST_CASE ("ArmnnOpaqueDelegate_OptimizerOptionsRegistered")
+{
+    using namespace tflite;
+    auto tfLiteInterpreter = std::make_unique<Interpreter>();
+
+    tfLiteInterpreter->AddTensors(3);
+    tfLiteInterpreter->SetInputs({0, 1});
+    tfLiteInterpreter->SetOutputs({2});
+
+    tfLiteInterpreter->SetTensorParametersReadWrite(0, kTfLiteFloat32, "input1", {1,2,2,1}, TfLiteQuantization());
+    tfLiteInterpreter->SetTensorParametersReadWrite(1, kTfLiteFloat32, "input2", {1,2,2,1}, TfLiteQuantization());
+    tfLiteInterpreter->SetTensorParametersReadWrite(2, kTfLiteFloat32, "output", {1,2,2,1}, TfLiteQuantization());
+
+    TfLiteAddParams* addParams = reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
+    addParams->activation = kTfLiteActNone;
+    addParams->pot_scale_int16 = false;
+
+    tflite::ops::builtin::BuiltinOpResolver opResolver;
+    const TfLiteRegistration* opRegister = opResolver.FindOp(BuiltinOperator_ADD, 1);
+    tfLiteInterpreter->AddNodeWithParameters({0, 1}, {2}, "", 0, addParams, opRegister);
+
+    // Create the Armnn Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+
+    armnn::OptimizerOptionsOpaque optimizerOptions(true, true, false, true);
+
+    armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateDelete)>
+                        theArmnnDelegate(armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(delegateOptions),
+                                         armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateDelete);
+
+    auto status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
+    CHECK(status == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+}
+
 TEST_CASE ("DelegateOptions_OpaqueDelegateDefault")
 {
     // Check default options can be created
@@ -28,7 +111,7 @@
     CHECK(builder);
 
     // Check Opaque delegate created
-    auto opaqueDelegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(&options);
+    auto opaqueDelegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(options);
     CHECK(opaqueDelegate);
 
     // Check Opaque Delegate can be deleted
@@ -38,16 +121,27 @@
 
 TEST_CASE ("DelegatePluginTest")
 {
-    // Use default settings until options have been enabled.
-    flatbuffers::FlatBufferBuilder flatBufferBuilder;
-    tflite::TFLiteSettingsBuilder tfliteSettingsBuilder(flatBufferBuilder);
-    flatbuffers::Offset<tflite::TFLiteSettings> tfliteSettings = tfliteSettingsBuilder.Finish();
-    flatBufferBuilder.Finish(tfliteSettings);
-    const tflite::TFLiteSettings* settings = flatbuffers::GetRoot<tflite::TFLiteSettings>(
-        flatBufferBuilder.GetBufferPointer());
+    const char* backends = "CpuRef";
+    bool fastmath = false;
+    const char* additional_parameters = "allow-expanded-dims=true";
+
+    flatbuffers::FlatBufferBuilder flatbuffer_builder;
+    flatbuffers::Offset<tflite::ArmNNSettings>
+            armnn_settings_offset = tflite::CreateArmNNSettingsDirect(flatbuffer_builder,
+                                                                      backends,
+                                                                      fastmath,
+                                                                      additional_parameters);
+
+    tflite::TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder);
+    tflite_settings_builder.add_armnn_settings(armnn_settings_offset);
+    flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings_offset = tflite_settings_builder.Finish();
+    flatbuffer_builder.Finish(tflite_settings_offset);
+
+    const tflite::TFLiteSettings* tflite_settings = flatbuffers::GetRoot<tflite::TFLiteSettings>(
+            flatbuffer_builder.GetBufferPointer());
 
     std::unique_ptr<tflite::delegates::DelegatePluginInterface> delegatePlugin =
-        tflite::delegates::DelegatePluginRegistry::CreateByName("armnn_delegate", *settings);
+        tflite::delegates::DelegatePluginRegistry::CreateByName("armnn_delegate", *tflite_settings);
 
     // Plugin is created correctly using armnn_delegate name.
     CHECK((delegatePlugin != nullptr));
diff --git a/delegate/opaque/src/test/DelegateTestInterpreter.cpp b/delegate/opaque/src/test/DelegateTestInterpreter.cpp
index 04e6ad6..c46d3e1 100644
--- a/delegate/opaque/src/test/DelegateTestInterpreter.cpp
+++ b/delegate/opaque/src/test/DelegateTestInterpreter.cpp
@@ -7,7 +7,6 @@
 
 #include <armnn_delegate.hpp>
 
-#include <armnn/utility/IgnoreUnused.hpp>
 
 namespace delegateTestInterpreter
 {
@@ -17,9 +16,6 @@
                                                  const std::string& customOp,
                                                  bool disableFallback)
 {
-    armnn::IgnoreUnused(backends);
-    armnn::IgnoreUnused(disableFallback);
-
     TfLiteModel* tfLiteModel = delegateTestInterpreter::CreateTfLiteModel(modelBuffer);
 
     TfLiteInterpreterOptions* options = delegateTestInterpreter::CreateTfLiteInterpreterOptions();
@@ -28,8 +24,11 @@
         options->mutable_op_resolver = delegateTestInterpreter::GenerateCustomOpResolver(customOp);
     }
 
-    // Use default settings until options have been enabled.
-    auto armnnDelegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(nullptr);
+    // Disable fallback by default for unit tests unless specified.
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    delegateOptions.DisableTfLiteRuntimeFallback(disableFallback);
+
+    auto armnnDelegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(delegateOptions);
     TfLiteInterpreterOptionsAddDelegate(options, armnnDelegate);
 
     m_TfLiteDelegate = armnnDelegate;
@@ -44,8 +43,6 @@
                                                  const armnnDelegate::DelegateOptions& delegateOptions,
                                                  const std::string& customOp)
 {
-    armnn::IgnoreUnused(delegateOptions);
-
     TfLiteModel* tfLiteModel = delegateTestInterpreter::CreateTfLiteModel(modelBuffer);
 
     TfLiteInterpreterOptions* options = delegateTestInterpreter::CreateTfLiteInterpreterOptions();
@@ -54,8 +51,7 @@
         options->mutable_op_resolver = delegateTestInterpreter::GenerateCustomOpResolver(customOp);
     }
 
-    // Use default settings until options have been enabled.
-    auto armnnDelegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(nullptr);
+    auto armnnDelegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(delegateOptions);
     TfLiteInterpreterOptionsAddDelegate(options, armnnDelegate);
 
     m_TfLiteDelegate = armnnDelegate;