IVGCVSW-7555 Restructure Delegate

* New folders created:
  * common is for common code that does not use the TfLite API
  * classic is for the existing delegate implementation
  * opaque is for the new opaque delegate implementation
  * tests is for code shared between the existing Delegate and the Opaque Delegate, including test utilities that select which delegate to use.
* The existing delegate is built as libarmnnDelegate.so and the opaque delegate is built as libarmnnOpaqueDelegate.so
* The Opaque structure is introduced, but no API is added yet.
* CMakeLists.txt and delegate/CMakeLists.txt have been modified, and two new CMakeLists.txt files have been added
* Renamed BUILD_ARMNN_TFLITE_DELEGATE to BUILD_CLASSIC_DELEGATE
* Renamed BUILD_ARMNN_TFLITE_OPAQUE_DELEGATE to BUILD_OPAQUE_DELEGATE (see the example below)
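
As a rough sketch of how the renamed options would be passed when configuring
the build (the "=1" values and the build directory are illustrative
assumptions, not part of this change):

    cmake .. -DBUILD_CLASSIC_DELEGATE=1 -DBUILD_OPAQUE_DELEGATE=1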

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ib682b9ad0ac8d8acdc4ec6d9099bb0008a9fe8ed
diff --git a/delegate/classic/src/Fill.hpp b/delegate/classic/src/Fill.hpp
new file mode 100644
index 0000000..15dc91e
--- /dev/null
+++ b/delegate/classic/src/Fill.hpp
@@ -0,0 +1,119 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
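+// Handles the TfLite FILL operator: validates the node and, when a network is being built, adds an ArmNN Fill layer.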
+TfLiteStatus VisitFillOperator(DelegateData& delegateData,
+                               TfLiteContext* tfLiteContext,
+                               TfLiteNode* tfLiteNode,
+                               int nodeIndex,
+                               int32_t tfLiteFillOperatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    switch(tfLiteFillOperatorCode)
+    {
+        case kTfLiteBuiltinFill:
+            TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+            break;
+        default:
+            return kTfLiteError;
+    }
+
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteFillOperatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteFillTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+    if (!IsValid(tfLiteContext, tfLiteFillTensor, tfLiteFillOperatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteFillOperatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    armnn::TensorInfo inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+    armnn::FillDescriptor descriptor;
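+    // The FILL value is a scalar held in the second input tensor; copy it into the descriptor.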
+    switch (tfLiteFillTensor.type)
+    {
+        case kTfLiteFloat32:
+            descriptor.m_Value = tflite::GetTensorData<float>(&tfLiteFillTensor)[0];
+            break;
+        case kTfLiteInt32:
+            descriptor.m_Value = tflite::GetTensorData<int32_t>(&tfLiteFillTensor)[0];
+            break;
+        default:
+            TF_LITE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnDelegate: FILL value data type is not supported in operator #%d node #%d: ",
+                tfLiteFillOperatorCode, nodeIndex);
+            return kTfLiteError;
+    }
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
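+    // Query backend support for a Fill layer with the given input/output tensor infos.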
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC("FILL",
+                                   tfLiteContext,
+                                   IsFillSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   setBackend,
+                                   inputTensorInfo,
+                                   outInfo,
+                                   descriptor);
+    };
+
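+    // If there is no network, this call only checks whether the node is supported; no layer is added.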
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddFillLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
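+    // Add and connect constant layers for any constant inputs of this node.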
+    auto inputsTensorsProcess = ProcessInputs(layer,
+                                              delegateData,
+                                              tfLiteContext,
+                                              tfLiteNode);
+    if (inputsTensorsProcess == kTfLiteError)
+    {
+        return inputsTensorsProcess;
+    }
+
+    return Connect(layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate