IVGCVSW-8232 ScatterNd added to delegate and opaque delegate

Signed-off-by: Kevin May <kevin.may@arm.com>
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>

Change-Id: I5839f54c71f74eaa6819333393bb3054db9db5be
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index d92611f..f8b0300 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -1,5 +1,5 @@
 #
-# Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 # SPDX-License-Identifier: MIT
 #
 
@@ -188,6 +188,8 @@
         test/ReverseV2TestHelper.hpp
         test/RoundTest.cpp
         test/RoundTestHelper.hpp
+        test/ScatterNdTest.cpp
+        test/ScatterNdTestHelper.hpp
         test/SoftmaxTest.cpp
         test/SoftmaxTestHelper.hpp
         test/SpaceDepthTest.cpp
diff --git a/delegate/classic/CMakeLists.txt b/delegate/classic/CMakeLists.txt
index fbd19ed..72ecc28 100644
--- a/delegate/classic/CMakeLists.txt
+++ b/delegate/classic/CMakeLists.txt
@@ -1,5 +1,5 @@
 #
-# Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
 # SPDX-License-Identifier: MIT
 #
 
@@ -37,6 +37,7 @@
         src/Resize.hpp
         src/ReverseV2.hpp
         src/Round.hpp
+        src/ScatterNd.hpp
         src/Shape.hpp
         src/SharedFunctions.hpp
         src/SharedFunctions.cpp
diff --git a/delegate/classic/src/ScatterNd.hpp b/delegate/classic/src/ScatterNd.hpp
new file mode 100644
index 0000000..c73e231
--- /dev/null
+++ b/delegate/classic/src/ScatterNd.hpp
@@ -0,0 +1,161 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+namespace armnnDelegate
+{
+TfLiteStatus ValidateScatterNdOperator(DelegateData& delegateData, // Support-only check: queries backends, adds no layer
+                                       TfLiteContext* tfLiteContext,
+                                       const armnn::TensorInfo& indicesInfo,
+                                       const armnn::TensorInfo& updatesInfo,
+                                       const armnn::TensorInfo& shapeInfo,
+                                       const armnn::TensorInfo& outputInfo,
+                                       const armnn::ScatterNdDescriptor& descriptor)
+{
+    bool isSupported = false; // Written by FORWARD_LAYER_SUPPORT_FUNC below
+    FORWARD_LAYER_SUPPORT_FUNC("SCATTER_ND",
+                               tfLiteContext,
+                               IsScatterNdSupported,
+                               delegateData.m_Backends,
+                               isSupported,
+                               armnn::BackendId(), // Empty id: check every backend in m_Backends
+                               shapeInfo,   // Note: shape is passed first — presumably matching IsScatterNdSupported's order; confirm against Arm NN headers
+                               indicesInfo,
+                               updatesInfo,
+                               outputInfo,
+                               descriptor);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus VisitScatterNdOperator(DelegateData& delegateData, // Validates and (when m_Network is set) adds a ScatterNd layer
+                                    TfLiteContext* tfLiteContext,
+                                    TfLiteNode* tfLiteNode,
+                                    int nodeIndex,
+                                    int32_t scatterNdOperatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+
+    // The indices tensor are the positions the data is updated/scattered into
+    const TfLiteTensor& tfLiteIndicesTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (IsDynamicTensor(tfLiteIndicesTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // The updates tensor provides the data which will be updated/scattered into the relevant indices
+    const TfLiteTensor& tfLiteUpdatesTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+    if (IsDynamicTensor(tfLiteUpdatesTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // For tflite scatternd there is no input tensor
+    // The shape tensor is a 1D tensor which represents the shape of an input tensor to be filled with zeros
+    const TfLiteTensor& tfLiteShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+    if (IsDynamicTensor(tfLiteShapeTensor)) // Fixed: previously re-tested the updates tensor, leaving the shape tensor unchecked
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+                scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // The output tensor
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (IsDynamicTensor(tfLiteOutputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+            scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& indicesTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteIndicesTensor);
+    const armnn::TensorInfo& updatesTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteUpdatesTensor);
+    const armnn::TensorInfo& shapeTensorInfo   = GetTensorInfoForTfLiteTensor(tfLiteShapeTensor);
+    const armnn::TensorInfo& outputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+    armnn::ScatterNdDescriptor scatterNdDescriptor;
+    scatterNdDescriptor.m_Function     = armnn::ScatterNdFunction::Update; // TFLite SCATTER_ND always overwrites (no add/mul variants)
+    scatterNdDescriptor.m_InputEnabled = false; // No input tensor in TFLite; output starts zero-filled from the shape tensor
+    scatterNdDescriptor.m_Axis         = 0;
+    scatterNdDescriptor.m_AxisEnabled  = false;
+
+    // Check output dimensions
+    if (shapeTensorInfo.GetShape().GetNumElements() != outputTensorInfo.GetNumDimensions())
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Shape tensor number of elements and output tensor dimension differ "
+            "Operator: #%d node #%d: ", // Fixed: literals now concatenate into one format string so %d args are formatted
+            scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // No network pointer indicates that only support for this operator should be checked
+    if (!delegateData.m_Network)
+    {
+        return ValidateScatterNdOperator(delegateData,
+                                         tfLiteContext,
+                                         indicesTensorInfo,
+                                         updatesTensorInfo,
+                                         shapeTensorInfo,
+                                         outputTensorInfo,
+                                         scatterNdDescriptor);
+    }
+
+    auto layerName = GetLayerName(armnn::LayerType::ScatterNd, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddScatterNdLayer(scatterNdDescriptor, layerName.c_str());
+
+    if (layer == nullptr)
+    {
+        return kTfLiteError;
+    }
+
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    if (static_cast<unsigned int>(tfLiteNode->outputs->size) != layer->GetNumOutputSlots())
+    {
+        return kTfLiteError;
+    }
+
+    delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[2]]->Connect(layer->GetInputSlot(0)); // ArmNN slot 0: shape
+    delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(layer->GetInputSlot(1)); // ArmNN slot 1: indices
+    delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[1]]->Connect(layer->GetInputSlot(2)); // ArmNN slot 2: updates
+
+    // Prepare output slots
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[0])] = &outputSlot;
+
+    return kTfLiteOk;
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/classic/src/armnn_delegate.cpp b/delegate/classic/src/armnn_delegate.cpp
index 05bf9b2..52621ee 100644
--- a/delegate/classic/src/armnn_delegate.cpp
+++ b/delegate/classic/src/armnn_delegate.cpp
@@ -34,6 +34,7 @@
 #include "Resize.hpp"
 #include "ReverseV2.hpp"
 #include "Round.hpp"
+#include "ScatterNd.hpp"
 #include "Shape.hpp"
 #include "Slice.hpp"
 #include "StridedSlice.hpp"
@@ -1070,6 +1071,12 @@
                                             tfLiteNode,
                                             nodeIndex,
                                             kTfLiteBuiltinTransposeConv);
+        case kTfLiteBuiltinScatterNd:
+            return VisitScatterNdOperator(delegateData,
+                                          tfLiteContext,
+                                          tfLiteNode,
+                                          nodeIndex,
+                                          kTfLiteBuiltinScatterNd);
         case kTfLiteBuiltinSoftmax:
             return VisitSoftmaxOperator(delegateData,
                                         tfLiteContext,
diff --git a/delegate/opaque/CMakeLists.txt b/delegate/opaque/CMakeLists.txt
index abbf38d..858a0a3 100644
--- a/delegate/opaque/CMakeLists.txt
+++ b/delegate/opaque/CMakeLists.txt
@@ -1,5 +1,5 @@
 #
-# Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
 # SPDX-License-Identifier: MIT
 #
 
@@ -34,6 +34,7 @@
         src/Resize.hpp
         src/ReverseV2.hpp
         src/Round.hpp
+        src/ScatterNd.hpp
         src/Shape.hpp
         src/SharedFunctions.cpp
         src/SharedFunctions.hpp
diff --git a/delegate/opaque/src/ScatterNd.hpp b/delegate/opaque/src/ScatterNd.hpp
new file mode 100644
index 0000000..08bbed7
--- /dev/null
+++ b/delegate/opaque/src/ScatterNd.hpp
@@ -0,0 +1,173 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
+TfLiteStatus ValidateScatterNdOperator(DelegateData& delegateData, // Support-only check: queries backends, adds no layer
+                                       TfLiteOpaqueContext *tfLiteContext,
+                                       const armnn::TensorInfo& indicesInfo,
+                                       const armnn::TensorInfo& updatesInfo,
+                                       const armnn::TensorInfo& shapeInfo,
+                                       const armnn::TensorInfo& outputInfo,
+                                       const armnn::ScatterNdDescriptor& descriptor)
+{
+    bool isSupported = false; // Written by FORWARD_LAYER_OPAQUE_SUPPORT_FUNC below
+    FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SCATTER_ND",
+                                      tfLiteContext,
+                                      IsScatterNdSupported,
+                                      delegateData.m_Backends,
+                                      isSupported,
+                                      armnn::BackendId(), // Empty id: check every backend in m_Backends
+                                      shapeInfo,   // Note: shape is passed first — presumably matching IsScatterNdSupported's order; confirm against Arm NN headers
+                                      indicesInfo,
+                                      updatesInfo,
+                                      outputInfo,
+                                      descriptor);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus VisitScatterNdOperator(DelegateData& delegateData, // Validates and (when m_Network is set) adds a ScatterNd layer
+                                    TfLiteOpaqueContext* tfLiteContext,
+                                    TfLiteOpaqueNode* tfLiteNode,
+                                    int nodeIndex,
+                                    int32_t scatterNdOperatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input indices and use to get input tensor.
+    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+            nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Gather input indices and use to get output tensor.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+            nodeIndex);
+        return kTfLiteError;
+    }
+
+    // The indices tensor are the positions the data is updated/scattered into
+    const TfLiteOpaqueTensor* tfLiteIndicesTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (IsDynamicTensor(tfLiteIndicesTensor))
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // The updates tensor provides the data which will be updated/scattered into the relevant indices
+    const TfLiteOpaqueTensor* tfLiteUpdatesTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+    if (IsDynamicTensor(tfLiteUpdatesTensor))
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // For TFLite ScatterNd there is no input tensor
+    // The shape tensor is a 1D tensor which represents the shape of an input tensor to be filled with zeros
+    const TfLiteOpaqueTensor* tfLiteShapeTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[2]);
+    if (IsDynamicTensor(tfLiteShapeTensor))
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+                scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // The output tensor
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (IsDynamicTensor(tfLiteOutputTensor))
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+            scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& shapeTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteShapeTensor);
+    const armnn::TensorInfo& indicesTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteIndicesTensor);
+    const armnn::TensorInfo& updatesTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteUpdatesTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    armnn::ScatterNdDescriptor scatterNdDescriptor;
+    scatterNdDescriptor.m_Function     = armnn::ScatterNdFunction::Update; // TFLite SCATTER_ND always overwrites (no add/mul variants)
+    scatterNdDescriptor.m_InputEnabled = false; // No input tensor in TFLite; output starts zero-filled from the shape tensor
+    scatterNdDescriptor.m_Axis         = 0;
+    scatterNdDescriptor.m_AxisEnabled  = false;
+
+    // Check output dimensions
+    if (shapeTensorInfo.GetShape().GetNumElements() != outputTensorInfo.GetNumDimensions())
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Shape tensor number of elements and output tensor dimension differ "
+                "Operator: #%d node #%d: ", // Fixed: literals now concatenate into one format string so %d args are formatted
+                scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // No network pointer indicates that only support for this operator should be checked
+    if (!delegateData.m_Network)
+    {
+        return ValidateScatterNdOperator(delegateData,
+                                         tfLiteContext,
+                                         indicesTensorInfo,
+                                         updatesTensorInfo,
+                                         shapeTensorInfo,
+                                         outputTensorInfo,
+                                         scatterNdDescriptor);
+    }
+
+    auto layerName = GetName(armnn::LayerType::ScatterNd, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddScatterNdLayer(scatterNdDescriptor, layerName.c_str());
+
+    if (layer == nullptr)
+    {
+        return kTfLiteError;
+    }
+
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    delegateData.m_OutputSlotForNode[inputTensors[2]]->Connect(layer->GetInputSlot(0)); // ArmNN slot 0: shape
+    delegateData.m_OutputSlotForNode[inputTensors[0]]->Connect(layer->GetInputSlot(1)); // ArmNN slot 1: indices
+    delegateData.m_OutputSlotForNode[inputTensors[1]]->Connect(layer->GetInputSlot(2)); // ArmNN slot 2: updates
+
+    // Prepare output slots
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    delegateData.m_OutputSlotForNode[static_cast<unsigned long>(outputTensors[0])] = &outputSlot;
+
+    return kTfLiteOk;
+}
+
+} // namespace armnnOpaqueDelegate
\ No newline at end of file
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index 4ed0a78..9e047f6 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -33,6 +33,7 @@
 #include "Resize.hpp"
 #include "ReverseV2.hpp"
 #include "Round.hpp"
+#include "ScatterNd.hpp"
 #include "Shape.hpp"
 #include "Slice.hpp"
 #include "StridedSlice.hpp"
@@ -1154,6 +1155,12 @@
                                                  nodeIndex,
                                                  kTfLiteBuiltinRsqrt,
                                                  armnn::UnaryOperation::Rsqrt);
+        case kTfLiteBuiltinScatterNd:
+            return VisitScatterNdOperator(delegateData,
+                                          tfLiteContext,
+                                          tfLiteNode,
+                                          nodeIndex,
+                                          kTfLiteBuiltinScatterNd);
         case kTfLiteBuiltinShape:
             return VisitShapeOperator(delegateData,
                                       tfLiteContext,
diff --git a/delegate/test/ScatterNdTest.cpp b/delegate/test/ScatterNdTest.cpp
new file mode 100644
index 0000000..2b2a67c
--- /dev/null
+++ b/delegate/test/ScatterNdTest.cpp
@@ -0,0 +1,446 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ScatterNdTestHelper.hpp"
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+template <typename T>
+void ScatterNd1DimTest(tflite::TensorType tensorType, const std::vector<armnn::BackendId>& backends = {}) // Scatters 3 scalar updates into a zero-filled 1-D tensor of length 5
+{
+    // Set shapes
+    std::vector<int32_t> indicesShape = { 3, 1 };  // 3 updates, each addressed by a rank-1 index
+    std::vector<int32_t> updatesShape = { 3 };
+    std::vector<int32_t> shapeShape = { 1 };
+    std::vector<int32_t> expectedOutputShape = { 5 };
+
+    // Set Values
+    std::vector<int32_t> indicesValues = { 0, 1, 2 };
+    std::vector<T> updatesValues = { 1, 2, 3 };
+    std::vector<int32_t> shapeValue = { 5 };
+    std::vector<T> expectedOutputValues =  { 1, 2, 3, 0, 0 }; // Positions 3 and 4 stay at the zero fill value
+
+    ScatterNdTestImpl<T>(tensorType,
+                         indicesShape,
+                         indicesValues,
+                         updatesShape,
+                         updatesValues,
+                         shapeShape,
+                         shapeValue,
+                         expectedOutputShape,
+                         expectedOutputValues,
+                         backends);
+}
+
+template <typename T>
+void ScatterNd2DimTest(tflite::TensorType tensorType, const std::vector<armnn::BackendId>& backends = {}) // Scatters 3 scalar updates onto the diagonal of a zeroed 3x3 tensor
+{
+    // Set shapes
+    std::vector<int32_t> indicesShape = { 3, 2 };  // 3 updates, each addressed by a rank-2 index
+    std::vector<int32_t> updatesShape = { 3 };
+    std::vector<int32_t> shapeShape = { 2 };
+    std::vector<int32_t> expectedOutputShape = { 3, 3 };
+
+    // Set Values
+    std::vector<int32_t> indicesValues = { 0, 0,
+                                           1, 1,
+                                           2, 2 };
+    std::vector<T> updatesValues = { 1, 2, 3 };
+    std::vector<int32_t> shapeValue = { 3, 3 };
+    std::vector<T> expectedOutputValues =  { 1, 0, 0,
+                                             0, 2, 0,
+                                             0, 0, 3 }; // Diagonal written, everything else stays zero
+
+    ScatterNdTestImpl<T>(tensorType,
+                         indicesShape,
+                         indicesValues,
+                         updatesShape,
+                         updatesValues,
+                         shapeShape,
+                         shapeValue,
+                         expectedOutputShape,
+                         expectedOutputValues,
+                         backends);
+}
+
+template <typename T>
+void ScatterNd2Dim1Outter1InnerTest(tflite::TensorType tensorType, const std::vector<armnn::BackendId>& backends = {}) // Scatters whole rows (slices) into a zeroed 3x3 tensor
+{
+    // Set shapes
+    std::vector<int32_t> indicesShape = { 2, 1 };  // 2 updates, each a rank-1 index selecting a full row
+    std::vector<int32_t> updatesShape = { 2, 3 };  // Each update is a 3-element row slice
+    std::vector<int32_t> shapeShape = { 2 };
+    std::vector<int32_t> expectedOutputShape = { 3, 3 };
+
+    // Set Values
+    std::vector<int32_t> indicesValues = { 0, 1 };
+    std::vector<T> updatesValues = { 1, 1, 1,
+                                           1, 1, 1 };
+    std::vector<int32_t> shapeValue = { 3, 3 };
+    std::vector<T> expectedOutputValues =  { 1, 1, 1,
+                                            1, 1, 1,
+                                            0, 0, 0 }; // Rows 0 and 1 written; row 2 stays zero
+
+    ScatterNdTestImpl<T>(tensorType,
+                         indicesShape,
+                         indicesValues,
+                         updatesShape,
+                         updatesValues,
+                         shapeShape,
+                         shapeValue,
+                         expectedOutputShape,
+                         expectedOutputValues,
+                         backends);
+}
+
+template <typename T>
+void ScatterNd3DimTest(tflite::TensorType tensorType, const std::vector<armnn::BackendId>& backends = {}) // Scatters 3 scalar updates onto the space diagonal of a zeroed 3x3x3 tensor
+{
+    // Set shapes
+    std::vector<int32_t> indicesShape = { 3, 3 };  // 3 updates, each addressed by a rank-3 index
+    std::vector<int32_t> updatesShape = { 3 };
+    std::vector<int32_t> shapeShape = { 3 };
+    std::vector<int32_t> expectedOutputShape = { 3, 3, 3 };
+
+    // Set Values
+    std::vector<int32_t> indicesValues = { 0, 0, 0,
+                                           1, 1, 1,
+                                           2, 2, 2 };
+    std::vector<T> updatesValues = { 1, 2, 3 };
+    std::vector<int32_t> shapeValue = { 3, 3, 3 };
+    std::vector<T> expectedOutputValues =  { 1, 0, 0,
+                                             0, 0, 0,
+                                             0, 0, 0,
+
+                                             0, 0, 0,
+                                             0, 2, 0,
+                                             0, 0, 0,
+
+                                             0, 0, 0,
+                                             0, 0, 0,
+                                             0, 0, 3 }; // One element written per 3x3 plane: (0,0,0), (1,1,1), (2,2,2)
+
+    ScatterNdTestImpl<T>(tensorType,
+                         indicesShape,
+                         indicesValues,
+                         updatesShape,
+                         updatesValues,
+                         shapeShape,
+                         shapeValue,
+                         expectedOutputShape,
+                         expectedOutputValues,
+                         backends);
+}
+
+template <typename T>
+void ScatterNd3Dim1Outter2InnerTest(tflite::TensorType tensorType, const std::vector<armnn::BackendId>& backends = {}) // Scatters whole 3x3 planes into a zeroed 3x3x3 tensor
+{
+    // Set shapes
+    std::vector<int32_t> indicesShape = { 2, 1 };     // 2 updates, each a rank-1 index selecting a full plane
+    std::vector<int32_t> updatesShape = { 2, 3, 3 };  // Each update is a 3x3 plane slice
+    std::vector<int32_t> shapeShape = { 3 };
+    std::vector<int32_t> expectedOutputShape = { 3, 3, 3 };
+
+    // Set Values
+    std::vector<int32_t> indicesValues = { 0, 1 };
+    std::vector<T> updatesValues = { 1, 1, 1,
+                                    1, 1, 1,
+                                    1, 1, 1,
+
+                                    2, 2, 2,
+                                    2, 2, 2,
+                                    2, 2, 2 };
+    std::vector<int32_t> shapeValue = { 3, 3, 3 };
+    std::vector<T> expectedOutputValues =  { 1, 1, 1,
+                                             1, 1, 1,
+                                             1, 1, 1,
+
+                                             2, 2, 2,
+                                             2, 2, 2,
+                                             2, 2, 2,
+
+                                             0, 0, 0,
+                                             0, 0, 0,
+                                             0, 0, 0 }; // Planes 0 and 1 written; plane 2 stays zero
+
+    ScatterNdTestImpl<T>(tensorType,
+                         indicesShape,
+                         indicesValues,
+                         updatesShape,
+                         updatesValues,
+                         shapeShape,
+                         shapeValue,
+                         expectedOutputShape,
+                         expectedOutputValues,
+                         backends);
+}
+
<template-not-emitted/>
+template <typename T>
+void ScatterNd3Dim2Outter1InnerTest(tflite::TensorType tensorType, const std::vector<armnn::BackendId>& backends = {}) // Scatters 3-element rows addressed by rank-2 indices into a zeroed 3x3x3 tensor
+{
+    // Set shapes
+    std::vector<int32_t> indicesShape = { 2, 2 };  // 2 updates, each a rank-2 index selecting a row within a plane
+    std::vector<int32_t> updatesShape = { 2, 3 };  // Each update is a 3-element row slice
+    std::vector<int32_t> shapeShape = { 3 };
+    std::vector<int32_t> expectedOutputShape = { 3, 3, 3 };
+
+    // Set Values
+    std::vector<int32_t> indicesValues = { 0, 0,
+                                           1, 1 };
+    std::vector<T> updatesValues = { 1, 1, 1,
+                                     2, 2, 2 };
+    std::vector<int32_t> shapeValue = { 3, 3, 3 };
+    std::vector<T> expectedOutputValues =  { 1, 1, 1,
+                                             0, 0, 0,
+                                             0, 0, 0,
+
+                                             0, 0, 0,
+                                             2, 2, 2,
+                                             0, 0, 0,
+
+                                             0, 0, 0,
+                                             0, 0, 0,
+                                             0, 0, 0 }; // Rows (0,0) and (1,1) written; everything else stays zero
+
+    ScatterNdTestImpl<T>(tensorType,
+                         indicesShape,
+                         indicesValues,
+                         updatesShape,
+                         updatesValues,
+                         shapeShape,
+                         shapeValue,
+                         expectedOutputShape,
+                         expectedOutputValues,
+                         backends);
+}
+
+template <typename T>
+void ScatterNdDim4(tflite::TensorType tensorType, const std::vector<armnn::BackendId>& backends = {}) // Scatters 3 scalar updates into a zeroed 4-D (2x3x3x3) tensor
+{
+    // Set shapes
+    std::vector<int32_t> indicesShape = { 3, 4 };  // 3 updates, each addressed by a rank-4 index
+    std::vector<int32_t> updatesShape = { 3 };
+    std::vector<int32_t> shapeShape = { 4 };
+    std::vector<int32_t> expectedOutputShape = { 2, 3, 3, 3 };
+
+    // Set Values
+    std::vector<int32_t> indicesValues = { 0, 0, 0, 0,
+                                           0, 1, 1, 1,
+                                           1, 1, 1, 1 };
+    std::vector<T> updatesValues = { 1, 2, 3 };
+    std::vector<int32_t> shapeValue = { 2, 3, 3, 3 };
+    std::vector<T> expectedOutputValues =  { 1, 0, 0,    // Batch 0: 1 at (0,0,0,0), 2 at (0,1,1,1)
+                                             0, 0, 0,
+                                             0, 0, 0,
+
+                                             0, 0, 0,
+                                             0, 2, 0,
+                                             0, 0, 0,
+
+                                             0, 0, 0,
+                                             0, 0, 0,
+                                             0, 0, 0,
+
+                                             0, 0, 0,    // Batch 1: 3 at (1,1,1,1)
+                                             0, 0, 0,
+                                             0, 0, 0,
+
+                                             0, 0, 0,
+                                             0, 3, 0,
+                                             0, 0, 0,
+
+                                             0, 0, 0,
+                                             0, 0, 0,
+                                             0, 0, 0 };
+
+    ScatterNdTestImpl<T>(tensorType,
+                         indicesShape,
+                         indicesValues,
+                         updatesShape,
+                         updatesValues,
+                         shapeShape,
+                         shapeValue,
+                         expectedOutputShape,
+                         expectedOutputValues,
+                         backends);
+}
+
+TEST_SUITE("ScatterNdDelegateTests")
+{
+
+// Each case builds a single-operator SCATTER_ND model and checks that the
+// Arm NN delegate (CpuRef) produces the same result as the TFLite runtime
+// for the given data type and indices/updates layout.
+
+TEST_CASE ("ScatterNd_1Dim_FP32_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd1DimTest<float>(tflite::TensorType_FLOAT32, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_1Dim_INT32_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd1DimTest<int32_t>(tflite::TensorType_INT32, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_1Dim_INT8_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd1DimTest<int8_t>(tflite::TensorType_INT8, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_1Dim_UINT8_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd1DimTest<uint8_t>(tflite::TensorType_UINT8, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_2Dim_FP32_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd2DimTest<float>(tflite::TensorType_FLOAT32, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_2Dim_INT32_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd2DimTest<int32_t>(tflite::TensorType_INT32, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_2Dim_INT8_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd2DimTest<int8_t>(tflite::TensorType_INT8, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_2Dim_UINT8_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd2DimTest<uint8_t>(tflite::TensorType_UINT8, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_2Dim_1Outter_1Inner_FP32_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd2Dim1Outter1InnerTest<float>(tflite::TensorType_FLOAT32, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_2Dim_1Outter_1Inner_INT32_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd2Dim1Outter1InnerTest<int32_t>(tflite::TensorType_INT32, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_2Dim_1Outter_1Inner_INT8_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd2Dim1Outter1InnerTest<int8_t>(tflite::TensorType_INT8, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_2Dim_1Outter_1Inner_UINT8_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd2Dim1Outter1InnerTest<uint8_t>(tflite::TensorType_UINT8, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_FP32_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd3DimTest<float>(tflite::TensorType_FLOAT32, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_INT32_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd3DimTest<int32_t>(tflite::TensorType_INT32, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_INT8_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd3DimTest<int8_t>(tflite::TensorType_INT8, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_UINT8_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd3DimTest<uint8_t>(tflite::TensorType_UINT8, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_1Outter_2Inner_FP32_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd3Dim1Outter2InnerTest<float>(tflite::TensorType_FLOAT32, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_1Outter_2Inner_INT32_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd3Dim1Outter2InnerTest<int32_t>(tflite::TensorType_INT32, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_1Outter_2Inner_INT8_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd3Dim1Outter2InnerTest<int8_t>(tflite::TensorType_INT8, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_1Outter_2Inner_UINT8_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd3Dim1Outter2InnerTest<uint8_t>(tflite::TensorType_UINT8, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_2Outter_1Inner_FP32_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd3Dim2Outter1InnerTest<float>(tflite::TensorType_FLOAT32, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_2Outter_1Inner_INT32_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd3Dim2Outter1InnerTest<int32_t>(tflite::TensorType_INT32, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_2Outter_1Inner_INT8_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd3Dim2Outter1InnerTest<int8_t>(tflite::TensorType_INT8, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_2Outter_1Inner_UINT8_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNd3Dim2Outter1InnerTest<uint8_t>(tflite::TensorType_UINT8, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_4Dim_FP32_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNdDim4<float>(tflite::TensorType_FLOAT32, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_4Dim_INT32_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNdDim4<int32_t>(tflite::TensorType_INT32, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_4Dim_INT8_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNdDim4<int8_t>(tflite::TensorType_INT8, cpuRefBackends);
+}
+
+TEST_CASE ("ScatterNd_4Dim_UINT8_Test")
+{
+    const std::vector<armnn::BackendId> cpuRefBackends { armnn::Compute::CpuRef };
+    ScatterNdDim4<uint8_t>(tflite::TensorType_UINT8, cpuRefBackends);
+}
+
+} // TEST_SUITE("ScatterNdDelegateTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/ScatterNdTestHelper.hpp b/delegate/test/ScatterNdTestHelper.hpp
new file mode 100644
index 0000000..5d2cfb0
--- /dev/null
+++ b/delegate/test/ScatterNdTestHelper.hpp
@@ -0,0 +1,174 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
+
+#include <tensorflow/lite/version.h>
+
+namespace
+{
+
+// Builds a self-contained TFLite flatbuffer model holding a single SCATTER_ND
+// operator.
+//
+// Subgraph inputs : indices (INT32), updates (tensorType), shape (INT32).
+// Subgraph output : tensorType, with dimensions outputShape.
+// The shape tensor's contents (shapeData) are baked into the model as a
+// constant buffer; indices and updates are filled in at runtime by the caller.
+// quantScale/quantOffset populate the quantization parameters attached to the
+// tensors (only meaningful for the quantized tensorType variants).
+// Returns the serialized model as raw bytes.
+std::vector<char> CreateScatterNdTfLiteModel(tflite::TensorType tensorType,
+                                             const std::vector<int32_t>& indicesShape,
+                                             const std::vector<int32_t>& updatesShape,
+                                             const std::vector<int32_t>& shapeShape,
+                                             const std::vector<int32_t>& outputShape,
+                                             const std::vector<int32_t>& shapeData,
+                                             float quantScale = 1.0f,
+                                             int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder)); // buffer 0: left empty, per TFLite schema convention
+    buffers.push_back(CreateBuffer(flatBufferBuilder)); // indices
+    buffers.push_back(CreateBuffer(flatBufferBuilder)); // updates
+    buffers.push_back(CreateBuffer(flatBufferBuilder, // shape: the only buffer with constant data
+                                   flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(shapeData.data()),
+                                                                      sizeof(int32_t) * shapeData.size())));
+    buffers.push_back(CreateBuffer(flatBufferBuilder)); // output
+
+    // One shared per-tensor quantization parameter set, attached to every tensor below.
+    auto quantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    // Tensor i references buffer i + 1 (the integer argument after the tensor type).
+    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(indicesShape.data(),
+                                                                      indicesShape.size()),
+                              TensorType_INT32,
+                              1,
+                              flatBufferBuilder.CreateString("indices_tensor"),
+                              quantizationParameters);
+
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(updatesShape.data(),
+                                                                      updatesShape.size()),
+                              tensorType,
+                              2,
+                              flatBufferBuilder.CreateString("updates_tensor"),
+                              quantizationParameters);
+
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(shapeShape.data(),
+                                                                      shapeShape.size()),
+                              TensorType_INT32,
+                              3,
+                              flatBufferBuilder.CreateString("shape_tensor"),
+                              quantizationParameters);
+
+    tensors[3] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputShape.data(),
+                                                                      outputShape.size()),
+                              tensorType,
+                              4,
+                              flatBufferBuilder.CreateString("output_tensor"),
+                              quantizationParameters);
+
+    // Create Operator: tensors 0-2 (indices, updates, shape) in, tensor 3 out.
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_ScatterNdOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions  = CreateScatterNdOptions(flatBufferBuilder).Union();
+
+    const std::vector<int> operatorInputs { 0, 1, 2 };
+    const std::vector<int> operatorOutputs { 3 };
+
+    flatbuffers::Offset<Operator> scatterNdOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
+
+    // The subgraph exposes the same tensors as the operator: 3 inputs, 1 output.
+    const std::vector<int> subgraphInputs{ 0, 1, 2 };
+    const std::vector<int> subgraphOutputs{ 3 };
+    flatbuffers::Offset <SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&scatterNdOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: ScatterNd Operator Model");
+    flatbuffers::Offset <OperatorCode> opCode = CreateOperatorCode(flatBufferBuilder,
+                                                                   tflite::BuiltinOperator_SCATTER_ND);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&opCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
+
+    // Copy the finished flatbuffer out of the builder into a plain byte vector.
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+// Runs the single-operator ScatterNd model twice — once on the plain TFLite
+// runtime and once with the Arm NN delegate applied — then checks that both
+// runs agree with each other and with the caller-supplied expected data.
+template<typename T>
+void ScatterNdTestImpl(tflite::TensorType tensorType,
+                       std::vector<int32_t>& indicesShape,
+                       std::vector<int32_t>& indicesValues,
+                       std::vector<int32_t>& updatesShape,
+                       std::vector<T>& updatesValues,
+                       std::vector<int32_t>& shapeShape,
+                       std::vector<int32_t>& shapeValue,
+                       std::vector<int32_t>& expectedOutputShape,
+                       std::vector<T>& expectedOutputValues,
+                       const std::vector<armnn::BackendId>& backends = {},
+                       float quantScale = 1.0f,
+                       int quantOffset = 0)
+{
+    using namespace delegateTestInterpreter;
+
+    std::vector<char> modelBytes = CreateScatterNdTfLiteModel(tensorType,
+                                                              indicesShape,
+                                                              updatesShape,
+                                                              shapeShape,
+                                                              expectedOutputShape,
+                                                              shapeValue,
+                                                              quantScale,
+                                                              quantOffset);
+
+    // Feeds the three inputs into one interpreter, invokes it and captures its
+    // first output's values and shape. Shared by both runs below.
+    auto runModel = [&](DelegateTestInterpreter& interpreter,
+                        std::vector<T>& outValues,
+                        std::vector<int32_t>& outShape)
+    {
+        CHECK(interpreter.AllocateTensors() == kTfLiteOk);
+        CHECK(interpreter.FillInputTensor<int32_t>(indicesValues, 0) == kTfLiteOk);
+        CHECK(interpreter.FillInputTensor<T>(updatesValues, 1) == kTfLiteOk);
+        CHECK(interpreter.FillInputTensor<int32_t>(shapeValue, 2) == kTfLiteOk);
+        CHECK(interpreter.Invoke() == kTfLiteOk);
+        outValues = interpreter.GetOutputResult<T>(0);
+        outShape  = interpreter.GetOutputShape(0);
+    };
+
+    // Reference run: TFLite runtime only.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBytes);
+    std::vector<T>       tfLiteOutputValues;
+    std::vector<int32_t> tfLiteOutputShape;
+    runModel(tfLiteInterpreter, tfLiteOutputValues, tfLiteOutputShape);
+
+    // Same model with the Arm NN delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBytes, CaptureAvailableBackends(backends));
+    std::vector<T>       armnnOutputValues;
+    std::vector<int32_t> armnnOutputShape;
+    runModel(armnnInterpreter, armnnOutputValues, armnnOutputShape);
+
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
+
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/docs/05_03_delegate.dox b/docs/05_03_delegate.dox
index dde50e1..75ebf9c 100644
--- a/docs/05_03_delegate.dox
+++ b/docs/05_03_delegate.dox
@@ -172,6 +172,8 @@
 
 - RSQRT
 
+- SCATTERND
+
 - SHAPE
 
 - SIN