IVGCVSW-7625 GpuFsa Op: Add Resize/Scale operator

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I695ef452d004ed7b606020037cad681ef1fc80c3
diff --git a/src/backends/gpuFsa/GpuFsaBackend.cpp b/src/backends/gpuFsa/GpuFsaBackend.cpp
index 4a41030..1bfe8dd 100644
--- a/src/backends/gpuFsa/GpuFsaBackend.cpp
+++ b/src/backends/gpuFsa/GpuFsaBackend.cpp
@@ -26,6 +26,7 @@
 #include "layers/GpuFsaElementwiseBinaryAdd.hpp"
 #include "layers/GpuFsaElementwiseBinarySub.hpp"
 #include "layers/GpuFsaPooling2d.hpp"
+#include "layers/GpuFsaResize.hpp"
 
 namespace armnn
 {
@@ -331,6 +332,13 @@
                 GpuFsaPooling2dCreateOp(preCompiledBlobPtr, input, *desc);
                 break;
             }
+            case (LayerType::Resize):
+            {
+                auto input = base.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+                auto desc = PolymorphicDowncast<const ResizeDescriptor*>(&base.GetParameters());
+                GpuFsaResizeCreateOp(preCompiledBlobPtr, input, *desc);
+                break;
+            }
             default:
                 // unsupported layer for GpuFsa backend
                 continue;
diff --git a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
index d9d95e5..56af9c4 100644
--- a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
+++ b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
@@ -16,6 +16,7 @@
 #include "layers/GpuFsaElementwiseBinaryAdd.hpp"
 #include "layers/GpuFsaElementwiseBinarySub.hpp"
 #include "layers/GpuFsaPooling2d.hpp"
+#include "layers/GpuFsaResize.hpp"
 #endif
 
 #include <vector>
@@ -187,6 +188,21 @@
                                         infos[0],
                                         *desc);
         }
+        case LayerType::Resize:
+        {
+            if (infos.size() != 2)
+            {
+                throw InvalidArgumentException("Invalid number of Resize TensorInfos. "
+                                               "TensorInfos should be of format: {input, output}.");
+            }
+
+            auto desc = PolymorphicDowncast<const ResizeDescriptor*>(&descriptor);
+
+            FORWARD_LAYER_VALIDATE_FUNC(GpuFsaResizeValidate,
+                                        reasonIfUnsupported,
+                                        infos[0],
+                                        *desc);
+        }
         case LayerType::Constant:
         case LayerType::Input:
         case LayerType::Output:
diff --git a/src/backends/gpuFsa/layers/CMakeLists.txt b/src/backends/gpuFsa/layers/CMakeLists.txt
index 8ffa934..9ea36b6 100644
--- a/src/backends/gpuFsa/layers/CMakeLists.txt
+++ b/src/backends/gpuFsa/layers/CMakeLists.txt
@@ -16,6 +16,8 @@
         GpuFsaElementwiseBinarySub.hpp
         GpuFsaPooling2d.cpp
         GpuFsaPooling2d.hpp
+        GpuFsaResize.cpp
+        GpuFsaResize.hpp
         UtilsGpuFsa.cpp
         UtilsGpuFsa.hpp
     )
diff --git a/src/backends/gpuFsa/layers/GpuFsaResize.cpp b/src/backends/gpuFsa/layers/GpuFsaResize.cpp
new file mode 100644
index 0000000..b46b601
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaResize.cpp
@@ -0,0 +1,79 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GpuFsaResize.hpp"
+#include "UtilsGpuFsa.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuResize.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
+
+using namespace arm_compute::experimental::dynamic_fusion;
+using namespace armnn::armcomputetensorutils;
+
+namespace armnn
+{
+
+arm_compute::Status GpuFsaResizeValidate(const TensorInfo& input,
+                                         const ResizeDescriptor& descriptor)
+{
+    // Create a new workload sketch, for validation purposes
+    auto compileCtx         = arm_compute::CLKernelLibrary::get().get_compile_context();
+    auto workloadContext    = GpuWorkloadContext(&compileCtx);
+    GpuWorkloadSketch sketch{ &workloadContext };
+
+    arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    aclInputInfo.set_are_values_constant(input.IsConstant());
+    arm_compute::ITensorInfo* inputInfo = workloadContext.create_tensor_info(aclInputInfo);
+
+    ResizeAttributes resizeAttributes = CreateResizeAttributes(descriptor);
+
+    return GpuResize::validate_op(sketch, inputInfo, resizeAttributes);
+}
+
+void GpuFsaResizeCreateOp(GpuFsaPreCompiledBlob* blob,
+                          const TensorInfo& input,
+                          const ResizeDescriptor& descriptor)
+{
+    GpuWorkloadSketch* sketch           = blob->sketch.get();
+    GpuWorkloadContext* workloadContext = blob->workloadContext.get();
+    std::vector<arm_compute::ITensorInfo*> inputTensorInfos  = {};
+    std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};
+
+    arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    aclInputInfo.set_are_values_constant(input.IsConstant());
+
+    inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInputInfo));
+
+    ResizeAttributes resizeAttributes = CreateResizeAttributes(descriptor);
+
+    // Validate operator, check status and update reasonIfUnsupported
+    arm_compute::Status aclStatus = GpuResize::validate_op(*sketch,
+                                                           inputTensorInfos[0],
+                                                           resizeAttributes);
+
+    const bool supported = aclStatus.error_code() == arm_compute::ErrorCode::OK;
+    if (!supported)
+    {
+        throw BackendCapabilityException("\"GpuFsa\" backend failed during resize validation");
+    }
+
+    arm_compute::ITensorInfo* addOutputInfo = GpuResize::create_op(*sketch,
+                                                                   inputTensorInfos[0],
+                                                                   resizeAttributes);
+
+    // Temporary fix until fusing attempt is made for GpuFsa backend and Output layer workload is created.
+    outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
+    GpuOutput::create_op(*sketch, addOutputInfo, outputTensorInfos[0]);
+
+    // Store the TensorInfos within the blob as unique_ptrs to be used later
+    blob->inputTensorInfos  = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
+    blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/GpuFsaResize.hpp b/src/backends/gpuFsa/layers/GpuFsaResize.hpp
new file mode 100644
index 0000000..04a2f77
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaResize.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/Descriptors.hpp>
+
+#include <gpuFsa/GpuFsaBackend.hpp>
+
+namespace armnn
+{
+arm_compute::Status GpuFsaResizeValidate(const TensorInfo& input,
+                                         const ResizeDescriptor& descriptor);
+
+void GpuFsaResizeCreateOp(GpuFsaPreCompiledBlob* blob,
+                          const TensorInfo& input,
+                          const ResizeDescriptor& descriptor);
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/UtilsGpuFsa.cpp b/src/backends/gpuFsa/layers/UtilsGpuFsa.cpp
index a1d96f0..b11f91c 100644
--- a/src/backends/gpuFsa/layers/UtilsGpuFsa.cpp
+++ b/src/backends/gpuFsa/layers/UtilsGpuFsa.cpp
@@ -58,4 +58,20 @@
     pool2dAttributes.exclude_padding(excludePadding);
 
     return pool2dAttributes;
+}
+
+arm_compute::experimental::dynamic_fusion::ResizeAttributes
+CreateResizeAttributes(const armnn::ResizeDescriptor& descriptor)
+{
+    arm_compute::experimental::dynamic_fusion::ResizeAttributes resizeAttributes{};
+    resizeAttributes.output_width(static_cast<int32_t>(descriptor.m_TargetWidth));
+    resizeAttributes.output_height(static_cast<int32_t>(descriptor.m_TargetHeight));
+    resizeAttributes.interpolation_policy(descriptor.m_Method == ResizeMethod::Bilinear ?
+                                          arm_compute::InterpolationPolicy::BILINEAR :
+                                          arm_compute::InterpolationPolicy::NEAREST_NEIGHBOR);
+    resizeAttributes.sampling_policy(descriptor.m_HalfPixelCenters ? arm_compute::SamplingPolicy::CENTER
+                                                                   : arm_compute::SamplingPolicy::TOP_LEFT);
+    resizeAttributes.align_corners(descriptor.m_AlignCorners);
+
+    return resizeAttributes;
 }
\ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/UtilsGpuFsa.hpp b/src/backends/gpuFsa/layers/UtilsGpuFsa.hpp
index 6c1d97a..2dbf363 100644
--- a/src/backends/gpuFsa/layers/UtilsGpuFsa.hpp
+++ b/src/backends/gpuFsa/layers/UtilsGpuFsa.hpp
@@ -9,6 +9,7 @@
 #include "arm_compute/dynamic_fusion/sketch/attributes/Conv2dAttributes.h"
 #include "arm_compute/dynamic_fusion/sketch/attributes/DepthwiseConv2dAttributes.h"
 #include "arm_compute/dynamic_fusion/sketch/attributes/Pool2dAttributes.h"
+#include "arm_compute/dynamic_fusion/sketch/attributes/ResizeAttributes.h"
 
 /// Utility function used to setup an arm_compute::Conv2dAttributes object from given descriptor
 /// @param[in] armnn::Convolution2dDescriptor
@@ -28,3 +29,9 @@
 /// @return arm_compute::experimental::dynamic_fusion::Pool2dAttributes
 arm_compute::experimental::dynamic_fusion::Pool2dAttributes
 CreatePool2dAttributes(const armnn::Pooling2dDescriptor& descriptor);
+
+/// Utility function used to setup an arm_compute::ResizeAttributes object from given descriptor
+/// @param[in] armnn::ResizeDescriptor
+/// @return arm_compute::experimental::dynamic_fusion::ResizeAttributes
+arm_compute::experimental::dynamic_fusion::ResizeAttributes
+CreateResizeAttributes(const armnn::ResizeDescriptor& descriptor);
diff --git a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
index ccab0bf..d2412bf 100644
--- a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
@@ -11,7 +11,7 @@
 #include "backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp"
 #include "backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp"
 #include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp"
-
+#include "backendsCommon/test/ResizeEndToEndTestImpl.hpp"
 
 #include <doctest/doctest.h>
 
@@ -129,4 +129,28 @@
     }
 }
 
+// Resize Bilinear
+TEST_CASE("GpuFsaResizeBilinearEndToEndFloatNhwcTest")
+{
+    ResizeBilinearEndToEnd<armnn::DataType::Float32>(gpuFsaDefaultBackends, armnn::DataLayout::NHWC);
+}
+
+// Resize NearestNeighbor
+TEST_CASE("GpuFsaResizeNearestNeighborEndToEndFloatNhwcTest")
+{
+    ResizeNearestNeighborEndToEnd<armnn::DataType::Float32>(gpuFsaDefaultBackends, armnn::DataLayout::NHWC);
+}
+
+TEST_CASE("GpuFsaResizeNearestNeighborEndToEndFloatAlignCornersNhwcTest")
+{
+    ResizeNearestNeighborEndToEnd<armnn::DataType::Float32>(gpuFsaDefaultBackends, armnn::DataLayout::NHWC, 
+                                                            true, false);
+}
+
+TEST_CASE("GpuFsaResizeNearestNeighborEndToEndFloatHalfPixelNhwcTest")
+{
+    ResizeNearestNeighborEndToEnd<armnn::DataType::Float32>(gpuFsaDefaultBackends, armnn::DataLayout::NHWC, 
+                                                            false, true);
+}
+
 }
diff --git a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
index 4e39a80..dda4d1f 100644
--- a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
@@ -149,4 +149,26 @@
     CHECK(supported);
 }
 
+TEST_CASE("IsLayerSupportedGpuFsaResize")
+{
+    TensorInfo inputInfo({ 1, 5, 5, 1 }, DataType::Float32);
+    TensorInfo outputInfo({ 1, 10, 10, 1 }, DataType::Float32);
+
+    ResizeDescriptor desc{};
+    desc.m_Method = ResizeMethod::NearestNeighbor;
+    desc.m_TargetHeight = 10;
+    desc.m_TargetWidth = 10;
+    desc.m_DataLayout = DataLayout::NHWC;
+
+    GpuFsaLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+    auto supported = supportChecker.IsLayerSupported(LayerType::Resize,
+                                                     {inputInfo, outputInfo},
+                                                     desc,
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     reasonIfNotSupported);
+    CHECK(supported);
+}
+
 }
\ No newline at end of file