IVGCVSW-7550 GpuFsa Op: Add ElementWiseBinary Operator ADD
* Adding support for Gpu Add operator
* Added tests for layer support, end-to-end execution and network optimization
Signed-off-by: Tracy Narine <tracy.narine@arm.com>
Change-Id: Ie9328d269c5c0ff60a7e10133b728ac9265033af
diff --git a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
index 79dd9d3..9972b24 100644
--- a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
@@ -6,7 +6,9 @@
#include "backendsCommon/test/EndToEndTestImpl.hpp"
#include "backendsCommon/test/Convolution2dEndToEndTestImpl.hpp"
+
#include "backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp"
+#include "backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp"
#include <doctest/doctest.h>
@@ -32,4 +34,15 @@
armnn::DataLayout::NHWC);
}
+// ElementwiseBinary ADD: verify GpuFsa end-to-end execution for both float widths
+TEST_CASE("GpuFsaElementwiseBinaryAddTestFloat32")
+{
+    ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float32>(gpuFsaDefaultBackends, armnn::BinaryOperation::Add);
+}
+
+TEST_CASE("GpuFsaElementwiseBinaryAddTestFloat16")
+{
+    ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float16>(gpuFsaDefaultBackends, armnn::BinaryOperation::Add);
+}
+
}
diff --git a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
index f162df0..49ddade 100644
--- a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -61,4 +61,24 @@
REQUIRE(reasonIfNotSupported.find("NCHW not supported by this kernel") != std::string::npos);
}
+TEST_CASE("IsLayerSupportedGpuFsaElementWiseBinaryAdd")
+{
+    // Query the GpuFsa backend for support of a simple 2x2 Float32 ADD.
+    ElementwiseBinaryDescriptor addDesc;
+    addDesc.m_Operation = BinaryOperation::Add;
+
+    const TensorInfo in0({ 2, 2 }, DataType::Float32);
+    const TensorInfo in1({ 2, 2 }, DataType::Float32);
+    const TensorInfo out({ 2, 2 }, DataType::Float32);
+
+    GpuFsaLayerSupport layerSupport;
+    std::string reason;
+    bool isSupported = layerSupport.IsLayerSupported(LayerType::ElementwiseBinary,
+                                                     {in0, in1, out},
+                                                     addDesc,
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     reason);
+    CHECK(isSupported);
+}
}
\ No newline at end of file
diff --git a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
index 7e094ce..673a52a 100644
--- a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -134,4 +134,52 @@
&IsLayerOfType<OutputLayer>));
}
+TEST_CASE("ElementwiseBinaryAddSupportedOptimizedNetwork")
+{
+    using namespace armnn;
+
+    const float qScale = 1.0f;
+    const int32_t qOffset = 0;
+
+    // Hold the shapes by value: binding a const reference to a braced-init-list
+    const TensorShape input0Shape = { 2, 2, 2 };
+    const TensorShape input1Shape = { 2, 2, 2 };
+    const TensorShape outputShape = { 2, 2, 2 };
+
+    TensorInfo input0TensorInfo(input0Shape, DataType::Float32, qScale, qOffset, true);
+    TensorInfo input1TensorInfo(input1Shape, DataType::Float32, qScale, qOffset, true);
+    TensorInfo outputTensorInfo(outputShape, DataType::Float32, qScale, qOffset);
+
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+    INetworkPtr network(INetwork::Create());
+
+    IConnectableLayer* input0 = network->AddInputLayer(0, "input0");
+    IConnectableLayer* input1 = network->AddInputLayer(1, "input1");
+
+    ElementwiseBinaryDescriptor desc;
+    desc.m_Operation = BinaryOperation::Add;
+
+    IConnectableLayer* elementwiseBinaryLayer = network->AddElementwiseBinaryLayer(desc, "elementwiseBinary");
+    IConnectableLayer* output = network->AddOutputLayer(2, "output");
+
+    Connect(input0, elementwiseBinaryLayer, input0TensorInfo, 0, 0);
+    Connect(input1, elementwiseBinaryLayer, input1TensorInfo, 0, 1);
+    Connect(elementwiseBinaryLayer, output, outputTensorInfo, 0, 0);
+
+    std::vector<BackendId> backends = { "GpuFsa" };
+
+    OptimizerOptionsOpaque optimizedOptions;
+    IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    CHECK(optNet);
+
+    Graph& graph = GetGraphForTesting(optNet.get());
+
+    // Check graph layer sequence to ensure that the network has been replaced with a PreCompiledLayer
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
+                        &IsLayerOfType<InputLayer>,
+                        &IsLayerOfType<InputLayer>,
+                        &IsLayerOfType<PreCompiledLayer>,
+                        &IsLayerOfType<OutputLayer>));
+}
+
}
\ No newline at end of file