MLCE-190: Neon and CL Constant Workloads do not support newer DataTypes
* Added support for QASYMM8_SIGNED, QSYMM16, QSYMM8 and QSYMM8_PER_CHANNEL to Neon and CL backends
* Added unit tests to Neon, CL and Ref backends
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I4c726b6d86b4d75abedd130dcea372d1e82be5c2
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index dccfd1e..0780f4b 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -882,5 +882,22 @@
return result;
}
+// Builds a minimal Constant -> Output graph and queries the given workload
+// factory for whether a Constant layer producing OutputDataType is supported.
+// Any failure reason is written to reasonIfUnsupported.
+template<typename FactoryType, armnn::DataType OutputDataType>
+bool IsConstantLayerSupportedTests(std::string& reasonIfUnsupported)
+{
+    armnn::Graph graph;
+
+    armnn::Layer* const constantLayer = graph.AddLayer<armnn::ConstantLayer>("ConstantLayerName");
+    armnn::Layer* const outputLayer   = graph.AddLayer<armnn::OutputLayer>(0, "OutputLayerName");
+
+    const armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);
+
+    constantLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+    constantLayer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
+
+    return FactoryType::IsLayerSupported(*constantLayer, OutputDataType, reasonIfUnsupported);
+}
} //namespace
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 12c71c0..546cbc1 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -23,6 +23,7 @@
#include "workloads/ClBatchNormalizationFloatWorkload.hpp"
#include "workloads/ClBatchToSpaceNdWorkload.hpp"
#include "workloads/ClComparisonWorkload.hpp"
+#include "workloads/ClConstantWorkload.hpp"
#include "workloads/ClConvertFp16ToFp32Workload.hpp"
#include "workloads/ClConvertFp32ToFp16Workload.hpp"
#include "workloads/ClConvolution2dWorkload.hpp"
@@ -284,10 +285,9 @@
bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
-    return IsSupportedForDataTypeCl(reasonIfUnsupported,
-                                    output.GetDataType(),
-                                    &TrueFunc<>,
-                                    &TrueFunc<>);
+    // Delegate to the workload's own validate function so the list of
+    // supported DataTypes lives in one place (ClConstantWorkloadValidate)
+    // rather than in the generic per-backend data-type check.
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConstantWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   output);
}
bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
diff --git a/src/backends/cl/test/ClLayerSupportTests.cpp b/src/backends/cl/test/ClLayerSupportTests.cpp
index 33a2912..81d0cc2 100644
--- a/src/backends/cl/test/ClLayerSupportTests.cpp
+++ b/src/backends/cl/test/ClLayerSupportTests.cpp
@@ -131,4 +131,41 @@
BOOST_CHECK(result);
}
+// Exercises ClLayerSupport::IsConstantSupported for each DataType.
+// Expectations mirror the supported-type list in ClConstantWorkloadValidate:
+// F16/F32 and the quantized types pass; Boolean and BFloat16 are absent from
+// that list and must be rejected.
+BOOST_AUTO_TEST_CASE(IsConstantSupportedCl)
+{
+    std::string reasonIfUnsupported;
+
+    bool result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+            armnn::DataType::Float16>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+            armnn::DataType::Float32>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+            armnn::DataType::QAsymmU8>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    // Boolean is not a CL-supported constant type.
+    result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+            armnn::DataType::Boolean>(reasonIfUnsupported);
+    BOOST_CHECK(!result);
+
+    result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+            armnn::DataType::QSymmS16>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+            armnn::DataType::QSymmS8>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+            armnn::DataType::QAsymmS8>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    // BFloat16 is supported on Neon but not on CL (no BFLOAT16 entry in
+    // ClConstantWorkloadValidate).
+    result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+            armnn::DataType::BFloat16>(reasonIfUnsupported);
+    BOOST_CHECK(!result);
+}
+
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/cl/workloads/ClConstantWorkload.cpp b/src/backends/cl/workloads/ClConstantWorkload.cpp
index e928870..bae7446 100644
--- a/src/backends/cl/workloads/ClConstantWorkload.cpp
+++ b/src/backends/cl/workloads/ClConstantWorkload.cpp
@@ -15,6 +15,31 @@
namespace armnn
{
+// Validates that the CL backend can create a Constant workload producing
+// 'output'. Returns an empty (OK) Status when the converted arm_compute
+// DataType is one the workload can copy, otherwise a RUNTIME_ERROR Status
+// with a reason string.
+arm_compute::Status ClConstantWorkloadValidate(const TensorInfo& output)
+{
+    // Renamed from 'neonOutputInfo': this is the CL workload, the old name
+    // was a copy-paste from the Neon implementation.
+    const arm_compute::TensorInfo clOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    std::array<arm_compute::DataType, 7> supportedTypes = {
+            arm_compute::DataType::F16,
+            arm_compute::DataType::F32,
+            arm_compute::DataType::QASYMM8,
+            arm_compute::DataType::QASYMM8_SIGNED,
+            arm_compute::DataType::QSYMM16,
+            arm_compute::DataType::QSYMM8,
+            arm_compute::DataType::QSYMM8_PER_CHANNEL
+    };
+    auto it = std::find(begin(supportedTypes), end(supportedTypes), clOutputInfo.data_type());
+
+    if (it != end(supportedTypes))
+    {
+        return arm_compute::Status{};
+    }
+    else
+    {
+        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported DataType"};
+    }
+}
+
ClConstantWorkload::ClConstantWorkload(const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info)
: BaseWorkload<ConstantQueueDescriptor>(descriptor, info)
, m_RanOnce(false)
@@ -54,6 +79,22 @@
CopyArmComputeClTensorData(output, data.m_LayerOutput->GetConstTensor<uint8_t>());
break;
}
+ case arm_compute::DataType::QASYMM8_SIGNED:
+ {
+ CopyArmComputeClTensorData(output, data.m_LayerOutput->GetConstTensor<int8_t>());
+ break;
+ }
+ case arm_compute::DataType::QSYMM16:
+ {
+ CopyArmComputeClTensorData(output, data.m_LayerOutput->GetConstTensor<int16_t>());
+ break;
+ }
+ case arm_compute::DataType::QSYMM8:
+ case arm_compute::DataType::QSYMM8_PER_CHANNEL:
+ {
+ CopyArmComputeClTensorData(output, data.m_LayerOutput->GetConstTensor<int8_t>());
+ break;
+ }
default:
{
ARMNN_ASSERT_MSG(false, "Unknown data type");
diff --git a/src/backends/cl/workloads/ClConstantWorkload.hpp b/src/backends/cl/workloads/ClConstantWorkload.hpp
index 75325dc..e5a1d44 100644
--- a/src/backends/cl/workloads/ClConstantWorkload.hpp
+++ b/src/backends/cl/workloads/ClConstantWorkload.hpp
@@ -5,10 +5,13 @@
#pragma once
+#include <arm_compute/core/Error.h>
#include <backendsCommon/Workload.hpp>
namespace armnn
{
+arm_compute::Status ClConstantWorkloadValidate(const TensorInfo& output);
+
class ClConstantWorkload : public BaseWorkload<ConstantQueueDescriptor>
{
public:
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 44e84fb..5d59ab8 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -25,6 +25,7 @@
#include "workloads/NeonBatchNormalizationWorkload.hpp"
#include "workloads/NeonBatchToSpaceNdWorkload.hpp"
#include "workloads/NeonComparisonWorkload.hpp"
+#include "workloads/NeonConstantWorkload.hpp"
#include "workloads/NeonConvolution2dWorkload.hpp"
#include "workloads/NeonDepthToSpaceWorkload.hpp"
#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
@@ -253,10 +254,9 @@
bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
-    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
-                                      output.GetDataType(),
-                                      &TrueFunc<>,
-                                      &TrueFunc<>);
+    // Delegate to the workload's own validate function so the list of
+    // supported DataTypes lives in one place (NeonConstantWorkloadValidate)
+    // rather than in the generic per-backend data-type check.
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConstantWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   output);
}
bool NeonLayerSupport::IsConvertBf16ToFp32Supported(const TensorInfo& input,
diff --git a/src/backends/neon/test/NeonLayerSupportTests.cpp b/src/backends/neon/test/NeonLayerSupportTests.cpp
index 2d43125..3b086ad 100644
--- a/src/backends/neon/test/NeonLayerSupportTests.cpp
+++ b/src/backends/neon/test/NeonLayerSupportTests.cpp
@@ -85,4 +85,41 @@
BOOST_CHECK(result);
}
+// Exercises NeonLayerSupport::IsConstantSupported for each DataType.
+// Expectations mirror the supported-type list in NeonConstantWorkloadValidate:
+// BFloat16, F16/F32 and the quantized types pass; Boolean is absent from that
+// list and must be rejected.
+BOOST_AUTO_TEST_CASE(IsConstantSupportedNeon)
+{
+    std::string reasonIfUnsupported;
+
+    bool result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+            armnn::DataType::Float16>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+            armnn::DataType::Float32>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+            armnn::DataType::QAsymmU8>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    // Boolean is not a Neon-supported constant type.
+    result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+            armnn::DataType::Boolean>(reasonIfUnsupported);
+    BOOST_CHECK(!result);
+
+    result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+            armnn::DataType::QSymmS16>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+            armnn::DataType::QSymmS8>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+            armnn::DataType::QAsymmS8>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    // Unlike CL, Neon lists BFLOAT16 as supported.
+    result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+            armnn::DataType::BFloat16>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+}
+
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp
index 1cffbe1..f7c8a73 100644
--- a/src/backends/neon/workloads/NeonConstantWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp
@@ -19,6 +19,32 @@
namespace armnn
{
+// Validates that the Neon backend can create a Constant workload producing
+// 'output'. Returns an empty (OK) Status when the converted arm_compute
+// DataType is one the workload can copy, otherwise a RUNTIME_ERROR Status
+// with a reason string.
+arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output)
+{
+    const arm_compute::TensorInfo neonOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    static const std::array<arm_compute::DataType, 8> supportedTypes = {
+            arm_compute::DataType::BFLOAT16,
+            arm_compute::DataType::F16,
+            arm_compute::DataType::F32,
+            arm_compute::DataType::QASYMM8,
+            arm_compute::DataType::QASYMM8_SIGNED,
+            arm_compute::DataType::QSYMM16,
+            arm_compute::DataType::QSYMM8,
+            arm_compute::DataType::QSYMM8_PER_CHANNEL
+    };
+
+    const bool typeSupported = std::find(supportedTypes.begin(),
+                                         supportedTypes.end(),
+                                         neonOutputInfo.data_type()) != supportedTypes.end();
+
+    return typeSupported ? arm_compute::Status{}
+                         : arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported DataType"};
+}
+
NeonConstantWorkload::NeonConstantWorkload(const ConstantQueueDescriptor& descriptor,
const WorkloadInfo& info)
: BaseWorkload<ConstantQueueDescriptor>(descriptor, info)
@@ -68,6 +94,22 @@
CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<uint8_t>(), output);
break;
}
+ case arm_compute::DataType::QASYMM8_SIGNED:
+ {
+ CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int8_t>(), output);
+ break;
+ }
+ case arm_compute::DataType::QSYMM16:
+ {
+ CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int16_t>(), output);
+ break;
+ }
+ case arm_compute::DataType::QSYMM8:
+ case arm_compute::DataType::QSYMM8_PER_CHANNEL:
+ {
+ CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int8_t>(), output);
+ break;
+ }
default:
{
ARMNN_ASSERT_MSG(false, "Unknown data type");
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.hpp b/src/backends/neon/workloads/NeonConstantWorkload.hpp
index 18c1547..f800a45 100644
--- a/src/backends/neon/workloads/NeonConstantWorkload.hpp
+++ b/src/backends/neon/workloads/NeonConstantWorkload.hpp
@@ -9,6 +9,7 @@
namespace armnn
{
+arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output);
class NeonConstantWorkload : public BaseWorkload<ConstantQueueDescriptor>
{
diff --git a/src/backends/reference/test/RefLayerSupportTests.cpp b/src/backends/reference/test/RefLayerSupportTests.cpp
index 1d4b4a0..2a27a9d 100644
--- a/src/backends/reference/test/RefLayerSupportTests.cpp
+++ b/src/backends/reference/test/RefLayerSupportTests.cpp
@@ -235,4 +235,41 @@
!= std::string::npos);
}
+// Exercises RefLayerSupport::IsConstantSupported for each DataType.
+// NOTE(review): expectations encode current reference-backend behaviour —
+// Float16 and Boolean rejected, BFloat16 and the quantized types accepted.
+// Confirm against RefLayerSupport::IsConstantSupported (not visible in this
+// patch).
+BOOST_AUTO_TEST_CASE(IsConstantSupportedRef)
+{
+    std::string reasonIfUnsupported;
+
+    // Float16 constants are expected to be unsupported on the Ref backend.
+    bool result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+            armnn::DataType::Float16>(reasonIfUnsupported);
+    BOOST_CHECK(!result);
+
+    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+            armnn::DataType::Float32>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+            armnn::DataType::QAsymmU8>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+            armnn::DataType::Boolean>(reasonIfUnsupported);
+    BOOST_CHECK(!result);
+
+    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+            armnn::DataType::QSymmS16>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+            armnn::DataType::QSymmS8>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+            armnn::DataType::QAsymmS8>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+            armnn::DataType::BFloat16>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+}
+
+
BOOST_AUTO_TEST_SUITE_END()