IVGCVSW-3723 Adding reference workload support for ArgMinMax
Change-Id: I65209ecec4e3abf808163239748d6e830568c2e3
Signed-off-by: Nikhil Raj <nikhil.raj@arm.com>
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 572f617..14183a7 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -192,6 +192,29 @@
return supported;
}
+bool RefLayerSupport::IsArgMinMaxSupported(const TensorInfo& input, const TensorInfo& output,
+                                           const ArgMinMaxDescriptor& descriptor,
+                                           Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(descriptor);
+
+    // ArgMin/ArgMax returns tensor indices, so the output is always Signed32.
+    std::array<DataType, 3> supportedTypes =
+    {
+        DataType::Float32,
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
+    };
+
+    bool supported = true;
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+                                  "Reference ArgMinMax: input is not a supported type.");
+    supported &= CheckSupportRule(TypeIs(output, DataType::Signed32), reasonIfUnsupported,
+                                  "Reference ArgMinMax: output type not supported.");
+
+    return supported;
+}
+
bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& mean,