Make Convert workloads use arm_compute::NECast in CpuAcc backend
NECast can use conversion instructions where they are available,
so this should in general be faster.
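
For illustration, the NeonConvertFp32ToFp16WorkloadValidate function
declared in the header below would typically just delegate to NECast's
own validation. This is a sketch only, since the .cpp change is not
shown in this diff; the BuildArmComputeTensorInfo helper and the
SATURATE convert policy are assumptions:

    arm_compute::Status NeonConvertFp32ToFp16WorkloadValidate(
        const TensorInfo& input, const TensorInfo& output)
    {
        // Build ACL tensor descriptions from the Arm NN ones and let
        // NECast decide whether it supports this conversion.
        const arm_compute::TensorInfo aclInput =
            armcomputetensorutils::BuildArmComputeTensorInfo(input);
        const arm_compute::TensorInfo aclOutput =
            armcomputetensorutils::BuildArmComputeTensorInfo(output);
        return arm_compute::NECast::validate(&aclInput, &aclOutput,
                                             arm_compute::ConvertPolicy::SATURATE);
    }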
Signed-off-by: Matthew Bentham <Matthew.Bentham@arm.com>
Change-Id: I3f259e17b280a4f4c36f363965ffbc8ee8c4c29f
diff --git a/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.hpp b/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.hpp
index 666f487..c6fed76 100644
--- a/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.hpp
+++ b/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.hpp
@@ -5,13 +5,17 @@
 
 #pragma once
 
+#include <arm_compute/runtime/NEON/functions/NECast.h>
 #include <armnn/backends/Workload.hpp>
 #include <armnn/backends/WorkloadData.hpp>
+#include <memory>
 #include <neon/workloads/NeonWorkloadUtils.hpp>
 
 namespace armnn
 {
 
+arm_compute::Status NeonConvertFp32ToFp16WorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
 class NeonConvertFp32ToFp16Workload : public Float32ToFloat16Workload<ConvertFp32ToFp16QueueDescriptor>
 {
 public:
@@ -23,9 +27,10 @@
     // Replace output tensor handle with the given TensorHandle
     void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
 private:
+    virtual void Reconfigure();
     using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>;
     std::vector<TensorHandlePair> m_TensorHandlePairs;
-    virtual void Reconfigure();
+    mutable std::unique_ptr<arm_compute::NECast> m_Cast;
 };
 
 } //namespace armnn
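
A sketch of how the new m_Cast member is likely wired up in the
corresponding .cpp (not part of this diff; the IAclTensorHandle
downcast and the SATURATE convert policy are assumptions):

    void NeonConvertFp32ToFp16Workload::Reconfigure()
    {
        // Rebind NECast to the current backing tensors, e.g. after a
        // tensor handle has been replaced via ReplaceInput/OutputTensorHandle.
        arm_compute::ITensor& input =
            static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
        arm_compute::ITensor& output =
            static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
        m_Cast = std::make_unique<arm_compute::NECast>();
        m_Cast->configure(&input, &output, arm_compute::ConvertPolicy::SATURATE);
    }

    void NeonConvertFp32ToFp16Workload::Execute() const
    {
        // m_Cast is declared mutable because NECast::run() is non-const
        // while Execute() is const.
        m_Cast->run();
    }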