IVGCVSW-4051 Update ACL pin to 94e0cf960ea6116eb57fa88d9b951f859b52c602
* Add is_initialised() check to CLScheduler in
ClContextControl.
* Now use CLDepthwiseConvolutionLayer instead of
CLDepthwiseConvolutionLayer3x3.
* Now use NEDepthwiseConvolutionLayer instead of
NEDepthwiseConvolutionLayerOptimized.
!android-nn-driver:2212
Signed-off-by: James Conroy <james.conroy@arm.com>
Change-Id: I509af65315a4322dc820a5cc1bbd36ed6999b4a7
diff --git a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
index e8d537f..4b43052 100644
--- a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
@@ -59,20 +59,24 @@
auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, m_Data.m_Parameters.m_Axis);
int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
+ auto layer = std::make_unique<arm_compute::NEArgMinMaxLayer>();
+
if (m_Data.m_Parameters.m_Function == ArgMinMaxFunction::Max)
{
- m_ArgMinMaxLayer.configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MAX);
+ layer->configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MAX);
}
else
{
- m_ArgMinMaxLayer.configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MIN);
+ layer->configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MIN);
}
+
+ m_ArgMinMaxLayer.reset(layer.release());
}
void NeonArgMinMaxWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonArgMinMaxWorkload_Execute");
- m_ArgMinMaxLayer.run();
+ m_ArgMinMaxLayer->run();
}
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonArgMinMaxWorkload.hpp b/src/backends/neon/workloads/NeonArgMinMaxWorkload.hpp
index 6301b13..6e1cc46 100644
--- a/src/backends/neon/workloads/NeonArgMinMaxWorkload.hpp
+++ b/src/backends/neon/workloads/NeonArgMinMaxWorkload.hpp
@@ -8,7 +8,8 @@
#include <backendsCommon/Workload.hpp>
#include <arm_compute/core/Error.h>
-#include <arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h>
+#include <arm_compute/runtime/IFunction.h>
+
namespace armnn
{
@@ -23,7 +24,7 @@
virtual void Execute() const override;
private:
- mutable arm_compute::NEArgMinMaxLayer m_ArgMinMaxLayer;
+ std::unique_ptr<arm_compute::IFunction> m_ArgMinMaxLayer;
};
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index 18085ed..2093613 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -120,19 +120,19 @@
// Check for optimisation opportunities
arm_compute::Status optimizationStatus =
- arm_compute::NEDepthwiseConvolutionLayerOptimized::validate(inputInfo,
- kernelInfo,
- biasInfo,
- outputInfo,
- padStrideInfo,
- depthMultiplier,
- arm_compute::ActivationLayerInfo(),
- aclDilationInfo);
+ arm_compute::NEDepthwiseConvolutionLayer::validate(inputInfo,
+ kernelInfo,
+ biasInfo,
+ outputInfo,
+ padStrideInfo,
+ depthMultiplier,
+ arm_compute::ActivationLayerInfo(),
+ aclDilationInfo);
if (optimizationStatus.error_code() == arm_compute::ErrorCode::OK)
{
- m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayerOptimized>();
- static_cast<arm_compute::NEDepthwiseConvolutionLayerOptimized*>(
+ m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayer>();
+ static_cast<arm_compute::NEDepthwiseConvolutionLayer*>(
m_pDepthwiseConvolutionLayer.get())->configure(&input,
m_KernelTensor.get(),
m_BiasTensor.get(),