IVGCVSW-6732 Tests surrounded in '#if defined(ARMNNREF_ENABLED)' in android-nn-driver do not execute.
* Change to src/backends/cl/workloads/ClLstmFloatWorkload.cpp fixes LstmTests_GpuAcc tests.
* Change to src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp & ClConvertFp32ToFp16Workload.hpp
fixes MeanTests_GpuAcc and Convolution2DTests_1.1 tests.
* Added UnitTests to src/backends/cl/test/ClImportTensorHandleTests.cpp to test import on Convert Layers.
!android-nn-driver:7264
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I0c46dc4b9c54eca8771ab12ed0302b6224606957
diff --git a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
index 8ccf157..4ac1274 100644
--- a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
@@ -80,8 +80,8 @@
// Replace output tensor handle with the given TensorHandle
void ClConvertFp16ToFp32Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
{
- ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
- this->m_Data.m_Inputs[slot] = tensorHandle;
+ ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
+ this->m_Data.m_Outputs[slot] = tensorHandle;
try
{
Reconfigure();
@@ -89,7 +89,7 @@
catch(armnn::UnimplementedException& e)
{
// Cannot reconfigure, revert the slot back and throw the exception.
- this->m_Data.m_Inputs[slot] = backupHandle;
+ this->m_Data.m_Outputs[slot] = backupHandle;
throw e;
}
}
diff --git a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
index a44a80c..307314d 100644
--- a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
@@ -80,8 +80,8 @@
// Replace output tensor handle with the given TensorHandle
void ClConvertFp32ToFp16Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
{
- ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
- this->m_Data.m_Inputs[slot] = tensorHandle;
+ ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
+ this->m_Data.m_Outputs[slot] = tensorHandle;
try
{
Reconfigure();
@@ -89,7 +89,7 @@
catch(armnn::UnimplementedException& e)
{
// Cannot reconfigure, revert the slot back and throw the exception.
- this->m_Data.m_Inputs[slot] = backupHandle;
+ this->m_Data.m_Outputs[slot] = backupHandle;
throw e;
}
}
diff --git a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
index e190f33..d20c6fc 100644
--- a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
@@ -310,7 +310,7 @@
if (paramsInfo.m_ProjectionBias != nullptr)
{
- aclProjectionBiasInfo = BuildArmComputeTensorInfo(paramsInfo.GetInputGateBias());
+ aclProjectionBiasInfo = BuildArmComputeTensorInfo(paramsInfo.GetProjectionBias());
}
lstm_params_info.set_projection_params(&aclProjectionWeightsInfo,
paramsInfo.m_ProjectionBias != nullptr ?