IVGCVSW-7854 Remove/rewrite asserts in the backends.
* Identify usages of ARMNN_ASSERT that should be proper exceptions.
* Change ARMNN_ASSERT in Doctests to CHECK.
* Verify any remaining assertions are reasonable.
Signed-off-by: Colm Donelan <colm.donelan@arm.com>
Change-Id: Ifd1f2a5a4bb60135e8654305035ec70e09c4dc2d
diff --git a/src/backends/neon/NeonTensorHandle.hpp b/src/backends/neon/NeonTensorHandle.hpp
index e5f2107..3036760 100644
--- a/src/backends/neon/NeonTensorHandle.hpp
+++ b/src/backends/neon/NeonTensorHandle.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -8,10 +8,9 @@
#include <BFloat16.hpp>
#include <Half.hpp>
-#include <armnn/utility/Assert.hpp>
-
#include <aclCommon/ArmComputeTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/Exceptions.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <arm_compute/runtime/MemoryGroup.h>
@@ -68,7 +67,7 @@
// If we have enabled Importing, don't manage the tensor
if (!m_IsImportEnabled)
{
- ARMNN_ASSERT(m_MemoryGroup != nullptr);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_MemoryGroup, "arm_compute::MemoryGroup is null.");
m_MemoryGroup->manage(&m_Tensor);
}
}
diff --git a/src/backends/neon/NeonTimer.cpp b/src/backends/neon/NeonTimer.cpp
index dbb1503..88d8cb0 100644
--- a/src/backends/neon/NeonTimer.cpp
+++ b/src/backends/neon/NeonTimer.cpp
@@ -1,12 +1,11 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "NeonTimer.hpp"
#include "NeonInterceptorScheduler.hpp"
-#include <armnn/utility/Assert.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <memory>
@@ -21,7 +20,10 @@
void NeonTimer::Start()
{
m_Kernels.clear();
- ARMNN_ASSERT(g_Interceptor->GetKernels() == nullptr);
+ if (g_Interceptor->GetKernels() != nullptr)
+ {
+ throw RuntimeException("This NeonTimer instance has already been started.");
+ }
g_Interceptor->SetKernels(&m_Kernels);
m_RealSchedulerType = arm_compute::Scheduler::get_type();
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index 96429a8..9c32e32 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -1,13 +1,11 @@
//
-// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "NeonWorkloadFactoryHelper.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <armnn/utility/Assert.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnn/backends/MemCopyWorkload.hpp>
@@ -283,27 +281,20 @@
TEST_CASE("CreateConvolution2dFastMathEnabledWorkload")
{
Graph graph;
- using ModelOptions = std::vector<BackendOptions>;
+ using ModelOptions = std::vector<BackendOptions>;
ModelOptions modelOptions = {};
- BackendOptions cpuAcc("CpuAcc",
- {
- { "FastMathEnabled", true }
- });
+ BackendOptions cpuAcc("CpuAcc", { { "FastMathEnabled", true } });
modelOptions.push_back(cpuAcc);
NeonWorkloadFactory factory =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager(), modelOptions);
- auto workload =
- CreateConvolution2dWorkloadFastMathTest<NeonConvolution2dWorkload, armnn::DataType::Float32>(factory,
- graph,
- DataLayout::NCHW,
- modelOptions);
+ auto workload = CreateConvolution2dWorkloadFastMathTest<NeonConvolution2dWorkload, armnn::DataType::Float32>(
+ factory, graph, DataLayout::NCHW, modelOptions);
- ARMNN_ASSERT(workload != nullptr);
+ CHECK(workload != nullptr);
auto conv2dWorkload = PolymorphicDowncast<NeonConvolution2dWorkload*>(workload.get());
- IgnoreUnused(conv2dWorkload);
- ARMNN_ASSERT(conv2dWorkload != nullptr);
- ARMNN_ASSERT(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
+ CHECK(conv2dWorkload != nullptr);
+ CHECK(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
}
template <typename armnn::DataType DataType>
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index bc8ad5d..d6fd081 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -1,10 +1,12 @@
//
-// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <Graph.hpp>
#include <Network.hpp>
+#include <aclCommon/BaseMemoryManager.hpp>
+
#include <neon/NeonTensorHandle.hpp>
#include <neon/NeonTensorHandleFactory.hpp>
@@ -16,7 +18,6 @@
#include <CommonTestUtils.hpp>
#include <doctest/doctest.h>
-#include <armnn/utility/Assert.hpp>
TEST_SUITE("NeonTensorHandleTests")
{
@@ -190,7 +191,7 @@
NeonTensorHandleFactory handleFactory(memoryManager);
// NeonTensorHandleFactory supports InPlaceComputation
- ARMNN_ASSERT(handleFactory.SupportsInPlaceComputation());
+    CHECK(handleFactory.SupportsInPlaceComputation());
}
}
diff --git a/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp b/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
index a44c9aa..8cd36db 100644
--- a/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
+++ b/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -76,7 +76,8 @@
aclDataLayout = ConvertDataLayout(armnn::DataLayout::NHWC);
break;
default:
- ARMNN_ASSERT_MSG(false, "Unsupported axis");
+ throw InvalidArgumentException("Value for axis: " + std::to_string(descriptor.m_Parameters.m_Axis) +
+ " is not valid");
break;
}
input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp
index f5b0128..270e3fa 100644
--- a/src/backends/neon/workloads/NeonConstantWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -65,7 +65,7 @@
{
const ConstantQueueDescriptor& data = this->m_Data;
- ARMNN_ASSERT(data.m_LayerOutput != nullptr);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(data.m_LayerOutput, "Output tensor handle is null.");
arm_compute::ITensor& output =
PolymorphicDowncast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
arm_compute::DataType computeDataType =
@@ -116,8 +116,7 @@
}
default:
{
- ARMNN_ASSERT_MSG(false, "Unknown data type");
- break;
+ throw InvalidArgumentException("Unknown data type.");
}
}
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index c81022b..fdc52ef 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -95,8 +95,6 @@
m_BiasTensor = std::make_unique<arm_compute::Tensor>();
BuildArmComputeTensor(*m_BiasTensor, info.m_InputTensorInfos[2], m_Data.m_Parameters.m_DataLayout);
m_BiasTensor->info()->set_are_values_constant(info.m_InputTensorInfos[2].IsConstant());
- // We assume here that NeonConvolution2dWorkloadValidate has been called before the constructor.
- ARMNN_ASSERT(info.m_InputTensorInfos[2].IsConstant() == true);
}
arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
@@ -141,8 +139,6 @@
GetGuid());
m_ConvolutionLayer.reset(convolutionLayer.release());
-
- ARMNN_ASSERT(m_ConvolutionLayer);
m_KernelTensorInfo = info.m_InputTensorInfos[1];
if (m_Data.m_Parameters.m_BiasEnabled)
diff --git a/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp
index 5bf6e10..ef03dde 100644
--- a/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -105,9 +105,6 @@
this->GetGuid());
m_ConvolutionLayer.reset(convolutionLayer.release());
-
- ARMNN_ASSERT(m_ConvolutionLayer);
-
m_ConvolutionLayer->prepare();
}
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index b9e9ebb..de6601f 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -146,15 +146,12 @@
detailsInfo,
GetGuid());
- ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
-
m_pDepthwiseConvolutionLayer->prepare();
}
void NeonDepthwiseConvolutionWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonDepthwiseConvolutionWorkload_Execute");
- ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
m_pDepthwiseConvolutionLayer->run();
}
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 9503abd..d372792 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,7 +37,9 @@
arm_compute::TensorInfo* optionalAclBiases = nullptr;
if (descriptor.m_BiasEnabled)
{
- ARMNN_ASSERT(biases.has_value());
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+ biases.has_value(),
+ "NeonFullyConnectedWorkload: Bias was enabled in the descriptor but no value was supplied.");
aclBiases = BuildArmComputeTensorInfo(biases.value());
aclBiases.set_are_values_constant(biases.value().IsConstant());
optionalAclBiases = &aclBiases;
diff --git a/src/backends/neon/workloads/NeonGatherNdWorkload.cpp b/src/backends/neon/workloads/NeonGatherNdWorkload.cpp
index 9388472..59fc20a 100644
--- a/src/backends/neon/workloads/NeonGatherNdWorkload.cpp
+++ b/src/backends/neon/workloads/NeonGatherNdWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -139,8 +139,6 @@
flattenedCoeff_Info.SetShape({ keyIndices["ND"] });
BuildArmComputeTensor(m_FlattenedCoeff, flattenedCoeff_Info);
armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_FlattenedCoeff);
- ARMNN_ASSERT_MSG(indicesInfo.GetDataType() == DataType::Signed32,
- "flattenedCoeff must be same data type as m_FlattenedCoeff");
CopyArmComputeITensorData<int32_t>(flattenedCoeff.data(), m_FlattenedCoeff);
// Prepare the tensor to store the output of the multiplication
diff --git a/src/backends/neon/workloads/NeonSqrtWorkload.cpp b/src/backends/neon/workloads/NeonSqrtWorkload.cpp
index 9c3d8a0..ee57a01 100644
--- a/src/backends/neon/workloads/NeonSqrtWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSqrtWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -31,7 +31,10 @@
NeonSqrtWorkload::NeonSqrtWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info)
: NeonBaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
{
- ARMNN_ASSERT(descriptor.m_Parameters.m_Operation == UnaryOperation::Sqrt);
+ if (descriptor.m_Parameters.m_Operation != UnaryOperation::Sqrt)
+ {
+ throw InvalidArgumentException("NeonSqrtWorkload: The descriptor does not indicate a Sqrt operation.");
+ }
ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonSqrtWorkload_Construct",
descriptor.m_Parameters,
diff --git a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
index 2fa118b..2eedf98 100644
--- a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "NeonTransposeConvolution2dWorkload.hpp"
@@ -37,9 +37,10 @@
if (descriptor.m_BiasEnabled)
{
- ARMNN_ASSERT(biases.has_value());
-
- aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+ biases.has_value(),
+ "NeonTransposeConvolution2dWorkload: Bias was enabled in the descriptor but no value was supplied.");
+ aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
}
@@ -97,8 +98,6 @@
m_Layer = std::make_unique<arm_compute::NEDeconvolutionLayer>(memoryManager);
m_Layer->configure(&input, m_KernelTensor.get(), m_BiasTensor.get(), &output, padStrideInfo);
- ARMNN_ASSERT(m_Layer);
-
InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
if (m_Data.m_Parameters.m_BiasEnabled)
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index 694c3ab..2c7cd1b 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -1,9 +1,10 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
+#include <armnn/Exceptions.hpp>
#include <armnn/backends/Workload.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <neon/NeonTensorHandle.hpp>
@@ -69,8 +70,7 @@
TensorInfo tensorInfo,
const ITensorHandle* handle)
{
- ARMNN_ASSERT(handle);
-
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(handle, "Null tensor handle passed to InitializeArmComputeTensorData.");
switch(tensorInfo.GetDataType())
{
case DataType::Float16:
@@ -104,8 +104,7 @@
inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
const ConstTensorHandle* handle)
{
- ARMNN_ASSERT(handle);
-
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(handle, "Null tensor handle passed to InitializeArmComputeTensorData.");
switch(handle->GetTensorInfo().GetDataType())
{
case DataType::Float16: