IVGCVSW-5508 Activate compiler warnings in ArmNN TfLite Delegate
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I1a8e2aa618ff693c61010e6150f3ca41b8ab1201
diff --git a/delegate/src/armnn_delegate.cpp b/delegate/src/armnn_delegate.cpp
index 3380c84..d2b1796 100644
--- a/delegate/src/armnn_delegate.cpp
+++ b/delegate/src/armnn_delegate.cpp
@@ -67,6 +67,7 @@
static const TfLiteRegistration kArmnnSubgraphRegistration = {
// ArmnnSubgraph Init
.init = [](TfLiteContext* tfLiteContext, const char* buffer, size_t length) -> void* {
+ armnn::IgnoreUnused(length);
const TfLiteDelegateParams* parameters = reinterpret_cast<const TfLiteDelegateParams*>(buffer);
return static_cast<void*>(ArmnnSubgraph::Create(
@@ -74,6 +75,7 @@
},
// ArmnnSubgraph Free
.free = [](TfLiteContext* tfLiteContext, void* buffer) -> void {
+ armnn::IgnoreUnused(tfLiteContext);
if (buffer != nullptr)
{
delete static_cast<ArmnnSubgraph*>(buffer);
@@ -208,7 +210,7 @@
const TfLiteIntArray* inputs,
std::vector<armnn::BindingPointInfo>& inputBindings)
{
- const size_t numInputs = inputs->size;
+ const size_t numInputs = static_cast<size_t>(inputs->size);
for (unsigned int i = 0; i < numInputs; ++i)
{
const int32_t tensorId = inputs->data[i];
@@ -227,7 +229,7 @@
outputSlot.SetTensorInfo(tensorInfo);
// Store for creating connections
- delegateData.m_OutputSlotForNode[tensorId] = &outputSlot;
+    delegateData.m_OutputSlotForNode[static_cast<std::size_t>(tensorId)] = &outputSlot;
inputBindings.push_back(std::make_pair(bindingId, tensorInfo));
}
@@ -240,7 +242,7 @@
const TfLiteIntArray* outputs,
std::vector<armnn::BindingPointInfo>& outputBindings)
{
- const size_t numOutputs = outputs->size;
+ const size_t numOutputs = static_cast<size_t>(outputs->size);
for (unsigned int i = 0; i < numOutputs; ++i)
{
const int32_t tensorId = outputs->data[i];
@@ -250,8 +252,8 @@
armnn::IConnectableLayer* layer = delegateData.m_Network->AddOutputLayer(bindingId);
auto tensorInfo = GetTensorInfoForTfLiteTensor(tensor);
- ARMNN_ASSERT(delegateData.m_OutputSlotForNode[tensorId] != nullptr);
- delegateData.m_OutputSlotForNode[tensorId]->Connect(layer->GetInputSlot(0));
+    ARMNN_ASSERT(delegateData.m_OutputSlotForNode[static_cast<std::size_t>(tensorId)] != nullptr);
+    delegateData.m_OutputSlotForNode[static_cast<std::size_t>(tensorId)]->Connect(layer->GetInputSlot(0));
outputBindings.push_back(std::make_pair(bindingId, tensorInfo));
}
@@ -272,7 +274,6 @@
DelegateData delegateData(delegate->m_Options.GetBackends());
// Build ArmNN Network
- using NetworkOptions = std::vector<armnn::BackendOptions>;
armnn::NetworkOptions networkOptions = {};
armnn::NetworkId networkId;
delegateData.m_Network = armnn::INetwork::Create(networkOptions);
@@ -300,12 +301,12 @@
if (tfLiteContext->GetNodeAndRegistration(
tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
{
- throw armnn::Exception("TfLiteArmnnDelegate: Unable to get node registration: " + nodeIndex);
+ throw armnn::Exception(&"TfLiteArmnnDelegate: Unable to get node registration: " [ nodeIndex]);
}
if (VisitNode(delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
{
- throw armnn::Exception("TfLiteArmnnDelegate: Unable to parse node: " + nodeIndex);
+ throw armnn::Exception(&"TfLiteArmnnDelegate: Unable to parse node: " [ nodeIndex]);
}
}
@@ -359,6 +360,7 @@
TfLiteStatus ArmnnSubgraph::Prepare(TfLiteContext* tfLiteContext)
{
+ armnn::IgnoreUnused(tfLiteContext);
return kTfLiteOk;
}