MLCE-1092 Added layerNames to classic delegate
* All layers added through the classic delegate will have a name that
includes the nodeIndex from the tflite model.
* Added utilities to ClassicDelegateUtils to get the names for the layers.
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Iac567486d1f91c0a99b77ed8963f6b6ca26b0b59
diff --git a/delegate/classic/src/Activation.hpp b/delegate/classic/src/Activation.hpp
index 442ce4f..a93cee4 100644
--- a/delegate/classic/src/Activation.hpp
+++ b/delegate/classic/src/Activation.hpp
@@ -122,14 +122,16 @@
outputTensorInfo,
activationDesc);
}
- armnn::IConnectableLayer* activationLayer = delegateData.m_Network->AddActivationLayer(activationDesc);
+ auto layerName = GetLayerName(activationDesc.m_Function, nodeIndex);
+ armnn::IConnectableLayer* activationLayer = delegateData.m_Network->AddActivationLayer(activationDesc,
+ layerName.c_str());
ARMNN_ASSERT(activationLayer != nullptr);
armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(activationLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(activationLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/ArgMinMax.hpp b/delegate/classic/src/ArgMinMax.hpp
index 4e4a2a3..3729b3b 100644
--- a/delegate/classic/src/ArgMinMax.hpp
+++ b/delegate/classic/src/ArgMinMax.hpp
@@ -112,7 +112,8 @@
}
// Add an ArgMinMax layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddArgMinMaxLayer(desc);
+ auto layerName = GetLayerName(desc.m_Function, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddArgMinMaxLayer(desc, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -120,7 +121,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/BatchMatMul.hpp b/delegate/classic/src/BatchMatMul.hpp
index 94b25fe..1caa354 100644
--- a/delegate/classic/src/BatchMatMul.hpp
+++ b/delegate/classic/src/BatchMatMul.hpp
@@ -90,7 +90,8 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchMatMulLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::BatchMatMul, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchMatMulLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -98,7 +99,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/BatchSpace.hpp b/delegate/classic/src/BatchSpace.hpp
index 30c6dbf..07491ce 100644
--- a/delegate/classic/src/BatchSpace.hpp
+++ b/delegate/classic/src/BatchSpace.hpp
@@ -96,7 +96,8 @@
}
// Add a BatchToSpace layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchToSpaceNdLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::BatchToSpaceNd, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchToSpaceNdLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -104,7 +105,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
@@ -204,7 +205,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/ClassicDelegateUtils.hpp b/delegate/classic/src/ClassicDelegateUtils.hpp
index 2806716..8a9409d 100644
--- a/delegate/classic/src/ClassicDelegateUtils.hpp
+++ b/delegate/classic/src/ClassicDelegateUtils.hpp
@@ -10,6 +10,7 @@
#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
+#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
@@ -22,6 +23,8 @@
#include <tensorflow/lite/minimal_logging.h>
#include <tensorflow/lite/kernels/kernel_util.h>
+#include <fmt/format.h>
+
namespace
{
@@ -71,6 +74,41 @@
throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
+std::string GetLayerName(armnn::ActivationFunction function, int nodeIndex)
+{
+ return fmt::format("{}:{}", GetActivationFunctionAsCString(function), nodeIndex);
+}
+
+std::string GetLayerName(armnn::ArgMinMaxFunction function, int nodeIndex)
+{
+ return fmt::format("{}:{}", GetArgMinMaxFunctionAsCString(function), nodeIndex);
+}
+
+std::string GetLayerName(armnn::BinaryOperation opType, int nodeIndex)
+{
+ return fmt::format("{}:{}", GetBinaryOperationAsCString(opType), nodeIndex);
+}
+
+std::string GetLayerName(armnn::ComparisonOperation layerType, int nodeIndex)
+{
+ return fmt::format("{}:{}", GetComparisonOperationAsCString(layerType), nodeIndex);
+}
+
+std::string GetLayerName(armnn::LogicalBinaryOperation operation, int nodeIndex)
+{
+ return fmt::format("{}:{}", GetLogicalBinaryOperationAsCString(operation), nodeIndex);
+}
+
+std::string GetLayerName(armnn::UnaryOperation opType, int nodeIndex)
+{
+ return fmt::format("{}:{}", GetUnaryOperationAsCString(opType), nodeIndex);
+}
+
+std::string GetLayerName(armnn::LayerType layerType, int nodeIndex, std::string name = "")
+{
+ return fmt::format("{}{}:{}", GetLayerTypeAsCString(layerType), name, nodeIndex);
+}
+
TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
TfLiteNode* tfLiteNode,
const unsigned int expectedSize,
@@ -181,7 +219,8 @@
TfLiteFusedActivation activationType,
armnn::IConnectableLayer* prevLayer,
unsigned int outputSlotIndex,
- armnnDelegate::DelegateData& data)
+ armnnDelegate::DelegateData& data,
+ int nodeIndex)
{
const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();
@@ -250,7 +289,8 @@
{
return kTfLiteError;
}
- armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
+ auto layerName = GetLayerName(activationDesc.m_Function, nodeIndex);
+ armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc, layerName.c_str());
activationLayer->SetBackendId(setBackend);
ARMNN_ASSERT(activationLayer != nullptr);
@@ -273,7 +313,8 @@
armnn::IConnectableLayer* prevLayer,
armnn::TensorInfo reshapedOutputTensorInfo,
armnn::TensorInfo outputTensorInfo,
- armnnDelegate::DelegateData& data)
+ armnnDelegate::DelegateData& data,
+ int nodeIndex)
{
armnn::ReshapeDescriptor desc;
desc.m_TargetShape = outputTensorInfo.GetShape();
@@ -295,7 +336,8 @@
return nullptr;
}
- armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc);
+ auto layerName = GetLayerName(armnn::LayerType::Reshape, nodeIndex);
+ armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc, layerName.c_str());
reshapeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(reshapeLayer != nullptr);
@@ -478,7 +520,8 @@
TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
armnnDelegate::DelegateData& delegateData,
TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode)
+ TfLiteNode* tfLiteNode,
+ int nodeIndex)
{
const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
// Process input tensors
@@ -504,7 +547,10 @@
}
auto constantInput = CreateConstTensor(&tfLiteInputTensor,
inputTensorInfo);
- armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
+
+ auto layerName = GetLayerName(armnn::LayerType::Constant, nodeIndex);
+ armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput,
+ layerName.c_str());
constantLayer->SetBackendId(setBackend);
armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(inputTensorInfo);
diff --git a/delegate/classic/src/Comparison.hpp b/delegate/classic/src/Comparison.hpp
index 1db554c..ead08d1 100644
--- a/delegate/classic/src/Comparison.hpp
+++ b/delegate/classic/src/Comparison.hpp
@@ -117,7 +117,9 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* comparisonLayer = delegateData.m_Network->AddComparisonLayer(descriptor);
+ auto layerName = GetLayerName(descriptor.m_Operation, nodeIndex);
+ armnn::IConnectableLayer* comparisonLayer = delegateData.m_Network->AddComparisonLayer(descriptor,
+ layerName.c_str());
comparisonLayer->SetBackendId(setBackend);
ARMNN_ASSERT(comparisonLayer != nullptr);
@@ -125,7 +127,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(comparisonLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(comparisonLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Control.hpp b/delegate/classic/src/Control.hpp
index e6779f3..0adf262 100644
--- a/delegate/classic/src/Control.hpp
+++ b/delegate/classic/src/Control.hpp
@@ -127,7 +127,9 @@
}
// Setup layer and connect.
- armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Concat, nodeIndex);
+ armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor,
+ layerName.c_str());
concatenationLayer->SetBackendId(setBackend);
ARMNN_ASSERT(concatenationLayer != nullptr);
@@ -135,7 +137,8 @@
auto inputsTensorsProcess = ProcessInputs(concatenationLayer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
@@ -155,7 +158,7 @@
}
// Check and Create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, concatenationLayer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, concatenationLayer, 0, delegateData, nodeIndex);
}
TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
@@ -271,7 +274,8 @@
}
// Setup layer and connect.
- armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc);
+ auto layerName = GetLayerName(armnn::LayerType::Mean, nodeIndex);
+ armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc, layerName.c_str());
meanLayer->SetBackendId(setBackend);
ARMNN_ASSERT(meanLayer != nullptr);
@@ -279,7 +283,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(meanLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(meanLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Convolution.hpp b/delegate/classic/src/Convolution.hpp
index a44f9ee..cf0134e 100644
--- a/delegate/classic/src/Convolution.hpp
+++ b/delegate/classic/src/Convolution.hpp
@@ -131,14 +131,16 @@
}
// Set up filter and biases
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Convolution2d, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
- if(filterTensorInfo.IsConstant())
+ if (filterTensorInfo.IsConstant())
{
auto filter = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]], filterTensorInfo);
- armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+ auto filterName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Filter");
+ armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
}
@@ -149,7 +151,10 @@
if(biasTensorInfo.IsConstant())
{
auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
- armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+
+ auto biasName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Bias");
+ armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
+ biasName.c_str());
ARMNN_ASSERT(biasLayer != nullptr);
biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
@@ -157,11 +162,12 @@
}
// The data input can also be constant, so we must check that this is also allocated to an input slot
- if(inputTensorInfo.IsConstant())
+ if (inputTensorInfo.IsConstant())
{
auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]], inputTensorInfo);
- armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
+ armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
}
@@ -183,7 +189,7 @@
}
// Check and Create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
}
// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
@@ -311,7 +317,8 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Convolution3d, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -321,7 +328,8 @@
{
auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);
- armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+ auto filterName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Filter");
+ armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
ARMNN_ASSERT(weightsLayer != nullptr);
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
@@ -335,7 +343,8 @@
{
auto biases = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
- armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases);
+ auto biasName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Bias");
+ armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases, biasName.c_str());
ARMNN_ASSERT(biasLayer != nullptr);
biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
@@ -348,7 +357,8 @@
{
auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]], inputTensorInfo);
- armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
+ armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
}
@@ -368,7 +378,7 @@
}
// Check and create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
}
#endif
@@ -485,7 +495,9 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::DepthwiseConvolution2d, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor,
+ layerName.c_str());
layer->SetBackendId(setBackend);
if(filterTensorInfo.IsConstant())
@@ -493,7 +505,8 @@
// For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);
- armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+ auto filterName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Filter");
+ armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
}
@@ -504,7 +517,10 @@
if(biasTensorInfo.IsConstant())
{
auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
- armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+
+ auto biasName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Bias");
+ armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
+ biasName.c_str());
ARMNN_ASSERT(biasLayer != nullptr);
biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
@@ -516,7 +532,8 @@
{
auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]], inputTensorInfo);
- armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
+ armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
}
@@ -537,7 +554,7 @@
return kTfLiteOk;
}
// Check and create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
}
TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
@@ -683,9 +700,11 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
+ auto layerName = GetLayerName(armnn::LayerType::TransposeConvolution2d, nodeIndex);
armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
filterTensor,
- armnn::EmptyOptional());
+ armnn::EmptyOptional(),
+ layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -694,7 +713,8 @@
{
auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[2]], inputTensorInfo);
- armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
+ armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
}
diff --git a/delegate/classic/src/ElementwiseBinary.hpp b/delegate/classic/src/ElementwiseBinary.hpp
index 8055a69..8309a79 100644
--- a/delegate/classic/src/ElementwiseBinary.hpp
+++ b/delegate/classic/src/ElementwiseBinary.hpp
@@ -250,11 +250,13 @@
}
std::pair<armnn::IConnectableLayer*, armnn::IConnectableLayer*> AddFloorDivLayer(
- DelegateData& delegateData,
- const armnn::TensorInfo& outputTensorInfo)
+ DelegateData& delegateData,
+ const armnn::TensorInfo& outputTensorInfo,
+ int nodeIndex)
{
+ auto divName = GetLayerName(armnn::BinaryOperation::Div, nodeIndex);
armnn::IConnectableLayer* divisionLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Div);
+ armnn::BinaryOperation::Div, divName.c_str());
// if the output of the div is Signed32 the Floor layer is not required
if (armnn::DataType::Signed32 == outputTensorInfo.GetDataType())
{
@@ -262,7 +264,8 @@
}
armnn::IOutputSlot& outputSlot = divisionLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
- armnn::IConnectableLayer* floorLayer = delegateData.m_Network->AddFloorLayer();
+ auto floorName = GetLayerName(armnn::LayerType::Floor, nodeIndex);
+ armnn::IConnectableLayer* floorLayer = delegateData.m_Network->AddFloorLayer(floorName.c_str());
outputSlot.Connect(floorLayer->GetInputSlot(0));
return std::make_pair(divisionLayer, floorLayer);
}
@@ -397,46 +400,55 @@
armnn::IConnectableLayer* elementwiseBinaryLayer = nullptr;
MultiLayerFacade multiLayer;
+ std::string layerName;
switch(elementwiseBinaryOperatorCode)
{
case kTfLiteBuiltinAdd:
+ layerName = GetLayerName(armnn::BinaryOperation::Add, nodeIndex);
elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Add);
+ armnn::BinaryOperation::Add, layerName.c_str());
break;
case kTfLiteBuiltinDiv:
+ layerName = GetLayerName(armnn::BinaryOperation::Div, nodeIndex);
elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Div);
+ armnn::BinaryOperation::Div, layerName.c_str());
break;
case kTfLiteBuiltinFloorDiv:
{
- auto layers = AddFloorDivLayer(delegateData, outputTensorInfo);
+ auto layers = AddFloorDivLayer(delegateData, outputTensorInfo, nodeIndex);
multiLayer.AssignValues(layers.first, layers.second);
elementwiseBinaryLayer = &multiLayer;
}
break;
case kTfLiteBuiltinMaximum:
+ layerName = GetLayerName(armnn::BinaryOperation::Maximum, nodeIndex);
elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Maximum);
+ armnn::BinaryOperation::Maximum, layerName.c_str());
break;
case kTfLiteBuiltinMinimum:
+ layerName = GetLayerName(armnn::BinaryOperation::Minimum, nodeIndex);
elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Minimum);
+ armnn::BinaryOperation::Minimum, layerName.c_str());
break;
case kTfLiteBuiltinMul:
+ layerName = GetLayerName(armnn::BinaryOperation::Mul, nodeIndex);
elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Mul);
+ armnn::BinaryOperation::Mul, layerName.c_str());
break;
case kTfLiteBuiltinPow:
+ layerName = GetLayerName(armnn::BinaryOperation::Power, nodeIndex);
elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Power);
+ armnn::BinaryOperation::Power, layerName.c_str());
break;
case kTfLiteBuiltinSquaredDifference:
+ layerName = GetLayerName(armnn::BinaryOperation::SqDiff, nodeIndex);
elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::SqDiff);
+ armnn::BinaryOperation::SqDiff, layerName.c_str());
break;
case kTfLiteBuiltinSub:
+ layerName = GetLayerName(armnn::BinaryOperation::Sub, nodeIndex);
elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Sub);
+ armnn::BinaryOperation::Sub, layerName.c_str());
break;
default:
return kTfLiteError;
@@ -448,7 +460,8 @@
auto inputsTensorsProcess = ProcessInputs(elementwiseBinaryLayer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
@@ -465,7 +478,8 @@
return kTfLiteOk;
}
// Check and Create Activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, elementwiseBinaryLayer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, elementwiseBinaryLayer, 0, delegateData,
+ nodeIndex);
}
} // namespace armnnDelegate
diff --git a/delegate/classic/src/ElementwiseUnary.hpp b/delegate/classic/src/ElementwiseUnary.hpp
index 562ce1f..4a898e6 100644
--- a/delegate/classic/src/ElementwiseUnary.hpp
+++ b/delegate/classic/src/ElementwiseUnary.hpp
@@ -71,7 +71,8 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddElementwiseUnaryLayer(descriptor);
+ auto layerName = GetLayerName(descriptor.m_Operation, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -79,7 +80,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Fill.hpp b/delegate/classic/src/Fill.hpp
index 15dc91e..e0ba2f9 100644
--- a/delegate/classic/src/Fill.hpp
+++ b/delegate/classic/src/Fill.hpp
@@ -92,7 +92,8 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddFillLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Fill, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddFillLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -102,7 +103,8 @@
auto inputsTensorsProcess = ProcessInputs(layer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
diff --git a/delegate/classic/src/FullyConnected.hpp b/delegate/classic/src/FullyConnected.hpp
index 9ce06a8..2d4e987 100644
--- a/delegate/classic/src/FullyConnected.hpp
+++ b/delegate/classic/src/FullyConnected.hpp
@@ -166,7 +166,8 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddFullyConnectedLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::FullyConnected, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddFullyConnectedLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -176,7 +177,9 @@
auto weightsTensor = CreateConstTensor(&tfLiteWeightsTensor,
weightsTensorInfo);
- armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(weightsTensor);
+ auto weightsName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Weights");
+ armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(weightsTensor,
+ weightsName.c_str());
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsTensorInfo);
@@ -190,7 +193,9 @@
auto biasTensor = CreateConstTensor(&tfLiteBiasTensor,
biasTensorInfo);
- armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+ auto biasName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Bias");
+ armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
+ biasName.c_str());
ARMNN_ASSERT(biasLayer != nullptr);
biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
@@ -199,13 +204,14 @@
}
// The data input can also be constant, so we must check that this is also allocated to an input slot
- if(inputTensorInfo.IsConstant())
+ if (inputTensorInfo.IsConstant())
{
auto input =
CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
inputTensorInfo);
- armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ auto constantName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
+ armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input, constantName.c_str());
inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
}
@@ -219,7 +225,9 @@
// Add reshape to flatten to 2D [batch_size, input_size]
armnn::ReshapeDescriptor reshapeDescriptor;
reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
- reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
+
+ auto reshapeName = GetLayerName(armnn::LayerType::Reshape, nodeIndex, "Input");
+ reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor, reshapeName.c_str());
ARMNN_ASSERT(reshapeLayer != nullptr);
reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
@@ -251,7 +259,7 @@
if (outputTensorInfo.GetNumDimensions() > 2)
{
layer = AddReshapeLayer(tfLiteContext, tfLiteNode, layer, reshapedOutputTensorInfo, outputTensorInfo,
- delegateData);
+ delegateData, nodeIndex);
if (!layer)
{
TF_LITE_MAYBE_KERNEL_LOG(
@@ -270,7 +278,7 @@
}
// Check and Create Activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
}
} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/classic/src/Gather.hpp b/delegate/classic/src/Gather.hpp
index f9611a4..30dbd0d 100644
--- a/delegate/classic/src/Gather.hpp
+++ b/delegate/classic/src/Gather.hpp
@@ -88,7 +88,8 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherLayer(gatherDescriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Gather, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -96,7 +97,8 @@
auto inputsTensorsProcess = ProcessInputs(layer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
diff --git a/delegate/classic/src/GatherNd.hpp b/delegate/classic/src/GatherNd.hpp
index e1ee2ac..a49b768 100644
--- a/delegate/classic/src/GatherNd.hpp
+++ b/delegate/classic/src/GatherNd.hpp
@@ -64,7 +64,8 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherNdLayer();
+ auto layerName = GetLayerName(armnn::LayerType::GatherNd, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherNdLayer(layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -72,7 +73,8 @@
auto inputsTensorsProcess = ProcessInputs(layer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
diff --git a/delegate/classic/src/LogicalBinary.hpp b/delegate/classic/src/LogicalBinary.hpp
index d71618e..b80b837 100644
--- a/delegate/classic/src/LogicalBinary.hpp
+++ b/delegate/classic/src/LogicalBinary.hpp
@@ -80,7 +80,9 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* logicalBinaryLayer = delegateData.m_Network->AddLogicalBinaryLayer(desc);
+ auto layerName = GetLayerName(desc.m_Operation, nodeIndex);
+ armnn::IConnectableLayer* logicalBinaryLayer = delegateData.m_Network->AddLogicalBinaryLayer(desc,
+ layerName.c_str());
logicalBinaryLayer->SetBackendId(setBackend);
ARMNN_ASSERT(logicalBinaryLayer != nullptr);
@@ -90,7 +92,8 @@
auto inputsTensorsProcess = ProcessInputs(logicalBinaryLayer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
diff --git a/delegate/classic/src/Lstm.hpp b/delegate/classic/src/Lstm.hpp
index 518559f..2abc47f 100644
--- a/delegate/classic/src/Lstm.hpp
+++ b/delegate/classic/src/Lstm.hpp
@@ -242,7 +242,8 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddLstmLayer(desc, params);
+ auto layerName = GetLayerName(armnn::LayerType::Lstm, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddLstmLayer(desc, params, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
diff --git a/delegate/classic/src/Normalization.hpp b/delegate/classic/src/Normalization.hpp
index ef2e524..befaddd 100644
--- a/delegate/classic/src/Normalization.hpp
+++ b/delegate/classic/src/Normalization.hpp
@@ -63,7 +63,8 @@
}
// Add a L2Normalization layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddL2NormalizationLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::L2Normalization, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddL2NormalizationLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -71,7 +72,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
@@ -142,7 +143,8 @@
}
// Add a Normalization layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddNormalizationLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Normalization, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddNormalizationLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -150,7 +152,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Pack.hpp b/delegate/classic/src/Pack.hpp
index 99c8b80..029fd42 100644
--- a/delegate/classic/src/Pack.hpp
+++ b/delegate/classic/src/Pack.hpp
@@ -98,7 +98,8 @@
}
// The TfLite Pack operator is equivalent to the ArmNN Stack operator
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddStackLayer(desc);
+ auto layerName = GetLayerName(armnn::LayerType::Stack, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddStackLayer(desc, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -106,7 +107,8 @@
auto inputsTensorsProcess = ProcessInputs(layer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
diff --git a/delegate/classic/src/Pad.hpp b/delegate/classic/src/Pad.hpp
index 440a3d0..f8e8014 100644
--- a/delegate/classic/src/Pad.hpp
+++ b/delegate/classic/src/Pad.hpp
@@ -166,7 +166,8 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* padLayer = delegateData.m_Network->AddPadLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Pad, nodeIndex);
+ armnn::IConnectableLayer* padLayer = delegateData.m_Network->AddPadLayer(descriptor, layerName.c_str());
padLayer->SetBackendId(setBackend);
ARMNN_ASSERT(padLayer != nullptr);
diff --git a/delegate/classic/src/Pooling.hpp b/delegate/classic/src/Pooling.hpp
index 50e944e..f61a1a2 100644
--- a/delegate/classic/src/Pooling.hpp
+++ b/delegate/classic/src/Pooling.hpp
@@ -117,7 +117,8 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Pooling2d, nodeIndex);
+ armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor, layerName.c_str());
poolingLayer->SetBackendId(setBackend);
ARMNN_ASSERT(poolingLayer != nullptr);
@@ -125,18 +126,18 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(poolingLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
- if(Connect(poolingLayer, tfLiteNode, delegateData) != kTfLiteOk)
+ if (Connect(poolingLayer, tfLiteNode, delegateData) != kTfLiteOk)
{
return kTfLiteError;
}
// Check and create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData, nodeIndex);
}
TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
@@ -302,6 +303,7 @@
}
// Create the Layer
+ auto layerName = GetLayerName(armnn::LayerType::Pooling3d, nodeIndex);
- armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor);
+ armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor, layerName.c_str());
poolingLayer->SetBackendId(setBackend);
ARMNN_ASSERT(poolingLayer != nullptr);
@@ -311,17 +313,17 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(poolingLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
- if(Connect(poolingLayer, tfLiteNode, delegateData) != kTfLiteOk)
+ if (Connect(poolingLayer, tfLiteNode, delegateData) != kTfLiteOk)
{
return kTfLiteError;
}
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData, nodeIndex);
}
} // namespace armnnDelegate
diff --git a/delegate/classic/src/Prelu.hpp b/delegate/classic/src/Prelu.hpp
index 4fdad4a..193e3f0 100644
--- a/delegate/classic/src/Prelu.hpp
+++ b/delegate/classic/src/Prelu.hpp
@@ -81,7 +81,8 @@
outputTensorInfo);
}
- armnn::IConnectableLayer* preluLayer = delegateData.m_Network->AddPreluLayer();
+ auto layerName = GetLayerName(armnn::LayerType::Prelu, nodeIndex);
+ armnn::IConnectableLayer* preluLayer = delegateData.m_Network->AddPreluLayer(layerName.c_str());
ARMNN_ASSERT(preluLayer != nullptr);
bool isConstantAlpha = tflite::IsConstantTensor(&tfLiteAlphaTensor);
@@ -91,7 +92,9 @@
{
auto constAlphaTensor = armnn::ConstTensor(alphaTensorInfo, tfLiteAlphaTensor.data.data);
- armnn::IConnectableLayer* constLayer = delegateData.m_Network->AddConstantLayer(constAlphaTensor);
+ auto alphaName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Alpha");
+ armnn::IConnectableLayer* constLayer = delegateData.m_Network->AddConstantLayer(constAlphaTensor,
+ alphaName.c_str());
ARMNN_ASSERT(constLayer != nullptr);
constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
diff --git a/delegate/classic/src/Quantization.hpp b/delegate/classic/src/Quantization.hpp
index f119296..8291854 100644
--- a/delegate/classic/src/Quantization.hpp
+++ b/delegate/classic/src/Quantization.hpp
@@ -70,7 +70,8 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer();
+ auto layerName = GetLayerName(armnn::LayerType::Dequantize, nodeIndex);
+ armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer(layerName.c_str());
dequantizeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(dequantizeLayer != nullptr);
@@ -80,7 +81,8 @@
auto inputsTensorsProcess = ProcessInputs(dequantizeLayer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
@@ -152,7 +154,8 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer();
+ auto layerName = GetLayerName(armnn::LayerType::Quantize, nodeIndex);
+ armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer(layerName.c_str());
quantizeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(quantizeLayer != nullptr);
@@ -160,7 +163,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(quantizeLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(quantizeLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Redefine.hpp b/delegate/classic/src/Redefine.hpp
index 2c29083..6b10e44 100644
--- a/delegate/classic/src/Redefine.hpp
+++ b/delegate/classic/src/Redefine.hpp
@@ -64,7 +64,8 @@
}
// Add a Cast layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddCastLayer();
+ auto layerName = GetLayerName(armnn::LayerType::Cast, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddCastLayer(layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -72,7 +73,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
@@ -206,7 +207,8 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+ auto layerName = GetLayerName(armnn::LayerType::Reshape, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -214,7 +216,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
@@ -291,7 +293,8 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+ auto layerName = GetLayerName(armnn::LayerType::Reshape, nodeIndex, "Squeeze");
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -299,7 +302,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
@@ -396,7 +399,8 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+ auto layerName = GetLayerName(armnn::LayerType::Reshape, nodeIndex, "ExpandDims");
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -405,7 +409,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Reduce.hpp b/delegate/classic/src/Reduce.hpp
index 2d8b462..8731ef5 100644
--- a/delegate/classic/src/Reduce.hpp
+++ b/delegate/classic/src/Reduce.hpp
@@ -125,8 +125,9 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- // Add an Reduce layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddReduceLayer(desc);
+ // Add a Reduce layer
+ auto layerName = GetLayerName(armnn::LayerType::Reduce, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddReduceLayer(desc, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -134,7 +135,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Resize.hpp b/delegate/classic/src/Resize.hpp
index 32c7f46..cede32b 100644
--- a/delegate/classic/src/Resize.hpp
+++ b/delegate/classic/src/Resize.hpp
@@ -185,14 +185,14 @@
}
- armnn::IConnectableLayer* resizeLayer = nullptr;
- resizeLayer = delegateData.m_Network->AddResizeLayer(desc, layerName.c_str());
+ auto resizeName = GetLayerName(armnn::LayerType::Resize, nodeIndex);
+ armnn::IConnectableLayer* resizeLayer = delegateData.m_Network->AddResizeLayer(desc, resizeName.c_str());
armnn::IOutputSlot& outputSlot = resizeLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(resizeLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(resizeLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/ReverseV2.hpp b/delegate/classic/src/ReverseV2.hpp
index d49d20b..64941f7 100644
--- a/delegate/classic/src/ReverseV2.hpp
+++ b/delegate/classic/src/ReverseV2.hpp
@@ -109,8 +109,6 @@
}
}
- std::string layerName("ReverseV2");
-
const auto maxDimension = 4;
const auto axisTensorNumValues = static_cast<unsigned int>(tfLiteAxisTensor.dims->size);
@@ -135,13 +133,14 @@
outputTensorInfo);
}
+ auto layerName = GetLayerName(armnn::LayerType::ReverseV2, nodeIndex);
armnn::IConnectableLayer* reverseV2Layer = delegateData.m_Network->AddReverseV2Layer(layerName.c_str());
armnn::IOutputSlot& outputSlot = reverseV2Layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
// Try to connect the Constant Inputs if there are any
- if(ProcessInputs(reverseV2Layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(reverseV2Layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Round.hpp b/delegate/classic/src/Round.hpp
index 7a060b1..d549a45 100644
--- a/delegate/classic/src/Round.hpp
+++ b/delegate/classic/src/Round.hpp
@@ -52,14 +52,15 @@
}
// Add a Floor layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddFloorLayer();
+ auto layerName = GetLayerName(armnn::LayerType::Floor, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddFloorLayer(layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Shape.hpp b/delegate/classic/src/Shape.hpp
index e5dae23..10800b8 100644
--- a/delegate/classic/src/Shape.hpp
+++ b/delegate/classic/src/Shape.hpp
@@ -42,7 +42,7 @@
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
auto* shapeParameters = reinterpret_cast<TfLiteShapeParams*>(tfLiteNode->builtin_data);
- if ( shapeParameters->out_type != kTfLiteInt32 && shapeParameters->out_type != kTfLiteInt64 )
+ if (shapeParameters->out_type != kTfLiteInt32 && shapeParameters->out_type != kTfLiteInt64)
{
TF_LITE_MAYBE_KERNEL_LOG(
tfLiteContext,
@@ -75,7 +75,8 @@
}
// Add a Shape layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddShapeLayer();
+ auto layerName = GetLayerName(armnn::LayerType::Shape, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddShapeLayer(layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -83,7 +84,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Slice.hpp b/delegate/classic/src/Slice.hpp
index a586e02..9a63e43 100644
--- a/delegate/classic/src/Slice.hpp
+++ b/delegate/classic/src/Slice.hpp
@@ -149,9 +149,9 @@
validateFunc(outputTensorInfo, isSupported);
return isSupported ? kTfLiteOk : kTfLiteError;
}
- auto layerName = fmt::format("Slice:{}", nodeIndex);
// Add a Slice layer
+ auto layerName = GetLayerName(armnn::LayerType::Slice, nodeIndex);
armnn::IConnectableLayer* layer = delegateData.m_Network->AddSliceLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -160,7 +160,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Softmax.hpp b/delegate/classic/src/Softmax.hpp
index bfc6874..dd36a3d 100644
--- a/delegate/classic/src/Softmax.hpp
+++ b/delegate/classic/src/Softmax.hpp
@@ -116,6 +116,7 @@
}
}
+ auto layerName = GetLayerName(armnn::LayerType::Softmax, nodeIndex);
armnn::IConnectableLayer* softmaxLayer = nullptr;
switch(softmaxOperatorCode)
@@ -125,13 +126,13 @@
armnn::SoftmaxDescriptor descriptor;
auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(tfLiteNode->builtin_data);
descriptor.m_Beta = params->beta;
- softmaxLayer = delegateData.m_Network->AddSoftmaxLayer(descriptor);
+ softmaxLayer = delegateData.m_Network->AddSoftmaxLayer(descriptor, layerName.c_str());
break;
}
case kTfLiteBuiltinLogSoftmax:
{
armnn::LogSoftmaxDescriptor descriptor;
- softmaxLayer = delegateData.m_Network->AddLogSoftmaxLayer(descriptor);
+ softmaxLayer = delegateData.m_Network->AddLogSoftmaxLayer(descriptor, layerName.c_str());
break;
}
default:
@@ -143,7 +144,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(softmaxLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(softmaxLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/SpaceDepth.hpp b/delegate/classic/src/SpaceDepth.hpp
index cc7f034..b65207b 100644
--- a/delegate/classic/src/SpaceDepth.hpp
+++ b/delegate/classic/src/SpaceDepth.hpp
@@ -64,12 +64,13 @@
}
// Add a SpaceToDepth layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToDepthLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::SpaceToDepth, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToDepthLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
@@ -132,7 +133,8 @@
}
// Add a DepthToSpace layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthToSpaceLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::DepthToSpace, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -140,7 +142,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Split.hpp b/delegate/classic/src/Split.hpp
index 877e0b5..fcd901b 100644
--- a/delegate/classic/src/Split.hpp
+++ b/delegate/classic/src/Split.hpp
@@ -130,7 +130,8 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Splitter, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -335,7 +336,7 @@
}
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/StridedSlice.hpp b/delegate/classic/src/StridedSlice.hpp
index 998e3d3..43f9641 100644
--- a/delegate/classic/src/StridedSlice.hpp
+++ b/delegate/classic/src/StridedSlice.hpp
@@ -135,7 +135,8 @@
}
// Add a StridedSlice layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddStridedSliceLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::StridedSlice, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddStridedSliceLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -143,7 +144,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Tile.hpp b/delegate/classic/src/Tile.hpp
index 974c771..ea4ebbf 100644
--- a/delegate/classic/src/Tile.hpp
+++ b/delegate/classic/src/Tile.hpp
@@ -148,7 +148,7 @@
tileDescriptor);
}
- std::string layerName("Tile");
+ auto layerName = GetLayerName(armnn::LayerType::Tile, nodeIndex);
armnn::IConnectableLayer* layer = delegateData.m_Network->AddTileLayer(tileDescriptor, layerName.c_str());
if (layer == nullptr)
@@ -158,7 +158,7 @@
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
- if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Transpose.hpp b/delegate/classic/src/Transpose.hpp
index 41178d0..247ddf7 100644
--- a/delegate/classic/src/Transpose.hpp
+++ b/delegate/classic/src/Transpose.hpp
@@ -91,7 +91,8 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* transposeLayer = delegateData.m_Network->AddTransposeLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Transpose, nodeIndex);
+ armnn::IConnectableLayer* transposeLayer = delegateData.m_Network->AddTransposeLayer(descriptor, layerName.c_str());
transposeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(transposeLayer != nullptr);
ARMNN_ASSERT(transposeLayer->GetNumInputSlots() == 1); // permutation vector given to descriptor object
@@ -100,7 +101,7 @@
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(transposeLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(transposeLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/UnidirectionalSequenceLstm.hpp b/delegate/classic/src/UnidirectionalSequenceLstm.hpp
index 0e1ad1c..5fa6bb0 100644
--- a/delegate/classic/src/UnidirectionalSequenceLstm.hpp
+++ b/delegate/classic/src/UnidirectionalSequenceLstm.hpp
@@ -278,7 +278,10 @@
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddUnidirectionalSequenceLstmLayer(desc, params);
+ auto layerName = GetLayerName(armnn::LayerType::UnidirectionalSequenceLstm, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddUnidirectionalSequenceLstmLayer(desc,
+ params,
+ layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
diff --git a/delegate/classic/src/Unpack.hpp b/delegate/classic/src/Unpack.hpp
index 685293b..2cd3256 100644
--- a/delegate/classic/src/Unpack.hpp
+++ b/delegate/classic/src/Unpack.hpp
@@ -171,10 +171,8 @@
return isSupported ? kTfLiteOk : kTfLiteError;
};
- std::string splitterLayerName("Unpack Splitter");
-
- armnn::IConnectableLayer* splitterLayer = delegateData.m_Network->AddSplitterLayer(splitDesc,
- splitterLayerName.c_str());
+ auto layerName = GetLayerName(armnn::LayerType::Splitter, nodeIndex, "Unpack");
+ armnn::IConnectableLayer* splitterLayer = delegateData.m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
splitterLayer->SetBackendId(setBackendSplit);
ARMNN_ASSERT(splitterLayer != nullptr);
@@ -189,9 +187,9 @@
// Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
for (unsigned int outputIndex = 0; outputIndex < splitterLayer->GetNumOutputSlots(); ++outputIndex)
{
- std::string reshapeLayerName("Unpack Reshape");
+ auto reshapeName = GetLayerName(armnn::LayerType::Reshape, nodeIndex, "Unpack");
armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor,
- reshapeLayerName.c_str());
+ reshapeName.c_str());
reshapeLayer->SetBackendId(setBackendReshape);
ARMNN_ASSERT(reshapeLayer != nullptr);