IVGCVSW-6724 Accessing ConstTensors from IConnectableLayer
Signed-off-by: Nikhil Raj <nikhil.raj@arm.com>
Change-Id: I01f42a520d15c6dabd2f77c7715c91b8f7026476
diff --git a/delegate/src/MultiLayerFacade.hpp b/delegate/src/MultiLayerFacade.hpp
index ccd011a..02be26c 100644
--- a/delegate/src/MultiLayerFacade.hpp
+++ b/delegate/src/MultiLayerFacade.hpp
@@ -131,6 +131,11 @@
virtual const armnn::BaseDescriptor& GetParameters() const override { return m_NullDescriptor; }
+protected:
+ /// Retrieve the handles to the constant values stored by the layer.
+ /// @return An empty vector, as this facade does not itself store any constant tensors.
+ ConstantTensors GetConstantTensorsByRef() override { return {}; }
+
private:
armnn::IConnectableLayer* m_FirstLayer;
armnn::IConnectableLayer* m_LastLayer;
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 073f119..6a2193c 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -13,6 +13,7 @@
#include <armnn/Optional.hpp>
#include <armnn/TensorFwd.hpp>
#include <armnn/Logging.hpp>
+#include <armnn/backends/TensorHandle.hpp>
#include <memory>
#include <vector>
@@ -119,6 +120,11 @@
/// the BaseDescriptor IsNull function is invoked.
virtual const BaseDescriptor& GetParameters() const = 0;
+ using ConstantTensors = std::vector<std::reference_wrapper<std::shared_ptr<ConstTensorHandle>>>;
+
+ // Returns the ConstantTensors of this Layer if it has any, otherwise returns an empty vector.
+ virtual ConstantTensors GetConstantTensorsByRef() = 0;
+
protected:
/// Objects are not deletable via the handle
~IConnectableLayer() {}
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index ecfa1d9..23aa86a 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -394,8 +394,8 @@
LayerType* CloneBase(Graph& graph, Params&& ... params) const;
// Retrieve the Handles to the constants
- using ConstantTensors = std::vector<std::reference_wrapper<std::shared_ptr<ConstTensorHandle>>>;
- virtual ConstantTensors GetConstantTensorsByRef() {return ConstantTensors(); };
+ // Marking this as override and providing the default implementation here keeps IConnectableLayer abstract, containing only pure virtual functions
+ virtual ConstantTensors GetConstantTensorsByRef() override {return ConstantTensors(); };
// "Blob"
AdditionalInfoObjectPtr m_AdditionalInfoObject;
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index 18d167f..15a42dd 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -67,6 +67,7 @@
Layer::ConstantTensors BatchNormalizationLayer::GetConstantTensorsByRef()
{
+ // For API stability DO NOT ALTER the order; add any new members only to the end of the vector
return {m_Mean, m_Variance, m_Beta, m_Gamma};
}
diff --git a/src/armnn/layers/ConstantLayer.hpp b/src/armnn/layers/ConstantLayer.hpp
index a9a9d37..d3dd8cf 100644
--- a/src/armnn/layers/ConstantLayer.hpp
+++ b/src/armnn/layers/ConstantLayer.hpp
@@ -56,6 +56,7 @@
~ConstantLayer() = default;
/// Retrieve the handles to the constant values stored by the layer.
+ // For API stability DO NOT ALTER the order; add any new members only to the end of the vector
ConstantTensors GetConstantTensorsByRef() override { return {m_LayerOutput}; }
};
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index 68e1cb5..ef5db8e 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -140,6 +140,7 @@
Layer::ConstantTensors Convolution2dLayer::GetConstantTensorsByRef()
{
+ // For API stability DO NOT ALTER the order; add any new members only to the end of the vector
return {m_Weight, m_Bias};
}
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index db14e22..b23661b 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -145,6 +145,7 @@
Layer::ConstantTensors DepthwiseConvolution2dLayer::GetConstantTensorsByRef()
{
+ // For API stability DO NOT ALTER the order; add any new members only to the end of the vector
return {m_Weight, m_Bias};
}
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index 833ef43..58f261c 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -75,6 +75,7 @@
Layer::ConstantTensors DetectionPostProcessLayer::GetConstantTensorsByRef()
{
+ // For API stability DO NOT ALTER the order; add any new members only to the end of the vector
return { m_Anchors };
}
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 6a9c3b0..b1ae974 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -77,6 +77,7 @@
Layer::ConstantTensors FullyConnectedLayer::GetConstantTensorsByRef()
{
+ // For API stability DO NOT ALTER the order; add any new members only to the end of the vector
return {m_Weight, m_Bias};
}
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 46c7574..06e5e8e 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -269,6 +269,7 @@
Layer::ConstantTensors LstmLayer::GetConstantTensorsByRef()
{
+ // For API stability DO NOT ALTER the order; add any new members only to the end of the vector
return {m_BasicParameters.m_InputToForgetWeights,
m_BasicParameters.m_InputToCellWeights,
m_BasicParameters.m_InputToOutputWeights,
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index 17031fa..eb33227 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -271,6 +271,7 @@
Layer::ConstantTensors QLstmLayer::GetConstantTensorsByRef()
{
+ // For API stability DO NOT ALTER the order; add any new members only to the end of the vector
return {m_BasicParameters.m_InputToForgetWeights,
m_BasicParameters.m_InputToCellWeights,
m_BasicParameters.m_InputToOutputWeights,
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index 7fd39f1..e9b9d1c 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -150,6 +150,7 @@
Layer::ConstantTensors QuantizedLstmLayer::GetConstantTensorsByRef()
{
+ // For API stability DO NOT ALTER the order; add any new members only to the end of the vector
return
{
m_QuantizedLstmParameters.m_InputToInputWeights,
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index a1f07f9..1cbaf34 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -118,6 +118,7 @@
Layer::ConstantTensors TransposeConvolution2dLayer::GetConstantTensorsByRef()
{
+ // For API stability DO NOT ALTER the order; add any new members only to the end of the vector
return {m_Weight, m_Bias};
}
diff --git a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
index c9aaa8c..1999614 100644
--- a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
+++ b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
@@ -276,6 +276,7 @@
Layer::ConstantTensors UnidirectionalSequenceLstmLayer::GetConstantTensorsByRef()
{
+ // For API stability DO NOT ALTER the order; add any new members only to the end of the vector
return {m_BasicParameters.m_InputToForgetWeights,
m_BasicParameters.m_InputToCellWeights,
m_BasicParameters.m_InputToOutputWeights,
diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp
index 6b3e611..d3dd499 100644
--- a/src/armnn/test/GraphTests.cpp
+++ b/src/armnn/test/GraphTests.cpp
@@ -614,4 +614,29 @@
CHECK(*sharedWeightPtr == 1);
}
+TEST_CASE("IConnectableLayerConstantTensorsByRef")
+{
+ using namespace armnn;
+ INetworkPtr net(INetwork::Create());
+
+ std::vector<uint8_t> falseData = {3};
+ ConstTensor falseTensor(TensorInfo({1}, DataType::Boolean, 0.0f, 0, true), falseData);
+ IConnectableLayer* constLayer = net->AddConstantLayer(falseTensor, "const");
+ constLayer->GetOutputSlot(0).SetTensorInfo(TensorInfo({1, 1, 1, 1}, DataType::Boolean));
+
+ const TensorInfo& constInfo = constLayer->GetOutputSlot(0).GetTensorInfo();
+
+ const void* weightData = constLayer->GetConstantTensorsByRef()[0].get()->GetConstTensor<void>();
+ auto weightValue = reinterpret_cast<const uint8_t*>(weightData);
+ CHECK(weightValue[0] == 3);
+ TensorInfo weightsInfo = constInfo;
+ ConstTensor weights(weightsInfo, weightData);
+ DepthwiseConvolution2dDescriptor desc;
+ const auto depthwiseLayer = net->AddDepthwiseConvolution2dLayer(desc, weights, EmptyOptional(), "Depthwise");
+
+ const void* resultData = depthwiseLayer->GetConstantTensorsByRef()[0].get()->GetConstTensor<void>();
+ auto resultValue = reinterpret_cast<const uint8_t*>(resultData);
+ CHECK(resultValue[0] == 3);
+}
+
}
diff --git a/src/armnn/test/InferOutputTests.hpp b/src/armnn/test/InferOutputTests.hpp
index 799739b..7ae2aa7 100644
--- a/src/armnn/test/InferOutputTests.hpp
+++ b/src/armnn/test/InferOutputTests.hpp
@@ -700,4 +700,4 @@
CHECK(actualOutShapes.size() == 2);
CHECK(expectedOutShapes[0] == actualOutShapes[0]);
CHECK(expectedOutShapes[1] == actualOutShapes[1]);
-}
+}
\ No newline at end of file