IVGCVSW-6145 ConstTensorsAsInput: Optimizer Fix - GetConstantTensorsByRef

 * Add functionality to check for ConstantTensorsAsInputs to GetConstantTensorsByRef
 * Reorder optimizations so RedirectMembersToConstantInputs occurs after
   Conversion of Constants
 * Ensure graph is re-sorted into topological order when loading the OptimizedNet
 * Fixed test to check release of m_LayerOutput.

Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Change-Id: I7cff50798d7217e8ea0d2f9b153eabd10174a566
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index ae773cc..b5769f7 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -652,4 +652,9 @@
     return m_Profiler;
 }
 
+void Graph::SetLayersOutOfOrder()
+{
+    m_LayersInOrder = false;
+}
+
 } // namespace armnn
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index 5edf34c..4623461 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -229,6 +229,8 @@
 
     const std::shared_ptr<IProfiler>& GetProfiler() const;
 
+    void SetLayersOutOfOrder();
+
 private:
     template <typename LayerT>
     class LayerInGraphBase;
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 228927d..ec79d5d 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -143,7 +143,10 @@
 
     bool useExternalMemoryManager = false;
     bool useInternalMemoryManager = false;
-    Graph& order = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().TopologicalSort();
+    Graph& order = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph();
+    // Ensure Topological order
+    order.SetLayersOutOfOrder();
+    order.TopologicalSort();
 
     if (!networkProperties.m_AsyncEnabled)
     {
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index 7b3382b..dbbd009 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -139,6 +139,13 @@
 
 Layer::ConstantTensors Convolution2dLayer::GetConstantTensorsByRef()
 {
+    Layer::ConstantTensors tensors = GetConnectedConstantAsInputTensors();
+
+    if (!tensors.empty())
+    {
+        return tensors;
+    }
+
     // For API stability DO NOT ALTER order and add new members to the end of vector
     return {m_Weight, m_Bias};
 }
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index 08f6faf..4fd2804 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -142,6 +142,13 @@
 
 Layer::ConstantTensors DepthwiseConvolution2dLayer::GetConstantTensorsByRef()
 {
+    Layer::ConstantTensors tensors = GetConnectedConstantAsInputTensors();
+
+    if (!tensors.empty())
+    {
+        return tensors;
+    }
+
     // For API stability DO NOT ALTER order and add new members to the end of vector
     return {m_Weight, m_Bias};
 }
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index b1ae974..1f006c9 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -77,6 +77,13 @@
 
 Layer::ConstantTensors FullyConnectedLayer::GetConstantTensorsByRef()
 {
+    Layer::ConstantTensors tensors = GetConnectedConstantAsInputTensors();
+
+    if (!tensors.empty())
+    {
+        return tensors;
+    }
+
     // For API stability DO NOT ALTER order and add new members to the end of vector
     return {m_Weight, m_Bias};
 }
diff --git a/src/armnn/layers/LayerWithParameters.hpp b/src/armnn/layers/LayerWithParameters.hpp
index 8d9ddff..40ade95 100644
--- a/src/armnn/layers/LayerWithParameters.hpp
+++ b/src/armnn/layers/LayerWithParameters.hpp
@@ -4,6 +4,7 @@
 //
 #pragma once
 
+#include "ConstantLayer.hpp"
 #include <Layer.hpp>
 
 namespace armnn
@@ -54,6 +55,34 @@
     {
         strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
     }
+
+    Layer::ConstantTensors GetConnectedConstantAsInputTensors()
+    {
+        Layer::ConstantTensors tensors;
+        for (unsigned int i = 0; i < GetNumInputSlots(); ++i)
+        {
+            if (GetInputSlot(i).GetConnection() && GetInputSlot(i).GetConnection()->GetTensorInfo().IsConstant())
+            {
+                auto &inputLayer = GetInputSlot(i).GetConnectedOutputSlot()->GetOwningLayer();
+                if (inputLayer.GetType() == armnn::LayerType::Constant)
+                {
+                    auto &constantLayer = static_cast<ConstantLayer&>(inputLayer);
+
+                    tensors.push_back(constantLayer.m_LayerOutput);
+                }
+            }
+        }
+        if (tensors.empty())
+        {
+            const std::string warningMessage{"GetConnectedConstantAsInputTensors() called on Layer with no "
+                                             "connected Constants as Input Tensors."};
+            ARMNN_LOG(warning) << warningMessage;
+        }
+        return tensors;
+    }
 };
 
+
+
+
 } // namespace
diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
index 531a0dd..34e5f6d 100644
--- a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
@@ -58,4 +58,68 @@
     CHECK(data[3] == Half(4.0f));
 }
 
+
+TEST_CASE("ConvertConstantsFloatToHalfTest_constant")
+{
+    armnn::Graph graph;
+
+    // Create the simple test network with Weights and Biases as inputs to a FullyConnected layer.
+    auto input   = graph.AddLayer<armnn::InputLayer>(0, "Input");
+    auto weights = graph.AddLayer<armnn::ConstantLayer>("Weights");
+    auto biases  = graph.AddLayer<armnn::ConstantLayer>("Biases");
+
+    armnn::FullyConnectedDescriptor desc;
+    desc.m_BiasEnabled = true;
+    desc.m_ConstantWeights = true;
+    auto fcLayer = graph.AddLayer<armnn::FullyConnectedLayer>(desc, "FullyConnected");
+    auto output  = graph.AddLayer<armnn::OutputLayer>(1, "Output");
+
+    float expectedWeightsData[] = { 1.0f, 2.0f, 3.0f, 4.0f };
+    float expectedBiasesData[]  = { 2.0f, 2.0f };
+
+    const armnn::TensorInfo inputInfo  ({ 1, 2, 2, 3 }, armnn::DataType::Float16);
+    const armnn::TensorInfo outputInfo ({ 1, 2, 2, 3 }, armnn::DataType::Float16);
+    const armnn::TensorInfo weightsInfo({ 4 }, armnn::DataType::Float32, 0.0f, 0, true);
+    const armnn::TensorInfo biasesInfo ({ 2 }, armnn::DataType::Float32, 0.0f, 0, true);
+
+    // Set the m_LayerOutput for the optimizer to point to.
+    armnn::ConstTensor weightsTensor(weightsInfo, &expectedWeightsData);
+    armnn::ConstTensor biasesTensor(biasesInfo, &expectedBiasesData);
+    weights->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(weightsTensor);
+    biases->m_LayerOutput  = std::make_unique<armnn::ScopedTensorHandle>(biasesTensor);
+
+    input->GetOutputSlot().SetTensorInfo(inputInfo);
+    weights->GetOutputSlot().SetTensorInfo(weightsInfo);
+    biases->GetOutputSlot().SetTensorInfo(biasesInfo);
+    fcLayer->GetOutputSlot().SetTensorInfo(outputInfo);
+
+    // Connect up the layers
+    input->GetOutputSlot(0).Connect(fcLayer->GetInputSlot(0));
+    weights->GetOutputSlot(0).Connect(fcLayer->GetInputSlot(1));
+    biases->GetOutputSlot(0).Connect(fcLayer->GetInputSlot(2));
+    fcLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    // Check tensor data type before conversion
+    CHECK(weights->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+
+    // Run the optimizer
+    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToHalf()));
+
+    // Check tensor data type after conversion
+    CHECK(weights->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+
+    // Check whether weights data matches expected fp16 data
+    const Half* data = weights->m_LayerOutput->GetConstTensor<Half>();
+    CHECK(data[0] == Half(1.0f));
+    CHECK(data[1] == Half(2.0f));
+    CHECK(data[2] == Half(3.0f));
+    CHECK(data[3] == Half(4.0f));
+
+    // Check whether bias data matches expected fp16 data
+    const Half* biasData = biases->m_LayerOutput->GetConstTensor<Half>();
+    CHECK(biasData[0] == Half(2.0f));
+    CHECK(biasData[1] == Half(2.0f));
+}
+
+
 }
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
index 5ceb8ae..abfb621 100644
--- a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
+++ b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
@@ -108,7 +108,6 @@
     TensorInfo biasInfo = biasLayer->m_LayerOutput->GetTensorInfo();
     biasInfo.SetConstant();
 
-
     weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
     biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
 
@@ -123,15 +122,15 @@
     Connect(layer, output, TensorInfo({ 2, 2, 2, 10 }, armnn::DataType::Float32));
 
     // check the constants that they are not NULL
-    CHECK(layer->m_Weight != nullptr);
-    CHECK(layer->m_Bias != nullptr);
+    CHECK(weightsLayer->m_LayerOutput != nullptr);
+    CHECK(biasLayer->m_LayerOutput != nullptr);
 
     // free up the constants..
     layer->ReleaseConstantData();
 
     // check the constants that they are NULL now
-    CHECK(layer->m_Weight == nullptr);
-    CHECK(layer->m_Bias == nullptr);
+    CHECK(weightsLayer->m_LayerOutput == nullptr);
+    CHECK(biasLayer->m_LayerOutput == nullptr);
 }
 
 TEST_CASE("ReleaseDepthwiseConvolution2dLayerConstantDataTest")