Fix graph copy memory spike
* Change layer storage of ConstTensors to std::shared_ptr<ConstCpuTensorHandle>
* Change clone to share the ConstTensor rather than copy it (a sketch of the sharing scheme follows this list)
* Remove uses of the non-const GetTensor() call
* Reduce the scope of the non-optimized network in ExeNet so its memory can be released after use
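
A minimal, self-contained sketch of the sharing scheme described above. The class names below (ConstTensorHandle, Layer) are simplified stand-ins, not Arm NN's real API; the point is that layers hold constant tensor data through a std::shared_ptr, so cloning a graph bumps a reference count instead of deep-copying every weight buffer.

    #include <iostream>
    #include <memory>
    #include <vector>

    // Simplified stand-in for ConstCpuTensorHandle: immutable weight
    // storage that only exposes const access to its data.
    class ConstTensorHandle
    {
    public:
        explicit ConstTensorHandle(std::vector<float> data) : m_Data(std::move(data)) {}
        const std::vector<float>& GetData() const { return m_Data; }
    private:
        std::vector<float> m_Data;
    };

    // Simplified stand-in for a layer that owns constant tensors
    // (e.g. weights and biases).
    class Layer
    {
    public:
        explicit Layer(std::shared_ptr<ConstTensorHandle> weights)
            : m_Weights(std::move(weights)) {}

        // Clone shares the handle instead of copying it: the new layer's
        // shared_ptr aliases the same underlying buffer.
        Layer Clone() const { return Layer(m_Weights); }

        long UseCount() const { return m_Weights.use_count(); }

    private:
        std::shared_ptr<ConstTensorHandle> m_Weights;
    };

    int main()
    {
        Layer original(std::make_shared<ConstTensorHandle>(std::vector<float>(1024, 1.0f)));
        Layer copy = original.Clone();                          // no weight data is duplicated
        std::cout << "use_count: " << copy.UseCount() << '\n';  // prints 2
    }

Sharing is only safe because the handle exposes const access, which is why the remaining non-const GetTensor() uses are removed. The diff below makes the complementary ExeNet change: the INetworkPtr's scope is narrowed so the parsed, non-optimized network is destroyed as soon as Optimize has produced the IOptimizedNetwork.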
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: Ibb2c7309d12411d21405bd6024c76bcdf5404545
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 6bfad06..dca3ab2 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -419,14 +419,14 @@
throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
}
- const auto parsing_start_time = armnn::GetTimeNow();
- armnn::INetworkPtr network = CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);
-
- ARMNN_LOG(info) << "Network parsing time: " << std::setprecision(2)
- << std::fixed << armnn::GetTimeDuration(parsing_start_time).count() << " ms\n";
-
armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork*){}};
{
+ const auto parsing_start_time = armnn::GetTimeNow();
+ armnn::INetworkPtr network = CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);
+
+ ARMNN_LOG(info) << "Network parsing time: " << std::setprecision(2)
+ << std::fixed << armnn::GetTimeDuration(parsing_start_time).count() << " ms\n";
+
ARMNN_SCOPED_HEAP_PROFILING("Optimizing");
armnn::OptimizerOptions options;
@@ -460,6 +460,8 @@
{
throw armnn::Exception("Optimize returned nullptr");
}
+
+
}
if (params.m_VisualizePostOptimizationModel)
@@ -470,6 +472,8 @@
optNet->SerializeToDot(file);
}
+
+
armnn::Status ret;
{
ARMNN_SCOPED_HEAP_PROFILING("LoadNetwork");