IVGCVSW-6732 Tests surrounded in '#if defined(ARMNNREF_ENABLED)' in android-nn-driver do not execute.
* Change to src/backends/cl/workloads/ClLstmFloatWorkload.cpp fixes LstmTests_GpuAcc tests.
* Changes to src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp & ClConvertFp32ToFp16Workload.hpp
fix MeanTests_GpuAcc and Convolution2DTests_1.1 tests.
* Added UnitTests to src/backends/cl/test/ClImportTensorHandleTests.cpp to test import on Convert Layers.
!android-nn-driver:7264
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I0c46dc4b9c54eca8771ab12ed0302b6224606957
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 339da0d..a365550 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1658,7 +1658,7 @@
return result;
}
-IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
+IOptimizedNetworkPtr Optimize(const Graph& inGraph,
const std::vector<BackendId>& backendPreferences,
const IDeviceSpec& deviceSpec,
const OptimizerOptions& options,
@@ -1667,7 +1667,7 @@
ARMNN_LOG(debug) << options.ToString();
// Enable profiling
- auto profiler = inNetwork.pNetworkImpl->GetGraph().GetProfiler();
+ auto profiler = inGraph.GetProfiler();
ProfilerManager::GetInstance().RegisterProfiler(profiler.get());
profiler->EnableProfiling(options.m_ProfilingEnabled);
@@ -1683,9 +1683,9 @@
}
// Ensure TensorInfo is set on all output slots of ConstantLayers in the graph
- inNetwork.pNetworkImpl->GetGraph().VerifyConstantLayerSetTensorInfo();
+ inGraph.VerifyConstantLayerSetTensorInfo();
- std::unique_ptr<Graph> graph = std::make_unique<Graph>(inNetwork.pNetworkImpl->GetGraph());
+ std::unique_ptr<Graph> graph = std::make_unique<Graph>(inGraph);
auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), options.m_ModelOptions),
&IOptimizedNetwork::Destroy);
@@ -1827,6 +1827,20 @@
}
return optNet;
}
+
+IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
+ const std::vector<BackendId>& backendPreferences,
+ const IDeviceSpec& deviceSpec,
+ const OptimizerOptions& options,
+ Optional<std::vector<std::string>&> messages)
+{
+ return Optimize(inNetwork.pNetworkImpl->GetGraph(),
+ backendPreferences,
+ deviceSpec,
+ options,
+ messages);
+}
+
bool NetworkImpl::GetShapeInferenceMethod()
{
if (m_NetworkOptions.size() > 0 && m_NetworkOptions[0].GetBackendId().Get() == "ShapeInferenceMethod")
@@ -2000,6 +2014,16 @@
return layer;
}
+IConnectableLayer* NetworkImpl::AddConvertFp16ToFp32Layer(const char* name)
+{
+ return m_Graph->AddLayer<ConvertFp16ToFp32Layer>(name);
+}
+
+IConnectableLayer* NetworkImpl::AddConvertFp32ToFp16Layer(const char* name)
+{
+ return m_Graph->AddLayer<ConvertFp32ToFp16Layer>(name);
+}
+
IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,