IVGCVSW-1642 : introducing the IBackend interface
Change-Id: Iaadee0a08c0594c9a3c802a48fe346e15f2cbbb2
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index 172df1b..d0a0174 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -5,6 +5,8 @@
#pragma once
#include <array>
+#include <memory>
+#include <string>
namespace armnn
{
@@ -67,9 +68,9 @@
enum class PaddingMethod
{
/// The padding fields count, but are ignored
- IgnoreValue = 0,
+ IgnoreValue = 0,
/// The padding fields don't count and are ignored
- Exclude = 1
+ Exclude = 1
};
enum class NormalizationAlgorithmChannel
@@ -80,9 +81,9 @@
enum class NormalizationAlgorithmMethod
{
- /// Krichevsky 2012: Local Brightness Normalization
- LocalBrightness = 0,
- /// Jarret 2009: Local Contrast Normalization
+ /// Krichevsky 2012: Local Brightness Normalization
+ LocalBrightness = 0,
+ /// Jarret 2009: Local Contrast Normalization
LocalContrast = 1
};
@@ -95,14 +96,28 @@
enum class Compute
{
/// CPU Execution: Reference C++ kernels
- CpuRef = 0,
+ CpuRef = 0,
/// CPU Execution: NEON: ArmCompute
- CpuAcc = 1,
+ CpuAcc = 1,
/// GPU Execution: OpenCL: ArmCompute
- GpuAcc = 2,
+ GpuAcc = 2,
Undefined = 5
};
+/// Each backend should implement an IBackend.
+class IBackend
+{
+protected:
+ IBackend() {}
+ virtual ~IBackend() {}
+
+public:
+ virtual const std::string& GetId() const = 0;
+};
+
+using IBackendPtr = std::shared_ptr<IBackend>;
+
+/// Device specific knowledge to be passed to the optimizer.
class IDeviceSpec
{
protected:
diff --git a/src/armnn/DeviceSpec.hpp b/src/armnn/DeviceSpec.hpp
index 2eb8174..dbc04f0 100644
--- a/src/armnn/DeviceSpec.hpp
+++ b/src/armnn/DeviceSpec.hpp
@@ -4,8 +4,9 @@
//
#pragma once
-#include "armnn/Types.hpp"
+#include <armnn/Types.hpp>
#include <set>
+#include <vector>
namespace armnn
{
@@ -16,6 +17,11 @@
DeviceSpec() {}
virtual ~DeviceSpec() {}
+ virtual std::vector<IBackendPtr> GetBackends() const
+ {
+ return std::vector<IBackendPtr>();
+ }
+
std::set<Compute> m_SupportedComputeDevices;
};
diff --git a/src/armnn/test/NeonTimerTest.cpp b/src/armnn/test/NeonTimerTest.cpp
index f82924e..6d0429c 100644
--- a/src/armnn/test/NeonTimerTest.cpp
+++ b/src/armnn/test/NeonTimerTest.cpp
@@ -11,7 +11,6 @@
#include <armnn/TypesUtils.hpp>
#include <backends/CpuTensorHandle.hpp>
#include <backends/neon/NeonWorkloadFactory.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <backends/WorkloadFactory.hpp>
#include <backends/test/LayerTests.hpp>
#include <backends/test/TensorCopyUtils.hpp>
diff --git a/src/backends/OutputHandler.cpp b/src/backends/OutputHandler.cpp
index 15e90c7..5516c22 100644
--- a/src/backends/OutputHandler.cpp
+++ b/src/backends/OutputHandler.cpp
@@ -7,7 +7,7 @@
#include <boost/assert.hpp>
#include <boost/log/trivial.hpp>
-#include "WorkloadFactory.hpp"
+#include <backends/WorkloadFactory.hpp>
#include "WorkloadDataCollector.hpp"
#include "ITensorHandle.hpp"
diff --git a/src/backends/Workload.hpp b/src/backends/Workload.hpp
index cf9c6f2..4cfffd4 100644
--- a/src/backends/Workload.hpp
+++ b/src/backends/Workload.hpp
@@ -12,7 +12,7 @@
namespace armnn
{
-// Workload interface to enqueue a layer computation.
+/// Workload interface to enqueue a layer computation.
class IWorkload
{
public:
diff --git a/src/backends/WorkloadData.cpp b/src/backends/WorkloadData.cpp
index 8b28b47..32ed97a 100644
--- a/src/backends/WorkloadData.cpp
+++ b/src/backends/WorkloadData.cpp
@@ -5,7 +5,6 @@
#include "WorkloadData.hpp"
#include "CpuTensorHandle.hpp"
-#include "WorkloadInfo.hpp"
#include <algorithm>
#include <string>
diff --git a/src/backends/WorkloadData.hpp b/src/backends/WorkloadData.hpp
index 9fcc044..aac2228 100644
--- a/src/backends/WorkloadData.hpp
+++ b/src/backends/WorkloadData.hpp
@@ -10,6 +10,7 @@
#include <armnn/Tensor.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Exceptions.hpp>
+#include <backends/WorkloadInfo.hpp>
#include <InternalTypes.hpp>
#include <backends/OutputHandler.hpp>
diff --git a/src/backends/WorkloadFactory.cpp b/src/backends/WorkloadFactory.cpp
index a70097e..dc9c1bc 100644
--- a/src/backends/WorkloadFactory.cpp
+++ b/src/backends/WorkloadFactory.cpp
@@ -2,7 +2,8 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "WorkloadFactory.hpp"
+#include <backends/WorkloadFactory.hpp>
+
#include <backends/reference/RefWorkloadFactory.hpp>
#include <backends/neon/NeonWorkloadFactory.hpp>
#include <backends/cl/ClWorkloadFactory.hpp>
@@ -53,12 +54,16 @@
}
}
-bool IWorkloadFactory::IsLayerSupported(Compute compute, const Layer& layer, boost::optional<DataType> dataType,
- std::string& outReasonIfUnsupported)
+bool IWorkloadFactory::IsLayerSupported(Compute compute,
+ const IConnectableLayer& connectableLayer,
+ boost::optional<DataType> dataType,
+ std::string& outReasonIfUnsupported)
{
constexpr size_t reasonCapacity = 1024;
char reason[reasonCapacity];
bool result;
+ const Layer& layer = *(boost::polymorphic_downcast<const Layer*>(&connectableLayer));
+
switch(layer.GetType())
{
case LayerType::Activation:
@@ -583,10 +588,12 @@
return result;
}
-bool IWorkloadFactory::IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
+ boost::optional<DataType> dataType,
std::string& outReasonIfUnsupported)
{
- return IsLayerSupported(layer.GetComputeDevice(), layer, dataType, outReasonIfUnsupported);
+ auto layer = boost::polymorphic_downcast<const Layer*>(&connectableLayer);
+ return IsLayerSupported(layer->GetComputeDevice(), connectableLayer, dataType, outReasonIfUnsupported);
}
}
diff --git a/src/backends/WorkloadFactory.hpp b/src/backends/WorkloadFactory.hpp
index 77e810c..38448ca 100644
--- a/src/backends/WorkloadFactory.hpp
+++ b/src/backends/WorkloadFactory.hpp
@@ -4,10 +4,10 @@
//
#pragma once
-#include "Workload.hpp"
#include <memory>
-#include "armnn/TensorFwd.hpp"
-#include "OutputHandler.hpp"
+#include <armnn/TensorFwd.hpp>
+#include <backends/OutputHandler.hpp>
+#include <backends/Workload.hpp>
#include <boost/optional.hpp>
namespace armnn
@@ -32,9 +32,13 @@
/// Inform the memory manager to acquire memory
virtual void Acquire() { }
- static bool IsLayerSupported(Compute compute, const Layer& layer, boost::optional<DataType> dataType,
+ static bool IsLayerSupported(Compute compute,
+ const IConnectableLayer& layer,
+ boost::optional<DataType> dataType,
std::string& outReasonIfUnsupported);
- static bool IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+
+ static bool IsLayerSupported(const IConnectableLayer& layer,
+ boost::optional<DataType> dataType,
std::string& outReasonIfUnsupported);
virtual bool SupportsSubTensors() const = 0;
diff --git a/src/backends/test/ActivationTestImpl.hpp b/src/backends/test/ActivationTestImpl.hpp
index e7d3d6a..6371645 100644
--- a/src/backends/test/ActivationTestImpl.hpp
+++ b/src/backends/test/ActivationTestImpl.hpp
@@ -7,7 +7,6 @@
#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <test/TensorHelpers.hpp>
#include "QuantizeHelper.hpp"
diff --git a/src/backends/test/BatchNormTestImpl.hpp b/src/backends/test/BatchNormTestImpl.hpp
index 35f4e4c..d551221 100644
--- a/src/backends/test/BatchNormTestImpl.hpp
+++ b/src/backends/test/BatchNormTestImpl.hpp
@@ -6,7 +6,6 @@
#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <test/TensorHelpers.hpp>
diff --git a/src/backends/test/Conv2dTestImpl.hpp b/src/backends/test/Conv2dTestImpl.hpp
index ce19365..c593c7b 100644
--- a/src/backends/test/Conv2dTestImpl.hpp
+++ b/src/backends/test/Conv2dTestImpl.hpp
@@ -7,7 +7,6 @@
#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <test/TensorHelpers.hpp>
#include "QuantizeHelper.hpp"
diff --git a/src/backends/test/ConvertFp16ToFp32TestImpl.hpp b/src/backends/test/ConvertFp16ToFp32TestImpl.hpp
index 483689d..2455e96 100644
--- a/src/backends/test/ConvertFp16ToFp32TestImpl.hpp
+++ b/src/backends/test/ConvertFp16ToFp32TestImpl.hpp
@@ -10,7 +10,6 @@
#include <armnn/TypesUtils.hpp>
#include <armnnUtils/Half.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <backends/CpuTensorHandle.hpp>
#include <test/TensorHelpers.hpp>
diff --git a/src/backends/test/ConvertFp32ToFp16TestImpl.hpp b/src/backends/test/ConvertFp32ToFp16TestImpl.hpp
index e4698a9..4eee274 100644
--- a/src/backends/test/ConvertFp32ToFp16TestImpl.hpp
+++ b/src/backends/test/ConvertFp32ToFp16TestImpl.hpp
@@ -10,7 +10,6 @@
#include <armnn/TypesUtils.hpp>
#include <armnnUtils/Half.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <backends/CpuTensorHandle.hpp>
#include <test/TensorHelpers.hpp>
diff --git a/src/backends/test/LstmTestImpl.hpp b/src/backends/test/LstmTestImpl.hpp
index 7d57c86..a7e595c 100644
--- a/src/backends/test/LstmTestImpl.hpp
+++ b/src/backends/test/LstmTestImpl.hpp
@@ -12,7 +12,6 @@
#include "QuantizeHelper.hpp"
#include <backends/CpuTensorHandle.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <backends/WorkloadFactory.hpp>
LayerTestResult<float, 2> LstmNoCifgNoPeepholeNoProjectionTestImpl(armnn::IWorkloadFactory& workloadFactory,
diff --git a/src/backends/test/PermuteTestImpl.hpp b/src/backends/test/PermuteTestImpl.hpp
index 2caf2c8..9e5dda4 100644
--- a/src/backends/test/PermuteTestImpl.hpp
+++ b/src/backends/test/PermuteTestImpl.hpp
@@ -7,7 +7,6 @@
#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <test/TensorHelpers.hpp>
#include "QuantizeHelper.hpp"
diff --git a/src/backends/test/Pooling2dTestImpl.hpp b/src/backends/test/Pooling2dTestImpl.hpp
index 4c69fb9..c87548c 100644
--- a/src/backends/test/Pooling2dTestImpl.hpp
+++ b/src/backends/test/Pooling2dTestImpl.hpp
@@ -7,7 +7,6 @@
#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <test/TensorHelpers.hpp>
#include "QuantizeHelper.hpp"
diff --git a/src/backends/test/ReshapeTestImpl.hpp b/src/backends/test/ReshapeTestImpl.hpp
index cbd3b58..198de53 100644
--- a/src/backends/test/ReshapeTestImpl.hpp
+++ b/src/backends/test/ReshapeTestImpl.hpp
@@ -7,7 +7,6 @@
#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <test/TensorHelpers.hpp>
#include "QuantizeHelper.hpp"
diff --git a/src/backends/test/SoftmaxTestImpl.hpp b/src/backends/test/SoftmaxTestImpl.hpp
index 7ca5f70..0bca8be 100644
--- a/src/backends/test/SoftmaxTestImpl.hpp
+++ b/src/backends/test/SoftmaxTestImpl.hpp
@@ -7,7 +7,6 @@
#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <test/TensorHelpers.hpp>
#include "QuantizeHelper.hpp"
diff --git a/src/backends/test/SplitterTestImpl.hpp b/src/backends/test/SplitterTestImpl.hpp
index 4578ce5..396cc1b 100644
--- a/src/backends/test/SplitterTestImpl.hpp
+++ b/src/backends/test/SplitterTestImpl.hpp
@@ -6,7 +6,6 @@
#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <test/TensorHelpers.hpp>
diff --git a/src/backends/test/WorkloadTestUtils.hpp b/src/backends/test/WorkloadTestUtils.hpp
index a7b7530..97f8ebd 100644
--- a/src/backends/test/WorkloadTestUtils.hpp
+++ b/src/backends/test/WorkloadTestUtils.hpp
@@ -5,7 +5,6 @@
#pragma once
#include <armnn/Tensor.hpp>
-#include <backends/WorkloadInfo.hpp>
namespace armnn
{