IVGCVSW-2019 : Replace the Compute enum with BackendId in the backend preferences list

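Backend preferences passed to Optimize(), the DeviceSpec, the tests and the
command-line tools are now expressed as std::vector<armnn::BackendId> instead
of std::vector<armnn::Compute>. BackendId gains a default constructor and a
stream insertion operator, and the tools parse the -c/--compute option into
BackendId values.

A rough sketch of the intended call-site usage (not part of this patch; it
assumes BackendId remains constructible from both Compute and plain strings,
as the updated tests and the const char* constructor suggest):

    #include <armnn/ArmNN.hpp>

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    armnn::INetworkPtr net(armnn::INetwork::Create());
    net->AddInputLayer(0, "input");

    // Preferences may mix Compute enum values and backend-name strings;
    // Optimize() ignores entries not present in the runtime's DeviceSpec.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, "CpuRef" };
    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
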
Change-Id: Ie7549fd27378acfa97e68d098e338b8c9d4ea5d2
diff --git a/include/armnn/ArmNN.hpp b/include/armnn/ArmNN.hpp
index c14b958..6fe8b18 100644
--- a/include/armnn/ArmNN.hpp
+++ b/include/armnn/ArmNN.hpp
@@ -4,6 +4,7 @@
 //
 #pragma once
 
+#include "BackendId.hpp"
 #include "Descriptors.hpp"
 #include "Exceptions.hpp"
 #include "IRuntime.hpp"
diff --git a/include/armnn/BackendId.hpp b/include/armnn/BackendId.hpp
index 711833d..72248bc 100644
--- a/include/armnn/BackendId.hpp
+++ b/include/armnn/BackendId.hpp
@@ -75,6 +75,7 @@
 class BackendId final
 {
 public:
+    BackendId() { GetComputeDeviceAsCString(Compute::Undefined); }
     BackendId(UninitializedBackendId) { GetComputeDeviceAsCString(Compute::Undefined); }
     BackendId(const std::string& id) : m_Id{id} {}
     BackendId(const char* id) : m_Id{id} {}
@@ -128,17 +129,21 @@
     const std::string& Get() const { return m_Id; }
 
 private:
-    // backend Id mustn't be empty:
-    BackendId() = delete;
     std::string m_Id;
 };
 
+inline std::ostream& operator<<(std::ostream& os, const BackendId& id)
+{
+    os << id.Get();
+    return os;
+}
+
 template <template <class...> class TContainer>
 inline std::ostream& operator<<(std::ostream& os,
                                 const TContainer<BackendId>& ids)
 {
     os << '[';
-    for (const auto& id : ids) { os << id.Get() << " "; }
+    for (const auto& id : ids) { os << id << " "; }
     os << ']';
     return os;
 }
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index aaf49a3..7a80935 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -4,12 +4,12 @@
 //
 #pragma once
 
-#include "armnn/NetworkFwd.hpp"
-#include "armnn/DescriptorsFwd.hpp"
-#include "armnn/TensorFwd.hpp"
-#include "armnn/Optional.hpp"
+#include <armnn/NetworkFwd.hpp>
+#include <armnn/DescriptorsFwd.hpp>
+#include <armnn/TensorFwd.hpp>
+#include <armnn/Optional.hpp>
 
-#include "armnn/Types.hpp"
+#include <armnn/Types.hpp>
 
 #include <memory>
 #include <vector>
@@ -339,7 +339,7 @@
 /// armnn::Exception if process fails.
 
 IOptimizedNetworkPtr Optimize(const INetwork& network,
-                              const std::vector<armnn::Compute>& backendPreferences,
+                              const std::vector<BackendId>& backendPreferences,
                               const IDeviceSpec& deviceSpec,
                               const OptimizerOptions& options = OptimizerOptions(),
                               Optional<std::vector<std::string>&> errMessages = EmptyOptional());
diff --git a/src/armnn/DeviceSpec.hpp b/src/armnn/DeviceSpec.hpp
index 34acbcb..af0d8f5 100644
--- a/src/armnn/DeviceSpec.hpp
+++ b/src/armnn/DeviceSpec.hpp
@@ -22,7 +22,7 @@
         return std::vector<IBackendSharedPtr>();
     }
 
-    std::set<Compute> m_SupportedComputeDevices;
+    std::set<BackendId> m_SupportedComputeDevices;
 };
 
 }
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index c43f336..8c70e5d 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -94,7 +94,7 @@
 }
 
 IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
-                              const std::vector<armnn::Compute>& backendPreferences,
+                              const std::vector<BackendId>& backendPreferences,
                               const IDeviceSpec& deviceSpec,
                               const OptimizerOptions& options,
                               Optional<std::vector<std::string>&> errMessages)
@@ -133,8 +133,8 @@
     // determine which of the preferred backends we have available for use
     // and whether we have specified CpuRef as one of those backends.
     bool cpuRefUsed = false;
-    std::vector<armnn::Compute> availablePreferredBackends;
-    for (const armnn::Compute& backend : backendPreferences)
+    std::vector<BackendId> availablePreferredBackends;
+    for (const auto& backend : backendPreferences)
     {
         // Check if the backend is in the available backend devices.
         if (std::find(spec.m_SupportedComputeDevices.begin(),
@@ -142,7 +142,7 @@
                       spec.m_SupportedComputeDevices.end())
         {
             availablePreferredBackends.push_back(backend);
-            if (armnn::Compute::CpuRef == backend) {
+            if (backend == armnn::Compute::CpuRef) {
                 cpuRefUsed = true;
             }
         }
@@ -183,7 +183,7 @@
             // which haven't had a scale set and report them all back.
             bErrorFound = true;
         }
-        for (const armnn::Compute& backend : availablePreferredBackends)
+        for (const auto& backend : availablePreferredBackends)
         {
             // need to set the compute device on the layer
             // before we can check if it is supported
@@ -205,7 +205,7 @@
                             InsertConvertFp32ToFp16LayersAfter(optNetObjPtr->GetGraph(), *layer);
 
                         // Assign a supported backend to the newly introduced conversion layers
-                        auto AssignFirstSupportedBackend = [&](Layer* layer, Compute preferredBackend)
+                        auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                         {
                             bool supportedBackendFound = false;
                             std::string reasonIfUnsupported;
@@ -218,7 +218,7 @@
                             }
                             else
                             {
-                                for (const Compute& backend : availablePreferredBackends)
+                                for (const auto& backend : availablePreferredBackends)
                                 {
                                     // Skip preferred backend (we already determined that it is not supported)
                                     if (backend == preferredBackend)
diff --git a/src/armnn/test/EndToEndTest.cpp b/src/armnn/test/EndToEndTest.cpp
index 98b1841..d34bf69 100644
--- a/src/armnn/test/EndToEndTest.cpp
+++ b/src/armnn/test/EndToEndTest.cpp
@@ -74,7 +74,7 @@
     softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     // optimize the network
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
     // Loads it into the runtime.
@@ -110,7 +110,7 @@
 }
 
 template <typename T>
-void ConstantUsageTest(const std::vector<armnn::Compute>& computeDevice,
+void ConstantUsageTest(const std::vector<armnn::BackendId>& computeDevice,
     const armnn::TensorInfo& commonTensorInfo,
     const std::vector<T>& inputData,
     const std::vector<T>& constantData,
@@ -165,7 +165,7 @@
     BOOST_TEST(outputData == expectedOutputData);
 }
 
-static void ConstantUsageFloat32Test(const std::vector<armnn::Compute>& computeDevice)
+static void ConstantUsageFloat32Test(const std::vector<armnn::BackendId>& computeDevice)
 {
     const armnn::TensorInfo commonTensorInfo({ 2, 3 }, armnn::DataType::Float32);
 
@@ -177,7 +177,7 @@
     );
 }
 
-static void ConstantUsageUint8Test(const std::vector<armnn::Compute>& computeDevice)
+static void ConstantUsageUint8Test(const std::vector<armnn::BackendId>& computeDevice)
 {
     armnn::TensorInfo commonTensorInfo({ 2, 3 }, armnn::DataType::QuantisedAsymm8);
 
@@ -197,7 +197,7 @@
 
 BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Float32)
 {
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     ConstantUsageFloat32Test(backends);
 }
 
@@ -217,7 +217,7 @@
 
 BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Uint8)
 {
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     ConstantUsageUint8Test(backends);
 }
 
@@ -250,7 +250,7 @@
     add->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
     // optimize the network
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
     // Loads it into the runtime.
@@ -349,7 +349,7 @@
     activation3->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
     // optimize the network
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
     // Loads it into the runtime.
@@ -410,7 +410,7 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
 
     // optimize the network
-    std::vector<Compute> backends = {Compute::CpuAcc, Compute::CpuRef};
+    std::vector<BackendId> backends = {Compute::CpuAcc, Compute::CpuRef};
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
     // Load it into the runtime. It should pass.
@@ -446,7 +446,7 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
 
     // optimize the network
-    std::vector<Compute> backends = {Compute::CpuAcc};
+    std::vector<BackendId> backends = {Compute::CpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
     BOOST_CHECK(!optNet);
 }
diff --git a/src/armnn/test/JsonPrinterTests.cpp b/src/armnn/test/JsonPrinterTests.cpp
index 44609ea..93f32cc 100644
--- a/src/armnn/test/JsonPrinterTests.cpp
+++ b/src/armnn/test/JsonPrinterTests.cpp
@@ -117,7 +117,7 @@
     return sections;
 }
 
-std::string SoftmaxProfilerTestSetupHelper(const std::vector<armnn::Compute>& backends)
+std::string SoftmaxProfilerTestSetupHelper(const std::vector<armnn::BackendId>& backends)
 {
     using namespace armnn;
 
@@ -239,7 +239,7 @@
 }
 
 void SetupSoftmaxProfilerWithSpecifiedBackendsAndValidateJSONPrinterResult(
-        const std::vector<armnn::Compute>& backends)
+        const std::vector<armnn::BackendId>& backends)
 {
     // setup the test fixture and obtain JSON Printer result
     std::string result = SoftmaxProfilerTestSetupHelper(backends);
@@ -250,10 +250,10 @@
     std::string changeLine40;
     std::string changeLine45;
 
-    switch(backends[0]) {
-        case armnn::Compute::GpuAcc: backend = "Cl";
-            changeLine31 = ",\n\"OpenClKernelTimer/: softmax_layer_max_shift_exp_sum_quantized_serial GWS[,,]\": {";
-            changeLine39 = R"(us"
+    if (backends[0] == armnn::Compute::GpuAcc) {
+        backend = "Cl";
+        changeLine31 = ",\n\"OpenClKernelTimer/: softmax_layer_max_shift_exp_sum_quantized_serial GWS[,,]\": {";
+        changeLine39 = R"(us"
 },
 "OpenClKernelTimer/: softmax_layer_norm_quantized GWS[,,]": {
 "raw": [
@@ -263,7 +263,7 @@
 ],
 "unit": "us")";
 
-            changeLine40 = R"(
+        changeLine40 = R"(
 },
 "CopyMemGeneric_Execute": {
 "raw": [
@@ -272,11 +272,13 @@
 
 ],
 "unit": "us")";
-            changeLine45 = "}\n";
-            break;
-        case armnn::Compute::CpuAcc: backend = "Neon";
-            changeLine31 = ",\n\"NeonKernelTimer/: NEFillBorderKernel\": {";
-            changeLine39 = R"(us"
+        changeLine45 = "}\n";
+    }
+    else if (backends[0] == armnn::Compute::CpuAcc)
+    {
+        backend = "Neon";
+        changeLine31 = ",\n\"NeonKernelTimer/: NEFillBorderKernel\": {";
+        changeLine39 = R"(us"
 },
 "NeonKernelTimer/: NELogitsDMaxKernel": {
 "raw": [
@@ -293,7 +295,7 @@
 
 ],
 "unit": "us")";
-            changeLine40 = R"(
+        changeLine40 = R"(
 },
 "CopyMemGeneric_Execute": {
 "raw": [
@@ -302,11 +304,9 @@
 
 ],
 "unit": "us")";
-            changeLine45 = "}\n";
-            break;
-        default:
-            break;
+        changeLine45 = "}\n";
     }
+
     std::string testData = R"({
 "ArmNN": {
 "inference_measurements": {
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 3b426fa..4f8dd7e 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -65,7 +65,7 @@
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
 
     std::ostringstream ss;
@@ -472,7 +472,7 @@
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
     static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph().AllocateDynamicBuffers();
     BOOST_CHECK(optNet);
@@ -503,7 +503,7 @@
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
     BOOST_CHECK(optNet);
     // validate workloads
@@ -534,7 +534,7 @@
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::GpuAcc };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
     BOOST_CHECK(optNet);
     // validate workloads
@@ -570,7 +570,7 @@
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
     BOOST_CHECK(!optNet);
 }
@@ -597,7 +597,7 @@
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
     BOOST_REQUIRE(optNet);
 
@@ -676,7 +676,7 @@
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::Undefined };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined };
 
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
     BOOST_CHECK(!optNet);
@@ -738,7 +738,7 @@
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::Undefined, armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined, armnn::Compute::CpuRef };
 
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
     BOOST_CHECK(optNet);
@@ -774,7 +774,7 @@
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc,
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
                                              armnn::Compute::GpuAcc,
                                              armnn::Compute::CpuRef };
 
@@ -818,7 +818,7 @@
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
 
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -851,7 +851,7 @@
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
 
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -909,7 +909,7 @@
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
 
     armnn::OptimizerOptions optimizerOptions;
     optimizerOptions.m_ReduceFp32ToFp16 = true;
@@ -976,7 +976,7 @@
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = {armnn::Compute::GpuAcc};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
 
     armnn::OptimizerOptions optimizerOptions;
     optimizerOptions.m_ReduceFp32ToFp16 = true;
diff --git a/src/armnn/test/ProfilerTests.cpp b/src/armnn/test/ProfilerTests.cpp
index 8d76844..650c13a 100644
--- a/src/armnn/test/ProfilerTests.cpp
+++ b/src/armnn/test/ProfilerTests.cpp
@@ -163,7 +163,7 @@
     armnn::NetworkId networkIdentifier = 1;
     armnn::INetworkPtr mockNetwork(armnn::INetwork::Create());
     mockNetwork->AddInputLayer(0, "test layer");
-    std::vector<armnn::Compute> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     runtime->LoadNetwork(networkIdentifier, armnn::Optimize(*mockNetwork, backends, runtime->GetDeviceSpec()));
 
     // Check that now there's a profiler registered for this thread (created and registered by the loading the network).
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 0237387..76f5774 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -39,7 +39,7 @@
     armnn::NetworkId networkIdentifier1 = 1;
     armnn::INetworkPtr mockNetwork1(armnn::INetwork::Create());
     mockNetwork1->AddInputLayer(0, "test layer");
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     runtime->LoadNetwork(networkIdentifier1, Optimize(*mockNetwork1, backends, runtime->GetDeviceSpec()));
 
     // Mock network 2.
@@ -71,7 +71,7 @@
 
 BOOST_GLOBAL_FIXTURE(DisableGlobalLeakChecking);
 
-void CreateAndDropDummyNetwork(const std::vector<armnn::Compute>& backends, armnn::Runtime& runtime)
+void CreateAndDropDummyNetwork(const std::vector<armnn::BackendId>& backends, armnn::Runtime& runtime)
 {
     armnn::NetworkId networkIdentifier;
     {
@@ -129,7 +129,7 @@
     armnn::Runtime runtime(options);
     armnn::RuntimeLoadedNetworksReserve(&runtime);
 
-    std::vector<armnn::Compute> backends = {armnn::Compute::GpuAcc};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     {
         // Do a warmup of this so we make sure that all one-time
         // initialization happens before we do the leak checking.
@@ -158,7 +158,7 @@
     armnn::Runtime runtime(options);
     armnn::RuntimeLoadedNetworksReserve(&runtime);
 
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuAcc};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
     {
         // Do a warmup of this so we make sure that all one-time
         // initialization happens before we do the leak checking.
@@ -187,7 +187,7 @@
     armnn::Runtime runtime(options);
     armnn::RuntimeLoadedNetworksReserve(&runtime);
 
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     {
         // Do a warmup of this so we make sure that all one-time
         // initialization happens before we do the leak checking.
@@ -243,7 +243,7 @@
     VALGRIND_COUNT_LEAKS(leakedBefore, dubious, reachableBefore, suppressed);
 
     // build a mock-network and load it into the runtime
-    std::vector<armnn::Compute> backends = {armnn::Compute::GpuAcc};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     {
         armnn::TensorInfo inputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
         armnn::TensorInfo outputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
@@ -331,7 +331,7 @@
         mockNetwork1->AddInputLayer(0, "test layer");
 
 
-        std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+        std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
         runtime.LoadNetwork(networkIdentifier1, Optimize(*mockNetwork1, backends, runtime.GetDeviceSpec()));
     }
 
@@ -379,7 +379,7 @@
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
     BOOST_CHECK(optNet);
 
@@ -406,7 +406,7 @@
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::GpuAcc };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
     BOOST_CHECK(optNet);
 
@@ -442,7 +442,7 @@
     normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
 
     // optimize the network
-    std::vector<armnn::Compute> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
     // Load it into the runtime. It should success.
@@ -476,7 +476,7 @@
     normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
 
     // Allow fallback to CpuRef.
-    std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
     // optimize the network
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
@@ -524,7 +524,7 @@
             armnn::DataType::QuantisedAsymm8
     ));
 
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     std::vector<std::string> errMessages;
     armnn::IOptimizedNetworkPtr optNet = Optimize(
             *net,
@@ -533,7 +533,7 @@
             OptimizerOptions(),
             errMessages
     );
-    
+
     BOOST_TEST(errMessages.size() == 1);
     BOOST_TEST(errMessages[0] ==
         "ERROR: output 0 of layer Softmax (softmax) is of type "
diff --git a/src/armnnCaffeParser/test/TestInputs.cpp b/src/armnnCaffeParser/test/TestInputs.cpp
index 616d75d..bd13a09 100644
--- a/src/armnnCaffeParser/test/TestInputs.cpp
+++ b/src/armnnCaffeParser/test/TestInputs.cpp
@@ -42,7 +42,7 @@
     armnn::NetworkId netId;
 
     // Check everything works normally
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     {
         network = parser->CreateNetworkFromString(explicitInput.c_str(), {}, { "data" });
         BOOST_TEST(network.get());
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
index 404cbf0..e9cbb24 100644
--- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -44,7 +44,7 @@
     std::vector<uint8_t> m_GraphBinary;
     std::string m_JsonString;
     std::unique_ptr<ITfLiteParser, void (*)(ITfLiteParser *parser)> m_Parser;
-    std::vector<std::pair<armnn::IRuntimePtr, armnn::Compute>> m_Runtimes;
+    std::vector<std::pair<armnn::IRuntimePtr, armnn::BackendId>> m_Runtimes;
     armnn::NetworkId m_NetworkIdentifier;
 
     /// If the single-input-single-output overload of Setup() is called, these will store the input and output name
diff --git a/src/armnnUtils/ParserPrototxtFixture.hpp b/src/armnnUtils/ParserPrototxtFixture.hpp
index c502ad9..1adada7 100644
--- a/src/armnnUtils/ParserPrototxtFixture.hpp
+++ b/src/armnnUtils/ParserPrototxtFixture.hpp
@@ -59,7 +59,7 @@
 
     std::string                                         m_Prototext;
     std::unique_ptr<TParser, void(*)(TParser* parser)>  m_Parser;
-    std::vector<std::pair<armnn::IRuntimePtr, armnn::Compute>> m_Runtimes;
+    std::vector<std::pair<armnn::IRuntimePtr, armnn::BackendId>> m_Runtimes;
     armnn::NetworkId                                    m_NetworkIdentifier;
 
     /// If the single-input-single-output overload of Setup() is called, these will store the input and output name
diff --git a/src/backends/cl/test/Fp16SupportTest.cpp b/src/backends/cl/test/Fp16SupportTest.cpp
index 90bef36..e8d4591 100644
--- a/src/backends/cl/test/Fp16SupportTest.cpp
+++ b/src/backends/cl/test/Fp16SupportTest.cpp
@@ -73,7 +73,7 @@
    additionLayer->GetOutputSlot(0).SetTensorInfo(fp16TensorInfo);
 
    // optimize the network
-   std::vector<Compute> backends = {Compute::GpuAcc};
+   std::vector<BackendId> backends = {Compute::GpuAcc};
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
    // Loads it into the runtime.
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index ee20747..7f1bcd3 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -147,7 +147,7 @@
     printf("\n");
 }
 
-void RemoveDuplicateDevices(std::vector<armnn::Compute>& computeDevices)
+void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
 {
     // Mark the duplicate devices as 'Undefined'.
     for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
@@ -166,11 +166,11 @@
                          computeDevices.end());
 }
 
-bool CheckDevicesAreValid(const std::vector<armnn::Compute>& computeDevices)
+bool CheckDevicesAreValid(const std::vector<armnn::BackendId>& computeDevices)
 {
     return (!computeDevices.empty()
             && std::none_of(computeDevices.begin(), computeDevices.end(),
-                            [](armnn::Compute c){ return c == armnn::Compute::Undefined; }));
+                            [](armnn::BackendId c){ return c == armnn::Compute::Undefined; }));
 }
 
 } // namespace
@@ -178,7 +178,7 @@
 template<typename TParser, typename TDataType>
 int MainImpl(const char* modelPath,
              bool isModelBinary,
-             const std::vector<armnn::Compute>& computeDevice,
+             const std::vector<armnn::BackendId>& computeDevice,
              const char* inputName,
              const armnn::TensorShape* inputTensorShape,
              const char* inputTensorDataFilePath,
@@ -232,7 +232,7 @@
 // This will run a test
 int RunTest(const std::string& modelFormat,
             const std::string& inputTensorShapeStr,
-            const vector<armnn::Compute>& computeDevice,
+            const vector<armnn::BackendId>& computeDevice,
             const std::string& modelPath,
             const std::string& inputName,
             const std::string& inputTensorDataFilePath,
@@ -360,7 +360,7 @@
          "caffe-binary, caffe-text, tflite-binary, onnx-binary, onnx-text, tensorflow-binary or tensorflow-text.")
         ("model-path,m", po::value(&modelPath), "Path to model file, e.g. .caffemodel, .prototxt, .tflite,"
          " .onnx")
-        ("compute,c", po::value<std::vector<armnn::Compute>>()->multitoken(),
+        ("compute,c", po::value<std::vector<armnn::BackendId>>()->multitoken(),
          "The preferred order of devices to run layers on by default. Possible choices: CpuAcc, CpuRef, GpuAcc")
         ("input-name,i", po::value(&inputName), "Identifier of the input tensor in the network.")
         ("subgraph-number,n", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be "
@@ -414,7 +414,7 @@
     boost::trim(outputName);
 
     // Get the preferred order of compute devices.
-    std::vector<armnn::Compute> computeDevices = vm["compute"].as<std::vector<armnn::Compute>>();
+    std::vector<armnn::BackendId> computeDevices = vm["compute"].as<std::vector<armnn::BackendId>>();
 
     // Remove duplicates from the list of compute devices.
     RemoveDuplicateDevices(computeDevices);
@@ -466,7 +466,7 @@
              "caffe-binary, caffe-text, onnx-binary, onnx-text, tflite-binary, tensorflow-binary or tensorflow-text.")
             ("model-path,m", po::value(&modelPath), "Path to model file, e.g. .caffemodel, .prototxt,"
              " .tflite, .onnx")
-            ("compute,c", po::value<std::vector<armnn::Compute>>()->multitoken(),
+            ("compute,c", po::value<std::vector<std::string>>()->multitoken(),
              "The preferred order of devices to run layers on by default. Possible choices: CpuAcc, CpuRef, GpuAcc")
             ("input-name,i", po::value(&inputName), "Identifier of the input tensor in the network.")
             ("subgraph-number,x", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be executed."
@@ -588,7 +588,7 @@
     else // Run single test
     {
         // Get the preferred order of compute devices.
-        std::vector<armnn::Compute> computeDevices = vm["compute"].as<std::vector<armnn::Compute>>();
+        std::vector<armnn::BackendId> computeDevices = vm["compute"].as<std::vector<armnn::BackendId>>();
 
         // Remove duplicates from the list of compute devices.
         RemoveDuplicateDevices(computeDevices);
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 2e0aff9..8645c90 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -3,15 +3,15 @@
 // SPDX-License-Identifier: MIT
 //
 #pragma once
-#include "armnn/ArmNN.hpp"
+#include <armnn/ArmNN.hpp>
 
 #if defined(ARMNN_TF_LITE_PARSER)
-#include "armnnTfLiteParser/ITfLiteParser.hpp"
+#include <armnnTfLiteParser/ITfLiteParser.hpp>
 #endif
 
 #include <HeapProfiling.hpp>
 #if defined(ARMNN_ONNX_PARSER)
-#include "armnnOnnxParser/IOnnxParser.hpp"
+#include <armnnOnnxParser/IOnnxParser.hpp>
 #endif
 
 #include <boost/exception/exception.hpp>
@@ -20,6 +20,7 @@
 #include <boost/format.hpp>
 #include <boost/program_options.hpp>
 #include <boost/filesystem.hpp>
+#include <boost/lexical_cast.hpp>
 
 #include <map>
 #include <string>
@@ -40,7 +41,7 @@
     std::string m_InputBinding;
     std::string m_OutputBinding;
     const armnn::TensorShape* m_InputTensorShape;
-    std::vector<armnn::Compute> m_ComputeDevice;
+    std::vector<armnn::BackendId> m_ComputeDevice;
     bool m_EnableProfiling;
     size_t m_SubgraphId;
     bool m_IsModelBinary;
@@ -195,8 +196,6 @@
     return { { output.first, armnn::Tensor(output.second, outputTensorData.data()) } };
 }
 
-
-
 template <typename IParser, typename TDataType>
 class InferenceModel
 {
@@ -207,7 +206,7 @@
     struct CommandLineOptions
     {
         std::string m_ModelDir;
-        std::vector<armnn::Compute> m_ComputeDevice;
+        std::vector<armnn::BackendId> m_ComputeDevice;
         bool m_VisualizePostOptimizationModel;
         bool m_EnableFp16TurboMode;
     };
@@ -216,11 +215,13 @@
     {
         namespace po = boost::program_options;
 
+        std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc, armnn::Compute::CpuRef};
+
         desc.add_options()
             ("model-dir,m", po::value<std::string>(&options.m_ModelDir)->required(),
                 "Path to directory containing model files (.caffemodel/.prototxt/.tflite)")
-            ("compute,c", po::value<std::vector<armnn::Compute>>(&options.m_ComputeDevice)->default_value
-                 ({armnn::Compute::CpuAcc, armnn::Compute::CpuRef}),
+            ("compute,c", po::value<std::vector<armnn::BackendId>>(&options.m_ComputeDevice)->default_value
+                (defaultBackends),
                 "Which device to run layers on by default. Possible choices: CpuAcc, CpuRef, GpuAcc")
             ("visualize-optimized-model,v",
                 po::value<bool>(&options.m_VisualizePostOptimizationModel)->default_value(false),
diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp
index 32d828d..3ea7096 100644
--- a/tests/InferenceTest.hpp
+++ b/tests/InferenceTest.hpp
@@ -4,8 +4,8 @@
 //
 #pragma once
 
-#include "armnn/ArmNN.hpp"
-#include "armnn/TypesUtils.hpp"
+#include <armnn/ArmNN.hpp>
+#include <armnn/TypesUtils.hpp>
 #include "InferenceModel.hpp"
 
 #include <Logging.hpp>
@@ -30,6 +30,20 @@
     return in;
 }
 
+inline std::istream& operator>>(std::istream& in, armnn::BackendId& backend)
+{
+    std::string token;
+    in >> token;
+    armnn::Compute compute = armnn::ParseComputeDevice(token.c_str());
+    if (compute == armnn::Compute::Undefined)
+    {
+        in.setstate(std::ios_base::failbit);
+        throw boost::program_options::validation_error(boost::program_options::validation_error::invalid_option_value);
+    }
+    backend = compute;
+    return in;
+}
+
 namespace test
 {
 
diff --git a/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
index 34fdbf0..f9fdf8b 100644
--- a/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
+++ b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
@@ -36,7 +36,8 @@
 
         namespace po = boost::program_options;
 
-        std::vector<armnn::Compute> computeDevice;
+        std::vector<armnn::BackendId> computeDevice;
+        std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc, armnn::Compute::CpuRef};
         std::string modelDir;
         std::string dataDir;
 
@@ -48,8 +49,7 @@
                 ("help", "Display help messages")
                 ("model-dir,m", po::value<std::string>(&modelDir)->required(),
                     "Path to directory containing the Cifar10 model file")
-                ("compute,c", po::value<std::vector<armnn::Compute>>(&computeDevice)->default_value
-                     ({armnn::Compute::CpuAcc, armnn::Compute::CpuRef}),
+                ("compute,c", po::value<std::vector<armnn::BackendId>>(&computeDevice)->default_value(defaultBackends),
                     "Which device to run layers on by default. Possible choices: CpuAcc, CpuRef, GpuAcc")
                 ("data-dir,d", po::value<std::string>(&dataDir)->required(),
                     "Path to directory containing the Cifar10 test data");
@@ -200,7 +200,7 @@
     }
     catch (const std::exception& e)
     {
-        // Coverity fix: various boost exceptions can be thrown by methods called by this test. 
+        // Coverity fix: various boost exceptions can be thrown by methods called by this test.
         std::cerr << "WARNING: MultipleNetworksCifar10: An error has occurred when running the "
                      "multiple networks inference tests: " << e.what() << std::endl;
         return 1;