IVGCVSW-2886 Support multiple backends in Android driver
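
Replace the single armnn::Compute device held by DriverOptions with an
ordered list of armnn::BackendId, thread that list through
ModelToINetworkConverter and ConversionData, and check layer support
against each backend in turn via the new IsLayerSupportedForAnyBackend
helper. The --compute option now accepts multiple values: unknown
backends are skipped with a warning, and GpuAcc is used when none of
the requested backends are recognised.

A sketch of the intended usage (the service name and invocation below
are illustrative, not taken from this patch):

    # Prefer GpuAcc, fall back to CpuAcc, then CpuRef
    android.hardware.neuralnetworks@1.1-service-armnn --compute GpuAcc CpuAcc CpuRef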

Change-Id: I4abe1f750801911570b6dc65c187b828c5929b5f
Signed-off-by: Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>
diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp
index a591262..dee4a7a 100644
--- a/1.0/HalPolicy.cpp
+++ b/1.0/HalPolicy.cpp
@@ -90,12 +90,12 @@
 
     const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsAdditionSupported,
-                          data.m_Compute,
-                          input0.GetTensorInfo(),
-                          input1.GetTensorInfo(),
-                          outInfo))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsAdditionSupported,
+                                       data.m_Backends,
+                                       input0.GetTensorInfo(),
+                                       input1.GetTensorInfo(),
+                                       outInfo))
     {
         return false;
     }
@@ -289,12 +289,12 @@
     std::vector<const armnn::TensorInfo*> inputTensorInfos;
     std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
         [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
-    if (!IsLayerSupported(__func__,
-                          armnn::IsMergerSupported,
-                          data.m_Compute,
-                          inputTensorInfos,
-                          outputInfo,
-                          mergerDescriptor))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsMergerSupported,
+                                       data.m_Backends,
+                                       inputTensorInfos,
+                                       outputInfo,
+                                       mergerDescriptor))
     {
         return false;
     }
@@ -420,14 +420,14 @@
     desc.m_BiasEnabled = true;
     armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsConvolution2dSupported,
-                          data.m_Compute,
-                          inputInfo,
-                          outputInfo,
-                          desc,
-                          weights.GetInfo(),
-                          biases))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsConvolution2dSupported,
+                                       data.m_Backends,
+                                       inputInfo,
+                                       outputInfo,
+                                       desc,
+                                       weights.GetInfo(),
+                                       biases))
     {
         return false;
     }
@@ -546,14 +546,14 @@
     desc.m_BiasEnabled = true;
     armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsDepthwiseConvolutionSupported,
-                          data.m_Compute,
-                          inputInfo,
-                          outputInfo,
-                          desc,
-                          weights.GetInfo(),
-                          biases))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsDepthwiseConvolutionSupported,
+                                       data.m_Backends,
+                                       inputInfo,
+                                       outputInfo,
+                                       desc,
+                                       weights.GetInfo(),
+                                       biases))
     {
         return false;
     }
@@ -589,11 +589,11 @@
         return Fail("%s: Operation has invalid outputs", __func__);
     }
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsFloorSupported,
-                          data.m_Compute,
-                          input.GetTensorInfo(),
-                          GetTensorInfoForOperand(*outputOperand)))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsFloorSupported,
+                                       data.m_Backends,
+                                       input.GetTensorInfo(),
+                                       GetTensorInfoForOperand(*outputOperand)))
     {
         return false;
     }
@@ -667,14 +667,14 @@
     desc.m_TransposeWeightMatrix = true;
     desc.m_BiasEnabled           = true;
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsFullyConnectedSupported,
-                          data.m_Compute,
-                          reshapedInfo,
-                          outputInfo,
-                          weights.GetInfo(),
-                          bias.GetInfo(),
-                          desc))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsFullyConnectedSupported,
+                                       data.m_Backends,
+                                       reshapedInfo,
+                                       outputInfo,
+                                       weights.GetInfo(),
+                                       bias.GetInfo(),
+                                       desc))
     {
         return false;
     }
@@ -746,12 +746,12 @@
     // window rather than the radius as in AndroidNN.
     descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
 
-    if (!IsLayerSupported(__func__,
-                        armnn::IsNormalizationSupported,
-                        data.m_Compute,
-                        inputInfo,
-                        outputInfo,
-                        descriptor))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsNormalizationSupported,
+                                       data.m_Backends,
+                                       inputInfo,
+                                       outputInfo,
+                                       descriptor))
     {
         return false;
     }
@@ -1037,34 +1037,34 @@
         cellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
     }
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsLstmSupported,
-                          data.m_Compute,
-                          inputInfo,
-                          outputStateInInfo,
-                          cellStateInInfo,
-                          scratchBufferInfo,
-                          outputStateOutInfo,
-                          cellStateOutInfo,
-                          outputInfo,
-                          desc,
-                          inputToForgetWeights,
-                          inputToCellWeights,
-                          inputToOutputWeights,
-                          recurrentToForgetWeights,
-                          recurrentToCellWeights,
-                          recurrentToOutputWeights,
-                          forgetGateBias,
-                          cellBias,
-                          outputGateBias,
-                          inputToInputWeights,
-                          recurrentToInputWeights,
-                          cellToInputWeights,
-                          inputGateBias,
-                          projectionWeights,
-                          projectionBias,
-                          cellToForgetWeights,
-                          cellToOutputWeights))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsLstmSupported,
+                                       data.m_Backends,
+                                       inputInfo,
+                                       outputStateInInfo,
+                                       cellStateInInfo,
+                                       scratchBufferInfo,
+                                       outputStateOutInfo,
+                                       cellStateOutInfo,
+                                       outputInfo,
+                                       desc,
+                                       inputToForgetWeights,
+                                       inputToCellWeights,
+                                       inputToOutputWeights,
+                                       recurrentToForgetWeights,
+                                       recurrentToCellWeights,
+                                       recurrentToOutputWeights,
+                                       forgetGateBias,
+                                       cellBias,
+                                       outputGateBias,
+                                       inputToInputWeights,
+                                       recurrentToInputWeights,
+                                       cellToInputWeights,
+                                       inputGateBias,
+                                       projectionWeights,
+                                       projectionBias,
+                                       cellToForgetWeights,
+                                       cellToOutputWeights))
     {
         return false;
     }
@@ -1102,12 +1102,12 @@
     armnn::L2NormalizationDescriptor desc;
     desc.m_DataLayout = armnn::DataLayout::NHWC;
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsL2NormalizationSupported,
-                          data.m_Compute,
-                          inputInfo,
-                          outputInfo,
-                          desc))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsL2NormalizationSupported,
+                                       data.m_Backends,
+                                       inputInfo,
+                                       outputInfo,
+                                       desc))
     {
         return false;
     }
@@ -1156,12 +1156,12 @@
 
     const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsMultiplicationSupported,
-                          data.m_Compute,
-                          input0.GetTensorInfo(),
-                          input1.GetTensorInfo(),
-                          outInfo))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsMultiplicationSupported,
+                                       data.m_Backends,
+                                       input0.GetTensorInfo(),
+                                       input1.GetTensorInfo(),
+                                       outInfo))
     {
         return false;
     }
@@ -1232,12 +1232,12 @@
         return Fail("%s: Operation has invalid inputs", __func__);
     }
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsSoftmaxSupported,
-                          data.m_Compute,
-                          input.GetTensorInfo(),
-                          outInfo,
-                          desc))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsSoftmaxSupported,
+                                       data.m_Backends,
+                                       input.GetTensorInfo(),
+                                       outInfo,
+                                       desc))
     {
         return false;
     }
@@ -1311,11 +1311,11 @@
     reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                          requestedShape.dimensions.data());
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsReshapeSupported,
-                          data.m_Compute,
-                          input.GetTensorInfo(),
-                          reshapeDescriptor))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsReshapeSupported,
+                                       data.m_Backends,
+                                       input.GetTensorInfo(),
+                                       reshapeDescriptor))
     {
         return false;
     }
@@ -1347,11 +1347,11 @@
     armnn::ResizeBilinearDescriptor desc;
     desc.m_DataLayout = armnn::DataLayout::NHWC;
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsResizeBilinearSupported,
-                          data.m_Compute,
-                          inputInfo,
-                          outputInfo))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsResizeBilinearSupported,
+                                       data.m_Backends,
+                                       inputInfo,
+                                       outputInfo))
     {
         return false;
     }
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index 5530d31..9a0c1bf 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -76,12 +76,12 @@
 
     const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsDivisionSupported,
-                          data.m_Compute,
-                          input0.GetTensorInfo(),
-                          input1.GetTensorInfo(),
-                          outInfo))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsDivisionSupported,
+                                       data.m_Backends,
+                                       input0.GetTensorInfo(),
+                                       input1.GetTensorInfo(),
+                                       outInfo))
     {
         return false;
     }
@@ -127,12 +127,12 @@
 
     const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsSubtractionSupported,
-                          data.m_Compute,
-                          input0.GetTensorInfo(),
-                          input1.GetTensorInfo(),
-                          outInfo))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsSubtractionSupported,
+                                       data.m_Backends,
+                                       input0.GetTensorInfo(),
+                                       input1.GetTensorInfo(),
+                                       outInfo))
     {
         return false;
     }
@@ -200,12 +200,12 @@
 
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsMeanSupported,
-                          data.m_Compute,
-                          inputInfo,
-                          outputInfo,
-                          descriptor))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsMeanSupported,
+                                       data.m_Backends,
+                                       inputInfo,
+                                       outputInfo,
+                                       descriptor))
     {
         return false;
     }
@@ -266,12 +266,12 @@
 
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsPadSupported,
-                          data.m_Compute,
-                          inputInfo,
-                          outputInfo,
-                          descriptor))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsPadSupported,
+                                       data.m_Backends,
+                                       inputInfo,
+                                       outputInfo,
+                                       descriptor))
     {
         return false;
     }
@@ -351,12 +351,12 @@
     }
 
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-    if (!IsLayerSupported(__func__,
-                          armnn::IsSpaceToBatchNdSupported,
-                          data.m_Compute,
-                          inputInfo,
-                          outputInfo,
-                          descriptor))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsSpaceToBatchNdSupported,
+                                       data.m_Backends,
+                                       inputInfo,
+                                       outputInfo,
+                                       descriptor))
     {
         return false;
     }
@@ -428,11 +428,11 @@
         return Fail("%s: Could not read output 0", __func__);
     }
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsReshapeSupported,
-                          data.m_Compute,
-                          inputInfo,
-                          reshapeDesc))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsReshapeSupported,
+                                       data.m_Backends,
+                                       inputInfo,
+                                       reshapeDesc))
     {
         return false;
     }
@@ -517,12 +517,12 @@
     }
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsStridedSliceSupported,
-                          data.m_Compute,
-                          inputInfo,
-                          outputInfo,
-                          descriptor))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsStridedSliceSupported,
+                                       data.m_Backends,
+                                       inputInfo,
+                                       outputInfo,
+                                       descriptor))
     {
         return false;
     }
@@ -590,12 +590,12 @@
 
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsPermuteSupported,
-                          data.m_Compute,
-                          inputInfo,
-                          outputInfo,
-                          permuteDesc))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsPermuteSupported,
+                                       data.m_Backends,
+                                       inputInfo,
+                                       outputInfo,
+                                       permuteDesc))
     {
         return false;
     }
@@ -657,12 +657,12 @@
 
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsBatchToSpaceNdSupported,
-                          data.m_Compute,
-                          inputInfo,
-                          outputInfo,
-                          batchToSpaceNdDesc))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsBatchToSpaceNdSupported,
+                                       data.m_Backends,
+                                       inputInfo,
+                                       outputInfo,
+                                       batchToSpaceNdDesc))
     {
         return false;
     }
diff --git a/ArmnnDriverImpl.cpp b/ArmnnDriverImpl.cpp
index f6456ee..40bd80a 100644
--- a/ArmnnDriverImpl.cpp
+++ b/ArmnnDriverImpl.cpp
@@ -77,9 +77,9 @@
     }
 
     // Attempt to convert the model to an ArmNN input network (INetwork).
-    ModelToINetworkConverter<HalPolicy> modelConverter(options.GetComputeDevice(),
-                                                        model,
-                                                        options.GetForcedUnsupportedOperations());
+    ModelToINetworkConverter<HalPolicy> modelConverter(options.GetBackends(),
+                                                       model,
+                                                       options.GetForcedUnsupportedOperations());
 
     if (modelConverter.GetConversionResult() != ConversionResult::Success
             && modelConverter.GetConversionResult() != ConversionResult::UnsupportedFeature)
@@ -132,9 +132,9 @@
     // at this point we're being asked to prepare a model that we've already declared support for
     // and the operation indices may be different to those in getSupportedOperations anyway.
     set<unsigned int> unsupportedOperations;
-    ModelToINetworkConverter<HalPolicy> modelConverter(options.GetComputeDevice(),
-                                                        model,
-                                                        unsupportedOperations);
+    ModelToINetworkConverter<HalPolicy> modelConverter(options.GetBackends(),
+                                                       model,
+                                                       unsupportedOperations);
 
     if (modelConverter.GetConversionResult() != ConversionResult::Success)
     {
@@ -151,7 +151,7 @@
     try
     {
         optNet = armnn::Optimize(*modelConverter.GetINetwork(),
-                                 {options.GetComputeDevice()},
+                                 options.GetBackends(),
                                  runtime->GetDeviceSpec(),
                                  OptOptions,
                                  errMessages);
diff --git a/ConversionUtils.cpp b/ConversionUtils.cpp
index 60d1a1f..fb71c75 100644
--- a/ConversionUtils.cpp
+++ b/ConversionUtils.cpp
@@ -150,12 +150,12 @@
             }
         }
 
-        if (!IsLayerSupported(__func__,
-                              armnn::IsActivationSupported,
-                              data.m_Compute,
-                              prevLayer->GetOutputSlot(0).GetTensorInfo(),
-                              tensorInfo,
-                              activationDesc))
+        if (!IsLayerSupportedForAnyBackend(__func__,
+                                           armnn::IsActivationSupported,
+                                           data.m_Backends,
+                                           prevLayer->GetOutputSlot(0).GetTensorInfo(),
+                                           tensorInfo,
+                                           activationDesc))
         {
             return nullptr;
         }
@@ -169,4 +169,4 @@
     return activationLayer;
 }
 
-} // namespace armnn_driver
\ No newline at end of file
+} // namespace armnn_driver
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index ca1f0ae..de4516c 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -19,6 +19,7 @@
 #include <boost/test/tools/floating_point_comparison.hpp>
 
 #include <log/log.h>
+#include <vector>
 
 namespace armnn_driver
 {
@@ -29,12 +30,12 @@
 
 struct ConversionData
 {
-    ConversionData(armnn::Compute compute)
-            : m_Compute(compute)
-            , m_Network(nullptr, nullptr)
+    ConversionData(const std::vector<armnn::BackendId>& backends)
+    : m_Backends(backends)
+    , m_Network(nullptr, nullptr)
     {}
 
-    const armnn::Compute                      m_Compute;
+    const std::vector<armnn::BackendId>       m_Backends;
     armnn::INetworkPtr                        m_Network;
     std::vector<armnn::IOutputSlot*>          m_OutputSlotForOperand;
     std::vector<android::nn::RunTimePoolInfo> m_MemPools;
@@ -139,6 +140,24 @@
     }
 }
 
+template<typename IsLayerSupportedFunc, typename ... Args>
+bool IsLayerSupportedForAnyBackend(const char* funcName,
+                                   IsLayerSupportedFunc f,
+                                   const std::vector<armnn::BackendId>& backends,
+                                   Args&&... args)
+{
+    for (auto&& backend : backends)
+    {
+        if (IsLayerSupported(funcName, f, backend, std::forward<Args>(args)...))
+        {
+            return true;
+        }
+    }
+
+    ALOGD("%s: not supported by any specified backend", funcName);
+    return false;
+}
+
 armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
 {
     return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
@@ -809,10 +828,10 @@
             ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand, model, data);
             if (tensorPin.IsValid())
             {
-                if (!IsLayerSupported(__func__,
-                                      armnn::IsConstantSupported,
-                                      data.m_Compute,
-                                      tensorPin.GetConstTensor().GetInfo()))
+                if (!IsLayerSupportedForAnyBackend(__func__,
+                                                   armnn::IsConstantSupported,
+                                                   data.m_Backends,
+                                                   tensorPin.GetConstTensor().GetInfo()))
                 {
                     return LayerInputHandle();
                 }
@@ -859,12 +878,12 @@
         return false;
     }
     const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
-    if (!IsLayerSupported(__func__,
-                          armnn::IsActivationSupported,
-                          data.m_Compute,
-                          input.GetTensorInfo(),
-                          outInfo,
-                          activationDesc))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsActivationSupported,
+                                       data.m_Backends,
+                                       input.GetTensorInfo(),
+                                       outInfo,
+                                       activationDesc))
     {
         return false;
     }
@@ -976,12 +995,12 @@
         }
     }
 
-    if (!IsLayerSupported(__func__,
-                          armnn::IsPooling2dSupported,
-                          data.m_Compute,
-                          inputInfo,
-                          outputInfo,
-                          desc))
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsPooling2dSupported,
+                                       data.m_Backends,
+                                       inputInfo,
+                                       outputInfo,
+                                       desc))
     {
         return false;
     }
diff --git a/DriverOptions.cpp b/DriverOptions.cpp
index 10919a7..cd4b6bf 100644
--- a/DriverOptions.cpp
+++ b/DriverOptions.cpp
@@ -16,6 +16,7 @@
 #include <boost/algorithm/string/predicate.hpp>
 #include <boost/program_options.hpp>
 
+#include <algorithm>
 #include <cassert>
 #include <functional>
 #include <string>
@@ -28,7 +29,16 @@
 {
 
 DriverOptions::DriverOptions(armnn::Compute computeDevice, bool fp16Enabled)
-    : m_ComputeDevice(computeDevice)
+    : m_Backends({computeDevice})
+    , m_VerboseLogging(false)
+    , m_ClTunedParametersMode(armnn::IGpuAccTunedParameters::Mode::UseTunedParameters)
+    , m_EnableGpuProfiling(false)
+    , m_fp16Enabled(fp16Enabled)
+{
+}
+
+DriverOptions::DriverOptions(const std::vector<armnn::BackendId>& backends, bool fp16Enabled)
+    : m_Backends(backends)
     , m_VerboseLogging(false)
     , m_ClTunedParametersMode(armnn::IGpuAccTunedParameters::Mode::UseTunedParameters)
     , m_EnableGpuProfiling(false)
@@ -37,23 +47,22 @@
 }
 
 DriverOptions::DriverOptions(int argc, char** argv)
-    : m_ComputeDevice(armnn::Compute::GpuAcc)
-    , m_VerboseLogging(false)
+    : m_VerboseLogging(false)
     , m_ClTunedParametersMode(armnn::IGpuAccTunedParameters::Mode::UseTunedParameters)
     , m_EnableGpuProfiling(false)
     , m_fp16Enabled(false)
 {
     namespace po = boost::program_options;
 
-    std::string computeDeviceAsString;
     std::string unsupportedOperationsAsString;
     std::string clTunedParametersModeAsString;
 
     po::options_description optionsDesc("Options");
     optionsDesc.add_options()
         ("compute,c",
-         po::value<std::string>(&computeDeviceAsString)->default_value("GpuAcc"),
-         "Which device to run layers on by default. Possible values are: CpuRef, CpuAcc, GpuAcc")
+         po::value<std::vector<std::string>>()->
+            multitoken()->default_value(std::vector<std::string>{"GpuAcc"}, "{GpuAcc}"),
+         "Which backend to run layers on. Possible values are: CpuRef, CpuAcc, GpuAcc")
 
         ("verbose-logging,v",
          po::bool_switch(&m_VerboseLogging),
@@ -99,22 +108,26 @@
         ALOGW("An error occurred attempting to parse program options: %s", e.what());
     }
 
-    if (computeDeviceAsString == "CpuRef")
+    const std::vector<std::string> backends = variablesMap["compute"].as<std::vector<std::string>>();
+    const std::vector<string> supportedDevices({"CpuRef", "CpuAcc", "GpuAcc"});
+    m_Backends.reserve(backends.size());
+
+    for (auto&& backend : backends)
     {
-        m_ComputeDevice = armnn::Compute::CpuRef;
+        if (std::find(supportedDevices.cbegin(), supportedDevices.cend(), backend) == supportedDevices.cend())
+        {
+            ALOGW("Requested unknown backend %s", backend.c_str());
+        }
+        else
+        {
+            m_Backends.emplace_back(backend);
+        }
     }
-    else if (computeDeviceAsString == "GpuAcc")
+
+    if (m_Backends.empty())
     {
-        m_ComputeDevice = armnn::Compute::GpuAcc;
-    }
-    else if (computeDeviceAsString == "CpuAcc")
-    {
-        m_ComputeDevice = armnn::Compute::CpuAcc;
-    }
-    else
-    {
-        ALOGW("Requested unknown compute device %s. Defaulting to compute id %s",
-            computeDeviceAsString.c_str(), GetComputeDeviceAsCString(m_ComputeDevice));
+        m_Backends.emplace_back("GpuAcc");
+        ALOGW("No known backend specified. Defaulting to: GpuAcc");
     }
 
     if (!unsupportedOperationsAsString.empty())
diff --git a/DriverOptions.hpp b/DriverOptions.hpp
index 7271ac1..637ccd6 100644
--- a/DriverOptions.hpp
+++ b/DriverOptions.hpp
@@ -9,6 +9,7 @@
 
 #include <set>
 #include <string>
+#include <vector>
 
 namespace armnn_driver
 {
@@ -17,10 +18,11 @@
 {
 public:
     DriverOptions(armnn::Compute computeDevice, bool fp16Enabled = false);
+    DriverOptions(const std::vector<armnn::BackendId>& backends, bool fp16Enabled);
     DriverOptions(int argc, char** argv);
     DriverOptions(DriverOptions&& other) = default;
 
-    armnn::Compute GetComputeDevice() const { return m_ComputeDevice; }
+    const std::vector<armnn::BackendId>& GetBackends() const { return m_Backends; }
     bool IsVerboseLoggingEnabled() const { return m_VerboseLogging; }
     const std::string& GetRequestInputsAndOutputsDumpDir() const { return m_RequestInputsAndOutputsDumpDir; }
     const std::set<unsigned int>& GetForcedUnsupportedOperations() const { return m_ForcedUnsupportedOperations; }
@@ -30,7 +32,7 @@
     bool GetFp16Enabled() const { return m_fp16Enabled; }
 
 private:
-    armnn::Compute m_ComputeDevice;
+    std::vector<armnn::BackendId> m_Backends;
     bool m_VerboseLogging;
     std::string m_RequestInputsAndOutputsDumpDir;
     std::set<unsigned int> m_ForcedUnsupportedOperations;
diff --git a/ModelToINetworkConverter.cpp b/ModelToINetworkConverter.cpp
index 8bf84e9..fccd759 100644
--- a/ModelToINetworkConverter.cpp
+++ b/ModelToINetworkConverter.cpp
@@ -13,10 +13,10 @@
 {
 
 template<typename HalPolicy>
-ModelToINetworkConverter<HalPolicy>::ModelToINetworkConverter(armnn::Compute compute,
+ModelToINetworkConverter<HalPolicy>::ModelToINetworkConverter(const std::vector<armnn::BackendId>& backends,
     const HalModel& model,
     const std::set<unsigned int>& forcedUnsupportedOperations)
-    : m_Data(compute)
+    : m_Data(backends)
     , m_Model(model)
     , m_ForcedUnsupportedOperations(forcedUnsupportedOperations)
     , m_ConversionResult(ConversionResult::Success)
diff --git a/ModelToINetworkConverter.hpp b/ModelToINetworkConverter.hpp
index a3758fd..e78c5f0 100644
--- a/ModelToINetworkConverter.hpp
+++ b/ModelToINetworkConverter.hpp
@@ -11,6 +11,7 @@
 #include <armnn/ArmNN.hpp>
 
 #include <set>
+#include <vector>
 
 namespace armnn_driver
 {
@@ -30,7 +31,7 @@
 public:
     using HalModel = typename HalPolicy::Model;
 
-    ModelToINetworkConverter(armnn::Compute compute,
+    ModelToINetworkConverter(const std::vector<armnn::BackendId>& backends,
                              const HalModel& model,
                              const std::set<unsigned int>& forcedUnsupportedOperations);