IVGCVSW-4399 Create Sample Dynamic backend

 * Move IWorkload and WorkloadInfo to include/armnn/backends
 * Add simple sample dynamic backend with addition workload
 * Add sample example to run dynamic backend
 * Unit tests

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I0753ce35b8e8a6223a1471388b49246d82438a44
diff --git a/cmake/GlobalConfig.cmake b/cmake/GlobalConfig.cmake
index ccf0ecc..aa2ebc7 100644
--- a/cmake/GlobalConfig.cmake
+++ b/cmake/GlobalConfig.cmake
@@ -307,6 +307,10 @@
     add_definitions(-DARMNN_DYNAMIC_BACKEND_ENABLED)
 endif()
 
+if(SAMPLE_DYNAMIC_BACKEND)
+    add_definitions(-DSAMPLE_DYNAMIC_BACKEND_ENABLED)
+endif()
+
 # Streamline annotate
 if(PROFILING_BACKEND_STREAMLINE)
     include_directories("${GATOR_ROOT}/annotate")
diff --git a/include/armnn/backends/CMakeLists.txt b/include/armnn/backends/CMakeLists.txt
index 90a022a..94e757f 100644
--- a/include/armnn/backends/CMakeLists.txt
+++ b/include/armnn/backends/CMakeLists.txt
@@ -8,10 +8,12 @@
      DynamicBackend.hpp
      IBackendInternal.hpp
      IBackendContext.hpp
-     ITensorHandleFactory.hpp
      IMemoryManager.hpp
      ITensorHandle.hpp
+     ITensorHandleFactory.hpp
+     IWorkload.hpp
      OptimizationViews.hpp
+     WorkloadInfo.hpp
      profiling/IBackendProfiling.hpp
      profiling/IBackendProfilingContext.hpp
 )
diff --git a/include/armnn/backends/IWorkload.hpp b/include/armnn/backends/IWorkload.hpp
new file mode 100644
index 0000000..0bd8d2d
--- /dev/null
+++ b/include/armnn/backends/IWorkload.hpp
@@ -0,0 +1,26 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/Types.hpp>
+
+namespace armnn
+{
+
+/// Workload interface to enqueue a layer computation.
+class IWorkload {
+public:
+    virtual ~IWorkload() {}
+
+    virtual void PostAllocationConfigure() = 0;
+
+    virtual void Execute() const = 0;
+
+    virtual profiling::ProfilingGuid GetGuid() const = 0;
+
+    virtual void RegisterDebugCallback(const DebugCallbackFunction & /*func*/) {}
+};
+
+} //namespace armnn
diff --git a/include/armnn/backends/WorkloadInfo.hpp b/include/armnn/backends/WorkloadInfo.hpp
new file mode 100644
index 0000000..edf3581
--- /dev/null
+++ b/include/armnn/backends/WorkloadInfo.hpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/Tensor.hpp>
+
+#include <vector>
+
+namespace armnn
+{
+
+/// Contains information about the inputs and outputs to a layer.
+/// It is needed when constructing workloads, but is not stored afterwards.
+struct WorkloadInfo
+{
+    std::vector<TensorInfo> m_InputTensorInfos;
+    std::vector<TensorInfo> m_OutputTensorInfos;
+};
+
+} //namespace armnn
diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt
index 640d5cd..5505de0 100644
--- a/samples/CMakeLists.txt
+++ b/samples/CMakeLists.txt
@@ -2,3 +2,8 @@
     add_executable(SimpleSample SimpleSample.cpp)
     target_link_libraries(SimpleSample armnn ${CMAKE_THREAD_LIBS_INIT})
 endif()
+
+if(SAMPLE_DYNAMIC_BACKEND)
+    add_executable(DynamicSample DynamicSample.cpp)
+    target_link_libraries(DynamicSample armnn ${CMAKE_THREAD_LIBS_INIT})
+endif()
diff --git a/samples/DynamicSample.cpp b/samples/DynamicSample.cpp
new file mode 100644
index 0000000..3abe12f
--- /dev/null
+++ b/samples/DynamicSample.cpp
@@ -0,0 +1,80 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include <armnn/INetwork.hpp>
+#include <armnn/IRuntime.hpp>
+#include <armnn/Utils.hpp>
+#include <armnn/Descriptors.hpp>
+
+#include <iostream>
+
+/// A simple example of using the ArmNN SDK API. In this sample, two input vectors are added element-wise
+/// by an addition layer that runs on the dynamically loaded "SampleDynamic" backend.
+int main()
+{
+    using namespace armnn;
+
+    // Construct ArmNN network: out = input0 + input1
+    armnn::NetworkId networkIdentifier;
+    INetworkPtr myNetwork = INetwork::Create();
+
+    IConnectableLayer* input0 = myNetwork->AddInputLayer(0);
+    IConnectableLayer* input1 = myNetwork->AddInputLayer(1);
+    IConnectableLayer* add    = myNetwork->AddAdditionLayer();
+    IConnectableLayer* output = myNetwork->AddOutputLayer(0);
+
+    input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
+    input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
+    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    TensorInfo tensorInfo(TensorShape({2, 1}), DataType::Float32);
+    input0->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+    input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+    add->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    // Create ArmNN runtime
+    IRuntime::CreationOptions options; // default options
+    armnn::IRuntimePtr run(armnn::IRuntime::Create(options));
+
+    // Optimise ArmNN network for the sample dynamic backend
+    armnn::IOptimizedNetworkPtr optNet = Optimize(*myNetwork, {"SampleDynamic"}, run->GetDeviceSpec());
+    if (!optNet)
+    {
+        // This shouldn't happen for this simple sample, with the sample dynamic backend.
+        // But in general usage Optimize could fail if the backends at runtime cannot
+        // support the model that has been provided.
+        std::cerr << "Error: Failed to optimise the input network." << std::endl;
+        return 1;
+    }
+
+    // Load graph into runtime
+    run->LoadNetwork(networkIdentifier, std::move(optNet));
+
+    // input data
+    std::vector<float> input0Data
+        {
+            5.0f, 3.0f
+        };
+    std::vector<float> input1Data
+        {
+            10.0f, 8.0f
+        };
+    std::vector<float> outputData(2);
+
+    // Bind each input buffer to its own binding id, querying the matching input tensor info.
+    InputTensors inputTensors
+        {
+            {0,armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), input0Data.data())},
+            {1,armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 1), input1Data.data())}
+        };
+    OutputTensors outputTensors
+        {
+            {0,armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())}
+        };
+
+    // Execute network
+    run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
+
+    std::cout << "Addition operator result is {" << outputData[0] << "," << outputData[1] << "}" << std::endl;
+    return 0;
+}
diff --git a/src/backends/backendsCommon/Workload.hpp b/src/backends/backendsCommon/Workload.hpp
index f7895a6..8ec09f9 100644
--- a/src/backends/backendsCommon/Workload.hpp
+++ b/src/backends/backendsCommon/Workload.hpp
@@ -7,7 +7,7 @@
 #include "WorkloadData.hpp"
 #include "WorkloadInfo.hpp"
 
-#include <armnn/Types.hpp>
+#include <armnn/backends/IWorkload.hpp>
 #include <Profiling.hpp>
 #include <ProfilingService.hpp>
 
@@ -16,20 +16,6 @@
 namespace armnn
 {
 
-/// Workload interface to enqueue a layer computation.
-class IWorkload
-{
-public:
-    virtual ~IWorkload() {}
-
-    virtual void PostAllocationConfigure() = 0;
-    virtual void Execute() const = 0;
-
-    virtual profiling::ProfilingGuid GetGuid() const = 0;
-
-    virtual void RegisterDebugCallback(const DebugCallbackFunction& /*func*/) {}
-};
-
 // NullWorkload used to denote an unsupported workload when used by the MakeWorkload<> template
 // in the various workload factories.
 // There should never be an instantiation of a NullWorkload.
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index c5fcf15..46681e9 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -31,7 +31,8 @@
     std::vector<ITensorHandle*> m_Outputs;
 
     void ValidateInputsOutputs(const std::string& descName,
-        unsigned int numExpectedIn, unsigned int numExpectedOut) const;
+                               unsigned int numExpectedIn,
+                               unsigned int numExpectedOut) const;
 
 
 protected:
diff --git a/src/backends/backendsCommon/WorkloadInfo.hpp b/src/backends/backendsCommon/WorkloadInfo.hpp
index 304bc0b..cac147c 100644
--- a/src/backends/backendsCommon/WorkloadInfo.hpp
+++ b/src/backends/backendsCommon/WorkloadInfo.hpp
@@ -2,17 +2,8 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#pragma once
 
-namespace armnn
-{
-
-/// Contains information about inputs and outputs to a layer.
-/// This is needed at construction of workloads, but are not stored.
-struct WorkloadInfo
-{
-    std::vector<TensorInfo> m_InputTensorInfos;
-    std::vector<TensorInfo> m_OutputTensorInfos;
-};
-
-} //namespace armnn
+// This file is deprecated and will be removed soon.
+// Please include the new header in armnn/backends instead.
+// For now this header simply forwards to the armnn/backends one.
+#include <armnn/backends/WorkloadInfo.hpp>
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.cpp b/src/backends/backendsCommon/test/DynamicBackendTests.cpp
index 40e063d..bb1a5cd 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.cpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.cpp
@@ -71,4 +71,9 @@
 
 #endif
 
+#if defined(SAMPLE_DYNAMIC_BACKEND_ENABLED)
+ARMNN_SIMPLE_TEST_CASE(CreateSampleDynamicBackend, CreateSampleDynamicBackendTestImpl);
+ARMNN_SIMPLE_TEST_CASE(SampleDynamicBackendEndToEnd, SampleDynamicBackendEndToEndTestImpl);
+#endif
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
index 4238ef6..1276776 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
@@ -19,7 +19,6 @@
 
 #include <string>
 #include <memory>
-#include <string>
 
 #include <boost/test/unit_test.hpp>
 #include <boost/filesystem.hpp>
@@ -1438,3 +1437,123 @@
 }
 
 #endif
+
+#if defined(SAMPLE_DYNAMIC_BACKEND_ENABLED)
+void CreateSampleDynamicBackendTestImpl()
+{
+    using namespace armnn;
+
+    // Using the path override in CreationOptions to load the reference dynamic backend
+    IRuntime::CreationOptions creationOptions;
+    IRuntimePtr runtime = IRuntime::Create(creationOptions);
+
+    const BackendRegistry& backendRegistry = BackendRegistryInstance();
+    BOOST_TEST(backendRegistry.Size() >= 1);
+
+    BackendIdSet backendIds = backendRegistry.GetBackendIds();
+    BOOST_TEST((backendIds.find("SampleDynamic") != backendIds.end()));
+
+    const DeviceSpec& deviceSpec = *boost::polymorphic_downcast<const DeviceSpec*>(&runtime->GetDeviceSpec());
+    BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends();
+    BOOST_TEST(supportedBackendIds.size()>= 1);
+    BOOST_TEST((supportedBackendIds.find("SampleDynamic") != supportedBackendIds.end()));
+
+    // Get the factory function
+    auto sampleDynamicBackendFactoryFunction = backendRegistry.GetFactory("SampleDynamic");
+    BOOST_TEST((sampleDynamicBackendFactoryFunction != nullptr));
+
+    // Use the factory function to create an instance of the dynamic backend
+    IBackendInternalUniquePtr sampleDynamicBackend = sampleDynamicBackendFactoryFunction();
+    BOOST_TEST((sampleDynamicBackend != nullptr));
+    BOOST_TEST((sampleDynamicBackend->GetId() == "SampleDynamic"));
+
+    // Test the backend instance by querying the layer support
+    IBackendInternal::ILayerSupportSharedPtr sampleLayerSupport = sampleDynamicBackend->GetLayerSupport();
+    BOOST_TEST((sampleLayerSupport != nullptr));
+
+    TensorShape inputShape {  1, 16, 16, 16 };
+    TensorShape outputShape{  1, 16, 16, 16 };
+    TensorShape weightShape{ 16,  1,  1, 16 };
+    TensorInfo inputInfo (inputShape,  DataType::Float32);
+    TensorInfo outputInfo(outputShape, DataType::Float32);
+    TensorInfo weightInfo(weightShape, DataType::Float32);
+    Convolution2dDescriptor convolution2dDescriptor;
+    bool sampleConvolution2dSupported =
+            sampleLayerSupport->IsConvolution2dSupported(inputInfo,
+                                                         outputInfo,
+                                                         convolution2dDescriptor,
+                                                         weightInfo,
+                                                         EmptyOptional());
+    BOOST_TEST(!sampleConvolution2dSupported);
+
+    // Test the backend instance by creating a workload
+    IBackendInternal::IWorkloadFactoryPtr sampleWorkloadFactory = sampleDynamicBackend->CreateWorkloadFactory();
+    BOOST_TEST((sampleWorkloadFactory != nullptr));
+
+    // Create dummy settings for the workload
+    AdditionQueueDescriptor additionQueueDescriptor;
+    WorkloadInfo workloadInfo
+    {
+        { inputInfo, inputInfo },
+        { outputInfo }
+    };
+
+    // Create a addition workload
+    auto workload = sampleWorkloadFactory->CreateAddition(additionQueueDescriptor, workloadInfo);
+    BOOST_TEST((workload != nullptr));
+}
+
+void SampleDynamicBackendEndToEndTestImpl()
+{
+    using namespace armnn;
+    using namespace boost::filesystem;
+    // Create runtime in which test will run
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+
+    // Builds up the structure of the network: out = input0 + input1.
+    INetworkPtr net(INetwork::Create());
+
+    IConnectableLayer* input0 = net->AddInputLayer(0);
+    IConnectableLayer* input1 = net->AddInputLayer(1);
+    IConnectableLayer* add = net->AddAdditionLayer();
+    IConnectableLayer* output = net->AddOutputLayer(0);
+
+    input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
+    input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
+    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    TensorInfo tensorInfo(TensorShape({2, 1}), DataType::Float32);
+    input0->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+    input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+    add->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    // optimize the network for the sample dynamic backend
+    IOptimizedNetworkPtr optNet = Optimize(*net, {"SampleDynamic"}, runtime->GetDeviceSpec());
+
+    // Loads it into the runtime.
+    NetworkId netId;
+    runtime->LoadNetwork(netId, std::move(optNet));
+
+    std::vector<float> input0Data{ 5.0f, 3.0f };
+    std::vector<float> input1Data{ 10.0f, 8.0f };
+    std::vector<float> expectedOutputData{ 15.0f, 11.0f };
+    std::vector<float> outputData(2);
+
+    // Bind each input buffer to its own binding id, querying the matching input tensor info.
+    InputTensors inputTensors
+        {
+            {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input0Data.data())},
+            {1,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), input1Data.data())}
+        };
+    OutputTensors outputTensors
+        {
+            {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
+        };
+
+    // Does the inference.
+    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+    // Checks the results.
+    BOOST_TEST(outputData == expectedOutputData);
+}
+#endif
diff --git a/src/dynamic/sample/CMakeLists.txt b/src/dynamic/sample/CMakeLists.txt
new file mode 100644
index 0000000..aeb870c
--- /dev/null
+++ b/src/dynamic/sample/CMakeLists.txt
@@ -0,0 +1,34 @@
+#
+# Copyright © 2020 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
+
+cmake_minimum_required (VERSION 3.0.2)
+project(sample-dynamic)
+
+set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+
+list(APPEND armnnSampleDynamicBackend_sources
+        SampleDynamicAdditionWorkload.cpp
+        SampleDynamicAdditionWorkload.hpp
+        SampleDynamicBackend.cpp
+        SampleDynamicBackend.hpp
+        SampleDynamicLayerSupport.cpp
+        SampleDynamicLayerSupport.hpp
+        SampleDynamicWorkloadFactory.cpp
+        SampleDynamicWorkloadFactory.hpp
+        SampleMemoryManager.cpp
+        SampleMemoryManager.hpp
+        SampleTensorHandle.cpp
+        SampleTensorHandle.hpp
+)
+
+add_library(Arm_SampleDynamic_backend MODULE ${armnnSampleDynamicBackend_sources})
+
+target_include_directories(Arm_SampleDynamic_backend PRIVATE ${PROJECT_SOURCE_DIR}/../../../include)
+target_include_directories(Arm_SampleDynamic_backend PRIVATE ${PROJECT_SOURCE_DIR}/../../../third-party)
+target_include_directories(Arm_SampleDynamic_backend PRIVATE ${PROJECT_SOURCE_DIR}/../../../src/armnn)
+target_include_directories(Arm_SampleDynamic_backend PRIVATE ${PROJECT_SOURCE_DIR}/../../../src/armnnUtils)
+target_include_directories(Arm_SampleDynamic_backend PRIVATE ${PROJECT_SOURCE_DIR}/../../../src/backends)
+target_include_directories(Arm_SampleDynamic_backend PRIVATE ${PROJECT_SOURCE_DIR}/../../../src/profiling)
+
diff --git a/src/dynamic/sample/SampleDynamicAdditionWorkload.cpp b/src/dynamic/sample/SampleDynamicAdditionWorkload.cpp
new file mode 100644
index 0000000..0fa57a7
--- /dev/null
+++ b/src/dynamic/sample/SampleDynamicAdditionWorkload.cpp
@@ -0,0 +1,54 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/backends/ITensorHandle.hpp>
+
+#include "SampleDynamicAdditionWorkload.hpp"
+#include "SampleTensorHandle.hpp"
+
+namespace armnn
+{
+
+inline const TensorInfo& GetTensorInfo(const ITensorHandle* tensorHandle)
+{
+    // We know that the sample dynamic workloads use SampleTensorHandles for inputs and outputs
+    const SampleTensorHandle* sampleTensorHandle =
+        static_cast<const SampleTensorHandle*>(tensorHandle);
+    return sampleTensorHandle->GetTensorInfo();
+}
+
+const float* GetInputTensorData(unsigned int idx, const AdditionQueueDescriptor& data)
+{
+    const ITensorHandle* tensorHandle = data.m_Inputs[idx];
+    return reinterpret_cast<const float*>(tensorHandle->Map());
+}
+
+float* GetOutputTensorData(unsigned int idx, const AdditionQueueDescriptor& data)
+{
+    ITensorHandle* tensorHandle = data.m_Outputs[idx];
+    return reinterpret_cast<float*>(tensorHandle->Map());
+}
+
+SampleDynamicAdditionWorkload::SampleDynamicAdditionWorkload(const AdditionQueueDescriptor& descriptor,
+                                                             const WorkloadInfo& info)
+    : BaseWorkload(descriptor, info)
+{}
+
+void SampleDynamicAdditionWorkload::Execute() const
+{
+    const TensorInfo& info = GetTensorInfo(m_Data.m_Inputs[0]);
+    unsigned int num = info.GetNumElements();
+
+    const float* inputData0 = GetInputTensorData(0, m_Data);
+    const float* inputData1 = GetInputTensorData(1, m_Data);
+    float* outputData       = GetOutputTensorData(0, m_Data);
+
+    for (unsigned int i = 0; i < num; ++i)
+    {
+        outputData[i] = inputData0[i] + inputData1[i];
+    }
+}
+
+} // namespace armnn
diff --git a/src/dynamic/sample/SampleDynamicAdditionWorkload.hpp b/src/dynamic/sample/SampleDynamicAdditionWorkload.hpp
new file mode 100644
index 0000000..8362588
--- /dev/null
+++ b/src/dynamic/sample/SampleDynamicAdditionWorkload.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class SampleDynamicAdditionWorkload : public BaseWorkload<AdditionQueueDescriptor>
+{
+public:
+    SampleDynamicAdditionWorkload(const AdditionQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+    void Execute() const override;
+};
+
+} // namespace armnn
diff --git a/src/dynamic/sample/SampleDynamicBackend.cpp b/src/dynamic/sample/SampleDynamicBackend.cpp
new file mode 100644
index 0000000..1863c1c
--- /dev/null
+++ b/src/dynamic/sample/SampleDynamicBackend.cpp
@@ -0,0 +1,91 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SampleDynamicBackend.hpp"
+#include "SampleDynamicLayerSupport.hpp"
+#include "SampleDynamicWorkloadFactory.hpp"
+#include "SampleMemoryManager.hpp"
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <armnn/backends/OptimizationViews.hpp>
+
+namespace armnn
+{
+
+constexpr const char * SampleDynamicBackendId() { return "SampleDynamic"; }
+
+class SampleDynamicBackend : public IBackendInternal
+{
+public:
+    SampleDynamicBackend()  = default;
+    ~SampleDynamicBackend() = default;
+
+    static const BackendId& GetIdStatic()
+    {
+        static const BackendId s_Id{SampleDynamicBackendId()};
+        return s_Id;
+    }
+
+    const BackendId& GetId() const override { return GetIdStatic(); }
+
+    IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager() const override
+    {
+        return std::make_unique<SampleMemoryManager>();
+    }
+
+    IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory(
+        const IMemoryManagerSharedPtr& memoryManager) const override
+    {
+        return std::make_unique<SampleDynamicWorkloadFactory>();
+    }
+
+    IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override
+    {
+        static ILayerSupportSharedPtr layerSupport{new SampleDynamicLayerSupport};
+        return layerSupport;
+    }
+
+    IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const override
+    {
+        return IBackendContextPtr{};
+    }
+
+    OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override
+    {
+        OptimizationViews optimizationViews;
+
+        optimizationViews.AddUntouchedSubgraph(SubgraphView(subgraph));
+
+        return optimizationViews;
+    }
+
+};
+
+} // namespace armnn
+
+const char* GetBackendId()
+{
+    return armnn::SampleDynamicBackend::GetIdStatic().Get().c_str();
+}
+
+void GetVersion(uint32_t* outMajor, uint32_t* outMinor)
+{
+    if (!outMajor || !outMinor)
+    {
+        return;
+    }
+
+    armnn::BackendVersion apiVersion = armnn::IBackendInternal::GetApiVersion();
+
+    *outMajor = apiVersion.m_Major;
+    *outMinor = apiVersion.m_Minor;
+}
+
+void* BackendFactory()
+{
+    return new armnn::SampleDynamicBackend();
+}
+
+
diff --git a/src/dynamic/sample/SampleDynamicBackend.hpp b/src/dynamic/sample/SampleDynamicBackend.hpp
new file mode 100644
index 0000000..8be1038
--- /dev/null
+++ b/src/dynamic/sample/SampleDynamicBackend.hpp
@@ -0,0 +1,15 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <cstdint>
+
+extern "C"
+{
+const char* GetBackendId();
+void GetVersion(uint32_t* outMajor, uint32_t* outMinor);
+void* BackendFactory();
+}
diff --git a/src/dynamic/sample/SampleDynamicLayerSupport.cpp b/src/dynamic/sample/SampleDynamicLayerSupport.cpp
new file mode 100644
index 0000000..031d39c
--- /dev/null
+++ b/src/dynamic/sample/SampleDynamicLayerSupport.cpp
@@ -0,0 +1,51 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SampleDynamicLayerSupport.hpp"
+
+#include <InternalTypes.hpp>
+#include <LayerSupportCommon.hpp>
+#include <armnn/Types.hpp>
+
+namespace armnn
+{
+
+bool SampleDynamicLayerSupport::IsInputSupported(const TensorInfo& input,
+                                                 Optional<std::string&> reasonIfUnsupported) const
+{
+    return true;
+}
+
+bool SampleDynamicLayerSupport::IsOutputSupported(const TensorInfo& output,
+                                                  Optional<std::string&> reasonIfUnsupported) const
+{
+    return true;
+}
+
+bool SampleDynamicLayerSupport::IsAdditionSupported(const TensorInfo& input0,
+                                                    const TensorInfo& input1,
+                                                    const TensorInfo& output,
+                                                    Optional<std::string&> reasonIfUnsupported) const
+{
+
+    if (input0.GetDataType() != armnn::DataType::Float32)
+    {
+        return false;
+    }
+
+    if (input0.GetDataType() != input1.GetDataType())
+    {
+        return false;
+    }
+
+    if (input0.GetDataType() != output.GetDataType())
+    {
+        return false;
+    }
+
+    return true;
+}
+
+} // namespace armnn
diff --git a/src/dynamic/sample/SampleDynamicLayerSupport.hpp b/src/dynamic/sample/SampleDynamicLayerSupport.hpp
new file mode 100644
index 0000000..f6aa0cb
--- /dev/null
+++ b/src/dynamic/sample/SampleDynamicLayerSupport.hpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/LayerSupportBase.hpp>
+
+namespace armnn
+{
+
+class SampleDynamicLayerSupport : public LayerSupportBase
+{
+public:
+    bool IsAdditionSupported(const TensorInfo& input0,
+                             const TensorInfo& input1,
+                             const TensorInfo& output,
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsInputSupported(const TensorInfo& input,
+                          Optional<std::string&> reasonIfUnsupported) const override;
+
+    bool IsOutputSupported(const TensorInfo& output,
+                           Optional<std::string&> reasonIfUnsupported) const override;
+};
+
+} // namespace armnn
diff --git a/src/dynamic/sample/SampleDynamicWorkloadFactory.cpp b/src/dynamic/sample/SampleDynamicWorkloadFactory.cpp
new file mode 100644
index 0000000..0fb5504
--- /dev/null
+++ b/src/dynamic/sample/SampleDynamicWorkloadFactory.cpp
@@ -0,0 +1,75 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/MemCopyWorkload.hpp>
+
+#include "SampleDynamicAdditionWorkload.hpp"
+#include "SampleDynamicBackend.hpp"
+#include "SampleDynamicWorkloadFactory.hpp"
+#include "SampleTensorHandle.hpp"
+
+namespace armnn
+{
+
+namespace
+{
+static const BackendId s_Id{  GetBackendId() };
+}
+
+SampleDynamicWorkloadFactory::SampleDynamicWorkloadFactory(const std::shared_ptr<SampleMemoryManager>& memoryManager)
+    : m_MemoryManager(memoryManager)
+{
+}
+
+SampleDynamicWorkloadFactory::SampleDynamicWorkloadFactory()
+    : m_MemoryManager(new SampleMemoryManager())
+{
+}
+
+const BackendId& SampleDynamicWorkloadFactory::GetBackendId() const
+{
+    return s_Id;
+}
+
+bool SampleDynamicWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
+                                                    Optional<DataType> dataType,
+                                                    std::string& outReasonIfUnsupported)
+{
+    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
+}
+
+std::unique_ptr<ITensorHandle> SampleDynamicWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
+                                                                                const bool isMemoryManaged) const
+{
+    return std::make_unique<ScopedCpuTensorHandle>(tensorInfo);
+}
+
+std::unique_ptr<ITensorHandle> SampleDynamicWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
+                                                                                DataLayout dataLayout,
+                                                                                const bool isMemoryManaged) const
+{
+    return std::make_unique<ScopedCpuTensorHandle>(tensorInfo);
+}
+
+std::unique_ptr<IWorkload> SampleDynamicWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
+                                                                        const WorkloadInfo& info) const
+{
+    return std::make_unique<SampleDynamicAdditionWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> SampleDynamicWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
+                                                                     const WorkloadInfo& info) const
+{
+    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> SampleDynamicWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
+                                                                      const WorkloadInfo& info) const
+{
+    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
+}
+
+} // namespace armnn
diff --git a/src/dynamic/sample/SampleDynamicWorkloadFactory.hpp b/src/dynamic/sample/SampleDynamicWorkloadFactory.hpp
new file mode 100644
index 0000000..88b6798
--- /dev/null
+++ b/src/dynamic/sample/SampleDynamicWorkloadFactory.hpp
@@ -0,0 +1,62 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "SampleMemoryManager.hpp"
+
+#include <armnn/Optional.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+// Sample Dynamic workload factory.
+class SampleDynamicWorkloadFactory : public IWorkloadFactory
+{
+public:
+    explicit SampleDynamicWorkloadFactory(const std::shared_ptr<SampleMemoryManager>& memoryManager);
+    SampleDynamicWorkloadFactory();
+
+    ~SampleDynamicWorkloadFactory() {}
+
+    const BackendId& GetBackendId() const override;
+
+    static bool IsLayerSupported(const IConnectableLayer& layer,
+                                 Optional<DataType> dataType,
+                                 std::string& outReasonIfUnsupported);
+
+    bool SupportsSubTensors() const override { return false; }
+
+    std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent,
+                                                         TensorShape const& subTensorShape,
+                                                         unsigned int const* subTensorOrigin) const override
+    {
+        boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+        return nullptr;
+    }
+
+    std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+                                                      const bool IsMemoryManaged = true) const override;
+
+    std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+                                                      DataLayout dataLayout,
+                                                      const bool IsMemoryManaged = true) const override;
+
+    std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
+                                              const WorkloadInfo& info) const override;
+
+
+    std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
+                                            const WorkloadInfo& info) const override;
+
+private:
+    mutable std::shared_ptr<SampleMemoryManager> m_MemoryManager;
+
+};
+
+} // namespace armnn
diff --git a/src/dynamic/sample/SampleMemoryManager.cpp b/src/dynamic/sample/SampleMemoryManager.cpp
new file mode 100644
index 0000000..30a7548
--- /dev/null
+++ b/src/dynamic/sample/SampleMemoryManager.cpp
@@ -0,0 +1,95 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SampleMemoryManager.hpp"
+
+#include <algorithm>
+
+namespace armnn
+{
+
+SampleMemoryManager::SampleMemoryManager()
+{}
+
+SampleMemoryManager::~SampleMemoryManager()
+{}
+
+SampleMemoryManager::Pool* SampleMemoryManager::Manage(unsigned int numBytes)
+{
+    if (!m_FreePools.empty())
+    {
+        Pool* res = m_FreePools.back();
+        m_FreePools.pop_back();
+        res->Reserve(numBytes);
+        return res;
+    }
+    else
+    {
+        m_Pools.push_front(Pool(numBytes));
+        return &m_Pools.front();
+    }
+}
+
+void SampleMemoryManager::Allocate(SampleMemoryManager::Pool* pool)
+{
+    m_FreePools.push_back(pool);
+}
+
+void* SampleMemoryManager::GetPointer(SampleMemoryManager::Pool* pool)
+{
+    return pool->GetPointer();
+}
+
+void SampleMemoryManager::Acquire()
+{
+    for (Pool &pool: m_Pools)
+    {
+         pool.Acquire();
+    }
+}
+
+void SampleMemoryManager::Release()
+{
+    for (Pool &pool: m_Pools)
+    {
+         pool.Release();
+    }
+}
+
+SampleMemoryManager::Pool::Pool(unsigned int numBytes)
+    : m_Size(numBytes),
+      m_Pointer(nullptr)
+{}
+
+SampleMemoryManager::Pool::~Pool()
+{
+    if (m_Pointer)
+    {
+        Release();
+    }
+}
+
+void* SampleMemoryManager::Pool::GetPointer()
+{
+    return m_Pointer;
+}
+
+void SampleMemoryManager::Pool::Reserve(unsigned int numBytes)
+{
+    m_Size = std::max(m_Size, numBytes);
+}
+
+void SampleMemoryManager::Pool::Acquire()
+{
+    m_Pointer = ::operator new(size_t(m_Size));
+}
+
+void SampleMemoryManager::Pool::Release()
+{
+    ::operator delete(m_Pointer);
+    m_Pointer = nullptr;
+}
+
+}
diff --git a/src/dynamic/sample/SampleMemoryManager.hpp b/src/dynamic/sample/SampleMemoryManager.hpp
new file mode 100644
index 0000000..0993bc1
--- /dev/null
+++ b/src/dynamic/sample/SampleMemoryManager.hpp
@@ -0,0 +1,59 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/backends/IMemoryManager.hpp>
+
+#include <forward_list>
+#include <vector>
+
+namespace armnn
+{
+
+// An implementation of IMemoryManager to be used with SampleTensorHandle
+class SampleMemoryManager : public IMemoryManager
+{
+public:
+    SampleMemoryManager();
+    virtual ~SampleMemoryManager();
+
+    class Pool;
+
+    Pool* Manage(unsigned int numBytes);
+
+    void Allocate(Pool *pool);
+
+    void* GetPointer(Pool *pool);
+
+    void Acquire() override;
+    void Release() override;
+
+    class Pool
+    {
+    public:
+        Pool(unsigned int numBytes);
+        ~Pool();
+
+        void Acquire();
+        void Release();
+
+        void* GetPointer();
+
+        void Reserve(unsigned int numBytes);
+
+    private:
+        unsigned int m_Size;
+        void* m_Pointer;
+    };
+
+private:
+    SampleMemoryManager(const SampleMemoryManager&) = delete; // Noncopyable
+    SampleMemoryManager& operator=(const SampleMemoryManager&) = delete; // Noncopyable
+
+    std::forward_list<Pool> m_Pools;
+    std::vector<Pool*> m_FreePools;
+};
+
+}
diff --git a/src/dynamic/sample/SampleTensorHandle.cpp b/src/dynamic/sample/SampleTensorHandle.cpp
new file mode 100644
index 0000000..48f8cf4
--- /dev/null
+++ b/src/dynamic/sample/SampleTensorHandle.cpp
@@ -0,0 +1,137 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SampleTensorHandle.hpp"
+
+namespace armnn
+{
+
+SampleTensorHandle::SampleTensorHandle(const TensorInfo &tensorInfo,
+                                       std::shared_ptr<SampleMemoryManager> &memoryManager)
+    : m_TensorInfo(tensorInfo),
+      m_MemoryManager(memoryManager),
+      m_Pool(nullptr),
+      m_UnmanagedMemory(nullptr),
+      m_ImportFlags(static_cast<MemorySourceFlags>(MemorySource::Undefined)),
+      m_Imported(false)
+{
+
+}
+
+SampleTensorHandle::SampleTensorHandle(const TensorInfo& tensorInfo,
+                                       std::shared_ptr<SampleMemoryManager> &memoryManager,
+                                       MemorySourceFlags importFlags)
+    : m_TensorInfo(tensorInfo),
+      m_MemoryManager(memoryManager),
+      m_Pool(nullptr),
+      m_UnmanagedMemory(nullptr),
+      m_ImportFlags(importFlags),
+      m_Imported(false)
+{
+
+}
+
+SampleTensorHandle::~SampleTensorHandle()
+{
+    if (!m_Pool)
+    {
+        // unmanaged
+        if (!m_Imported)
+        {
+            ::operator delete(m_UnmanagedMemory);
+        }
+    }
+}
+
+void SampleTensorHandle::Manage()
+{
+    m_Pool = m_MemoryManager->Manage(m_TensorInfo.GetNumBytes());
+}
+
+void SampleTensorHandle::Allocate()
+{
+    if (!m_UnmanagedMemory)
+    {
+        if (!m_Pool)
+        {
+            // unmanaged
+            m_UnmanagedMemory = ::operator new(m_TensorInfo.GetNumBytes());
+        }
+        else
+        {
+            m_MemoryManager->Allocate(m_Pool);
+        }
+    }
+    else
+    {
+        throw InvalidArgumentException("SampleTensorHandle::Allocate Trying to allocate a SampleTensorHandle"
+                                       "that already has allocated memory.");
+    }
+}
+
+const void* SampleTensorHandle::Map(bool /*unused*/) const
+{
+    return GetPointer();
+}
+
+void* SampleTensorHandle::GetPointer() const
+{
+    if (m_UnmanagedMemory)
+    {
+        return m_UnmanagedMemory;
+    }
+    else
+    {
+        return m_MemoryManager->GetPointer(m_Pool);
+    }
+}
+
+bool SampleTensorHandle::Import(void* memory, MemorySource source)
+{
+
+    if (m_ImportFlags & static_cast<MemorySourceFlags>(source))
+    {
+        if (source == MemorySource::Malloc)
+        {
+            // Check memory alignment
+            constexpr uintptr_t alignment = sizeof(size_t);
+            if (reinterpret_cast<uintptr_t>(memory) % alignment)
+            {
+                if (m_Imported)
+                {
+                    m_Imported = false;
+                    m_UnmanagedMemory = nullptr;
+                }
+
+                return false;
+            }
+
+            // m_UnmanagedMemory not yet allocated.
+            if (!m_Imported && !m_UnmanagedMemory)
+            {
+                m_UnmanagedMemory = memory;
+                m_Imported = true;
+                return true;
+            }
+
+            // m_UnmanagedMemory initially allocated with Allocate().
+            if (!m_Imported && m_UnmanagedMemory)
+            {
+                return false;
+            }
+
+            // m_UnmanagedMemory previously imported.
+            if (m_Imported)
+            {
+                m_UnmanagedMemory = memory;
+                return true;
+            }
+        }
+    }
+
+    return false;
+}
+
+}
diff --git a/src/dynamic/sample/SampleTensorHandle.hpp b/src/dynamic/sample/SampleTensorHandle.hpp
new file mode 100644
index 0000000..c08edc6
--- /dev/null
+++ b/src/dynamic/sample/SampleTensorHandle.hpp
@@ -0,0 +1,78 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+#include "SampleMemoryManager.hpp"
+
+namespace armnn
+{
+
+// An implementation of ITensorHandle with simple "bump the pointer" memory-management behaviour
+class SampleTensorHandle : public ITensorHandle
+{
+public:
+    SampleTensorHandle(const TensorInfo& tensorInfo, std::shared_ptr<SampleMemoryManager> &memoryManager);
+
+    SampleTensorHandle(const TensorInfo& tensorInfo,
+                       std::shared_ptr<SampleMemoryManager> &memoryManager,
+                       MemorySourceFlags importFlags);
+
+    ~SampleTensorHandle();
+
+    virtual void Manage() override;
+
+    virtual void Allocate() override;
+
+    virtual ITensorHandle* GetParent() const override
+    {
+        return nullptr;
+    }
+
+    virtual const void* Map(bool /* blocking = true */) const override;
+    using ITensorHandle::Map;
+
+    virtual void Unmap() const override
+    {}
+
+    TensorShape GetStrides() const override
+    {
+        return GetUnpaddedTensorStrides(m_TensorInfo);
+    }
+
+    TensorShape GetShape() const override
+    {
+        return m_TensorInfo.GetShape();
+    }
+
+    const TensorInfo& GetTensorInfo() const
+    {
+        return m_TensorInfo;
+    }
+
+    virtual MemorySourceFlags GetImportFlags() const override
+    {
+        return m_ImportFlags;
+    }
+
+    virtual bool Import(void* memory, MemorySource source) override;
+
+private:
+    void* GetPointer() const;
+
+    SampleTensorHandle(const SampleTensorHandle& other) = delete; // noncopyable
+    SampleTensorHandle& operator=(const SampleTensorHandle& other) = delete; //noncopyable
+
+    TensorInfo m_TensorInfo;
+
+    std::shared_ptr<SampleMemoryManager> m_MemoryManager;
+    SampleMemoryManager::Pool* m_Pool;
+    mutable void *m_UnmanagedMemory;
+    MemorySourceFlags m_ImportFlags;
+    bool m_Imported;
+};
+
+}