IVGCVSW-3307 Add RefMemoryManager

Add a simple pool-based memory manager for the reference backend,
so that the backend can be used to test large networks.
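
The lifecycle, exercised by RefMemoryManagerTests.cpp and
RefTensorHandleTests.cpp below, is: Manage() hands out a Pool while the
network is being planned, Acquire() backs every Pool with a real
allocation before execution, GetPointer()/Map() expose that memory, and
Release() frees it again. A minimal sketch of that flow (the Example()
wrapper is illustrative only):

    #include <reference/RefMemoryManager.hpp>

    void Example()
    {
        armnn::RefMemoryManager memoryManager;

        // Planning: request a pool of at least 10 bytes.
        armnn::RefMemoryManager::Pool* pool = memoryManager.Manage(10);

        // Execution: back all pools with memory, use them, then free them.
        memoryManager.Acquire();
        void* buffer = memoryManager.GetPointer(pool); // valid until Release()
        (void) buffer; // a real workload would read/write through this pointer
        memoryManager.Release();
    }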

Change-Id: I5694da29052c60f95b57da595c64cc114d75b8ba
Signed-off-by: Matthew Bentham <Matthew.Bentham@arm.com>
diff --git a/src/backends/reference/CMakeLists.txt b/src/backends/reference/CMakeLists.txt
index fabffea..281e916 100644
--- a/src/backends/reference/CMakeLists.txt
+++ b/src/backends/reference/CMakeLists.txt
@@ -11,6 +11,8 @@
     RefTensorHandle.cpp
     RefLayerSupport.cpp
     RefLayerSupport.hpp
+    RefMemoryManager.hpp
+    RefMemoryManager.cpp
     RefWorkloadFactory.cpp
     RefWorkloadFactory.hpp
 )
diff --git a/src/backends/reference/RefBackend.cpp b/src/backends/reference/RefBackend.cpp
index 0a296be..3680831 100644
--- a/src/backends/reference/RefBackend.cpp
+++ b/src/backends/reference/RefBackend.cpp
@@ -15,6 +15,7 @@
 #include <Optimizer.hpp>
 
 #include <boost/cast.hpp>
+#include <boost/polymorphic_pointer_cast.hpp>
 
 namespace armnn
 {
@@ -43,7 +44,7 @@
 IBackendInternal::IWorkloadFactoryPtr RefBackend::CreateWorkloadFactory(
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const
 {
-    return std::make_unique<RefWorkloadFactory>();
+    return std::make_unique<RefWorkloadFactory>(boost::polymorphic_pointer_downcast<RefMemoryManager>(memoryManager));
 }
 
 IBackendInternal::IBackendContextPtr RefBackend::CreateBackendContext(const IRuntime::CreationOptions&) const
@@ -53,7 +54,7 @@
 
 IBackendInternal::IMemoryManagerUniquePtr RefBackend::CreateMemoryManager() const
 {
-    return IMemoryManagerUniquePtr{};
+    return std::make_unique<RefMemoryManager>();
 }
 
 IBackendInternal::Optimizations RefBackend::GetOptimizations() const
diff --git a/src/backends/reference/RefMemoryManager.cpp b/src/backends/reference/RefMemoryManager.cpp
new file mode 100644
index 0000000..0f4a289
--- /dev/null
+++ b/src/backends/reference/RefMemoryManager.cpp
@@ -0,0 +1,100 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "RefMemoryManager.hpp"
+
+#include <boost/assert.hpp>
+
+namespace armnn
+{
+
+RefMemoryManager::RefMemoryManager()
+{}
+
+RefMemoryManager::~RefMemoryManager()
+{}
+
+RefMemoryManager::Pool* RefMemoryManager::Manage(unsigned int numBytes)
+{
+    if (!m_FreePools.empty())
+    {
+        Pool* res = m_FreePools.back();
+        m_FreePools.pop_back();
+        res->Reserve(numBytes);
+        return res;
+    }
+    else
+    {
+        m_Pools.push_front(Pool(numBytes));
+        return &m_Pools.front();
+    }
+}
+
+void RefMemoryManager::Allocate(RefMemoryManager::Pool* pool)
+{
+    BOOST_ASSERT(pool);
+    m_FreePools.push_back(pool);
+}
+
+void* RefMemoryManager::GetPointer(RefMemoryManager::Pool* pool)
+{
+    return pool->GetPointer();
+}
+
+void RefMemoryManager::Acquire()
+{
+    for (Pool &pool: m_Pools)
+    {
+         pool.Acquire();
+    }
+}
+
+void RefMemoryManager::Release()
+{
+    for (Pool &pool: m_Pools)
+    {
+         pool.Release();
+    }
+}
+
+RefMemoryManager::Pool::Pool(unsigned int numBytes)
+    : m_Size(numBytes),
+      m_Pointer(nullptr)
+{}
+
+RefMemoryManager::Pool::~Pool()
+{
+    if (m_Pointer)
+    {
+        Release();
+    }
+}
+
+void* RefMemoryManager::Pool::GetPointer()
+{
+    BOOST_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::GetPointer() called when memory not acquired"); 
+    return m_Pointer;
+}
+
+void RefMemoryManager::Pool::Reserve(unsigned int numBytes)
+{
+    BOOST_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
+    m_Size = std::max(m_Size, numBytes);
+}
+
+void RefMemoryManager::Pool::Acquire()
+{
+    BOOST_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Acquire() called when memory already acquired"); 
+    BOOST_ASSERT(m_Size >= 0);
+    m_Pointer = ::operator new(size_t(m_Size));
+}
+
+void RefMemoryManager::Pool::Release()
+{
+    BOOST_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::Release() called when memory not acquired"); 
+    ::operator delete(m_Pointer);
+    m_Pointer = nullptr;
+}
+
+}
diff --git a/src/backends/reference/RefMemoryManager.hpp b/src/backends/reference/RefMemoryManager.hpp
new file mode 100644
index 0000000..5daac79
--- /dev/null
+++ b/src/backends/reference/RefMemoryManager.hpp
@@ -0,0 +1,62 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <backendsCommon/IMemoryManager.hpp>
+
+#include <forward_list>
+#include <vector>
+
+namespace armnn
+{
+
+// An implementation of IMemoryManager to be used with RefTensorHandle
+class RefMemoryManager : public IMemoryManager
+{
+public:
+    RefMemoryManager();
+    virtual ~RefMemoryManager();
+
+    class Pool;
+
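+    // Plan a chunk of memory: the returned Pool will be at least numBytes in size once Acquire() is called.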
+    Pool* Manage(unsigned int numBytes);
+
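+    // Return a Manage()d Pool to the free list so that later Manage() calls can reuse its storage.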
+    void Allocate(Pool *pool);
+
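+    // Get the memory backing a Pool; only valid between Acquire() and Release().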
+    void* GetPointer(Pool *pool);
+
+    void Acquire() override;
+    void Release() override;
+
+    class Pool
+    {
+    public:
+        Pool(unsigned int numBytes);
+        ~Pool();
+
+        void Acquire();
+        void Release();
+
+        void* GetPointer();
+
+        void Reserve(unsigned int numBytes);
+
+    private:
+        unsigned int m_Size;
+        void* m_Pointer;
+    };
+    
+private:
+    RefMemoryManager(const RefMemoryManager&) = delete; // Noncopyable
+    RefMemoryManager& operator=(const RefMemoryManager&) = delete; // Noncopyable
+
+    std::forward_list<Pool> m_Pools;
+    std::vector<Pool*> m_FreePools;
+};
+
+}
diff --git a/src/backends/reference/RefTensorHandle.cpp b/src/backends/reference/RefTensorHandle.cpp
index b7670f6..fe9310f 100644
--- a/src/backends/reference/RefTensorHandle.cpp
+++ b/src/backends/reference/RefTensorHandle.cpp
@@ -7,39 +7,83 @@
 namespace armnn
 {
 
-RefTensorHandle::RefTensorHandle(const TensorInfo &tensorInfo):
+RefTensorHandle::RefTensorHandle(const TensorInfo &tensorInfo, std::shared_ptr<RefMemoryManager> &memoryManager):
     m_TensorInfo(tensorInfo),
-    m_Memory(nullptr)
+    m_MemoryManager(memoryManager),
+    m_Pool(nullptr),
+    m_UnmanagedMemory(nullptr)
 {
 
 }
 
 RefTensorHandle::~RefTensorHandle()
 {
-    ::operator delete(m_Memory);
+    if (!m_Pool)
+    {
+        // unmanaged
+        ::operator delete(m_UnmanagedMemory);
+    }
+}
+
+void RefTensorHandle::Manage()
+{
+    BOOST_ASSERT_MSG(!m_Pool, "RefTensorHandle::Manage() called twice");
+    BOOST_ASSERT_MSG(!m_UnmanagedMemory, "RefTensorHandle::Manage() called after Allocate()");
+
+    m_Pool = m_MemoryManager->Manage(m_TensorInfo.GetNumBytes());
 }
 
 void RefTensorHandle::Allocate()
 {
-    if (m_Memory == nullptr)
+    if (!m_UnmanagedMemory)
     {
-        m_Memory = ::operator new(m_TensorInfo.GetNumBytes());
+        if (!m_Pool)
+        {
+            // unmanaged
+            m_UnmanagedMemory = ::operator new(m_TensorInfo.GetNumBytes());
+        }
+        else
+        {
+            m_MemoryManager->Allocate(m_Pool);
+        }
     }
     else
     {
         throw InvalidArgumentException("RefTensorHandle::Allocate Trying to allocate a RefTensorHandle"
-                                           "that already has allocated memory.");
+                                       " that already has allocated memory.");
     }
 }
 
-void RefTensorHandle::CopyOutTo(void* memory) const
+const void* RefTensorHandle::Map(bool /*unused*/) const
 {
-    memcpy(memory, m_Memory, m_TensorInfo.GetNumBytes());
+    return GetPointer();
 }
 
-void RefTensorHandle::CopyInFrom(const void* memory)
+void* RefTensorHandle::GetPointer() const
 {
-    memcpy(m_Memory, memory, m_TensorInfo.GetNumBytes());
+    if (m_UnmanagedMemory)
+    {
+        return m_UnmanagedMemory;
+    }
+    else
+    {
+        BOOST_ASSERT_MSG(m_Pool, "RefTensorHandle::GetPointer called on unmanaged, unallocated tensor handle");
+        return m_MemoryManager->GetPointer(m_Pool);
+    }
 }
 
-}
\ No newline at end of file
+void RefTensorHandle::CopyOutTo(void* dest) const
+{
+    const void *src = GetPointer();
+    BOOST_ASSERT(src);
+    memcpy(dest, src, m_TensorInfo.GetNumBytes());
+}
+
+void RefTensorHandle::CopyInFrom(const void* src)
+{
+    void *dest = GetPointer();
+    BOOST_ASSERT(dest);
+    memcpy(dest, src, m_TensorInfo.GetNumBytes());
+}
+
+}
diff --git a/src/backends/reference/RefTensorHandle.hpp b/src/backends/reference/RefTensorHandle.hpp
index 66d840a..ad47ee5 100644
--- a/src/backends/reference/RefTensorHandle.hpp
+++ b/src/backends/reference/RefTensorHandle.hpp
@@ -6,6 +6,8 @@
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 
+#include "RefMemoryManager.hpp"
+
 namespace armnn
 {
 
@@ -13,28 +15,25 @@
 class RefTensorHandle : public ITensorHandle
 {
 public:
-    RefTensorHandle(const TensorInfo& tensorInfo);
+    RefTensorHandle(const TensorInfo& tensorInfo, std::shared_ptr<RefMemoryManager> &memoryManager);
 
     ~RefTensorHandle();
 
-    virtual void Manage() override
-    {}
+    virtual void Manage() override;
+
+    virtual void Allocate() override;
 
     virtual ITensorHandle* GetParent() const override
     {
         return nullptr;
     }
 
-    virtual const void* Map(bool /* blocking = true */) const override
-    {
-        return m_Memory;
-    }
+    virtual const void* Map(bool /* blocking = true */) const override;
+    using ITensorHandle::Map;
 
     virtual void Unmap() const override
     {}
 
-    virtual void Allocate() override;
-
     TensorShape GetStrides() const override
     {
         return GetUnpaddedTensorStrides(m_TensorInfo);
@@ -55,12 +54,16 @@
     void CopyOutTo(void*) const override;
     void CopyInFrom(const void*) override;
 
-    RefTensorHandle(const RefTensorHandle& other) = delete;
+    void* GetPointer() const;
 
-    RefTensorHandle& operator=(const RefTensorHandle& other) = delete;
+    RefTensorHandle(const RefTensorHandle& other) = delete; // Noncopyable
+    RefTensorHandle& operator=(const RefTensorHandle& other) = delete; // Noncopyable
 
     TensorInfo m_TensorInfo;
-    void* m_Memory;
+
+    std::shared_ptr<RefMemoryManager> m_MemoryManager;
+    RefMemoryManager::Pool* m_Pool;
+    mutable void *m_UnmanagedMemory;
 };
 
-}
\ No newline at end of file
+}
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index b16e856..7ae5b97 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -55,7 +55,13 @@
     return IsDataType<DataType::QuantisedAsymm8>(info);
 }
 
+RefWorkloadFactory::RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager)
+    : m_MemoryManager(memoryManager)
+{
+}
+
 RefWorkloadFactory::RefWorkloadFactory()
+    : m_MemoryManager(new RefMemoryManager())
 {
 }
 
@@ -73,13 +79,13 @@
 
 std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const
 {
-    return std::make_unique<RefTensorHandle>(tensorInfo);
+    return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
 }
 
 std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                       DataLayout dataLayout) const
 {
-    return std::make_unique<RefTensorHandle>(tensorInfo);
+    return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
 }
 
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 1a40259..9ef1522 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -8,6 +8,8 @@
 #include <backendsCommon/WorkloadFactory.hpp>
 #include <backendsCommon/OutputHandler.hpp>
 
+#include "RefMemoryManager.hpp"
+
 #include <boost/core/ignore_unused.hpp>
 
 
@@ -30,7 +32,9 @@
 class RefWorkloadFactory : public IWorkloadFactory
 {
 public:
-    explicit RefWorkloadFactory();
+    explicit RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager);
+    RefWorkloadFactory();
+
     ~RefWorkloadFactory() {}
 
     const BackendId& GetBackendId() const override;
@@ -203,6 +207,8 @@
 
     template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
     std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor, const WorkloadInfo& info) const;
+
+    mutable std::shared_ptr<RefMemoryManager> m_MemoryManager;
 };
 
 } // namespace armnn
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index a736a88..411ab7e 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -10,6 +10,7 @@
 BACKEND_SOURCES := \
         RefBackend.cpp \
         RefLayerSupport.cpp \
+        RefMemoryManager.cpp \
         RefTensorHandle.cpp \
         RefWorkloadFactory.cpp \
         workloads/Activation.cpp \
@@ -85,5 +86,6 @@
         test/RefJsonPrinterTests.cpp \
         test/RefLayerSupportTests.cpp \
         test/RefLayerTests.cpp \
+        test/RefMemoryManagerTests.cpp \
         test/RefOptimizedNetworkTests.cpp \
         test/RefRuntimeTests.cpp
diff --git a/src/backends/reference/test/CMakeLists.txt b/src/backends/reference/test/CMakeLists.txt
index 9e5711e..b56b353 100644
--- a/src/backends/reference/test/CMakeLists.txt
+++ b/src/backends/reference/test/CMakeLists.txt
@@ -10,8 +10,10 @@
     RefJsonPrinterTests.cpp
     RefLayerSupportTests.cpp
     RefLayerTests.cpp
+    RefMemoryManagerTests.cpp
     RefOptimizedNetworkTests.cpp
     RefRuntimeTests.cpp
+    RefTensorHandleTests.cpp
     RefWorkloadFactoryHelper.hpp
 )
 
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 945a874..8fe18f5 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -36,6 +36,14 @@
     BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1));
     BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
 }
+
+armnn::RefWorkloadFactory GetFactory()
+{
+    std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
+    return RefWorkloadFactory(memoryManager);
+}
+
+
 }
 
 BOOST_AUTO_TEST_SUITE(CreateWorkloadRef)
@@ -44,7 +52,7 @@
 static void RefCreateActivationWorkloadTest()
 {
     Graph graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     auto workload = CreateActivationWorkloadTest<ActivationWorkloadType, DataType>(factory, graph);
 
     // Checks that outputs are as we expect them (see definition of CreateActivationWorkloadTest).
@@ -70,7 +78,7 @@
 static void RefCreateElementwiseWorkloadTest()
 {
     Graph graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     auto workload = CreateElementwiseWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(
         factory, graph);
 
@@ -180,7 +188,7 @@
 static void RefCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
 {
     Graph graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     auto workload = CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>(factory,
                                                                                                    graph,
                                                                                                    dataLayout);
@@ -244,7 +252,7 @@
 BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
 {
     Graph                graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     auto workload = CreateConvertFp16ToFp32WorkloadTest<RefConvertFp16ToFp32Workload>(factory, graph);
 
     // Checks that outputs and inputs are as we expect them
@@ -255,7 +263,7 @@
 BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload)
 {
     Graph                graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     auto workload = CreateConvertFp32ToFp16WorkloadTest<RefConvertFp32ToFp16Workload>(factory, graph);
 
     // Checks that outputs and inputs are as we expect them
@@ -266,7 +274,7 @@
 static void RefCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
 {
     Graph graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     auto workload = CreateConvolution2dWorkloadTest<RefConvolution2dWorkload, DataType::Float32>
                     (factory, graph, dataLayout);
 
@@ -294,7 +302,7 @@
 static void RefCreateDepthwiseConvolutionWorkloadTest(DataLayout dataLayout)
 {
     Graph graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     auto workload = CreateDepthwiseConvolution2dWorkloadTest<RefDepthwiseConvolution2dWorkload, DataType::Float32>
             (factory, graph, dataLayout);
 
@@ -318,7 +326,7 @@
 static void RefCreateFullyConnectedWorkloadTest()
 {
     Graph graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);
 
     // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
@@ -348,7 +356,7 @@
 static void RefCreateNormalizationWorkloadTest(DataLayout dataLayout)
 {
     Graph graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
 
     TensorShape inputShape;
@@ -405,7 +413,7 @@
 static void RefCreatePooling2dWorkloadTest(DataLayout dataLayout)
 {
     Graph graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     auto workload = CreatePooling2dWorkloadTest<Pooling2dWorkloadType, DataType>(factory, graph, dataLayout);
 
     TensorShape inputShape;
@@ -463,7 +471,7 @@
 static void RefCreateSoftmaxWorkloadTest()
 {
     Graph graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);
 
     // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
@@ -492,7 +500,7 @@
 static void RefCreateSplitterWorkloadTest()
 {
     Graph graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     auto workload = CreateSplitterWorkloadTest<SplitterWorkloadType, DataType>(factory, graph);
 
     // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
@@ -530,7 +538,7 @@
     // of the concat.
 
     Graph graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     auto workloads = CreateSplitterConcatWorkloadTest<SplitterWorkloadType, ConcatWorkloadType, DataType>
             (factory, graph);
 
@@ -570,7 +578,7 @@
     // We created a splitter with two outputs. That each of those outputs is used by two different activation layers.
 
     Graph graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     std::unique_ptr<SplitterWorkloadType> wlSplitter;
     std::unique_ptr<ActivationWorkloadType> wlActiv0_0;
     std::unique_ptr<ActivationWorkloadType> wlActiv0_1;
@@ -617,7 +625,7 @@
 static void RefCreateResizeBilinearTest(DataLayout dataLayout)
 {
     Graph graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);
 
     TensorShape inputShape;
@@ -665,7 +673,7 @@
 static void RefCreateRsqrtTest()
 {
     Graph graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
 
     auto workload = CreateRsqrtWorkloadTest<RsqrtWorkloadType, DataType>(factory, graph);
 
@@ -723,7 +731,7 @@
 static void RefCreateL2NormalizationTest(DataLayout dataLayout)
 {
     Graph graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     auto workload =
             CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
 
@@ -781,7 +789,7 @@
 static void RefCreateReshapeWorkloadTest()
 {
     Graph graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);
 
     // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
@@ -811,7 +819,7 @@
                                         unsigned int concatAxis)
 {
     Graph graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
 
     CheckInputsOutput(std::move(workload),
@@ -869,7 +877,7 @@
 static void RefCreateConstantWorkloadTest(const armnn::TensorShape& outputShape)
 {
     armnn::Graph graph;
-    RefWorkloadFactory factory;
+    RefWorkloadFactory factory = GetFactory();
     auto workload = CreateConstantWorkloadTest<ConstantWorkloadType, DataType>(factory, graph, outputShape);
 
     // Check output is as expected
diff --git a/src/backends/reference/test/RefMemoryManagerTests.cpp b/src/backends/reference/test/RefMemoryManagerTests.cpp
new file mode 100644
index 0000000..15b7c2a
--- /dev/null
+++ b/src/backends/reference/test/RefMemoryManagerTests.cpp
@@ -0,0 +1,51 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <reference/RefMemoryManager.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(RefMemoryManagerTests)
+using namespace armnn;
+using Pool = RefMemoryManager::Pool;
+
+BOOST_AUTO_TEST_CASE(ManageOneThing)
+{
+    RefMemoryManager memoryManager;
+
+    Pool* pool = memoryManager.Manage(10);
+
+    BOOST_CHECK(pool);
+
+    memoryManager.Acquire();
+
+    BOOST_CHECK(memoryManager.GetPointer(pool) != nullptr); // Yields a valid pointer
+
+    memoryManager.Release();
+}
+
+BOOST_AUTO_TEST_CASE(ManageTwoThings)
+{
+    RefMemoryManager memoryManager;
+
+    Pool* pool1 = memoryManager.Manage(10);
+    Pool* pool2 = memoryManager.Manage(5);
+
+    BOOST_CHECK(pool1);
+    BOOST_CHECK(pool2);
+
+    memoryManager.Acquire();
+
+    void *p1 = memoryManager.GetPointer(pool1);
+    void *p2 = memoryManager.GetPointer(pool2);
+
+    BOOST_CHECK(p1);
+    BOOST_CHECK(p2);
+    BOOST_CHECK(p1 != p2);
+
+    memoryManager.Release();
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/reference/test/RefTensorHandleTests.cpp b/src/backends/reference/test/RefTensorHandleTests.cpp
new file mode 100644
index 0000000..accf900
--- /dev/null
+++ b/src/backends/reference/test/RefTensorHandleTests.cpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include <reference/RefTensorHandle.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(RefTensorHandleTests)
+using namespace armnn;
+
+BOOST_AUTO_TEST_CASE(AcquireAndRelease)
+{
+    std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
+
+    TensorInfo info({1,1,1,1}, DataType::Float32);
+    RefTensorHandle handle(info, memoryManager);
+
+    handle.Manage();
+    handle.Allocate();
+
+    memoryManager->Acquire();
+    {
+        float *buffer = reinterpret_cast<float *>(handle.Map());
+
+        BOOST_CHECK(buffer != nullptr); // Yields a valid pointer
+
+        buffer[0] = 2.5f;
+
+        BOOST_CHECK(buffer[0] == 2.5f); // Memory is writable and readable
+
+    }
+    memoryManager->Release();
+
+    memoryManager->Acquire();
+    {
+        float *buffer = reinterpret_cast<float *>(handle.Map());
+
+        BOOST_CHECK(buffer != nullptr); // Yields a valid pointer
+
+        buffer[0] = 3.5f;
+
+        BOOST_CHECK(buffer[0] == 3.5f); // Memory is writable and readable
+    }
+    memoryManager->Release();
+}
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefDebugWorkload.cpp b/src/backends/reference/workloads/RefDebugWorkload.cpp
index 64b1b1c..be2d82f 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.cpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.cpp
@@ -36,7 +36,6 @@
     }
 
     std::memcpy(outputData, inputData, inputInfo.GetNumElements()*sizeof(T));
-
 }
 
 template<armnn::DataType DataType>