COMPMID-485: Memory Manager

Change-Id: Ib421b7622838f050038cd81e7426bb1413a7d6e6
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/87376
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
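
This introduces an on-demand memory manager composed of a lifetime
manager (which tracks when intermediate tensors are alive), memory
pools (the backing allocations) and per-function memory groups. A
minimal sketch of how the pieces compose on the CL side, assuming a CL
context already initialised through CLScheduler and tensors whose
infos are already set up:

    void softmax_with_memory_manager(CLTensor &input, CLTensor &output)
    {
        auto lifetime_mgr = std::make_shared<BlobLifetimeManager>();
        auto pool_mgr     = std::make_shared<PoolManager>();
        auto mm           = std::make_shared<MemoryManagerOnDemand>(lifetime_mgr, pool_mgr);

        // Functions constructed with the same manager share its pools
        CLSoftmaxLayer softmax(mm);
        softmax.configure(&input, &output);

        // Once every function is configured, back the pools with real memory
        CLBufferAllocator allocator(CLScheduler::get().context());
        mm->set_allocator(&allocator);
        mm->set_num_pools(1); // one pool per concurrent execution
        mm->finalize();

        softmax.run(); // acquires a pool, enqueues kernels, releases the pool
    }
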
diff --git a/src/core/CL/OpenCL.cpp b/src/core/CL/OpenCL.cpp
index 085e186..0f44ad9 100644
--- a/src/core/CL/OpenCL.cpp
+++ b/src/core/CL/OpenCL.cpp
@@ -95,6 +95,7 @@
     clReleaseContext          = reinterpret_cast<clReleaseContext_func>(dlsym(handle, "clReleaseContext"));
     clRetainCommandQueue      = reinterpret_cast<clRetainCommandQueue_func>(dlsym(handle, "clRetainCommandQueue"));
     clEnqueueUnmapMemObject   = reinterpret_cast<clEnqueueUnmapMemObject_func>(dlsym(handle, "clEnqueueUnmapMemObject"));
+    clRetainMemObject         = reinterpret_cast<clRetainMemObject_func>(dlsym(handle, "clRetainMemObject"));
     clReleaseMemObject        = reinterpret_cast<clReleaseMemObject_func>(dlsym(handle, "clReleaseMemObject"));
     clGetDeviceInfo           = reinterpret_cast<clGetDeviceInfo_func>(dlsym(handle, "clGetDeviceInfo"));
     clGetDeviceIDs            = reinterpret_cast<clGetDeviceIDs_func>(dlsym(handle, "clGetDeviceIDs"));
@@ -175,6 +176,20 @@
     }
 }
 
+cl_int clRetainMemObject(cl_mem memobj)
+{
+    arm_compute::CLSymbols::get().load_default();
+    auto func = arm_compute::CLSymbols::get().clRetainMemObject;
+    if(func != nullptr)
+    {
+        return func(memobj);
+    }
+    else
+    {
+        return CL_OUT_OF_RESOURCES;
+    }
+}
+
 cl_int clReleaseMemObject(cl_mem memobj)
 {
     arm_compute::CLSymbols::get().load_default();
diff --git a/src/runtime/Allocator.cpp b/src/runtime/Allocator.cpp
new file mode 100644
index 0000000..50b0f0e
--- /dev/null
+++ b/src/runtime/Allocator.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/Allocator.h"
+
+#include "arm_compute/core/Error.h"
+
+#include <cstddef>
+
+using namespace arm_compute;
+
+void *Allocator::allocate(size_t size, size_t alignment)
+{
+    ARM_COMPUTE_UNUSED(alignment);
+    return ::operator new(size);
+}
+
+void Allocator::free(void *ptr)
+{
+    ::operator delete(ptr);
+}
diff --git a/src/runtime/BlobLifetimeManager.cpp b/src/runtime/BlobLifetimeManager.cpp
new file mode 100644
index 0000000..c60d8c1
--- /dev/null
+++ b/src/runtime/BlobLifetimeManager.cpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/BlobLifetimeManager.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/runtime/BlobMemoryPool.h"
+#include "arm_compute/runtime/IAllocator.h"
+#include "arm_compute/runtime/IMemoryGroup.h"
+#include "support/ToolchainSupport.h"
+
+#include <algorithm>
+#include <cmath>
+#include <map>
+#include <vector>
+
+using namespace arm_compute;
+
+BlobLifetimeManager::BlobLifetimeManager()
+    : _active_group(nullptr), _active_elements(), _finalized_groups(), _blobs()
+{
+}
+
+void BlobLifetimeManager::register_group(IMemoryGroup *group)
+{
+    if(_active_group == nullptr)
+    {
+        ARM_COMPUTE_ERROR_ON(group == nullptr);
+        _active_group = group;
+    }
+}
+
+void BlobLifetimeManager::start_lifetime(void *obj)
+{
+    ARM_COMPUTE_ERROR_ON(obj == nullptr);
+    ARM_COMPUTE_ERROR_ON_MSG(std::find_if(std::begin(_active_elements), std::end(_active_elements), [&obj](const Element & e)
+    {
+        return obj == e.id;
+    }) != std::end(_active_elements),
+    "Memory object is already registered!");
+
+    // Insert object into the active group, marked as not yet finalized
+    _active_elements.emplace_back(obj);
+}
+
+void BlobLifetimeManager::end_lifetime(void *obj, void **handle, size_t size)
+{
+    ARM_COMPUTE_ERROR_ON(obj == nullptr);
+
+    // Find object
+    auto it = std::find_if(std::begin(_active_elements), std::end(_active_elements), [&obj](const Element & e)
+    {
+        return obj == e.id;
+    });
+    ARM_COMPUTE_ERROR_ON(it == std::end(_active_elements));
+
+    // Update object fields and mark object as complete
+    it->handle = handle;
+    it->size   = size;
+    it->status = true;
+
+    // Check if all objects are finalized and, if so, reset the active group
+    if(are_all_finalized())
+    {
+        // Update finalized groups
+        _finalized_groups[_active_group].insert(std::end(_finalized_groups[_active_group]), std::begin(_active_elements), std::end(_active_elements));
+
+        // Update blobs and group mappings
+        update_blobs_and_mappings();
+
+        // Reset state
+        _active_elements.clear();
+        _active_group = nullptr;
+    }
+}
+
+std::unique_ptr<IMemoryPool> BlobLifetimeManager::create_pool(IAllocator *allocator)
+{
+    ARM_COMPUTE_ERROR_ON(allocator == nullptr);
+    return support::cpp14::make_unique<BlobMemoryPool>(allocator, _blobs);
+}
+
+bool BlobLifetimeManager::are_all_finalized() const
+{
+    return !std::any_of(std::begin(_active_elements), std::end(_active_elements), [](const Element & e)
+    {
+        return !e.status;
+    });
+}
+
+MappingType BlobLifetimeManager::mapping_type() const
+{
+    return MappingType::BLOBS;
+}
+
+void BlobLifetimeManager::update_blobs_and_mappings()
+{
+    ARM_COMPUTE_ERROR_ON(!are_all_finalized());
+    ARM_COMPUTE_ERROR_ON(_active_group == nullptr);
+
+    // Sort finalized group requirements in descending order
+    auto group = _finalized_groups[_active_group];
+    std::sort(std::begin(group), std::end(group), [](const Element & a, const Element & b)
+    {
+        return a.size > b.size;
+    });
+    std::vector<size_t> group_sizes;
+    std::transform(std::begin(group), std::end(group), std::back_inserter(group_sizes), [](const Element & e)
+    {
+        return e.size;
+    });
+
+    // Update blob sizes
+    size_t max_size = std::max(_blobs.size(), group_sizes.size());
+    _blobs.resize(max_size, 0);
+    group_sizes.resize(max_size, 0);
+    std::transform(std::begin(_blobs), std::end(_blobs), std::begin(group_sizes), std::begin(_blobs), [](size_t lhs, size_t rhs)
+    {
+        return std::max(lhs, rhs);
+    });
+
+    // Calculate group mappings
+    auto &group_mappings = _active_group->mappings();
+    int   blob_idx       = 0;
+    for(auto &e : group)
+    {
+        group_mappings[e.handle] = blob_idx++;
+    }
+}
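
The blob sizes above are the element-wise maxima, over all finalized
groups, of each group's requirements sorted in descending order: blob i
must fit the i-th largest tensor of whichever group currently holds the
pool. A worked fragment of the rule in update_blobs_and_mappings(),
with hypothetical sizes:

    const std::vector<std::vector<size_t>> groups = { { 50, 100 }, { 30, 80, 70 } };
    std::vector<size_t> blobs; // running blob sizes
    for(std::vector<size_t> group : groups) // copy: sorting must not alter the source
    {
        std::sort(group.rbegin(), group.rend()); // descending: {100, 50} and {80, 70, 30}
        blobs.resize(std::max(blobs.size(), group.size()), 0);
        group.resize(blobs.size(), 0);
        std::transform(blobs.begin(), blobs.end(), group.begin(), blobs.begin(),
                       [](size_t lhs, size_t rhs) { return std::max(lhs, rhs); });
    }
    // blobs == { 100, 70, 30 }
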
diff --git a/src/runtime/BlobMemoryPool.cpp b/src/runtime/BlobMemoryPool.cpp
new file mode 100644
index 0000000..6571c75
--- /dev/null
+++ b/src/runtime/BlobMemoryPool.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/BlobMemoryPool.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/runtime/IMemoryPool.h"
+#include "arm_compute/runtime/Types.h"
+#include "support/ToolchainSupport.h"
+
+#include <vector>
+
+using namespace arm_compute;
+
+BlobMemoryPool::BlobMemoryPool(IAllocator *allocator, std::vector<size_t> blob_sizes)
+    : _allocator(allocator), _blobs(), _blob_sizes(std::move(blob_sizes))
+{
+    ARM_COMPUTE_ERROR_ON(!allocator);
+    allocate_blobs(_blob_sizes);
+}
+
+BlobMemoryPool::~BlobMemoryPool()
+{
+    ARM_COMPUTE_ERROR_ON(!_allocator);
+    free_blobs();
+}
+
+void BlobMemoryPool::acquire(MemoryMappings &handles)
+{
+    ARM_COMPUTE_ERROR_ON(handles.size() > _blobs.size());
+
+    // Assign each handle the memory of its mapped blob
+    for(auto &handle : handles)
+    {
+        ARM_COMPUTE_ERROR_ON(handle.first == nullptr);
+        *handle.first = _blobs[handle.second];
+    }
+}
+
+void BlobMemoryPool::release(MemoryMappings &handles)
+{
+    for(auto &handle : handles)
+    {
+        ARM_COMPUTE_ERROR_ON(handle.first == nullptr);
+        *handle.first = nullptr;
+    }
+}
+
+MappingType BlobMemoryPool::mapping_type() const
+{
+    return MappingType::BLOBS;
+}
+
+std::unique_ptr<IMemoryPool> BlobMemoryPool::duplicate()
+{
+    ARM_COMPUTE_ERROR_ON(!_allocator);
+    return support::cpp14::make_unique<BlobMemoryPool>(_allocator, _blob_sizes);
+}
+
+void BlobMemoryPool::allocate_blobs(const std::vector<size_t> &sizes)
+{
+    ARM_COMPUTE_ERROR_ON(!_allocator);
+
+    for(const auto &size : sizes)
+    {
+        _blobs.push_back(_allocator->allocate(size, 0));
+    }
+}
+
+void BlobMemoryPool::free_blobs()
+{
+    ARM_COMPUTE_ERROR_ON(!_allocator);
+
+    for(auto &blob : _blobs)
+    {
+        _allocator->free(blob);
+    }
+    _blobs.clear();
+}
\ No newline at end of file
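
At run time the pool is distributed through the group's mappings: each
entry pairs a handle (the address of a tensor's buffer pointer) with a
blob index, so acquire() patches the tensor buffers and release() nulls
them again. A sketch of the round trip, assuming MemoryMappings
associates void** handles with blob indices as the loops above imply:

    void *buffer = nullptr;  // a tensor's backing pointer
    MemoryMappings mappings;
    mappings[&buffer] = 0;   // this tensor uses blob 0

    pool.acquire(mappings);  // buffer now points at blob 0
    // ... run the kernels that use the tensor ...
    pool.release(mappings);  // buffer is nullptr again
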
diff --git a/src/runtime/CL/CLBufferAllocator.cpp b/src/runtime/CL/CLBufferAllocator.cpp
new file mode 100644
index 0000000..9a5c13a
--- /dev/null
+++ b/src/runtime/CL/CLBufferAllocator.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CL/CLBufferAllocator.h"
+
+#include "arm_compute/core/CL/OpenCL.h"
+#include "arm_compute/core/Error.h"
+
+#include <cstddef>
+
+using namespace arm_compute;
+
+CLBufferAllocator::CLBufferAllocator(cl::Context context)
+    : _context(std::move(context))
+{
+}
+
+void *CLBufferAllocator::allocate(size_t size, size_t alignment)
+{
+    ARM_COMPUTE_UNUSED(alignment);
+    cl_mem buf = clCreateBuffer(_context.get(), CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size, nullptr, nullptr);
+    return static_cast<void *>(buf);
+}
+
+void CLBufferAllocator::free(void *ptr)
+{
+    ARM_COMPUTE_ERROR_ON(ptr == nullptr);
+    clReleaseMemObject(static_cast<cl_mem>(ptr));
+}
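
Note that the CL allocator returns the cl_mem handle itself cast to
void *, not a mapped host pointer, so the value a pool writes into a
managed CL tensor's handle is directly usable as its cl::Buffer. In
isolation (size illustrative):

    CLBufferAllocator alloc(CLScheduler::get().context());
    void *buf = alloc.allocate(1024, 0); // a cl_mem in disguise
    // ... hand buf to a cl::Buffer or kernel argument ...
    alloc.free(buf);                     // clReleaseMemObject under the hood
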
diff --git a/src/runtime/CL/CLTensor.cpp b/src/runtime/CL/CLTensor.cpp
index eefa033..bc513d1 100644
--- a/src/runtime/CL/CLTensor.cpp
+++ b/src/runtime/CL/CLTensor.cpp
@@ -28,7 +28,7 @@
 using namespace arm_compute;
 
 CLTensor::CLTensor()
-    : _allocator()
+    : _allocator(this)
 {
 }
 
diff --git a/src/runtime/CL/CLTensorAllocator.cpp b/src/runtime/CL/CLTensorAllocator.cpp
index 8112a71..ad165fa 100644
--- a/src/runtime/CL/CLTensorAllocator.cpp
+++ b/src/runtime/CL/CLTensorAllocator.cpp
@@ -25,15 +25,21 @@
 
 #include "arm_compute/core/Error.h"
 #include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/runtime/CL/CLMemoryGroup.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
 
 using namespace arm_compute;
 
-CLTensorAllocator::CLTensorAllocator()
-    : _buffer(), _mapping(nullptr)
+CLTensorAllocator::CLTensorAllocator(CLTensor *owner)
+    : _associated_memory_group(nullptr), _buffer(), _mapping(nullptr), _owner(owner)
 {
 }
 
+CLTensorAllocator::~CLTensorAllocator()
+{
+    _buffer = cl::Buffer();
+}
+
 uint8_t *CLTensorAllocator::data()
 {
     return _mapping;
@@ -47,17 +53,32 @@
 void CLTensorAllocator::allocate()
 {
     ARM_COMPUTE_ERROR_ON(_buffer.get() != nullptr);
-
-    _buffer = cl::Buffer(CLScheduler::get().context(), CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, info().total_size());
+    if(_associated_memory_group == nullptr)
+    {
+        _buffer = cl::Buffer(CLScheduler::get().context(), CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, info().total_size());
+    }
+    else
+    {
+        _associated_memory_group->finalize_memory(_owner, reinterpret_cast<void **>(&_buffer()), info().total_size());
+    }
     info().set_is_resizable(false);
 }
 
 void CLTensorAllocator::free()
 {
-    ARM_COMPUTE_ERROR_ON(_buffer.get() == nullptr);
+    if(_associated_memory_group == nullptr)
+    {
+        _buffer = cl::Buffer();
+        info().set_is_resizable(true);
+    }
+}
 
-    _buffer = cl::Buffer();
-    info().set_is_resizable(true);
+void CLTensorAllocator::set_associated_memory_group(CLMemoryGroup *associated_memory_group)
+{
+    ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
+    ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr);
+    ARM_COMPUTE_ERROR_ON(_buffer.get() != nullptr);
+    _associated_memory_group = associated_memory_group;
 }
 
 uint8_t *CLTensorAllocator::lock()
diff --git a/src/runtime/CL/functions/CLConvolutionLayer.cpp b/src/runtime/CL/functions/CLConvolutionLayer.cpp
index 0bbec94..4b1bfd8 100644
--- a/src/runtime/CL/functions/CLConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLConvolutionLayer.cpp
@@ -30,12 +30,13 @@
 #include "arm_compute/runtime/CL/CLScheduler.h"
 
 #include <cmath>
+#include <memory>
 #include <tuple>
 
 using namespace arm_compute;
 
-CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights()
-    : _weights_reshape_kernel(), _weights_transposed_kernel(), _weights_reshaped(), _transpose1xW(false)
+CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights(std::shared_ptr<IMemoryManager> memory_manager)
+    : _memory_group(std::move(memory_manager)), _weights_reshape_kernel(), _weights_transposed_kernel(), _weights_reshaped(), _transpose1xW(false)
 {
 }
 
@@ -68,6 +69,7 @@
         TensorInfo         info_wr(shape_wr, 1, dt, fixed_point_position);
 
         _weights_reshaped.allocator()->init(info_wr);
+        _memory_group.manage(&_weights_reshaped);
         _weights_reshape_kernel.configure(weights, biases, &_weights_reshaped);
         _weights_transposed_kernel.configure(&_weights_reshaped, output);
         _weights_reshaped.allocator()->allocate();
@@ -80,17 +82,21 @@
 
 void CLConvolutionLayerReshapeWeights::run()
 {
+    _memory_group.acquire();
+
     cl::CommandQueue q = CLScheduler::get().queue();
     CLScheduler::get().enqueue(_weights_reshape_kernel);
     if(_transpose1xW)
     {
         CLScheduler::get().enqueue(_weights_transposed_kernel);
     }
+
+    _memory_group.release();
 }
 
-CLConvolutionLayer::CLConvolutionLayer()
-    : _reshape_weights(), _input_im2col_kernel(), _input_interleave_kernel(), _mm_kernel(), _output_col2im_kernel(), _input_im2col_reshaped(), _input_interleaved_reshaped(), _weights_reshaped(),
-      _weights_transposed(), _gemm_output(), _has_bias(false), _is_fully_connected_convolution(false), _are_weights_reshaped(false)
+CLConvolutionLayer::CLConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
+    : _memory_group(std::move(memory_manager)), _reshape_weights(), _input_im2col_kernel(), _input_interleave_kernel(), _mm_kernel(), _output_col2im_kernel(), _input_im2col_reshaped(),
+      _input_interleaved_reshaped(), _weights_reshaped(), _weights_transposed(), _gemm_output(), _has_bias(false), _is_fully_connected_convolution(false), _are_weights_reshaped(false)
 {
 }
 
@@ -179,6 +185,7 @@
     shape_im2col.set(1, mat_input_rows);
     shape_im2col.set(2, 1);
     _input_im2col_reshaped.allocator()->init(TensorInfo(shape_im2col, 1, dt, fixed_point_position));
+    _memory_group.manage(&_input_im2col_reshaped);
 
     // Create tensor (interleave) to prepare input tensor for GEMM
     if(!_is_fully_connected_convolution)
@@ -187,6 +194,7 @@
         shape_interleaved.set(0, shape_interleaved.x() * 4);
         shape_interleaved.set(1, std::ceil(shape_interleaved.y() / 4.f));
         _input_interleaved_reshaped.allocator()->init(TensorInfo(shape_interleaved, 1, dt, fixed_point_position));
+        _memory_group.manage(&_input_interleaved_reshaped);
     }
 
     // Create GEMM output tensor
@@ -194,6 +202,7 @@
     shape_gemm.set(0, mat_weights_cols);
     shape_gemm.set(1, mat_input_rows);
     _gemm_output.allocator()->init(TensorInfo(shape_gemm, 1, dt, fixed_point_position));
+    _memory_group.manage(&_gemm_output);
 
     // Configure kernels
     _input_im2col_kernel.configure(input, &_input_im2col_reshaped, Size2D(kernel_width, kernel_height), conv_info, _has_bias);
@@ -208,8 +217,11 @@
     {
         _input_interleave_kernel.configure(&_input_im2col_reshaped, &_input_interleaved_reshaped);
         _mm_kernel.configure(&_input_interleaved_reshaped, weights, &_gemm_output, 1.0f);
+        _input_interleaved_reshaped.allocator()->allocate();
     }
+    _input_im2col_reshaped.allocator()->allocate();
     _output_col2im_kernel.configure(&_gemm_output, output, std::make_pair(conv_w, conv_h));
+    _gemm_output.allocator()->allocate();
 
     ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(0) != conv_w) || (output->info()->dimension(1) != conv_h), "Output shape does not match the expected one");
 
@@ -218,12 +230,6 @@
     {
         _weights_reshaped.allocator()->allocate();
     }
-    _input_im2col_reshaped.allocator()->allocate();
-    if(!_is_fully_connected_convolution)
-    {
-        _input_interleaved_reshaped.allocator()->allocate();
-    }
-    _gemm_output.allocator()->allocate();
 }
 
 void CLConvolutionLayer::run()
@@ -235,6 +241,8 @@
         _reshape_weights.run();
     }
 
+    _memory_group.acquire();
+
     // Run input reshaping
     CLScheduler::get().enqueue(_input_im2col_kernel);
     if(!_is_fully_connected_convolution)
@@ -247,4 +255,6 @@
 
     // Reshape output matrix
     CLScheduler::get().enqueue(_output_col2im_kernel, false);
+
+    _memory_group.release();
 }
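
The pattern applied to every function in this patch: init() the
intermediate tensor, manage() it before configuring its first consumer
(starting its lifetime), and allocate() it only after the last kernel
reading it has been configured (ending the lifetime once the size is
final). Distilled, with placeholder tensor and kernel names:

    _tmp.allocator()->init(TensorInfo(shape, 1, dt));
    _memory_group.manage(&_tmp);      // lifetime starts: _tmp is an intermediate
    _producer_kernel.configure(input, &_tmp);
    _consumer_kernel.configure(&_tmp, output);
    _tmp.allocator()->allocate();     // lifetime ends: blob requirements recorded
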
diff --git a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
index f7cea55..ee1558f 100644
--- a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
@@ -39,9 +39,9 @@
     _kernel = std::move(k);
 }
 
-CLFullyConnectedLayer::CLFullyConnectedLayer()
-    : _im2col_kernel(), _reshape_weights_kernel(), _mm_kernel(), _accumulate_biases_kernel(), _im2col_output(), _reshape_weights_output(), _are_weights_reshaped(true), _is_fc_after_conv(true),
-      _accumulate_biases(false)
+CLFullyConnectedLayer::CLFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager)
+    : _memory_group(std::move(memory_manager)), _im2col_kernel(), _reshape_weights_kernel(), _mm_kernel(), _accumulate_biases_kernel(), _im2col_output(), _reshape_weights_output(),
+      _are_weights_reshaped(true), _is_fc_after_conv(true), _accumulate_biases(false)
 {
 }
 
@@ -63,6 +63,7 @@
     _im2col_output.allocator()->init(TensorInfo(shape_im2col, 1, dt, fixed_point_position));
 
     // Configure im2col kernel
+    _memory_group.manage(&_im2col_output);
     _im2col_kernel.configure(input, &_im2col_output, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false);
 
     // Configure matrix multiply kernel
@@ -158,6 +159,8 @@
         _reshape_weights_kernel.run();
     }
 
+    _memory_group.acquire();
+
     // Linearize input if it comes from a convolutional layer
     if(_is_fc_after_conv)
     {
@@ -172,4 +175,6 @@
     {
         CLScheduler::get().enqueue(_accumulate_biases_kernel);
     }
+
+    _memory_group.release();
 }
diff --git a/src/runtime/CL/functions/CLSoftmaxLayer.cpp b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
index 850eb2c..7505a2c 100644
--- a/src/runtime/CL/functions/CLSoftmaxLayer.cpp
+++ b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
@@ -25,12 +25,13 @@
 
 #include "arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h"
 #include "arm_compute/core/Helpers.h"
+#include "arm_compute/runtime/CL/CLMemoryGroup.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
 
 using namespace arm_compute;
 
-CLSoftmaxLayer::CLSoftmaxLayer()
-    : _max_kernel(), _shift_exp_sum_kernel(), _norm_kernel(), _max(), _sum(), _tmp()
+CLSoftmaxLayer::CLSoftmaxLayer(std::shared_ptr<IMemoryManager> memory_manager)
+    : _memory_group(std::move(memory_manager)), _max_kernel(), _shift_exp_sum_kernel(), _norm_kernel(), _max(), _sum(), _tmp()
 {
 }
 
@@ -47,6 +48,11 @@
     _max.allocator()->init(tensor_info_max_sum);
     _sum.allocator()->init(tensor_info_max_sum);
 
+    // Manage intermediate buffers
+    _memory_group.manage(&_tmp);
+    _memory_group.manage(&_max);
+    _memory_group.manage(&_sum);
+
     // Configure Kernels
     _max_kernel.configure(input, &_max);
     _shift_exp_sum_kernel.configure(input, &_max, &_tmp, &_sum);
@@ -60,7 +66,11 @@
 
 void CLSoftmaxLayer::run()
 {
+    _memory_group.acquire();
+
     CLScheduler::get().enqueue(_max_kernel, false);
     CLScheduler::get().enqueue(_shift_exp_sum_kernel, false);
     CLScheduler::get().enqueue(_norm_kernel);
+
+    _memory_group.release();
 }
diff --git a/src/runtime/MemoryManagerOnDemand.cpp b/src/runtime/MemoryManagerOnDemand.cpp
new file mode 100644
index 0000000..4dfa28b
--- /dev/null
+++ b/src/runtime/MemoryManagerOnDemand.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2016, 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/MemoryManagerOnDemand.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/runtime/ILifetimeManager.h"
+#include "arm_compute/runtime/IPoolManager.h"
+
+#include <memory>
+
+using namespace arm_compute;
+
+MemoryManagerOnDemand::MemoryManagerOnDemand(std::shared_ptr<ILifetimeManager> lifetime_manager, std::shared_ptr<IPoolManager> pool_manager)
+    : _lifetime_mgr(std::move(lifetime_manager)), _pool_mgr(std::move(pool_manager)), _allocator(nullptr), _is_finalized(false), _num_pools(1)
+{
+    ARM_COMPUTE_ERROR_ON_MSG(!_lifetime_mgr, "Lifetime manager not specified correctly!");
+    ARM_COMPUTE_ERROR_ON_MSG(!_pool_mgr, "Pool manager not specified correctly!");
+}
+
+bool MemoryManagerOnDemand::is_finalized() const
+{
+    return _is_finalized;
+}
+
+void MemoryManagerOnDemand::set_num_pools(unsigned int num_pools)
+{
+    ARM_COMPUTE_ERROR_ON(num_pools == 0);
+    _num_pools = num_pools;
+}
+
+void MemoryManagerOnDemand::set_allocator(IAllocator *allocator)
+{
+    ARM_COMPUTE_ERROR_ON_MSG(is_finalized(), "Memory manager is already finalized!");
+    ARM_COMPUTE_ERROR_ON(allocator == nullptr);
+    _allocator = allocator;
+}
+
+ILifetimeManager *MemoryManagerOnDemand::lifetime_manager()
+{
+    return _lifetime_mgr.get();
+}
+
+IPoolManager *MemoryManagerOnDemand::pool_manager()
+{
+    return _pool_mgr.get();
+}
+
+void MemoryManagerOnDemand::finalize()
+{
+    ARM_COMPUTE_ERROR_ON_MSG(is_finalized(), "Memory manager is already finalized!");
+    ARM_COMPUTE_ERROR_ON(!_lifetime_mgr);
+    ARM_COMPUTE_ERROR_ON(!_pool_mgr);
+    ARM_COMPUTE_ERROR_ON_MSG(!_lifetime_mgr->are_all_finalized(), "Not all objects have been finalized!");
+    ARM_COMPUTE_ERROR_ON(_allocator == nullptr);
+
+    // Create pools
+    auto pool_template = _lifetime_mgr->create_pool(_allocator);
+    for(unsigned int i = _num_pools; i > 1; --i)
+    {
+        auto pool = pool_template->duplicate();
+        _pool_mgr->register_pool(std::move(pool));
+    }
+    _pool_mgr->register_pool(std::move(pool_template));
+
+    // Set finalized to true
+    _is_finalized = true;
+}
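
finalize() builds one pool from the lifetime manager's blob
requirements and duplicates it num_pools - 1 times, so set_num_pools()
bounds how many executions may hold memory at once. For example, two
threads running independent pipelines concurrently need two pools; a
fragment with hypothetical pipeline objects:

    mm->set_allocator(&allocator);
    mm->set_num_pools(2); // allow two concurrent run() calls
    mm->finalize();

    std::thread t1([&] { pipeline_a.run(); }); // each run() locks one pool
    std::thread t2([&] { pipeline_b.run(); });
    t1.join();
    t2.join();
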
diff --git a/src/runtime/NEON/functions/NEConvolutionLayer.cpp b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
index 1c87f60..0466a4a 100644
--- a/src/runtime/NEON/functions/NEConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
@@ -34,8 +34,8 @@
 
 using namespace arm_compute;
 
-NEConvolutionLayerReshapeWeights::NEConvolutionLayerReshapeWeights()
-    : _weights_reshape_kernel(), _weights_transposed_kernel(), _weights_reshaped(), _transpose1xW(false)
+NEConvolutionLayerReshapeWeights::NEConvolutionLayerReshapeWeights(std::shared_ptr<IMemoryManager> memory_manager)
+    : _memory_group(std::move(memory_manager)), _weights_reshape_kernel(), _weights_transposed_kernel(), _weights_reshaped(), _transpose1xW(false)
 {
 }
 
@@ -68,6 +68,7 @@
         TensorInfo         info_wr(shape_wr, 1, weights->info()->data_type(), weights->info()->fixed_point_position());
 
         _weights_reshaped.allocator()->init(info_wr);
+        _memory_group.manage(&_weights_reshaped);
         _weights_reshape_kernel.configure(weights, biases, &_weights_reshaped);
         _weights_transposed_kernel.configure(&_weights_reshaped, output);
         _weights_reshaped.allocator()->allocate();
@@ -80,16 +81,20 @@
 
 void NEConvolutionLayerReshapeWeights::run()
 {
+    _memory_group.acquire();
+
     NEScheduler::get().schedule(&_weights_reshape_kernel, 3);
     if(_transpose1xW)
     {
         NEScheduler::get().schedule(&_weights_transposed_kernel, Window::DimY);
     }
+
+    _memory_group.release();
 }
 
-NEConvolutionLayer::NEConvolutionLayer()
-    : _input_im2col_kernel(), _input_interleave_kernel(), _reshape_weights(), _mm_kernel(), _output_col2im_kernel(), _input_im2col_reshaped(), _input_interleaved_reshaped(), _weights_reshaped(),
-      _gemm_output(), _has_bias(false), _is_fully_connected_convolution(false), _are_weights_reshaped(false)
+NEConvolutionLayer::NEConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
+    : _memory_group(std::move(memory_manager)), _input_im2col_kernel(), _input_interleave_kernel(), _reshape_weights(), _mm_kernel(), _output_col2im_kernel(), _input_im2col_reshaped(),
+      _input_interleaved_reshaped(), _weights_reshaped(), _gemm_output(), _has_bias(false), _is_fully_connected_convolution(false), _are_weights_reshaped(false)
 {
 }
 
@@ -175,6 +180,7 @@
     shape_im2col.set(1, mat_input_rows);
     shape_im2col.set(2, 1);
     _input_im2col_reshaped.allocator()->init(TensorInfo(shape_im2col, 1, dt, fixed_point_position));
+    _memory_group.manage(&_input_im2col_reshaped);
 
     // Create tensor (interleave) to prepare input tensor for GEMM
     if(!_is_fully_connected_convolution)
@@ -183,6 +189,7 @@
         shape_interleaved.set(0, shape_interleaved.x() * 4);
         shape_interleaved.set(1, std::ceil(shape_interleaved.y() / 4.f));
         _input_interleaved_reshaped.allocator()->init(TensorInfo(shape_interleaved, 1, dt, fixed_point_position));
+        _memory_group.manage(&_input_interleaved_reshaped);
     }
 
     // Create GEMM output tensor
@@ -190,6 +197,7 @@
     shape_gemm.set(0, mat_weights_cols);
     shape_gemm.set(1, mat_input_rows);
     _gemm_output.allocator()->init(TensorInfo(shape_gemm, 1, dt, fixed_point_position));
+    _memory_group.manage(&_gemm_output);
 
     // Configure kernels
     _input_im2col_kernel.configure(input, &_input_im2col_reshaped, Size2D(kernel_width, kernel_height), conv_info, _has_bias);
@@ -201,8 +209,11 @@
     {
         _input_interleave_kernel.configure(&_input_im2col_reshaped, &_input_interleaved_reshaped);
         _mm_kernel.configure(&_input_interleaved_reshaped, weights, &_gemm_output, 1.0f);
+        _input_interleaved_reshaped.allocator()->allocate();
     }
+    _input_im2col_reshaped.allocator()->allocate();
     _output_col2im_kernel.configure(&_gemm_output, output, std::make_pair(conv_w, conv_h));
+    _gemm_output.allocator()->allocate();
 
     ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(0) != conv_w) || (output->info()->dimension(1) != conv_h), "Output shape does not match the expected one");
 
@@ -211,12 +222,6 @@
     {
         _weights_reshaped.allocator()->allocate();
     }
-    _input_im2col_reshaped.allocator()->allocate();
-    if(!_is_fully_connected_convolution)
-    {
-        _input_interleaved_reshaped.allocator()->allocate();
-    }
-    _gemm_output.allocator()->allocate();
 }
 
 void NEConvolutionLayer::run()
@@ -228,6 +233,8 @@
         _reshape_weights.run();
     }
 
+    _memory_group.acquire();
+
     // Run input reshaping
     NEScheduler::get().schedule(&_input_im2col_kernel, Window::DimY);
     if(!_is_fully_connected_convolution)
@@ -241,4 +248,6 @@
 
     // Reshape output matrix
     NEScheduler::get().schedule(&_output_col2im_kernel, Window::DimY);
+
+    _memory_group.release();
 }
diff --git a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
index 39983bf..2e8d105 100644
--- a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
+++ b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
@@ -32,8 +32,8 @@
 
 namespace arm_compute
 {
-NEFullyConnectedLayerReshapeWeights::NEFullyConnectedLayerReshapeWeights()
-    : _transpose_kernel(), _transpose1xW_kernel(), _transpose_output(), _transpose_weights(false), _is_batched_fc_layer(false)
+NEFullyConnectedLayerReshapeWeights::NEFullyConnectedLayerReshapeWeights(std::shared_ptr<IMemoryManager> memory_manager)
+    : _memory_group(std::move(memory_manager)), _transpose_kernel(), _transpose1xW_kernel(), _transpose_output(), _transpose_weights(false), _is_batched_fc_layer(false)
 {
 }
 
@@ -58,6 +58,7 @@
             // Initialize the output tensor for transpose
             TensorShape shape_transposed(input->info()->dimension(1), input->info()->dimension(0));
             _transpose_output.allocator()->init(TensorInfo(shape_transposed, 1, data_type, fixed_point_position));
+            _memory_group.manage(&_transpose_output);
             _transpose_kernel.configure(input, &_transpose_output);
 
             // Configure transpose 1xW kernel
@@ -87,6 +88,8 @@
 
 void NEFullyConnectedLayerReshapeWeights::run()
 {
+    _memory_group.acquire();
+
     if(_transpose_weights)
     {
         NEScheduler::get().schedule(&_transpose_kernel, Window::DimY);
@@ -96,11 +99,13 @@
     {
         NEScheduler::get().schedule(&_transpose1xW_kernel, Window::DimY);
     }
+
+    _memory_group.release();
 }
 
-NEFullyConnectedLayer::NEFullyConnectedLayer()
-    : _im2col_kernel(), _reshape_weights_kernel(), _interleave4x4_kernel(), _mm_kernel(), _accumulate_biases_kernel(), _im2col_output(), _interleave4x4_output(), _reshape_weights_output(),
-      _are_weights_reshaped(false), _is_batched_fc_layer(false), _linearize_input(false), _accumulate_biases(false)
+NEFullyConnectedLayer::NEFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager)
+    : _memory_group(std::move(memory_manager)), _im2col_kernel(), _reshape_weights_kernel(), _interleave4x4_kernel(), _mm_kernel(), _accumulate_biases_kernel(), _im2col_output(), _interleave4x4_output(),
+      _reshape_weights_output(), _are_weights_reshaped(false), _is_batched_fc_layer(false), _linearize_input(false), _accumulate_biases(false)
 {
 }
 
@@ -191,6 +196,7 @@
         _im2col_output.allocator()->init(TensorInfo(shape_im2col, 1, data_type, fixed_point_position));
 
         // Configure im2col kernel
+        _memory_group.manage(&_im2col_output);
         _im2col_kernel.configure(input, &_im2col_output, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false);
 
         multiply_input = &_im2col_output;
@@ -204,6 +210,7 @@
         _interleave4x4_output.allocator()->init(TensorInfo(shape_interleaved, 1, data_type, fixed_point_position));
 
         // Configure interleave4x4 kernel
+        _memory_group.manage(&_interleave4x4_output);
         _interleave4x4_kernel.configure(multiply_input, &_interleave4x4_output);
 
         multiply_input = &_interleave4x4_output;
@@ -248,6 +255,8 @@
         _reshape_weights_kernel.run();
     }
 
+    _memory_group.acquire();
+
     // Linearize input if it comes from a convolutional layer
     if(_linearize_input)
     {
@@ -268,5 +277,7 @@
     {
         NEScheduler::get().schedule(&_accumulate_biases_kernel, Window::DimY);
     }
+
+    _memory_group.release();
 }
 } // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NESoftmaxLayer.cpp b/src/runtime/NEON/functions/NESoftmaxLayer.cpp
index 13dfa4a..cc5d4e9 100644
--- a/src/runtime/NEON/functions/NESoftmaxLayer.cpp
+++ b/src/runtime/NEON/functions/NESoftmaxLayer.cpp
@@ -31,8 +31,8 @@
 
 using namespace arm_compute;
 
-NESoftmaxLayer::NESoftmaxLayer()
-    : _max_kernel(), _shift_exp_sum_kernel(), _norm_kernel(), _fill_border_kernel(), _max(), _sum(), _tmp()
+NESoftmaxLayer::NESoftmaxLayer(std::shared_ptr<IMemoryManager> memory_manager)
+    : _memory_group(std::move(memory_manager)), _max_kernel(), _shift_exp_sum_kernel(), _norm_kernel(), _fill_border_kernel(), _max(), _sum(), _tmp()
 {
 }
 
@@ -50,6 +50,11 @@
     _max.allocator()->init(tensor_info_max_sum);
     _sum.allocator()->init(tensor_info_max_sum);
 
+    // Manage intermediate buffers
+    _memory_group.manage(&_tmp);
+    _memory_group.manage(&_max);
+    _memory_group.manage(&_sum);
+
     // Configure Kernels
     _max_kernel.configure(input, &_max);
     _shift_exp_sum_kernel.configure(input, &_max, &_tmp, &_sum);
@@ -64,8 +69,12 @@
 
 void NESoftmaxLayer::run()
 {
+    _memory_group.acquire();
+
     NEScheduler::get().schedule(&_fill_border_kernel, Window::DimY);
     NEScheduler::get().schedule(&_max_kernel, Window::DimY);
     NEScheduler::get().schedule(&_shift_exp_sum_kernel, Window::DimY);
     NEScheduler::get().schedule(&_norm_kernel, Window::DimY);
+
+    _memory_group.release();
 }
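
The NEON functions mirror the CL wiring above; the only backend-specific
piece is the allocator, where the new/delete-backed Allocator stands in
for CLBufferAllocator. Sketch, reusing the manager set-up from the CL
example:

    Allocator cpu_allocator;
    NESoftmaxLayer softmax(mm); // mm wired exactly as in the CL sketch
    softmax.configure(&input, &output);
    mm->set_allocator(&cpu_allocator);
    mm->set_num_pools(1);
    mm->finalize();
    softmax.run();
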
diff --git a/src/runtime/PoolManager.cpp b/src/runtime/PoolManager.cpp
new file mode 100644
index 0000000..42cc943
--- /dev/null
+++ b/src/runtime/PoolManager.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/PoolManager.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/runtime/IMemoryPool.h"
+#include "support/ToolchainSupport.h"
+
+#include <algorithm>
+#include <list>
+
+using namespace arm_compute;
+
+PoolManager::PoolManager()
+    : _free_pools(), _occupied_pools(), _sem(), _mtx()
+{
+}
+
+IMemoryPool *PoolManager::lock_pool()
+{
+    ARM_COMPUTE_ERROR_ON_MSG(_free_pools.empty() && _occupied_pools.empty(), "Haven't setup any pools!");
+
+    _sem->wait();
+    std::lock_guard<arm_compute::Mutex> lock(_mtx);
+    ARM_COMPUTE_ERROR_ON_MSG(_free_pools.empty(), "Empty pool must exist as semaphore has been signalled");
+    _occupied_pools.splice(std::begin(_occupied_pools), _free_pools, std::begin(_free_pools));
+    return _occupied_pools.front().get();
+}
+
+void PoolManager::unlock_pool(IMemoryPool *pool)
+{
+    ARM_COMPUTE_ERROR_ON_MSG(_free_pools.empty() && _occupied_pools.empty(), "Haven't setup any pools!");
+
+    std::lock_guard<arm_compute::Mutex> lock(_mtx);
+    auto it = std::find_if(std::begin(_occupied_pools), std::end(_occupied_pools), [pool](const std::unique_ptr<IMemoryPool> &pool_it)
+    {
+        return pool_it.get() == pool;
+    });
+    ARM_COMPUTE_ERROR_ON_MSG(it == std::end(_occupied_pools), "Pool to be unlocked couldn't be found!");
+    _free_pools.splice(std::begin(_free_pools), _occupied_pools, it);
+    _sem->signal();
+}
+
+void PoolManager::register_pool(std::unique_ptr<IMemoryPool> pool)
+{
+    std::lock_guard<arm_compute::Mutex> lock(_mtx);
+    ARM_COMPUTE_ERROR_ON_MSG(!_occupied_pools.empty(), "All pools should be free in order to register a new one!");
+
+    // Set pool
+    _free_pools.push_front(std::move(pool));
+
+    // Update semaphore
+    _sem = arm_compute::support::cpp14::make_unique<arm_compute::Semaphore>(_free_pools.size());
+}
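
lock_pool() blocks on the semaphore until a pool is free and moves it
to the occupied list; unlock_pool() moves it back and signals. This is
what a memory group does around each run() once enough pools exist;
conceptually:

    IMemoryPool *pool = pool_manager.lock_pool(); // blocks if all pools are in use
    pool->acquire(memory_group.mappings());       // map blobs into the group's tensors
    // ... execute kernels ...
    pool->release(memory_group.mappings());
    pool_manager.unlock_pool(pool);               // wake one waiting thread
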
diff --git a/src/runtime/Tensor.cpp b/src/runtime/Tensor.cpp
index 435068c..a76c37e 100644
--- a/src/runtime/Tensor.cpp
+++ b/src/runtime/Tensor.cpp
@@ -26,7 +26,7 @@
 using namespace arm_compute;
 
 Tensor::Tensor()
-    : _allocator()
+    : _allocator(this)
 {
 }
 
diff --git a/src/runtime/TensorAllocator.cpp b/src/runtime/TensorAllocator.cpp
index 5c719c7..272b9f5 100644
--- a/src/runtime/TensorAllocator.cpp
+++ b/src/runtime/TensorAllocator.cpp
@@ -26,6 +26,7 @@
 #include "arm_compute/core/Coordinates.h"
 #include "arm_compute/core/Error.h"
 #include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/runtime/MemoryGroup.h"
 
 #include <cstddef>
 
@@ -63,11 +64,50 @@
 }
 } // namespace
 
-TensorAllocator::TensorAllocator()
-    : _buffer(nullptr)
+TensorAllocator::TensorAllocator(Tensor *owner)
+    : _associated_memory_group(nullptr), _buffer(nullptr), _owner(owner)
 {
 }
 
+TensorAllocator::~TensorAllocator()
+{
+    if((_associated_memory_group == nullptr) && (_buffer != nullptr))
+    {
+        delete[] _buffer;
+        _buffer = nullptr;
+        info().set_is_resizable(true);
+    }
+}
+
+TensorAllocator::TensorAllocator(TensorAllocator &&o) noexcept
+    : ITensorAllocator(std::move(o)),
+      _associated_memory_group(o._associated_memory_group),
+      _buffer(o._buffer),
+      _owner(o._owner)
+{
+    o._associated_memory_group = nullptr;
+    o._buffer                  = nullptr;
+    o._owner                   = nullptr;
+}
+
+TensorAllocator &TensorAllocator::operator=(TensorAllocator &&o) noexcept
+{
+    if(&o != this)
+    {
+        _associated_memory_group   = o._associated_memory_group;
+        o._associated_memory_group = nullptr;
+
+        _buffer   = o._buffer;
+        o._buffer = nullptr;
+
+        _owner   = o._owner;
+        o._owner = nullptr;
+
+        ITensorAllocator::operator=(std::move(o));
+    }
+    return *this;
+}
+
 void TensorAllocator::init(const TensorAllocator &allocator, const Coordinates &coords, TensorInfo sub_info)
 {
     // Get parent info
@@ -90,28 +130,44 @@
 
 uint8_t *TensorAllocator::data() const
 {
-    return (_buffer != nullptr) ? _buffer.get()->data() : nullptr;
+    return _buffer;
 }
 
 void TensorAllocator::allocate()
 {
     ARM_COMPUTE_ERROR_ON(_buffer != nullptr);
-
-    _buffer = std::make_shared<std::vector<uint8_t>>(info().total_size());
+    if(_associated_memory_group == nullptr)
+    {
+        _buffer = new uint8_t[info().total_size()]();
+    }
+    else
+    {
+        _associated_memory_group->finalize_memory(_owner, reinterpret_cast<void **>(&_buffer), info().total_size());
+    }
     info().set_is_resizable(false);
 }
 
 void TensorAllocator::free()
 {
-    ARM_COMPUTE_ERROR_ON(_buffer == nullptr);
+    if((_associated_memory_group == nullptr) && (_buffer != nullptr))
+    {
+        delete[] _buffer;
+        _buffer = nullptr;
+        info().set_is_resizable(true);
+    }
+}
 
-    _buffer.reset();
-    info().set_is_resizable(true);
+void TensorAllocator::set_associated_memory_group(MemoryGroup *associated_memory_group)
+{
+    ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
+    ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr);
+    ARM_COMPUTE_ERROR_ON(_buffer != nullptr);
+    _associated_memory_group = associated_memory_group;
 }
 
 uint8_t *TensorAllocator::lock()
 {
-    return (_buffer != nullptr) ? _buffer.get()->data() : nullptr;
+    return _buffer;
 }
 
 void TensorAllocator::unlock()