IVGCVSW-59 Add documentation to the public API

        * Add documentation to the Descriptors
        * Add documentation to the layers

Change-Id: I5e0849753903565227fd47d329a600fd90b2feb9
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 9a33cc0..2b30c2b 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -15,15 +15,21 @@
 namespace armnn
 {
 
+/// An ActivationDescriptor for the ActivationLayer.
 struct ActivationDescriptor
 {
     ActivationDescriptor() : m_Function(ActivationFunction::Sigmoid), m_A(0), m_B(0) {};
 
+    /// @brief The activation function to use
+    /// (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square).
     ActivationFunction m_Function;
+    /// Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH).
     float              m_A;
+    /// Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
     float              m_B;
 };
 
+/// A PermuteDescriptor for the PermuteLayer.
 struct PermuteDescriptor
 {
     PermuteDescriptor()
@@ -34,18 +40,22 @@
         : m_DimMappings(dimMappings)
     {
     }
-
+    /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
+    /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
     PermutationVector m_DimMappings;
 };
 
+/// A SoftmaxDescriptor for the SoftmaxLayer.
 struct SoftmaxDescriptor
 {
     SoftmaxDescriptor() : m_Beta(1.0f) {};
-
+    /// Exponentiation value.
     float              m_Beta;
 };
 
-
+/// @brief An OriginsDescriptor for the MergerLayer.
+/// Descriptor to configure the merging process. Number of views must be equal to the number of inputs, and
+/// their order must match - e.g. first view corresponds to the first input, second view to the second input, etc.
 struct OriginsDescriptor
 {
     OriginsDescriptor();
@@ -57,13 +67,24 @@
 
     OriginsDescriptor& operator=(OriginsDescriptor rhs);
 
+    /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
+    /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
+    /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
     Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
+    /// Get the number of views.
     uint32_t GetNumViews() const;
+    /// Get the number of dimensions.
     uint32_t GetNumDimensions() const;
+    /// Return the view origin at the int value idx.
     const uint32_t* GetViewOrigin(uint32_t idx) const;
+    /// @brief Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
+    /// The number of views must match number of elements in the new ordering array.
     void ReorderOrigins(unsigned int*  newOrdering, unsigned int numNewOrdering);
+    /// Swap the OriginsDescriptor values first and second.
     friend void swap(OriginsDescriptor& first, OriginsDescriptor& second);
+    /// Set the concatenation axis value.
     void SetConcatAxis(unsigned int concatAxis);
+    /// Get the concatenation axis value.
     unsigned int GetConcatAxis() const;
 
 private:
@@ -73,6 +94,9 @@
     uint32_t**   m_ViewOrigins;
 };
 
+/// @brief A ViewsDescriptor for the SplitterLayer.
+/// Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and
+/// their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
 struct ViewsDescriptor
 {
     ViewsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
@@ -84,22 +108,33 @@
 
     ViewsDescriptor& operator=(ViewsDescriptor rhs);
 
+    /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
+    /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
+    /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
     Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
+    /// @brief Set the size of the views. The arguments are: view, dimension, value.
+    /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
+    /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
     Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value);
 
+    /// Get the number of views.
     uint32_t GetNumViews() const;
+    /// Get the number of dimensions.
     uint32_t GetNumDimensions() const;
+    /// Get the view origin at the int value idx.
     const uint32_t* GetViewOrigin(uint32_t idx) const;
+    /// Get the view sizes at the int value idx.
     const uint32_t* GetViewSizes(uint32_t idx) const;
 
+    /// Swap the ViewsDescriptor values first and second.
     friend void swap(ViewsDescriptor& first, ViewsDescriptor& second);
 private:
     OriginsDescriptor m_Origins;
     uint32_t**        m_ViewSizes;
 };
 
-/// Convenience template to create an OriginsDescriptor to use when creating a Merger layer for performing concatenation
-/// of a number of input tensors
+/// @brief Convenience template to create an OriginsDescriptor to use when creating a MergerLayer for performing
+/// concatenation of a number of input tensors.
 template <typename TensorShapeIt>
 OriginsDescriptor CreateMergerDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last,
     unsigned int concatenationDimension)
@@ -170,6 +205,7 @@
     return viewsDescriptor;
 }
 
+/// A Pooling2dDescriptor for the Pooling2dLayer.
 struct Pooling2dDescriptor
 {
     Pooling2dDescriptor()
@@ -187,20 +223,33 @@
     , m_DataLayout(DataLayout::NCHW)
     {};
 
+    /// The pooling algorithm to use (Max, Average, L2).
     PoolingAlgorithm    m_PoolType;
+    /// Padding left value in the width dimension.
     uint32_t            m_PadLeft;
+    /// Padding right value in the width dimension.
     uint32_t            m_PadRight;
+    /// Padding top value in the height dimension.
     uint32_t            m_PadTop;
+    /// Padding bottom value in the height dimension.
     uint32_t            m_PadBottom;
+    /// Pooling width value.
     uint32_t            m_PoolWidth;
+    /// Pooling height value.
     uint32_t            m_PoolHeight;
+    /// Stride value when proceeding through input for the width dimension.
     uint32_t            m_StrideX;
+    /// Stride value when proceeding through input for the height dimension.
     uint32_t            m_StrideY;
+    /// The rounding method for the output shape. (Floor, Ceiling).
     OutputShapeRounding m_OutputShapeRounding;
+    /// The padding method to be used. (Exclude, IgnoreValue).
     PaddingMethod       m_PaddingMethod;
+    /// The data layout to be used (NCHW, NHWC).
     DataLayout   m_DataLayout;
 };
 
+/// A FullyConnectedDescriptor for the FullyConnectedLayer.
 struct FullyConnectedDescriptor
 {
     FullyConnectedDescriptor()
@@ -208,10 +257,13 @@
     , m_TransposeWeightMatrix(false)
     {};
 
+    /// Enable/disable bias.
     bool m_BiasEnabled;
+    /// Enable/disable transpose weight matrix.
     bool m_TransposeWeightMatrix;
 };
 
+/// A Convolution2dDescriptor for the Convolution2dLayer.
 struct Convolution2dDescriptor
 {
     Convolution2dDescriptor()
@@ -225,16 +277,25 @@
     , m_DataLayout(DataLayout::NCHW)
     {};
 
+    /// Padding left value in the width dimension.
     uint32_t             m_PadLeft;
+    /// Padding right value in the width dimension.
     uint32_t             m_PadRight;
+    /// Padding top value in the height dimension.
     uint32_t             m_PadTop;
+    /// Padding bottom value in the height dimension.
     uint32_t             m_PadBottom;
+    /// Stride value when proceeding through input for the width dimension.
     uint32_t             m_StrideX;
+    /// Stride value when proceeding through input for the height dimension.
     uint32_t             m_StrideY;
+    /// Enable/disable bias.
     bool                 m_BiasEnabled;
+    /// The data layout to be used (NCHW, NHWC).
     DataLayout           m_DataLayout;
 };
 
+/// A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
 struct DepthwiseConvolution2dDescriptor
 {
     DepthwiseConvolution2dDescriptor()
@@ -248,17 +309,25 @@
     ,   m_DataLayout(DataLayout::NCHW)
     {}
 
+    /// Padding left value in the width dimension.
     uint32_t   m_PadLeft;
+    /// Padding right value in the width dimension.
     uint32_t   m_PadRight;
+    /// Padding top value in the height dimension.
     uint32_t   m_PadTop;
+    /// Padding bottom value in the height dimension.
     uint32_t   m_PadBottom;
+    /// Stride value when proceeding through input for the width dimension.
     uint32_t   m_StrideX;
+    /// Stride value when proceeding through input for the height dimension.
     uint32_t   m_StrideY;
+    /// Enable/disable bias.
     bool       m_BiasEnabled;
+    /// The data layout to be used (NCHW, NHWC).
     DataLayout m_DataLayout;
 };
 
-
+/// A NormalizationDescriptor for the NormalizationLayer.
 struct NormalizationDescriptor
 {
     NormalizationDescriptor()
@@ -271,24 +340,34 @@
     , m_DataLayout(DataLayout::NCHW)
     {}
 
+    /// Normalization channel algorithm to use (Across, Within).
     NormalizationAlgorithmChannel m_NormChannelType;
+    /// Normalization method algorithm to use (LocalBrightness, LocalContrast).
     NormalizationAlgorithmMethod  m_NormMethodType;
+    /// Depth radius value.
     uint32_t                      m_NormSize;
+    /// Alpha value for the normalization equation.
     float                         m_Alpha;
+    /// Beta value for the normalization equation.
     float                         m_Beta;
+    /// Kappa value used for the across channel normalization equation.
     float                         m_K;
+    /// The data layout to be used (NCHW, NHWC).
     DataLayout                    m_DataLayout;
 };
 
+/// A L2NormalizationDescriptor for the L2NormalizationLayer.
 struct L2NormalizationDescriptor
 {
     L2NormalizationDescriptor()
         : m_DataLayout(DataLayout::NCHW)
     {}
 
+    /// The data layout to be used (NCHW, NHWC).
     DataLayout m_DataLayout;
 };
 
+/// A BatchNormalizationDescriptor for the BatchNormalizationLayer.
 struct BatchNormalizationDescriptor
 {
     BatchNormalizationDescriptor()
@@ -296,10 +375,13 @@
     , m_DataLayout(DataLayout::NCHW)
     {}
 
+    /// Value to add to the variance. Used to avoid dividing by zero.
     float m_Eps;
+    /// The data layout to be used (NCHW, NHWC).
     DataLayout m_DataLayout;
 };
 
+/// A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
 struct BatchToSpaceNdDescriptor
 {
     BatchToSpaceNdDescriptor()
@@ -315,11 +397,15 @@
         , m_DataLayout(DataLayout::NCHW)
     {}
 
+    /// Block shape values.
     std::vector<unsigned int> m_BlockShape;
+    /// The values to crop from the input dimension.
     std::vector<std::pair<unsigned int, unsigned int>> m_Crops;
+    /// The data layout to be used (NCHW, NHWC).
     DataLayout m_DataLayout;
 };
 
+/// A FakeQuantizationDescriptor for the FakeQuantizationLayer.
 struct FakeQuantizationDescriptor
 {
     FakeQuantizationDescriptor()
@@ -327,10 +413,13 @@
     , m_Max(6.0f)
     {}
 
+    /// Minimum value.
     float m_Min;
+    /// Maximum value.
     float m_Max;
 };
 
+/// A ResizeBilinearDescriptor for the ResizeBilinearLayer.
 struct ResizeBilinearDescriptor
 {
     ResizeBilinearDescriptor()
@@ -339,11 +428,15 @@
     , m_DataLayout(DataLayout::NCHW)
     {}
 
+    /// Target width value.
     uint32_t          m_TargetWidth;
+    /// Target height value.
     uint32_t          m_TargetHeight;
+    /// The data layout to be used (NCHW, NHWC).
     DataLayout m_DataLayout;
 };
 
+/// A ReshapeDescriptor for the ReshapeLayer.
 struct ReshapeDescriptor
 {
     ReshapeDescriptor()
@@ -354,9 +447,11 @@
     : m_TargetShape(shape)
     {}
 
+    /// Target shape value.
     TensorShape m_TargetShape;
 };
 
+/// A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
 struct SpaceToBatchNdDescriptor
 {
     SpaceToBatchNdDescriptor()
@@ -372,12 +467,16 @@
     , m_DataLayout(DataLayout::NCHW)
     {}
 
+    /// Block shape value.
     std::vector<unsigned int> m_BlockShape;
+    /// @brief Specifies the padding values for the input dimension:
+    /// heightPad{top, bottom} widthPad{left, right}.
     std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
+    /// The data layout to be used (NCHW, NHWC).
     DataLayout m_DataLayout;
 };
 
-// temporary descriptor for Lstm
+/// An LstmDescriptor for the LstmLayer.
 struct LstmDescriptor
 {
     LstmDescriptor()
@@ -389,14 +488,22 @@
     , m_ProjectionEnabled(false)
     {}
 
+    /// @brief The activation function to use.
+    /// 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid.
     uint32_t m_ActivationFunc;
+    /// Clipping threshold value for the cell state.
     float m_ClippingThresCell;
+    /// Clipping threshold value for the projection.
     float m_ClippingThresProj;
+    /// Enable/disable cifg (coupled input & forget gate).
     bool m_CifgEnabled;
+    /// Enable/disable peephole.
     bool m_PeepholeEnabled;
+    /// Enable/disable the projection layer.
     bool m_ProjectionEnabled;
 };
 
+/// A MeanDescriptor for the MeanLayer.
 struct MeanDescriptor
 {
     MeanDescriptor()
@@ -409,10 +516,13 @@
     , m_KeepDims(keepDims)
     {}
 
+    /// Values for the dimensions to reduce.
     std::vector<unsigned int> m_Axis;
+    /// Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
     bool m_KeepDims;
 };
 
+/// A PadDescriptor for the PadLayer.
 struct PadDescriptor
 {
     PadDescriptor()
@@ -422,12 +532,14 @@
     : m_PadList(padList)
     {}
 
-    // first is number of values to add before the tensor in the dimension,
-    // second is the number of values to add after the tensor in the dimension
-    // the number of pairs should match the number of dimensions in the input tensor.
+    /// @brief Specifies the padding for input dimension.
+    /// First is the number of values to add before the tensor in the dimension.
+    /// Second is the number of values to add after the tensor in the dimension.
+    /// The number of pairs should match the number of dimensions in the input tensor.
     std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
 };
 
+/// A StridedSliceDescriptor for the StridedSliceLayer.
 struct StridedSliceDescriptor
 {
     StridedSliceDescriptor(const std::vector<int>& begin,
@@ -453,19 +565,32 @@
                        unsigned int axis,
                        int startForAxis) const;
 
+    /// Begin values for the input that will be sliced.
     std::vector<int> m_Begin;
+    /// End values for the input that will be sliced.
     std::vector<int> m_End;
+    /// Stride values for the input that will be sliced.
     std::vector<int> m_Stride;
 
+    /// @brief Begin mask value. If set, then the begin is disregarded and the fullest
+    /// range is used for the dimension.
     int32_t m_BeginMask;
+    /// @brief End mask value. If set, then the end is disregarded and the fullest range
+    /// is used for the dimension.
     int32_t m_EndMask;
+    /// Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
     int32_t m_ShrinkAxisMask;
+    /// Ellipsis mask value.
     int32_t m_EllipsisMask;
+    /// @brief New axis mask value. If set, the begin, end and stride is disregarded and
+    /// a new 1 dimension is inserted to this location of the output tensor.
     int32_t m_NewAxisMask;
 
+    /// The data layout to be used (NCHW, NHWC).
     DataLayout m_DataLayout;
 };
 
+/// A DebugDescriptor for the DebugLayer.
 struct DebugDescriptor
 {
     DebugDescriptor()
@@ -477,7 +602,9 @@
     , m_SlotIndex(index)
     {}
 
+    /// The name of the debug layer.
     std::string m_LayerName;
+    /// The slot index of the debug layer.
     unsigned int m_SlotIndex;
 };
 
diff --git a/src/armnn/layers/ActivationLayer.hpp b/src/armnn/layers/ActivationLayer.hpp
index 5ea8c4e..390955a 100644
--- a/src/armnn/layers/ActivationLayer.hpp
+++ b/src/armnn/layers/ActivationLayer.hpp
@@ -8,19 +8,31 @@
 
 namespace armnn
 {
-
+/// This layer represents an activation operation with the specified activation function.
 class ActivationLayer : public LayerWithParameters<ActivationDescriptor>
 {
 public:
+    /// Makes a workload for the Activation type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     ActivationLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s) will lead to a valid configuration of @ref ActivationLayer.
     void ValidateTensorShapesFromInputs() override;
 
 protected:
+    /// Constructor to create an ActivationLayer.
+    /// @param [in] param ActivationDescriptor to configure the activation operation.
+    /// @param [in] name Optional name for the layer.
     ActivationLayer(const ActivationDescriptor &param, const char* name);
+
+    /// Default destructor
     ~ActivationLayer() = default;
 };
 
diff --git a/src/armnn/layers/AdditionLayer.hpp b/src/armnn/layers/AdditionLayer.hpp
index 9cdf09f..4a6b8d9 100644
--- a/src/armnn/layers/AdditionLayer.hpp
+++ b/src/armnn/layers/AdditionLayer.hpp
@@ -9,17 +9,27 @@
 
 namespace armnn
 {
-
+/// This layer represents an addition operation.
 class AdditionLayer : public ElementwiseBaseLayer
 {
 public:
+    /// Makes a workload for the Addition type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     AdditionLayer* Clone(Graph& graph) const override;
 
 protected:
+    /// Constructor to create an AdditionLayer.
+    /// @param [in] name Optional name for the layer.
     AdditionLayer(const char* name);
+
+    /// Default destructor
     ~AdditionLayer() = default;
 };
 
diff --git a/src/armnn/layers/BatchNormalizationLayer.hpp b/src/armnn/layers/BatchNormalizationLayer.hpp
index 9ed15be..02d8456 100644
--- a/src/armnn/layers/BatchNormalizationLayer.hpp
+++ b/src/armnn/layers/BatchNormalizationLayer.hpp
@@ -11,25 +11,45 @@
 
 class ScopedCpuTensorHandle;
 
+/// This layer represents a batch normalization operation.
 class BatchNormalizationLayer : public LayerWithParameters<BatchNormalizationDescriptor>
 {
 public:
+    /// A unique pointer to store Mean values.
     std::unique_ptr<ScopedCpuTensorHandle> m_Mean;
+    /// A unique pointer to store Variance values.
     std::unique_ptr<ScopedCpuTensorHandle> m_Variance;
+    /// A unique pointer to store Beta values.
     std::unique_ptr<ScopedCpuTensorHandle> m_Beta;
+    /// A unique pointer to store Gamma values.
     std::unique_ptr<ScopedCpuTensorHandle> m_Gamma;
 
+    /// Makes a workload for the BatchNormalization type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     BatchNormalizationLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref BatchNormalizationLayer.
     void ValidateTensorShapesFromInputs() override;
 
 protected:
+    /// Constructor to create a BatchNormalizationLayer.
+    /// @param [in] param BatchNormalizationDescriptor to configure the batch normalization operation.
+    /// @param [in] name Optional name for the layer.
     BatchNormalizationLayer(const BatchNormalizationDescriptor& param, const char* name);
+
+    /// Default destructor
     ~BatchNormalizationLayer() = default;
 
+    /// Retrieve the handles to the constant values stored by the layer.
+    /// @return A vector of the constant tensors stored by this layer.
     ConstantTensors GetConstantTensorsByRef() override;
 };
 
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.hpp b/src/armnn/layers/BatchToSpaceNdLayer.hpp
index eb5f979..fc4dd71 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.hpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.hpp
@@ -9,20 +9,38 @@
 namespace armnn
 {
 
+/// This layer represents a BatchToSpaceNd operation.
 class BatchToSpaceNdLayer : public LayerWithParameters<BatchToSpaceNdDescriptor>
 {
 public:
+    /// Makes a workload for the BatchToSpaceNd type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     BatchToSpaceNdLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref BatchToSpaceNdLayer.
     void ValidateTensorShapesFromInputs() override;
 
+    /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+    /// otherwise infers the output shapes from given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes layer has.
+    /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
 protected:
+    /// Constructor to create a BatchToSpaceNdLayer.
+    /// @param [in] param BatchToSpaceNdDescriptor to configure the BatchToSpaceNd operation.
+    /// @param [in] name Optional name for the layer.
     BatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& param, const char* name);
+
+    /// Default destructor
     ~BatchToSpaceNdLayer() = default;
 };
 
diff --git a/src/armnn/layers/ConstantLayer.hpp b/src/armnn/layers/ConstantLayer.hpp
index a9c9111..c3316b7 100644
--- a/src/armnn/layers/ConstantLayer.hpp
+++ b/src/armnn/layers/ConstantLayer.hpp
@@ -11,26 +11,44 @@
 
 class ScopedCpuTensorHandle;
 
+/// A layer that the constant data can be bound to.
 class ConstantLayer : public Layer
 {
 public:
+    /// Makes a workload for the Constant type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
-        const IWorkloadFactory& factory) const override;
+                                                      const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     ConstantLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref ConstantLayer.
     void ValidateTensorShapesFromInputs() override;
 
+    /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+    /// otherwise infers the output shapes from given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes layer has.
+    /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    // Free up the constant source data
+    /// Free up the constant source data stored by the layer.
     void ReleaseConstantData() override {};
 
     std::unique_ptr<ScopedCpuTensorHandle> m_LayerOutput;
 protected:
+    /// Constructor to create a ConstantLayer.
+    /// @param [in] name Optional name for the layer.
     ConstantLayer(const char* name);
+
+    /// Default destructor
     ~ConstantLayer() = default;
 
+    /// Retrieve the handles to the constant values stored by the layer.
     ConstantTensors GetConstantTensorsByRef() override { return {m_LayerOutput}; }
 
 };
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
index bf28a84..bdfc981 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
@@ -10,18 +10,31 @@
 namespace armnn
 {
 
+/// This layer converts data type Float 16 to Float 32.
 class ConvertFp16ToFp32Layer : public Layer
 {
 public:
+    /// Makes a workload for the ConvertFp16ToFp32 type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     ConvertFp16ToFp32Layer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref ConvertFp16ToFp32Layer.
     void ValidateTensorShapesFromInputs() override;
 
 protected:
+    /// Constructor to create a ConvertFp16ToFp32Layer.
+    /// @param [in] name Optional name for the layer.
     ConvertFp16ToFp32Layer(const char* name);
+
+    /// Default destructor
     ~ConvertFp16ToFp32Layer() = default;
 };
 
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
index 0027193..524f974 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
@@ -9,18 +9,31 @@
 namespace armnn
 {
 
+/// This layer converts data type Float 32 to Float 16.
 class ConvertFp32ToFp16Layer : public Layer
 {
 public:
+    /// Makes a workload for the ConvertFp32ToFp16 type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     ConvertFp32ToFp16Layer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref ConvertFp32ToFp16Layer.
     void ValidateTensorShapesFromInputs() override;
 
 protected:
+    /// Constructor to create a ConvertFp32ToFp16Layer.
+    /// @param [in] name Optional name for the layer.
     ConvertFp32ToFp16Layer(const char* name);
+
+    /// Default destructor
     ~ConvertFp32ToFp16Layer() = default;
 };
 
diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp
index 31a9117..b812b1b 100644
--- a/src/armnn/layers/Convolution2dLayer.hpp
+++ b/src/armnn/layers/Convolution2dLayer.hpp
@@ -11,25 +11,47 @@
 
 class ScopedCpuTensorHandle;
 
+/// This layer represents a convolution 2d operation.
 class Convolution2dLayer : public LayerWithParameters<Convolution2dDescriptor>
 {
 public:
+    /// A unique pointer to store Weight values.
     std::unique_ptr<ScopedCpuTensorHandle> m_Weight;
+    /// A unique pointer to store Bias values.
     std::unique_ptr<ScopedCpuTensorHandle> m_Bias;
 
+    /// Makes a workload for the Convolution2d type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     Convolution2dLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref Convolution2dLayer.
     void ValidateTensorShapesFromInputs() override;
 
+    /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+    /// otherwise infers the output shapes from given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes layer has.
+    /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
 protected:
+    /// Constructor to create a Convolution2dLayer.
+    /// @param [in] param Convolution2dDescriptor to configure the convolution2d operation.
+    /// @param [in] name Optional name for the layer.
     Convolution2dLayer(const Convolution2dDescriptor& param, const char* name);
+
+    /// Default destructor
     ~Convolution2dLayer() = default;
 
+    /// Retrieve the handles to the constant values stored by the layer.
+    /// @return A vector of the constant tensors stored by this layer.
     ConstantTensors GetConstantTensorsByRef() override;
 };
 
diff --git a/src/armnn/layers/DebugLayer.hpp b/src/armnn/layers/DebugLayer.hpp
index 6aaa271..fc777a8 100644
--- a/src/armnn/layers/DebugLayer.hpp
+++ b/src/armnn/layers/DebugLayer.hpp
@@ -9,18 +9,32 @@
 namespace armnn
 {
 
+/// This layer visualizes the data flowing through the network.
 class DebugLayer : public LayerWithParameters<DebugDescriptor>
 {
 public:
+    /// Makes a workload for the Debug type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     DebugLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref DebugLayer.
     void ValidateTensorShapesFromInputs() override;
 
 protected:
+    /// Constructor to create a DebugLayer.
+    /// @param [in] param DebugDescriptor to configure the debug layer.
+    /// @param [in] name Optional name for the layer.
     DebugLayer(const DebugDescriptor& param, const char* name);
+
+    /// Default destructor
     ~DebugLayer() = default;
 };
 
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
index ae91fde..9d5b658 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
@@ -11,25 +11,47 @@
 
 class ScopedCpuTensorHandle;
 
+/// This layer represents a depthwise convolution 2d operation.
 class DepthwiseConvolution2dLayer : public LayerWithParameters<DepthwiseConvolution2dDescriptor>
 {
 public:
+    /// A unique pointer to store Weight values.
     std::unique_ptr<ScopedCpuTensorHandle> m_Weight;
+    /// A unique pointer to store Bias values.
     std::unique_ptr<ScopedCpuTensorHandle> m_Bias;
 
+    /// Makes a workload for the DepthwiseConvolution2d type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     DepthwiseConvolution2dLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref DepthwiseConvolution2dLayer.
     void ValidateTensorShapesFromInputs() override;
 
+    /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+    /// otherwise infers the output shapes from given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes layer has.
+    /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
 protected:
+    /// Constructor to create a DepthwiseConvolution2dLayer.
+    /// @param [in] param DepthwiseConvolution2dDescriptor to configure the depthwise convolution2d.
+    /// @param [in] name Optional name for the layer.
     DepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor& param, const char* name);
+
+    /// Default destructor
     ~DepthwiseConvolution2dLayer() = default;
 
+    /// Retrieve the handles to the constant values stored by the layer.
+    /// @return A vector of the constant tensors stored by this layer.
     ConstantTensors GetConstantTensorsByRef() override;
 };
 
diff --git a/src/armnn/layers/DivisionLayer.hpp b/src/armnn/layers/DivisionLayer.hpp
index 158f8e8..e1dca2f 100644
--- a/src/armnn/layers/DivisionLayer.hpp
+++ b/src/armnn/layers/DivisionLayer.hpp
@@ -10,16 +10,27 @@
 namespace armnn
 {
 
+/// This layer represents a division operation.
 class DivisionLayer : public ElementwiseBaseLayer
 {
 public:
+    /// Makes a workload for the Division type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     DivisionLayer* Clone(Graph& graph) const override;
 
 protected:
+    /// Constructor to create a DivisionLayer.
+    /// @param [in] name Optional name for the layer.
     DivisionLayer(const char* name);
+
+    /// Default destructor
     ~DivisionLayer() = default;
 };
 
diff --git a/src/armnn/layers/ElementwiseBaseLayer.hpp b/src/armnn/layers/ElementwiseBaseLayer.hpp
index f0821ec..5c2bbaa 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.hpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.hpp
@@ -10,17 +10,30 @@
 namespace armnn
 {
 
-/// NOTE: this is an abstract class, it does not implement:
-///  std::unique_ptr<IWorkload> Layer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const = 0;
-///  Layer* Clone(Graph& graph) const = 0;
+/// NOTE: this is an abstract class to encapsulate the element wise operations, it does not implement:
+/// std::unique_ptr<IWorkload> Layer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const = 0;
+/// Layer* Clone(Graph& graph) const = 0;
 class ElementwiseBaseLayer : public Layer
 {
 public:
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of the element wise operation.
     void ValidateTensorShapesFromInputs() override;
+
+    /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+    /// otherwise infers the output shapes from given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes layer has.
+    /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
 protected:
+    /// @param [in] numInputSlots The number of input slots for the layer.
+    /// @param [in] numOutputSlots The number of output slots for the layer.
+    /// @param [in] type The layer type.
+    /// @param [in] name Optional name for the layer.
     ElementwiseBaseLayer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name);
+
+    /// Default destructor
     ~ElementwiseBaseLayer() = default;
 };
 
diff --git a/src/armnn/layers/EqualLayer.hpp b/src/armnn/layers/EqualLayer.hpp
index 956ae3c..198bae7 100644
--- a/src/armnn/layers/EqualLayer.hpp
+++ b/src/armnn/layers/EqualLayer.hpp
@@ -9,17 +9,27 @@
 
 namespace armnn
 {
-
+/// This layer represents an equal operation.
 class EqualLayer : public ElementwiseBaseLayer
 {
 public:
+    /// Makes a workload for the Equal type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     EqualLayer* Clone(Graph& graph) const override;
 
 protected:
+    /// Constructor to create an EqualLayer.
+    /// @param [in] name Optional name for the layer.
     EqualLayer(const char* name);
+
+    /// Default destructor
     ~EqualLayer() = default;
 };
 
diff --git a/src/armnn/layers/FakeQuantizationLayer.hpp b/src/armnn/layers/FakeQuantizationLayer.hpp
index 60541aa..5e7b5cb 100644
--- a/src/armnn/layers/FakeQuantizationLayer.hpp
+++ b/src/armnn/layers/FakeQuantizationLayer.hpp
@@ -9,18 +9,32 @@
 namespace armnn
 {
 
+/// This layer represents a fake quantization operation.
 class FakeQuantizationLayer : public LayerWithParameters<FakeQuantizationDescriptor>
 {
 public:
+    /// Makes a workload for the FakeQuantization type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     FakeQuantizationLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref FakeQuantizationLayer.
     void ValidateTensorShapesFromInputs() override;
 
 protected:
+    /// Constructor to create a FakeQuantizationLayer.
+    /// @param [in] descriptor FakeQuantizationDescriptor to configure the fake quantization operation.
+    /// @param [in] name Optional name for the layer.
     FakeQuantizationLayer(const FakeQuantizationDescriptor& descriptor, const char* name);
+
+    /// Default destructor
     ~FakeQuantizationLayer() = default;
 };
 
diff --git a/src/armnn/layers/FloorLayer.hpp b/src/armnn/layers/FloorLayer.hpp
index bf9c25a..546d136 100644
--- a/src/armnn/layers/FloorLayer.hpp
+++ b/src/armnn/layers/FloorLayer.hpp
@@ -9,18 +9,31 @@
 namespace armnn
 {
 
+/// This layer represents a floor operation.
 class FloorLayer : public Layer
 {
 public:
+    /// Makes a workload for the Floor type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     FloorLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref FloorLayer.
     void ValidateTensorShapesFromInputs() override;
 
 protected:
+    /// Constructor to create a FloorLayer.
+    /// @param [in] name Optional name for the layer.
     FloorLayer(const char* name);
+
+    /// Default destructor
     ~FloorLayer() = default;
 };
 
diff --git a/src/armnn/layers/FullyConnectedLayer.hpp b/src/armnn/layers/FullyConnectedLayer.hpp
index ad0ac1c..0a404b7 100644
--- a/src/armnn/layers/FullyConnectedLayer.hpp
+++ b/src/armnn/layers/FullyConnectedLayer.hpp
@@ -11,24 +11,47 @@
 
 class ScopedCpuTensorHandle;
 
+/// This layer represents a fully connected operation.
 class FullyConnectedLayer : public LayerWithParameters<FullyConnectedDescriptor>
 {
 public:
+    /// A unique pointer to store Weight values.
     std::unique_ptr<ScopedCpuTensorHandle> m_Weight;
+    /// A unique pointer to store Bias values.
     std::unique_ptr<ScopedCpuTensorHandle> m_Bias;
 
+    /// Makes a workload for the FullyConnected type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     FullyConnectedLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref FullyConnectedLayer.
     void ValidateTensorShapesFromInputs() override;
+
+    /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+    /// otherwise infers the output shapes from given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes layer has.
+    /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
 protected:
+    /// Constructor to create a FullyConnectedLayer.
+    /// @param [in] param FullyConnectedDescriptor to configure the fully connected operation.
+    /// @param [in] name Optional name for the layer.
     FullyConnectedLayer(const FullyConnectedDescriptor& param, const char* name);
+
+    /// Default destructor
     ~FullyConnectedLayer() = default;
 
+    /// Retrieve the handles to the constant values stored by the layer.
+    /// @return A vector of the constant tensors stored by this layer.
     ConstantTensors GetConstantTensorsByRef() override;
 };
 
diff --git a/src/armnn/layers/GreaterLayer.hpp b/src/armnn/layers/GreaterLayer.hpp
index 9297a82..8b33f41 100644
--- a/src/armnn/layers/GreaterLayer.hpp
+++ b/src/armnn/layers/GreaterLayer.hpp
@@ -10,16 +10,27 @@
 namespace armnn
 {
 
+/// This layer represents a greater operation.
 class GreaterLayer : public ElementwiseBaseLayer
 {
 public:
+    /// Makes a workload for the Greater type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     GreaterLayer* Clone(Graph& graph) const override;
 
 protected:
+    /// Constructor to create a GreaterLayer.
+    /// @param [in] name Optional name for the layer.
     GreaterLayer(const char* name);
+
+    /// Default destructor
     ~GreaterLayer() = default;
 };
 
diff --git a/src/armnn/layers/InputLayer.hpp b/src/armnn/layers/InputLayer.hpp
index 6d71c69..1f6f245 100644
--- a/src/armnn/layers/InputLayer.hpp
+++ b/src/armnn/layers/InputLayer.hpp
@@ -9,18 +9,32 @@
 namespace armnn
 {
 
+/// A layer user-provided data can be bound to (e.g. inputs, outputs).
 class InputLayer : public BindableLayer
 {
 public:
+    /// Makes a workload for the Input type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     InputLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref InputLayer.
     void ValidateTensorShapesFromInputs() override;
 
 protected:
+    /// Constructor to create an InputLayer.
+    /// @param [in] id The layer binding id number.
+    /// @param [in] name Optional name for the layer.
     InputLayer(LayerBindingId id, const char* name);
+
+    /// Default destructor
     ~InputLayer() = default;
 };
 
diff --git a/src/armnn/layers/L2NormalizationLayer.hpp b/src/armnn/layers/L2NormalizationLayer.hpp
index bf4d49e..bae3d82 100644
--- a/src/armnn/layers/L2NormalizationLayer.hpp
+++ b/src/armnn/layers/L2NormalizationLayer.hpp
@@ -9,18 +9,32 @@
 namespace armnn
 {
 
+/// This layer represents a L2 normalization operation.
 class L2NormalizationLayer : public LayerWithParameters<L2NormalizationDescriptor>
 {
 public:
+    /// Makes a workload for the L2Normalization type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
-        const IWorkloadFactory& factory) const override;
+                                                      const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     L2NormalizationLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref L2NormalizationLayer.
     void ValidateTensorShapesFromInputs() override;
 
 protected:
+    /// Constructor to create a L2NormalizationLayer.
+    /// @param [in] param L2NormalizationDescriptor to configure the L2 normalization operation.
+    /// @param [in] name Optional name for the layer.
     L2NormalizationLayer(const L2NormalizationDescriptor& param, const char* name);
+
+    /// Default destructor
     ~L2NormalizationLayer() = default;
 };
 
diff --git a/src/armnn/layers/LstmLayer.hpp b/src/armnn/layers/LstmLayer.hpp
index 247fec3..6004d96 100644
--- a/src/armnn/layers/LstmLayer.hpp
+++ b/src/armnn/layers/LstmLayer.hpp
@@ -13,37 +13,55 @@
 
 struct LstmOptCifgParameters
 {
+    /// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
     std::unique_ptr<ScopedCpuTensorHandle> m_InputToInputWeights;
+    /// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
     std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToInputWeights;
+    /// A unique pointer to represent 1D weights tensor with dimensions [num_units].
     std::unique_ptr<ScopedCpuTensorHandle> m_CellToInputWeights;
+    /// A unique pointer to represent 1D weights tensor with dimensions [num_units].
     std::unique_ptr<ScopedCpuTensorHandle> m_InputGateBias;
 };
 
 struct LstmOptProjectionParameters
 {
+    /// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
     std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionWeights;
+    /// A unique pointer to represent 1D weights tensor with dimensions [output_size].
     std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionBias;
 };
 
 struct LstmOptPeepholeParameters
 {
+    /// A unique pointer to represent 1D weights tensor with dimensions [num_units].
     std::unique_ptr<ScopedCpuTensorHandle> m_CellToForgetWeights;
+    /// A unique pointer to represent 1D weights tensor with dimensions [num_units].
     std::unique_ptr<ScopedCpuTensorHandle> m_CellToOutputWeights;
 };
 
 struct LstmBasicParameters
 {
+    /// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
     std::unique_ptr<ScopedCpuTensorHandle> m_InputToForgetWeights;
+    /// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
     std::unique_ptr<ScopedCpuTensorHandle> m_InputToCellWeights;
+    /// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
     std::unique_ptr<ScopedCpuTensorHandle> m_InputToOutputWeights;
+    /// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
     std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToForgetWeights;
+    /// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
     std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToCellWeights;
+    /// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
     std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToOutputWeights;
+    /// A unique pointer to represent 1D weights tensor with dimensions [num_units].
     std::unique_ptr<ScopedCpuTensorHandle> m_ForgetGateBias;
+    /// A unique pointer to represent 1D weights tensor with dimensions [num_units].
     std::unique_ptr<ScopedCpuTensorHandle> m_CellBias;
+    /// A unique pointer to represent 1D weights tensor with dimensions [num_units].
     std::unique_ptr<ScopedCpuTensorHandle> m_OutputGateBias;
 };
 
+/// This layer represents an LSTM operation.
 class LstmLayer : public LayerWithParameters<LstmDescriptor>
 {
 public:
@@ -53,17 +71,38 @@
     LstmOptProjectionParameters m_ProjectionParameters;
     LstmOptPeepholeParameters m_PeepholeParameters;
 
+    /// Makes a workload for the LSTM type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     LstmLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref LstmLayer.
     void ValidateTensorShapesFromInputs() override;
+
+    /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+    /// otherwise infers the output shapes from given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes layer has.
+    /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
 protected:
+    /// Constructor to create an LstmLayer.
+    /// @param [in] param LstmDescriptor to configure the lstm operation.
+    /// @param [in] name Optional name for the layer.
     LstmLayer(const LstmDescriptor& param, const char* name);
+
+    /// Default destructor
     ~LstmLayer() = default;
 
+    /// Retrieve the handles to the constant values stored by the layer.
+    /// @return A vector of the constant tensors stored by this layer.
     Layer::ConstantTensors GetConstantTensorsByRef() override;
 };
 
diff --git a/src/armnn/layers/MaximumLayer.hpp b/src/armnn/layers/MaximumLayer.hpp
index 18a4ed3..82ee6e8 100644
--- a/src/armnn/layers/MaximumLayer.hpp
+++ b/src/armnn/layers/MaximumLayer.hpp
@@ -10,17 +10,27 @@
 namespace armnn
 {
 
+/// This layer represents a maximum operation.
 class MaximumLayer : public ElementwiseBaseLayer
 {
 public:
+    /// Makes a workload for the Maximum type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     MaximumLayer* Clone(Graph& graph) const override;
 
 protected:
+    /// Constructor to create a MaximumLayer.
+    /// @param [in] name Optional name for the layer.
     MaximumLayer(const char* name);
 
+    /// Default destructor
     ~MaximumLayer() = default;
 };
 
diff --git a/src/armnn/layers/MeanLayer.hpp b/src/armnn/layers/MeanLayer.hpp
index ecb9297..3896569 100644
--- a/src/armnn/layers/MeanLayer.hpp
+++ b/src/armnn/layers/MeanLayer.hpp
@@ -10,18 +10,32 @@
 namespace armnn
 {
 
+/// This layer represents a mean operation.
 class MeanLayer : public LayerWithParameters<MeanDescriptor>
 {
 public:
+    /// Makes a workload for the Mean type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     MeanLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref MeanLayer.
     void ValidateTensorShapesFromInputs() override;
 
 protected:
+    /// Constructor to create a MeanLayer.
+    /// @param [in] param MeanDescriptor to configure the mean operation.
+    /// @param [in] name Optional name for the layer.
     MeanLayer(const MeanDescriptor& param, const char* name);
+
+    /// Default destructor
     ~MeanLayer() = default;
 
 };
diff --git a/src/armnn/layers/MemCopyLayer.hpp b/src/armnn/layers/MemCopyLayer.hpp
index 3d79ec3..051b18f 100644
--- a/src/armnn/layers/MemCopyLayer.hpp
+++ b/src/armnn/layers/MemCopyLayer.hpp
@@ -9,18 +9,31 @@
 namespace armnn
 {
 
+/// This layer represents a memory copy operation.
 class MemCopyLayer : public Layer
 {
 public:
-    virtual std::unique_ptr<IWorkload>
-    CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const override;
+    /// Makes a workload for the MemCopy type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     MemCopyLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref MemCopyLayer.
     void ValidateTensorShapesFromInputs() override;
 
 protected:
+    /// Constructor to create a MemCopyLayer.
+    /// @param [in] name Optional name for the layer.
     MemCopyLayer(const char* name);
+
+    /// Default destructor
     ~MemCopyLayer() = default;
 };
 
diff --git a/src/armnn/layers/MergerLayer.hpp b/src/armnn/layers/MergerLayer.hpp
index 02f852c..f0bf62b 100644
--- a/src/armnn/layers/MergerLayer.hpp
+++ b/src/armnn/layers/MergerLayer.hpp
@@ -9,20 +9,44 @@
 namespace armnn
 {
 
+/// This layer represents a merge operation.
 class MergerLayer : public LayerWithParameters<OriginsDescriptor>
 {
 public:
+    /// Makes a workload for the Merger type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
+
+    /// Set the outputs to be appropriate sub tensors of the input if sub tensors are supported
+    /// otherwise creates tensor handlers.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
     virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     MergerLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref MergerLayer.
     void ValidateTensorShapesFromInputs() override;
+
+    /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+    /// otherwise infers the output shapes from given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes layer has.
+    /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
 protected:
+    /// Constructor to create a MergerLayer.
+    /// @param [in] param OriginsDescriptor to configure the merger operation.
+    /// @param [in] name Optional name for the layer.
     MergerLayer(const OriginsDescriptor& param, const char* name);
+
+    /// Default destructor
     ~MergerLayer() = default;
 };
 
diff --git a/src/armnn/layers/MinimumLayer.hpp b/src/armnn/layers/MinimumLayer.hpp
index 4338237..3801c62 100644
--- a/src/armnn/layers/MinimumLayer.hpp
+++ b/src/armnn/layers/MinimumLayer.hpp
@@ -10,16 +10,27 @@
 namespace armnn
 {
 
+/// This layer represents a minimum operation.
 class MinimumLayer : public ElementwiseBaseLayer
 {
 public:
+    /// Makes a workload for the Minimum type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     MinimumLayer* Clone(Graph& graph) const override;
 
 protected:
+    /// Constructor to create a MinimumLayer.
+    /// @param [in] name Optional name for the layer.
     MinimumLayer(const char* name);
+
+    /// Default destructor
     ~MinimumLayer() = default;
 
 };
diff --git a/src/armnn/layers/MultiplicationLayer.hpp b/src/armnn/layers/MultiplicationLayer.hpp
index 8a7bfde..9f15e3a 100644
--- a/src/armnn/layers/MultiplicationLayer.hpp
+++ b/src/armnn/layers/MultiplicationLayer.hpp
@@ -10,16 +10,27 @@
 namespace armnn
 {
 
+/// This layer represents a multiplication operation.
 class MultiplicationLayer : public ElementwiseBaseLayer
 {
 public:
+    /// Makes a workload for the Multiplication type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     MultiplicationLayer* Clone(Graph& graph) const override;
 
 protected:
+    /// Constructor to create a MultiplicationLayer.
+    /// @param [in] name Optional name for the layer.
     MultiplicationLayer(const char* name);
+
+    /// Default destructor
     ~MultiplicationLayer() = default;
 };
 
diff --git a/src/armnn/layers/NormalizationLayer.hpp b/src/armnn/layers/NormalizationLayer.hpp
index b67c0fb..9fd7875 100644
--- a/src/armnn/layers/NormalizationLayer.hpp
+++ b/src/armnn/layers/NormalizationLayer.hpp
@@ -9,18 +9,32 @@
 namespace armnn
 {
 
+/// This layer represents a normalization operation.
 class NormalizationLayer : public LayerWithParameters<NormalizationDescriptor>
 {
 public:
+    /// Makes a workload for the Normalization type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     NormalizationLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref NormalizationLayer.
     void ValidateTensorShapesFromInputs() override;
 
 protected:
+    /// Constructor to create a NormalizationLayer.
+    /// @param [in] param NormalizationDescriptor to configure the normalization operation.
+    /// @param [in] name Optional name for the layer.
     NormalizationLayer(const NormalizationDescriptor& param, const char* name);
+
+    /// Default destructor
     ~NormalizationLayer() = default;
 };
 
diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp
index 9f77e1f..080b301 100644
--- a/src/armnn/layers/OutputLayer.hpp
+++ b/src/armnn/layers/OutputLayer.hpp
@@ -9,22 +9,41 @@
 namespace armnn
 {
 
+/// A layer user-provided data can be bound to (e.g. inputs, outputs).
 class OutputLayer : public BindableLayer
 {
 public:
+    /// Returns nullptr for Output type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
+
+    /// No-op for the Output type: output layers create no tensor handles, so both the
+    /// graph and factory parameters are ignored.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
     virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override
     {
         boost::ignore_unused(graph, factory);
     }
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     OutputLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref OutputLayer.
     void ValidateTensorShapesFromInputs() override;
 
 protected:
+    /// Constructor to create an OutputLayer.
+    /// @param [in] id The layer binding id number.
+    /// @param [in] name Optional name for the layer.
     OutputLayer(LayerBindingId id, const char* name);
+
+    /// Default destructor
     ~OutputLayer() = default;
 };
 
diff --git a/src/armnn/layers/PadLayer.hpp b/src/armnn/layers/PadLayer.hpp
index e53a90c..569365a 100644
--- a/src/armnn/layers/PadLayer.hpp
+++ b/src/armnn/layers/PadLayer.hpp
@@ -10,18 +10,32 @@
 namespace armnn
 {
 
+/// This layer represents a pad operation.
 class PadLayer : public LayerWithParameters<PadDescriptor>
 {
 public:
+    /// Makes a workload for the Pad type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     PadLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref PadLayer.
     void ValidateTensorShapesFromInputs() override;
 
 protected:
+    /// Constructor to create a PadLayer.
+    /// @param [in] param PadDescriptor to configure the pad operation.
+    /// @param [in] name Optional name for the layer.
     PadLayer(const PadDescriptor& param, const char* name);
+
+    /// Default destructor
     ~PadLayer() = default;
 
 };
diff --git a/src/armnn/layers/PermuteLayer.hpp b/src/armnn/layers/PermuteLayer.hpp
index a32307d..e1f391a 100644
--- a/src/armnn/layers/PermuteLayer.hpp
+++ b/src/armnn/layers/PermuteLayer.hpp
@@ -9,28 +9,49 @@
 namespace armnn
 {
 
+/// This layer represents a permutation operation.
 class PermuteLayer : public LayerWithParameters<PermuteDescriptor>
 {
 public:
+    /// Makes a workload for the Permute type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     PermuteLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref PermuteLayer.
     void ValidateTensorShapesFromInputs() override;
+
+    /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+    /// otherwise infers the output shapes from the given input shapes and layer properties.
+    /// @param [in] inputShapes The vector of input shapes for the layer.
+    /// @return A vector containing the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    /// @return A permutation vector that describes the memory layout of the tensor elements.
     const PermutationVector& GetPermutation() const
     {
         return m_Param.m_DimMappings;
     }
 
+    /// Indicates if the given layer is the inverse of this one.
+    /// @param [in] other The other layer to be compared with.
+    /// @return true if the other layer is the inverse of this one, false otherwise.
     bool IsInverse(const Layer& other) const
     {
         return (other.GetType() == LayerType::Permute) &&
             GetPermutation().IsInverse(boost::polymorphic_downcast<const PermuteLayer*>(&other)->GetPermutation());
     }
 
+    /// Indicates if the given layer is equal to this one.
+    /// @param [in] other The other layer to be compared with.
+    /// @return true if the other layer is equal to this one, false otherwise.
     bool IsEqual(const Layer& other) const
     {
         return (other.GetType() == LayerType::Permute) &&
@@ -38,7 +59,12 @@
     }
 
 protected:
+    /// Constructor to create a PermuteLayer.
+    /// @param [in] param PermuteDescriptor to configure the permute operation.
+    /// @param [in] name Optional name for the layer.
     PermuteLayer(const PermuteDescriptor& param, const char* name);
+
+    /// Default destructor
     ~PermuteLayer() = default;
 };
 
diff --git a/src/armnn/layers/Pooling2dLayer.hpp b/src/armnn/layers/Pooling2dLayer.hpp
index 50b8875..ee6a6ac 100644
--- a/src/armnn/layers/Pooling2dLayer.hpp
+++ b/src/armnn/layers/Pooling2dLayer.hpp
@@ -9,19 +9,38 @@
 namespace armnn
 {
 
+/// This layer represents a pooling 2d operation.
 class Pooling2dLayer : public LayerWithParameters<Pooling2dDescriptor>
 {
 public:
+    /// Makes a workload for the Pooling2d type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     Pooling2dLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref Pooling2dLayer.
     void ValidateTensorShapesFromInputs() override;
+
+    /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+    /// otherwise infers the output shapes from the given input shapes and layer properties.
+    /// @param [in] inputShapes The vector of input shapes for the layer.
+    /// @return A vector containing the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
 protected:
+    /// Constructor to create a Pooling2dLayer.
+    /// @param [in] param Pooling2dDescriptor to configure the pooling2d operation.
+    /// @param [in] name Optional name for the layer.
     Pooling2dLayer(const Pooling2dDescriptor& param, const char* name);
+
+    /// Default destructor
     ~Pooling2dLayer() = default;
 };
 
diff --git a/src/armnn/layers/ReshapeLayer.hpp b/src/armnn/layers/ReshapeLayer.hpp
index c3afee3..13e73be 100644
--- a/src/armnn/layers/ReshapeLayer.hpp
+++ b/src/armnn/layers/ReshapeLayer.hpp
@@ -9,17 +9,34 @@
 namespace armnn
 {
 
+/// This layer represents a reshape operation.
 class ReshapeLayer : public LayerWithParameters<ReshapeDescriptor>
 {
 public:
+    /// Makes a workload for the Reshape type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
-        const IWorkloadFactory& factory) const override;
+                                                      const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     ReshapeLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref ReshapeLayer.
     void ValidateTensorShapesFromInputs() override;
+
+    /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+    /// otherwise infers the output shapes from the given input shapes and layer properties.
+    /// @param [in] inputShapes The vector of input shapes for the layer.
+    /// @return A vector containing the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    /// Indicates if the given layer is equal to this one.
+    /// @param [in] other The other layer to be compared with.
+    /// @return true if the other layer is equal to this one, false otherwise.
     bool IsEqual(const Layer& other) const
     {
         return (other.GetType() == LayerType::Reshape) &&
@@ -27,7 +44,12 @@
     }
 
 protected:
+    /// Constructor to create a ReshapeLayer.
+    /// @param [in] param ReshapeDescriptor to configure the reshape operation.
+    /// @param [in] name Optional name for the layer.
     ReshapeLayer(const ReshapeDescriptor& desc, const char* name);
+
+    /// Default destructor
     ~ReshapeLayer() = default;
 };
 
diff --git a/src/armnn/layers/ResizeBilinearLayer.hpp b/src/armnn/layers/ResizeBilinearLayer.hpp
index 2225a29..185d8a5 100644
--- a/src/armnn/layers/ResizeBilinearLayer.hpp
+++ b/src/armnn/layers/ResizeBilinearLayer.hpp
@@ -9,19 +9,38 @@
 namespace armnn
 {
 
+/// This layer represents a resize bilinear operation.
 class ResizeBilinearLayer : public LayerWithParameters<ResizeBilinearDescriptor>
 {
 public:
-    virtual std::unique_ptr<IWorkload>
-        CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const override;
+    /// Makes a workload for the ResizeBilinear type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     ResizeBilinearLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref ResizeBilinearLayer.
     void ValidateTensorShapesFromInputs() override;
+
+    /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+    /// otherwise infers the output shapes from the given input shapes and layer properties.
+    /// @param [in] inputShapes The vector of input shapes for the layer.
+    /// @return A vector containing the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
 protected:
+    /// Constructor to create a ResizeBilinearLayer.
+    /// @param [in] param ResizeBilinearDescriptor to configure the resize bilinear operation.
+    /// @param [in] name Optional name for the layer.
     ResizeBilinearLayer(const ResizeBilinearDescriptor& param, const char* name);
+
+    /// Default destructor
     ~ResizeBilinearLayer() = default;
 };
 
diff --git a/src/armnn/layers/SoftmaxLayer.hpp b/src/armnn/layers/SoftmaxLayer.hpp
index ae15c36..f6bc894 100644
--- a/src/armnn/layers/SoftmaxLayer.hpp
+++ b/src/armnn/layers/SoftmaxLayer.hpp
@@ -9,18 +9,32 @@
 namespace armnn
 {
 
+/// This layer represents a softmax operation.
 class SoftmaxLayer : public LayerWithParameters<SoftmaxDescriptor>
 {
 public:
+    /// Makes a workload for the Softmax type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     SoftmaxLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref SoftmaxLayer.
     void ValidateTensorShapesFromInputs() override;
 
 protected:
+    /// Constructor to create a SoftmaxLayer.
+    /// @param [in] param SoftmaxDescriptor to configure the softmax operation.
+    /// @param [in] name Optional name for the layer.
     SoftmaxLayer(const SoftmaxDescriptor& param, const char* name);
+
+    /// Default destructor
     ~SoftmaxLayer() = default;
 };
 
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.hpp b/src/armnn/layers/SpaceToBatchNdLayer.hpp
index 090fb35..3e91bbd 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.hpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.hpp
@@ -10,20 +10,38 @@
 namespace armnn
 {
 
+/// This layer represents a SpaceToBatchNd operation.
 class SpaceToBatchNdLayer : public LayerWithParameters<SpaceToBatchNdDescriptor>
 {
 public:
+    /// Makes a workload for the SpaceToBatchNd type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     SpaceToBatchNdLayer* Clone(Graph& graph) const override;
 
+    /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+    /// otherwise infers the output shapes from the given input shapes and layer properties.
+    /// @param [in] inputShapes The vector of input shapes for the layer.
+    /// @return A vector containing the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref SpaceToBatchNdLayer.
     void ValidateTensorShapesFromInputs() override;
 
 protected:
+    /// Constructor to create a SpaceToBatchNdLayer.
+    /// @param [in] param SpaceToBatchNdDescriptor to configure the SpaceToBatchNd operation.
+    /// @param [in] name Optional name for the layer.
     SpaceToBatchNdLayer(const SpaceToBatchNdDescriptor param, const char* name);
+
+    /// Default destructor
     ~SpaceToBatchNdLayer() = default;
 };
 
diff --git a/src/armnn/layers/SplitterLayer.hpp b/src/armnn/layers/SplitterLayer.hpp
index 411efde..6a73510 100644
--- a/src/armnn/layers/SplitterLayer.hpp
+++ b/src/armnn/layers/SplitterLayer.hpp
@@ -9,20 +9,44 @@
 namespace armnn
 {
 
+/// This layer represents a split operation.
 class SplitterLayer : public LayerWithParameters<ViewsDescriptor>
 {
 public:
+    /// Makes a workload for the Splitter type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
+
+    /// Sets the outputs to be appropriate sub tensors of the input if sub tensors are supported,
+    /// otherwise creates tensor handles by default.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
     virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     SplitterLayer* Clone(Graph& graph) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref SplitterLayer.
     void ValidateTensorShapesFromInputs() override;
+
+    /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+    /// otherwise infers the output shapes from the given input shapes and layer properties.
+    /// @param [in] inputShapes The vector of input shapes for the layer.
+    /// @return A vector containing the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
 protected:
+    /// Constructor to create a SplitterLayer.
+    /// @param [in] param ViewsDescriptor to configure the splitter operation.
+    /// @param [in] name Optional name for the layer.
     SplitterLayer(const ViewsDescriptor& param, const char* name);
+
+    /// Default destructor
     ~SplitterLayer() = default;
 };
 
diff --git a/src/armnn/layers/StridedSliceLayer.hpp b/src/armnn/layers/StridedSliceLayer.hpp
index c3aad53..761dc5e 100644
--- a/src/armnn/layers/StridedSliceLayer.hpp
+++ b/src/armnn/layers/StridedSliceLayer.hpp
@@ -9,20 +9,38 @@
 namespace armnn
 {
 
+/// This layer represents a strided slice operation.
 class StridedSliceLayer : public LayerWithParameters<StridedSliceDescriptor>
 {
 public:
+    /// Makes a workload for the StridedSlice type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     StridedSliceLayer* Clone(Graph& graph) const override;
 
+    /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+    /// otherwise infers the output shapes from the given input shapes and layer properties.
+    /// @param [in] inputShapes The vector of input shapes for the layer.
+    /// @return A vector containing the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref StridedSliceLayer.
     void ValidateTensorShapesFromInputs() override;
 
 protected:
+    /// Constructor to create a StridedSliceLayer.
+    /// @param [in] param StridedSliceDescriptor to configure the strided slice layer.
+    /// @param [in] name Optional name for the layer.
     StridedSliceLayer(const StridedSliceDescriptor& param, const char* name);
+
+    /// Default destructor
     ~StridedSliceLayer() = default;
 };
 
diff --git a/src/armnn/layers/SubtractionLayer.hpp b/src/armnn/layers/SubtractionLayer.hpp
index d1bccfe..15d5684 100644
--- a/src/armnn/layers/SubtractionLayer.hpp
+++ b/src/armnn/layers/SubtractionLayer.hpp
@@ -10,16 +10,27 @@
 namespace armnn
 {
 
+/// This layer represents a subtraction operation.
 class SubtractionLayer : public ElementwiseBaseLayer
 {
 public:
+    /// Makes a workload for the Subtraction type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
                                                       const IWorkloadFactory& factory) const override;
 
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
     SubtractionLayer* Clone(Graph& graph) const override;
 
 protected:
+    /// Constructor to create a SubtractionLayer.
+    /// @param [in] name Optional name for the layer.
     SubtractionLayer(const char* name);
+
+    /// Default destructor
     ~SubtractionLayer() = default;
 };