COMPMID-417: Add grouping in convolution layer

-Adds grouping support in convolution layer
-Adds Normalization layer node in graph
-Adds alexnet example
-Fixes FullyConnectedLayer output autoconfigure (works only for 1D batch space)
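
For reference, grouped convolution partitions the input channels into num_groups
independent convolutions: each group sees input_depth / num_groups input channels
and produces num_kernels / num_groups output maps. A minimal standalone sketch of
that channel bookkeeping, with illustrative names (this is not the library API):

    #include <cassert>
    #include <cstddef>

    // Per-group channel split performed by a grouped convolution. The divisibility
    // checks mirror the ARM_COMPUTE_ERROR_ON_MSG assertions added in this patch.
    struct GroupSplit
    {
        std::size_t input_channels_per_group;
        std::size_t kernels_per_group;
    };

    GroupSplit split_for_groups(std::size_t input_depth, std::size_t num_kernels, std::size_t num_groups)
    {
        assert(input_depth % num_groups == 0);
        assert(num_kernels % num_groups == 0);
        return { input_depth / num_groups, num_kernels / num_groups };
    }

    // Example: an AlexNet conv2-style layer with 96 input channels, 256 kernels
    // and 2 groups gives each group 48 input channels and 128 kernels.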

Change-Id: I5bd75f9a8b08cfd68f7c34745150266c2bc4221f
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/89518
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
diff --git a/src/graph/nodes/ConvolutionLayer.cpp b/src/graph/nodes/ConvolutionLayer.cpp
index b80bf93..ce9f096 100644
--- a/src/graph/nodes/ConvolutionLayer.cpp
+++ b/src/graph/nodes/ConvolutionLayer.cpp
@@ -24,60 +24,155 @@
 #include "arm_compute/graph/nodes/ConvolutionLayer.h"
 
 #include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
+#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
+#include "arm_compute/runtime/IFunction.h"
 #include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
 #include "support/ToolchainSupport.h"
+#include "utils/GraphTypePrinter.h"
 #include "utils/TypePrinter.h"
 
+#include <tuple>
+#include <vector>
+
 using namespace arm_compute::graph;
 
 namespace
 {
-template <typename ConvolutionType, typename TensorType, Hint hint>
-std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
+/** Calculates the output shape of the convolution layer
+ *
+ * @param[in] input_shape   Input tensor shape
+ * @param[in] weights_shape Weights shape
+ * @param[in] conv_info     Convolution information (padding, stride, etc.)
+ *
+ * @return The expected output tensor shape
+ */
+TensorShape calculate_convolution_layer_output_shape(const TensorShape &input_shape, const TensorShape &weights_shape, const PadStrideInfo &conv_info)
 {
-    bool weights_are_loaded = weights.tensor() != nullptr;
-    bool biases_are_loaded  = biases.tensor() != nullptr;
+    unsigned int output_width  = 0;
+    unsigned int output_height = 0;
 
+    // Get output width and height
+    std::tie(output_width, output_height) = arm_compute::scaled_dimensions(input_shape.x(), input_shape.y(), weights_shape.x(), weights_shape.y(), conv_info);
+
+    // Create output shape
+    TensorShape output_shape = input_shape;
+    output_shape.set(0, output_width);
+    output_shape.set(1, output_height);
+    output_shape.set(2, weights_shape[3]);
+
+    return output_shape;
+}
+
+// Instantiate GEMM-based convolution layer
+template <typename ConvolutionType, typename TensorType, Hint hint>
+std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
+{
     auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
     conv->configure(
         dynamic_cast<TensorType *>(input),
-        dynamic_cast<TensorType *>(weights.set_target(hint)),
-        dynamic_cast<TensorType *>(biases.set_target(hint)),
+        dynamic_cast<TensorType *>(weights),
+        dynamic_cast<TensorType *>(biases),
         dynamic_cast<TensorType *>(output),
         conv_info, weights_info);
-    if(!weights_are_loaded)
-    {
-        weights.allocate_and_fill_if_needed();
-    }
-    if(!biases_are_loaded)
-    {
-        biases.allocate_and_fill_if_needed();
-    }
+    return std::move(conv);
+}
 
+// Instantiate direct convolution layer
+template <typename ConvolutionType, typename TensorType, Hint hint>
+std::unique_ptr<arm_compute::IFunction> instantiate_direct_function(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info)
+{
+    auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
+    conv->configure(
+        dynamic_cast<TensorType *>(input),
+        dynamic_cast<TensorType *>(weights),
+        dynamic_cast<TensorType *>(biases),
+        dynamic_cast<TensorType *>(output),
+        conv_info);
     return std::move(conv);
 }
 
 template <Hint                          hint>
-std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info);
+std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
+                                                    ConvolutionMethodHint conv_method);
 
 template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
+                                                                  ConvolutionMethodHint conv_method)
 {
-    return instantiate_function<arm_compute::CLConvolutionLayer, arm_compute::CLTensor, Hint::OPENCL>(input, weights, biases, output, conv_info, weights_info);
+    if(conv_method == ConvolutionMethodHint::GEMM)
+    {
+        return instantiate_function<arm_compute::CLConvolutionLayer, arm_compute::ICLTensor, Hint::OPENCL>(input, weights, biases, output, conv_info, weights_info);
+    }
+    else
+    {
+        return instantiate_direct_function<arm_compute::CLDirectConvolutionLayer, arm_compute::ICLTensor, Hint::OPENCL>(input, weights, biases, output, conv_info);
+    }
 }
 
 template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
+                                                                ConvolutionMethodHint conv_method)
 {
-    return instantiate_function<arm_compute::NEConvolutionLayer, arm_compute::Tensor, Hint::NEON>(input, weights, biases, output, conv_info, weights_info);
+    if(conv_method == ConvolutionMethodHint::GEMM)
+    {
+        return instantiate_function<arm_compute::NEConvolutionLayer, arm_compute::ITensor, Hint::NEON>(input, weights, biases, output, conv_info, weights_info);
+    }
+    else
+    {
+        return instantiate_direct_function<arm_compute::NEDirectConvolutionLayer, arm_compute::ITensor, Hint::NEON>(input, weights, biases, output, conv_info);
+    }
 }
 } // namespace
 
+/** Grouped Convolution function */
+class GroupedConvolutionFunction final : public arm_compute::IFunction
+{
+public:
+    /** Default Constructor */
+    GroupedConvolutionFunction()
+        : _convolutions()
+    {
+    }
+    /** Default Destructor */
+    ~GroupedConvolutionFunction() final = default;
+    /** Prevent instances from being copy constructed */
+    GroupedConvolutionFunction(const GroupedConvolutionFunction &) = delete;
+    /** Prevent instances from being copy assigned */
+    GroupedConvolutionFunction &operator=(const GroupedConvolutionFunction &) = delete;
+    /** Allow instances to be move constructed */
+    GroupedConvolutionFunction(GroupedConvolutionFunction &&) noexcept = default;
+    /** Allow instances to be move assigned */
+    GroupedConvolutionFunction &operator=(GroupedConvolutionFunction &&) noexcept = default;
+    /** Adds a convolution
+     *
+     * @param[in] convolution Convolution function to add
+     */
+    void add_convolution_function(std::unique_ptr<IFunction> convolution)
+    {
+        _convolutions.emplace_back(std::move(convolution));
+    }
+
+    // Inherited methods overridden:
+    void run() override
+    {
+        for(auto &c : _convolutions)
+        {
+            c->run();
+        }
+    }
+
+private:
+    std::vector<std::unique_ptr<IFunction>> _convolutions;
+};
+
 std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
 {
+    // Set weights and biases info
     if(_weights.tensor() == nullptr)
     {
-        _weights.set_info(TensorInfo(TensorShape(_conv_width, _conv_height, input->info()->dimension(2), _ofm), input->info()->num_channels(), input->info()->data_type(),
+        _weights.set_info(TensorInfo(TensorShape(_conv_width, _conv_height, input->info()->dimension(2) / _num_groups, _ofm),
+                                     input->info()->num_channels(), input->info()->data_type(),
                                      input->info()->fixed_point_position()));
     }
     if(_biases.tensor() == nullptr)
@@ -90,13 +185,40 @@
     _input  = input;
     _output = output;
 
-    if(_hint == Hint::OPENCL)
+    // Check if the weights and biases are loaded
+    bool weights_are_loaded = _weights.tensor() != nullptr;
+    bool biases_are_loaded  = _biases.tensor() != nullptr;
+
+    // Set bias and weights target
+    _weights.set_target(_hint);
+    _biases.set_target(_hint);
+
+    // Calculate output shape
+    TensorShape output_shape = calculate_convolution_layer_output_shape(_input->info()->tensor_shape(), _weights.info().tensor_shape(), _conv_info);
+
+    // Output auto initialization if not yet initialized
+    arm_compute::auto_init_if_empty(*_output->info(), output_shape, 1, _input->info()->data_type(), _input->info()->fixed_point_position());
+
+    // Create appropriate convolution function
+    // TODO(geopin01): Fix convolution layer hints once the GraphContext has been added
+    if(_num_groups == 1)
     {
-        func = instantiate<Hint::OPENCL>(input, _weights, _biases, output, _conv_info, _weights_info);
+        func = instantiate_convolution(ConvolutionMethodHint::GEMM);
     }
     else
     {
-        func = instantiate<Hint::NEON>(input, _weights, _biases, output, _conv_info, _weights_info);
+        func = instantiate_grouped_convolution(ConvolutionMethodHint::GEMM);
+    }
+
+    // Fill weights
+    if(!weights_are_loaded)
+    {
+        _weights.allocate_and_fill_if_needed();
+    }
+    // Fill biases
+    if(!biases_are_loaded)
+    {
+        _biases.allocate_and_fill_if_needed();
     }
 
     return func;
@@ -112,6 +234,97 @@
     {
         std::cout << "Instantiating NEConvolutionLayer";
     }
-    std::cout << " Type: " << _input->info()->data_type() << " Input Shape: " << _input->info()->tensor_shape() << " Weights shape: " << _weights.info().tensor_shape() << " Biases Shape: " <<
-              _biases.info().tensor_shape() << " Output Shape: " << _output->info()->tensor_shape() << " PadStrideInfo: " << _conv_info << "WeightsInfo: " << _weights_info << std::endl;
+    std::cout << " Data Type: " << _input->info()->data_type()
+              << " Input Shape: " << _input->info()->tensor_shape()
+              << " Weights Shape: " << _weights.info().tensor_shape()
+              << " Biases Shape: " << _biases.info().tensor_shape()
+              << " Output Shape: " << _output->info()->tensor_shape()
+              << " PadStrideInfo: " << _conv_info
+              << " Groups: " << _num_groups
+              << " WeightsInfo: " << _weights_info
+              << std::endl;
+}
+
+std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_convolution(ConvolutionMethodHint conv_method_hint)
+{
+    std::unique_ptr<arm_compute::IFunction> func;
+    if(_hint == Hint::OPENCL)
+    {
+        func = instantiate<Hint::OPENCL>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
+    }
+    else
+    {
+        func = instantiate<Hint::NEON>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
+    }
+    return func;
+}
+
+std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_grouped_convolution(ConvolutionMethodHint conv_method_hint)
+{
+    // Get tensor shapes
+    TensorShape input_shape   = _input->info()->tensor_shape();
+    TensorShape output_shape  = _output->info()->tensor_shape();
+    TensorShape weights_shape = _weights.info().tensor_shape();
+    TensorShape biases_shape  = _biases.info().tensor_shape();
+
+    ARM_COMPUTE_ERROR_ON_MSG((input_shape.z() % _num_groups) != 0, "Input depth is not a multiple of the number of groups!");
+    ARM_COMPUTE_ERROR_ON_MSG((output_shape.z() % _num_groups) != 0, "Output depth is not a multiple of the number of groups!");
+    ARM_COMPUTE_ERROR_ON_MSG((weights_shape[3] % _num_groups) != 0, "Number of kernels is not a multiple of the number of groups!");
+    ARM_COMPUTE_ERROR_ON_MSG((biases_shape.x() % _num_groups) != 0, "Number of biases is not a multiple of the number of groups!");
+
+    // Create a grouped convolution function
+    auto grouped_conv = arm_compute::support::cpp14::make_unique<GroupedConvolutionFunction>();
+
+    // Create per-group sub-tensor arrays
+    _is = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
+    _os = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
+    _ws = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
+    _bs = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
+
+    // Calculate sub-tensor splits
+    const int input_split   = input_shape.z() / _num_groups;
+    const int output_split  = output_shape.z() / _num_groups;
+    const int weights_split = weights_shape[3] / _num_groups;
+    const int biases_split  = biases_shape.x() / _num_groups;
+
+    // Calculate sub-tensor shapes
+    input_shape.set(2, input_split);
+    output_shape.set(2, output_split);
+    weights_shape.set(3, weights_split);
+    biases_shape.set(0, biases_split);
+
+    // Configure sub-tensors
+    for(int i = 0; i < static_cast<int>(_num_groups); ++i)
+    {
+        // Create convolution function
+        std::unique_ptr<arm_compute::IFunction> func;
+
+        // Calculate sub-tensor starting coordinates
+        Coordinates input_coord(0, 0, input_split * i);
+        Coordinates output_coord(0, 0, output_split * i);
+        Coordinates weights_coord(0, 0, 0, weights_split * i);
+        Coordinates biases_coord(biases_split * i);
+
+        // Create sub-tensors for input, output, weights and biases
+        auto hint_to_use = (_hint == Hint::OPENCL) ? Hint::OPENCL : Hint::NEON;
+        _is[i]           = SubTensor(_input, input_shape, input_coord, hint_to_use);
+        _os[i]           = SubTensor(_output, output_shape, output_coord, hint_to_use);
+        _ws[i]           = SubTensor(_weights.tensor(), weights_shape, weights_coord, hint_to_use);
+        _bs[i]           = SubTensor(_biases.tensor(), biases_shape, biases_coord, hint_to_use);
+
+        // Instantiate convolution function
+        if(_hint == Hint::OPENCL)
+        {
+            func = instantiate<Hint::OPENCL>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
+        }
+        else
+        {
+            func = instantiate<Hint::NEON>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
+        }
+
+        // Add convolution function to the list of convolutions for the grouped convolution
+        grouped_conv->add_convolution_function(std::move(func));
+    }
+
+    return std::move(grouped_conv);
 }
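
The new calculate_convolution_layer_output_shape helper delegates the spatial
arithmetic to arm_compute::scaled_dimensions. Assuming floor rounding, that
corresponds to the standard convolution output-size formula; a self-contained
sketch of the arithmetic (an illustration, not the library implementation):

    #include <utility>

    // Standard convolution output-size arithmetic with floor rounding:
    // out = (in + 2 * pad - kernel) / stride + 1 (integer division).
    std::pair<unsigned int, unsigned int> conv_output_dims(unsigned int in_w, unsigned int in_h,
                                                           unsigned int kernel_w, unsigned int kernel_h,
                                                           unsigned int stride_x, unsigned int stride_y,
                                                           unsigned int pad_x, unsigned int pad_y)
    {
        const unsigned int out_w = (in_w + 2 * pad_x - kernel_w) / stride_x + 1;
        const unsigned int out_h = (in_h + 2 * pad_y - kernel_h) / stride_y + 1;
        return std::make_pair(out_w, out_h);
    }

    // Example: a 227x227 input, 11x11 kernel, stride 4, no padding (AlexNet conv1)
    // gives (227 - 11) / 4 + 1 = 55, i.e. a 55x55xN output.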
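
instantiate_grouped_convolution wires each per-group function to z-slices of the
input and output tensors and to a slice of the kernels along the weights' fourth
dimension. A small sketch of the starting coordinates it computes, using
hypothetical example values (printed only for illustration):

    #include <cstdio>

    int main()
    {
        // Offsets as computed in instantiate_grouped_convolution for an
        // AlexNet-conv2-like layer: 96 input channels, 256 kernels, 2 groups.
        const int num_groups = 2, input_depth = 96, num_kernels = 256;
        const int input_split  = input_depth / num_groups;  // z-extent of each input slice (48)
        const int kernel_split = num_kernels / num_groups;  // kernels per group (128)
        for(int i = 0; i < num_groups; ++i)
        {
            std::printf("group %d: input z = %d, weights [3] = %d, biases x = %d, output z = %d\n",
                        i, input_split * i, kernel_split * i, kernel_split * i, kernel_split * i);
        }
        return 0;
    }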