IVGCVSW-3705 Add Channel Shuffle Front end and Ref Implementation

* Add front end
* Add reference workload
* Add unit tests
* Add Serializer and Deserializer
* Update ArmNN Versioning

Signed-off-by: Simon Obute <simon.obute@arm.com>
Change-Id: I9ac1f953af3974382eac8e8d62d794d2344e8f47
diff --git a/Android.mk b/Android.mk
index d3f1dcf..69fe9ee 100644
--- a/Android.mk
+++ b/Android.mk
@@ -156,6 +156,7 @@
         src/armnn/layers/BatchNormalizationLayer.cpp \
         src/armnn/layers/BatchToSpaceNdLayer.cpp \
         src/armnn/layers/CastLayer.cpp \
+        src/armnn/layers/ChannelShuffleLayer.cpp \
         src/armnn/layers/ComparisonLayer.cpp \
         src/armnn/layers/ConcatLayer.cpp \
         src/armnn/layers/ConstantLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 69a6827..67f8997 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -221,6 +221,8 @@
     src/armnn/layers/BatchToSpaceNdLayer.cpp
     src/armnn/layers/CastLayer.hpp
     src/armnn/layers/CastLayer.cpp
+    src/armnn/layers/ChannelShuffleLayer.hpp
+    src/armnn/layers/ChannelShuffleLayer.cpp
     src/armnn/layers/ComparisonLayer.hpp
     src/armnn/layers/ComparisonLayer.cpp
     src/armnn/layers/ConcatLayer.hpp
@@ -785,6 +787,7 @@
             src/armnnDeserializer/test/DeserializeBatchToSpaceNd.cpp
             src/armnnDeserializer/test/DeserializeBatchNormalization.cpp
             src/armnnDeserializer/test/DeserializeCast.cpp
+            src/armnnDeserializer/test/DeserializeChannelShuffle.cpp
             src/armnnDeserializer/test/DeserializeComparison.cpp
             src/armnnDeserializer/test/DeserializeConstant.cpp
             src/armnnDeserializer/test/DeserializeConvolution2d.cpp
diff --git a/InstallationViaAptRepository.md b/InstallationViaAptRepository.md
index 5693841..93eb56e 100644
--- a/InstallationViaAptRepository.md
+++ b/InstallationViaAptRepository.md
@@ -117,7 +117,7 @@
  sudo apt-get install -y python3-pyarmnn armnn-latest-all
  # Verify installation via python:
  python3 -c "import pyarmnn as ann;print(ann.GetVersion())"
- # Returns '{ARMNN_MAJOR_VERSION}.0.0' e.g. 26.0.0
+ # Returns '{ARMNN_MAJOR_VERSION}.0.0' e.g. 27.0.0
 ```
 This will install PyArmNN and the three backends for Neon (CpuAcc), OpenCL (GpuAcc) and our Reference Backend.
 It will also install their dependencies including the arm-compute-library package along with the Tensorflow Lite Parser
@@ -168,7 +168,7 @@
 
 
  # Export the ARMNN_MAJOR_VERSION to the latest visible e.g. libarmnn25 to allow installation using the below examples
- export ARMNN_MAJOR_VERSION=26
+ export ARMNN_MAJOR_VERSION=27
 
   # As the Tensorflow Lite Parser is now ABI stable it will have a different version to ARMNN_MAJOR_VERSION please choose latest version:
   apt-cache search libarmnntfliteparser
diff --git a/delegate/include/Version.hpp b/delegate/include/Version.hpp
index 8e37c7a..c66bf16 100644
--- a/delegate/include/Version.hpp
+++ b/delegate/include/Version.hpp
@@ -14,7 +14,7 @@
 
 // ArmNN Delegate version components
 #define DELEGATE_MAJOR_VERSION 24
-#define DELEGATE_MINOR_VERSION 2
+#define DELEGATE_MINOR_VERSION 3
 #define DELEGATE_PATCH_VERSION 0
 
 /// DELEGATE_VERSION: "X.Y.Z"
diff --git a/docs/01_02_deserializer_serializer.dox b/docs/01_02_deserializer_serializer.dox
index 81b4ad0..55259dd 100644
--- a/docs/01_02_deserializer_serializer.dox
+++ b/docs/01_02_deserializer_serializer.dox
@@ -25,6 +25,7 @@
 - BatchToSpaceNd
 - BatchNormalization
 - Cast
+- ChannelShuffle
 - Comparison
 - Concat
 - Constant
@@ -113,6 +114,7 @@
 - BatchToSpaceNd
 - BatchNormalization
 - Cast
+- ChannelShuffle
 - Concat
 - Comparison
 - Constant
diff --git a/include/armnn/BackendHelper.hpp b/include/armnn/BackendHelper.hpp
index dee3b48..e3478a7 100644
--- a/include/armnn/BackendHelper.hpp
+++ b/include/armnn/BackendHelper.hpp
@@ -66,6 +66,11 @@
                          const TensorInfo& output,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
+    bool IsChannelShuffleSupported(const TensorInfo& input,
+                                   const TensorInfo& output,
+                                   const ChannelShuffleDescriptor& descriptor,
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
     bool IsComparisonSupported(const TensorInfo& input0,
                                const TensorInfo& input1,
                                const TensorInfo& output,
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 341dbec..d571f22 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -1342,4 +1342,26 @@
     ReduceOperation m_ReduceOperation;
 };
 
+/// A ChannelShuffleDescriptor for the ChannelShuffle operator
+struct ChannelShuffleDescriptor : BaseDescriptor
+{
+    ChannelShuffleDescriptor()
+        : m_NumGroups(0), m_Axis(0)
+    {}
+
+    ChannelShuffleDescriptor(uint32_t numGroups, uint32_t axis)
+        : m_NumGroups(numGroups), m_Axis(axis)
+    {}
+
+    bool operator ==(const ChannelShuffleDescriptor& rhs) const
+    {
+        return m_NumGroups == rhs.m_NumGroups && m_Axis == rhs.m_Axis;
+    }
+
+    /// Number of groups for the channel shuffle operation
+    uint32_t m_NumGroups;
+    /// Axis to apply channel shuffle operation on
+    uint32_t m_Axis;
+};
+
 } // namespace armnn
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index 3b43c42..396b728 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -13,6 +13,7 @@
 struct ArgMinMaxDescriptor;
 struct BatchNormalizationDescriptor;
 struct BatchToSpaceNdDescriptor;
+struct ChannelShuffleDescriptor;
 struct ComparisonDescriptor;
 struct Convolution2dDescriptor;
 struct DepthwiseConvolution2dDescriptor;
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 3bbc406..37aeaf4 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -704,6 +704,13 @@
                                                           const LstmInputParams& params,
                                                           const char* name = nullptr);
 
+    /// Add a ChannelShuffle layer to the network
+    /// @param descriptor - Parameters for the ChannelShuffle operation
+    /// @param name - Optional name for the layer
+    /// @return - Interface for configuring the layer
+    IConnectableLayer* AddChannelShuffleLayer(const ChannelShuffleDescriptor& descriptor,
+                                              const char* name = nullptr);
+
     void Accept(ILayerVisitor& visitor) const;
 
     void ExecuteStrategy(IStrategy& strategy) const;
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index c3b439a..2fab6b4 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -421,7 +421,7 @@
     X(Cast) \
     X(Shape) \
     X(UnidirectionalSequenceLstm) \
-
+    X(ChannelShuffle) \
 // New layers should be added at last to minimize instability.
 
 /// When adding a new layer, adapt also the LastLayer enum value in the
diff --git a/include/armnn/Version.hpp b/include/armnn/Version.hpp
index 5347097..3a5b568 100644
--- a/include/armnn/Version.hpp
+++ b/include/armnn/Version.hpp
@@ -10,7 +10,7 @@
 #define STRINGIFY_MACRO(s) #s
 
 // ArmNN version components
-#define ARMNN_MAJOR_VERSION 26
+#define ARMNN_MAJOR_VERSION 27
 #define ARMNN_MINOR_VERSION 0
 #define ARMNN_PATCH_VERSION 0
 
diff --git a/include/armnn/backends/ILayerSupport.hpp b/include/armnn/backends/ILayerSupport.hpp
index 7ba565a..f511ee4 100644
--- a/include/armnn/backends/ILayerSupport.hpp
+++ b/include/armnn/backends/ILayerSupport.hpp
@@ -64,6 +64,11 @@
                                  const TensorInfo& output,
                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
+    virtual bool IsChannelShuffleSupported(const TensorInfo& input,
+                                           const TensorInfo& output,
+                                           const ChannelShuffleDescriptor& descriptor,
+                                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
     virtual bool IsComparisonSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
diff --git a/include/armnnOnnxParser/Version.hpp b/include/armnnOnnxParser/Version.hpp
index 78b4b04..da3e392 100644
--- a/include/armnnOnnxParser/Version.hpp
+++ b/include/armnnOnnxParser/Version.hpp
@@ -14,7 +14,7 @@
 
 // OnnxParser version components
 #define ONNX_PARSER_MAJOR_VERSION 24
-#define ONNX_PARSER_MINOR_VERSION 2
+#define ONNX_PARSER_MINOR_VERSION 3
 #define ONNX_PARSER_PATCH_VERSION 0
 
 /// ONNX_PARSER_VERSION: "X.Y.Z"
diff --git a/include/armnnTfLiteParser/Version.hpp b/include/armnnTfLiteParser/Version.hpp
index c781b58..b0490ce 100644
--- a/include/armnnTfLiteParser/Version.hpp
+++ b/include/armnnTfLiteParser/Version.hpp
@@ -14,7 +14,7 @@
 
 // TfLiteParser version components
 #define TFLITE_PARSER_MAJOR_VERSION 24
-#define TFLITE_PARSER_MINOR_VERSION 2
+#define TFLITE_PARSER_MINOR_VERSION 3
 #define TFLITE_PARSER_PATCH_VERSION 0
 
 /// TFLITE_PARSER_VERSION: "X.Y.Z"
diff --git a/python/pyarmnn/README.md b/python/pyarmnn/README.md
index a610624..0c88ccc 100644
--- a/python/pyarmnn/README.md
+++ b/python/pyarmnn/README.md
@@ -91,14 +91,14 @@
 ```bash
 $ python setup.py sdist
 ```
-As the result you will get `./dist/pyarmnn-26.0.0.tar.gz` file. As you can see it is platform independent.
+As the result you will get `./dist/pyarmnn-27.0.0.tar.gz` file. As you can see it is platform independent.
 
 ##### 5. Build the binary package
 
 ```bash
 $ python setup.py bdist_wheel
 ```
-As the result you will get something like `./dist/pyarmnn-26.0.0-cp36-cp36m-linux_x86_64.whl` file. As you can see it
+As the result you will get something like `./dist/pyarmnn-27.0.0-cp36-cp36m-linux_x86_64.whl` file. As you can see it
  is platform dependent.
 
 # PyArmNN installation
@@ -107,8 +107,8 @@
 
 Binary package is platform dependent, the name of the package will indicate the platform it was built for, e.g.:
 
-* Linux x86 64bit machine: pyarmnn-26.0.0-cp36-cp36m-*linux_x86_64*.whl
-* Linux Aarch 64 bit machine: pyarmnn-26.0.0-cp36-cp36m-*linux_aarch64*.whl
+* Linux x86 64bit machine: pyarmnn-27.0.0-cp36-cp36m-*linux_x86_64*.whl
+* Linux Aarch 64 bit machine: pyarmnn-27.0.0-cp36-cp36m-*linux_aarch64*.whl
 
 The source package is platform independent but installation involves compilation of Arm NN python extension. You will need to have g++ compatible with C++ 14 standard and a python development library installed on the build machine.
 
@@ -126,7 +126,7 @@
 ```
 Install PyArmNN from binary by pointing to the wheel file:
 ```bash
-$ pip install /path/to/pyarmnn-26.0.0-cp36-cp36m-linux_aarch64.whl
+$ pip install /path/to/pyarmnn-27.0.0-cp36-cp36m-linux_aarch64.whl
 ```
 
 ## Installing from source package
@@ -143,7 +143,7 @@
 
 Install PyArmNN as follows:
 ```bash
-$ pip install /path/to/pyarmnn-26.0.0.tar.gz
+$ pip install /path/to/pyarmnn-27.0.0.tar.gz
 ```
 
 If PyArmNN installation script fails to find Arm NN libraries it will raise an error like this
@@ -157,7 +157,7 @@
 You can also verify it by running the following and getting output similar to below:
 ```bash
 $ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'26.0.0'
+'27.0.0'
 ```
 
 # PyArmNN API overview
diff --git a/python/pyarmnn/examples/image_classification/README.md b/python/pyarmnn/examples/image_classification/README.md
index ea8f4c3..c7dbc6f 100644
--- a/python/pyarmnn/examples/image_classification/README.md
+++ b/python/pyarmnn/examples/image_classification/README.md
@@ -20,7 +20,7 @@
 You can also verify it by running the following and getting output similar to below:

 ```bash

 $ python -c "import pyarmnn as ann;print(ann.GetVersion())"

-'26.0.0'

+'27.0.0'

 ```

 

 ##### Dependencies

diff --git a/python/pyarmnn/examples/object_detection/README.md b/python/pyarmnn/examples/object_detection/README.md
index 97bb164..1e3bbc4 100644
--- a/python/pyarmnn/examples/object_detection/README.md
+++ b/python/pyarmnn/examples/object_detection/README.md
@@ -23,7 +23,7 @@
 You can also verify it by running the following and getting output similar to below:
 ```bash
 $ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'26.0.0'
+'27.0.0'
 ```
 
 ##### Dependencies
diff --git a/python/pyarmnn/examples/speech_recognition/README.md b/python/pyarmnn/examples/speech_recognition/README.md
index cad4126..7d6a4fc 100644
--- a/python/pyarmnn/examples/speech_recognition/README.md
+++ b/python/pyarmnn/examples/speech_recognition/README.md
@@ -18,7 +18,7 @@
 
 ```bash
 $ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'26.0.0'
+'27.0.0'
 ```
 
 ### Dependencies
diff --git a/python/pyarmnn/src/pyarmnn/_version.py b/python/pyarmnn/src/pyarmnn/_version.py
index 587efc4..803a66f 100644
--- a/python/pyarmnn/src/pyarmnn/_version.py
+++ b/python/pyarmnn/src/pyarmnn/_version.py
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: MIT
 import os
 
-version_info = (26, 0, 0)
+version_info = (27, 0, 0)
 
 __dev_version_env = os.getenv("PYARMNN_DEV_VER", "")
 
@@ -24,7 +24,7 @@
     """Compares expected Arm NN version and Arm NN version used to build the package.
 
     Args:
-        installed_armnn_version (str): Arm NN version used to generate the package (e.g. 26.0.0)
+        installed_armnn_version (str): Arm NN version used to generate the package (e.g. 27.0.0)
         expected_armnn_version (str): Expected Arm NN version
 
     Returns:
diff --git a/python/pyarmnn/test/test_setup.py b/python/pyarmnn/test/test_setup.py
index 8f1d81c..90eb76e 100644
--- a/python/pyarmnn/test/test_setup.py
+++ b/python/pyarmnn/test/test_setup.py
@@ -87,15 +87,15 @@
 
 
 def test_armnn_version():
-    check_armnn_version('26.0.0', '26.0.0')
+    check_armnn_version('27.0.0', '27.0.0')
 
 
 def test_incorrect_armnn_version():
     with pytest.raises(AssertionError) as err:
-        check_armnn_version('26.0.0', '26.1.0')
+        check_armnn_version('27.0.0', '27.1.0')
 
-    assert 'Expected ArmNN version is 26.1.0 but installed ArmNN version is 26.0.0' in str(err.value)
+    assert 'Expected ArmNN version is 27.1.0 but installed ArmNN version is 27.0.0' in str(err.value)
 
 
 def test_armnn_version_patch_does_not_matter():
-    check_armnn_version('26.0.0', '26.0.1')
+    check_armnn_version('27.0.0', '27.0.1')
diff --git a/python/pyarmnn/test/test_version.py b/python/pyarmnn/test/test_version.py
index 9c21e82..2bbcc05 100644
--- a/python/pyarmnn/test/test_version.py
+++ b/python/pyarmnn/test/test_version.py
@@ -18,7 +18,7 @@
 
     importlib.reload(v)
 
-    assert "26.0.0.dev1" == v.__version__
+    assert "27.0.0.dev1" == v.__version__
 
     del os.environ["PYARMNN_DEV_VER"]
     del v
@@ -30,7 +30,7 @@
 
     importlib.reload(v)
 
-    assert "26.0.0" == v.__arm_ml_version__
+    assert "27.0.0" == v.__arm_ml_version__
 
     del os.environ["PYARMNN_DEV_VER"]
     del v
diff --git a/samples/ObjectDetection/Readme.md b/samples/ObjectDetection/Readme.md
index 613a8ac..95c6b73 100644
--- a/samples/ObjectDetection/Readme.md
+++ b/samples/ObjectDetection/Readme.md
@@ -168,10 +168,10 @@
 The full list of libs after cross-compilation to copy on your board:
 ```
 libarmnn.so
-libarmnn.so.26
-libarmnn.so.26.0
+libarmnn.so.27
+libarmnn.so.27.0
 libarmnnTfLiteParser.so
-libarmnnTfLiteParser.so.24.2
+libarmnnTfLiteParser.so.24.3
 libavcodec.so
 libavcodec.so.58
 libavcodec.so.58.54.100
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index 594d769..c17d076 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -206,6 +206,16 @@
     return m_LayerSupport->IsCastSupported(input, output, reasonIfUnsupported.value());
 }
 
+bool LayerSupportHandle::IsChannelShuffleSupported(const TensorInfo& input, const TensorInfo& output,
+                                                   const ChannelShuffleDescriptor& descriptor,
+                                                   Optional<std::string&> reasonIfUnsupported)
+{
+    return m_LayerSupport->IsChannelShuffleSupported(input,
+                                                     output,
+                                                     descriptor,
+                                                     reasonIfUnsupported.value());
+}
+
 bool LayerSupportHandle::IsComparisonSupported(const TensorInfo& input0,
                                                const TensorInfo& input1,
                                                const TensorInfo& output,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index e3ae23c..6f39ca0 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -12,6 +12,7 @@
 #include "layers/BatchNormalizationLayer.hpp"
 #include "layers/BatchToSpaceNdLayer.hpp"
 #include "layers/CastLayer.hpp"
+#include "layers/ChannelShuffleLayer.hpp"
 #include "layers/ComparisonLayer.hpp"
 #include "layers/ConcatLayer.hpp"
 #include "layers/ConstantLayer.hpp"
@@ -109,6 +110,7 @@
 DECLARE_LAYER(BatchNormalization)
 DECLARE_LAYER(BatchToSpaceNd)
 DECLARE_LAYER(Cast)
+DECLARE_LAYER(ChannelShuffle)
 DECLARE_LAYER(Comparison)
 DECLARE_LAYER(Concat)
 DECLARE_LAYER(Constant)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index db7b4c9..8409717 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -512,6 +512,12 @@
     return pNetworkImpl->AddUnidirectionalSequenceLstmLayer(descriptor, params, name);
 }
 
+IConnectableLayer* INetwork::AddChannelShuffleLayer(const ChannelShuffleDescriptor& descriptor,
+                                                    const char* name)
+{
+    return pNetworkImpl->AddChannelShuffleLayer(descriptor, name);
+}
+
 void INetwork::Accept(ILayerVisitor& visitor) const
 {
     return pNetworkImpl->Accept(visitor);
@@ -1817,6 +1823,11 @@
 {
     return m_Graph->AddLayer<CastLayer>(name);
 }
+IConnectableLayer* NetworkImpl::AddChannelShuffleLayer(const ChannelShuffleDescriptor& channelShuffleDescriptor,
+                                               const char* name)
+{
+    return m_Graph->AddLayer<ChannelShuffleLayer>(channelShuffleDescriptor, name);
+}
 
 IConnectableLayer* NetworkImpl::AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
                                                const char* name)
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index c22c865..67c5b5a 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -64,6 +64,9 @@
 
     IConnectableLayer* AddCastLayer(const char* name = nullptr);
 
+    IConnectableLayer* AddChannelShuffleLayer(const ChannelShuffleDescriptor& channelShuffleDescriptor,
+                                              const char* name = nullptr);
+
     IConnectableLayer* AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
                                           const char* name = nullptr);
 
diff --git a/src/armnn/SerializeLayerParameters.cpp b/src/armnn/SerializeLayerParameters.cpp
index 16ffb8f..73e0cbc 100644
--- a/src/armnn/SerializeLayerParameters.cpp
+++ b/src/armnn/SerializeLayerParameters.cpp
@@ -62,6 +62,13 @@
     fn("DataLayout", GetDataLayoutName(desc.m_DataLayout));
 }
 
+void StringifyLayerParameters<ChannelShuffleDescriptor>::Serialize(ParameterStringifyFunction& fn,
+                                                                   const ChannelShuffleDescriptor& desc)
+{
+    fn("Axis", std::to_string(desc.m_Axis));
+    fn("NumGroups", std::to_string(desc.m_NumGroups));
+}
+
 void StringifyLayerParameters<ComparisonDescriptor>::Serialize(ParameterStringifyFunction& fn,
                                                                const ComparisonDescriptor& desc)
 {
diff --git a/src/armnn/SerializeLayerParameters.hpp b/src/armnn/SerializeLayerParameters.hpp
index 9c5b826..f8fe5e2 100644
--- a/src/armnn/SerializeLayerParameters.hpp
+++ b/src/armnn/SerializeLayerParameters.hpp
@@ -40,6 +40,11 @@
     static void Serialize(ParameterStringifyFunction& fn, const BatchToSpaceNdDescriptor& desc);
 };
 
+template <> struct StringifyLayerParameters<ChannelShuffleDescriptor>
+{
+    static void Serialize(ParameterStringifyFunction& fn, const ChannelShuffleDescriptor& desc);
+};
+
 template <> struct StringifyLayerParameters<ComparisonDescriptor>
 {
     static void Serialize(ParameterStringifyFunction& fn, const ComparisonDescriptor& desc);
diff --git a/src/armnn/layers/ChannelShuffleLayer.cpp b/src/armnn/layers/ChannelShuffleLayer.cpp
new file mode 100644
index 0000000..a3b85f1
--- /dev/null
+++ b/src/armnn/layers/ChannelShuffleLayer.cpp
@@ -0,0 +1,54 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ChannelShuffleLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+ChannelShuffleLayer::ChannelShuffleLayer(const ChannelShuffleDescriptor& param, const char* name)
+: LayerWithParameters(1, 1, LayerType::ChannelShuffle, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> ChannelShuffleLayer::CreateWorkload(const IWorkloadFactory &factory) const
+{
+    ChannelShuffleQueueDescriptor descriptor;
+    SetAdditionalInfo(descriptor);
+
+    return factory.CreateChannelShuffle(descriptor, PrepInfoAndDesc(descriptor));
+}
+
+ChannelShuffleLayer* ChannelShuffleLayer::Clone(Graph& graph) const
+{
+    return CloneBase<ChannelShuffleLayer>(graph, m_Param, GetName());
+}
+
+void ChannelShuffleLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
+
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
+
+    auto inferredShapes = Layer::InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+    ARMNN_ASSERT(inferredShapes.size() == 1);
+
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ChannelShuffleLayer");
+}
+void ChannelShuffleLayer::Accept(ILayerVisitor& visitor) const
+{
+    IgnoreUnused(visitor);
+    throw armnn::Exception("ChannelShuffleLayer: VisitChannelShuffleLayer is not implemented");
+}
+} // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/ChannelShuffleLayer.hpp b/src/armnn/layers/ChannelShuffleLayer.hpp
new file mode 100644
index 0000000..399b651
--- /dev/null
+++ b/src/armnn/layers/ChannelShuffleLayer.hpp
@@ -0,0 +1,37 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+class ChannelShuffleLayer : public LayerWithParameters<ChannelShuffleDescriptor>
+{
+public:
+    void Accept(ILayerVisitor& visitor) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param graph The graph into which this layer is being cloned
+    ChannelShuffleLayer* Clone(Graph& graph) const override;
+
+    /// Makes a workload for the ChannelShuffle type.
+    /// @param factory The workload factory which will create the workload
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref ChannelShuffleLayer.
+    /// Uses the layer's installed ShapeInferenceMethod to either validate or infer the output shape.
+    void ValidateTensorShapesFromInputs() override;
+
+    // TODO: add an InferOutputShapes override if ChannelShuffle's output shape can ever differ from its input shape.
+protected:
+    ChannelShuffleLayer(const ChannelShuffleDescriptor& param, const char* name);
+
+    ~ChannelShuffleLayer() = default;
+};
+
+} // namespace
\ No newline at end of file
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 074429b..1341581 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -215,8 +215,9 @@
     m_ParserFunctions[Layer_ArgMinMaxLayer]              = &DeserializerImpl::ParseArgMinMax;
     m_ParserFunctions[Layer_BatchToSpaceNdLayer]         = &DeserializerImpl::ParseBatchToSpaceNd;
     m_ParserFunctions[Layer_BatchNormalizationLayer]     = &DeserializerImpl::ParseBatchNormalization;
-    m_ParserFunctions[Layer_ComparisonLayer]             = &DeserializerImpl::ParseComparison;
     m_ParserFunctions[Layer_CastLayer]                   = &DeserializerImpl::ParseCast;
+    m_ParserFunctions[Layer_ChannelShuffleLayer]         = &DeserializerImpl::ParseChannelShuffle;
+    m_ParserFunctions[Layer_ComparisonLayer]             = &DeserializerImpl::ParseComparison;
     m_ParserFunctions[Layer_ConcatLayer]                 = &DeserializerImpl::ParseConcat;
     m_ParserFunctions[Layer_ConstantLayer]               = &DeserializerImpl::ParseConstant;
     m_ParserFunctions[Layer_Convolution2dLayer]          = &DeserializerImpl::ParseConvolution2d;
@@ -293,6 +294,8 @@
             return graphPtr->layers()->Get(layerIndex)->layer_as_BatchNormalizationLayer()->base();
         case Layer::Layer_CastLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_CastLayer()->base();
+        case Layer::Layer_ChannelShuffleLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_ChannelShuffleLayer()->base();
         case Layer::Layer_ComparisonLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_ComparisonLayer()->base();
         case Layer::Layer_ConcatLayer:
@@ -1780,7 +1783,30 @@
             throw armnn::Exception("unknown layer type, should be concat or merger");
     }
 }
+void IDeserializer::DeserializerImpl::ParseChannelShuffle(GraphPtr graph, unsigned int layerIndex)
+{
+    CHECK_LAYERS(graph, 0, layerIndex);
 
+    TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    armnn::ChannelShuffleDescriptor descriptor;
+    descriptor.m_Axis = graph->layers()->Get(layerIndex)->layer_as_ChannelShuffleLayer()->descriptor()->axis();
+    descriptor.m_NumGroups =
+                   graph->layers()->Get(layerIndex)->layer_as_ChannelShuffleLayer()->descriptor()->numGroups();
+
+    auto layerName = GetLayerName(graph, layerIndex);
+    IConnectableLayer* layer = m_Network->AddChannelShuffleLayer(descriptor, layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(graph, layerIndex, layer);
+    RegisterOutputSlots(graph, layerIndex, layer);
+}
 void IDeserializer::DeserializerImpl::ParseComparison(GraphPtr graph, unsigned int layerIndex)
 {
     CHECK_LAYERS(graph, 0, layerIndex);
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
index b4dc68b..a07e41f 100644
--- a/src/armnnDeserializer/Deserializer.hpp
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -88,6 +88,7 @@
     void ParseBatchToSpaceNd(GraphPtr graph, unsigned int layerIndex);
     void ParseBatchNormalization(GraphPtr graph, unsigned int layerIndex);
     void ParseCast(GraphPtr graph, unsigned int layerIndex);
+    void ParseChannelShuffle(GraphPtr graph, unsigned int layerIndex);
     void ParseComparison(GraphPtr graph, unsigned int layerIndex);
     void ParseConcat(GraphPtr graph, unsigned int layerIndex);
     void ParseConstant(GraphPtr graph, unsigned int layerIndex);
diff --git a/src/armnnDeserializer/test/DeserializeChannelShuffle.cpp b/src/armnnDeserializer/test/DeserializeChannelShuffle.cpp
new file mode 100644
index 0000000..19c2ece
--- /dev/null
+++ b/src/armnnDeserializer/test/DeserializeChannelShuffle.cpp
@@ -0,0 +1,146 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include <armnnDeserializer/IDeserializer.hpp>
+
+#include <string>
+
+TEST_SUITE("Deserializer_ChannelShuffle")
+{
+struct ChannelShuffleFixture : public ParserFlatbuffersSerializeFixture
+{
+    ChannelShuffleFixture()
+    {
+        m_JsonString = R"(
+        {
+          layers: [
+            {
+              layer_type: "InputLayer",
+              layer: {
+                base: {
+                  base: {
+                    layerName: "InputLayer",
+                    layerType: "Input",
+                    inputSlots: [
+
+                    ],
+                    outputSlots: [
+                      {
+                        tensorInfo: {
+                          dimensions: [
+                            3,
+                            12
+                          ],
+                          dataType: "Float32",
+                          quantizationScale: 0.0,
+                          dimensionSpecificity: [
+                            true,
+                            true
+                          ]
+                        }
+                      }
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              layer_type: "ChannelShuffleLayer",
+              layer: {
+                base: {
+                  index: 1,
+                  layerName: "channelShuffle",
+                  layerType: "ChannelShuffle",
+                  inputSlots: [
+                    {
+                      connection: {
+                        sourceLayerIndex: 0,
+                        outputSlotIndex: 0
+                      }
+                    }
+                  ],
+                  outputSlots: [
+                    {
+                      tensorInfo: {
+                        dimensions: [
+                          3,
+                          12
+                        ],
+                        dataType: "Float32",
+                        quantizationScale: 0.0,
+                        dimensionSpecificity: [
+                          true,
+                          true
+                        ]
+                      }
+                    }
+                  ]
+                },
+                descriptor: {
+                  axis: 1,
+                  numGroups: 3
+                }
+              }
+            },
+            {
+              layer_type: "OutputLayer",
+              layer: {
+                base: {
+                  base: {
+                    index: 2,
+                    layerName: "OutputLayer",
+                    layerType: "Output",
+                    inputSlots: [
+                      {
+                        connection: {
+                          sourceLayerIndex: 1,
+                          outputSlotIndex: 0
+                        }
+                      }
+                    ],
+                    outputSlots: [
+
+                    ]
+                  }
+                }
+              }
+            }
+          ],
+          inputIds: [
+            0
+          ],
+          outputIds: [
+            0
+          ],
+          featureVersions: {
+            bindingIdsScheme: 1,
+            weightsLayoutScheme: 1,
+            constantTensorsAsInputs: 1
+          }
+        }
+    )";
+    SetupSingleInputSingleOutput("InputLayer", "OutputLayer");
+    }
+};
+
+struct SimpleChannelShuffleFixtureFloat32 : ChannelShuffleFixture
+{
+    SimpleChannelShuffleFixtureFloat32() : ChannelShuffleFixture(){}
+};
+
+TEST_CASE_FIXTURE(SimpleChannelShuffleFixtureFloat32, "ChannelShuffleFloat32")
+{
+    RunTest<2, armnn::DataType::Float32>(0,
+                                         {{"InputLayer",
+                                           {  0, 1, 2, 3,        4, 5, 6, 7,       8, 9, 10, 11,
+                                            12, 13, 14, 15,   16, 17, 18, 19,   20, 21, 22, 23,
+                                            24, 25, 26, 27,   28, 29, 30, 31,   32, 33, 34, 35}}},
+                                         {{"OutputLayer",
+                                           { 0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11,
+                                            12, 16, 20, 13, 17, 21, 14, 18, 22, 15, 19, 23,
+                                            24, 28, 32, 25, 29, 33, 26, 30, 34, 27, 31, 35 }}});
+}
+}
\ No newline at end of file
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index a285a11..740090b 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -176,6 +176,7 @@
     Cast = 61,
     Shape = 62,
     UnidirectionalSequenceLstm = 63,
+    ChannelShuffle = 64,
 }
 
 // Base layer table to be used as part of other layers
@@ -228,6 +229,16 @@
     base:LayerBase;
 }
 
+table ChannelShuffleLayer {
+    base:LayerBase;
+    descriptor:ChannelShuffleDescriptor;
+}
+
+table ChannelShuffleDescriptor {
+    axis:uint = 0;
+    numGroups:uint = 0;
+}
+
 enum ComparisonOperation : byte {
     Equal = 0,
     Greater = 1,
@@ -1000,6 +1011,7 @@
     CastLayer,
     ShapeLayer,
     UnidirectionalSequenceLstmLayer,
+    ChannelShuffleLayer,
 }
 
 table AnyLayer {
diff --git a/src/armnnSerializer/ArmnnSchema_generated.h b/src/armnnSerializer/ArmnnSchema_generated.h
index cf28a7a..653ea6a 100644
--- a/src/armnnSerializer/ArmnnSchema_generated.h
+++ b/src/armnnSerializer/ArmnnSchema_generated.h
@@ -65,6 +65,12 @@
 struct CastLayer;
 struct CastLayerBuilder;
 
+struct ChannelShuffleLayer;
+struct ChannelShuffleLayerBuilder;
+
+struct ChannelShuffleDescriptor;
+struct ChannelShuffleDescriptorBuilder;
+
 struct ComparisonDescriptor;
 struct ComparisonDescriptorBuilder;
 
@@ -588,7 +594,7 @@
 }
 
 inline const char *EnumNameReduceOperation(ReduceOperation e) {
-  if (flatbuffers::IsOutRange(e, ReduceOperation_Sum, ReduceOperation_Min)) return "";
+  if (flatbuffers::IsOutRange(e, ReduceOperation_Sum, ReduceOperation_Prod)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesReduceOperation()[index];
 }
@@ -750,11 +756,12 @@
   LayerType_Cast = 61,
   LayerType_Shape = 62,
   LayerType_UnidirectionalSequenceLstm = 63,
+  LayerType_ChannelShuffle = 64,
   LayerType_MIN = LayerType_Addition,
-  LayerType_MAX = LayerType_UnidirectionalSequenceLstm
+  LayerType_MAX = LayerType_ChannelShuffle
 };
 
-inline const LayerType (&EnumValuesLayerType())[64] {
+inline const LayerType (&EnumValuesLayerType())[65] {
   static const LayerType values[] = {
     LayerType_Addition,
     LayerType_Input,
@@ -819,13 +826,14 @@
     LayerType_Reduce,
     LayerType_Cast,
     LayerType_Shape,
-    LayerType_UnidirectionalSequenceLstm
+    LayerType_UnidirectionalSequenceLstm,
+    LayerType_ChannelShuffle
   };
   return values;
 }
 
 inline const char * const *EnumNamesLayerType() {
-  static const char * const names[65] = {
+  static const char * const names[66] = {
     "Addition",
     "Input",
     "Multiplication",
@@ -890,13 +898,14 @@
     "Cast",
     "Shape",
     "UnidirectionalSequenceLstm",
+    "ChannelShuffle",
     nullptr
   };
   return names;
 }
 
 inline const char *EnumNameLayerType(LayerType e) {
-  if (flatbuffers::IsOutRange(e, LayerType_Addition, LayerType_UnidirectionalSequenceLstm)) return "";
+  if (flatbuffers::IsOutRange(e, LayerType_Addition, LayerType_ChannelShuffle)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesLayerType()[index];
 }
@@ -1240,11 +1249,12 @@
   Layer_CastLayer = 62,
   Layer_ShapeLayer = 63,
   Layer_UnidirectionalSequenceLstmLayer = 64,
+  Layer_ChannelShuffleLayer = 65,
   Layer_MIN = Layer_NONE,
-  Layer_MAX = Layer_UnidirectionalSequenceLstmLayer
+  Layer_MAX = Layer_ChannelShuffleLayer
 };
 
-inline const Layer (&EnumValuesLayer())[65] {
+inline const Layer (&EnumValuesLayer())[66] {
   static const Layer values[] = {
     Layer_NONE,
     Layer_ActivationLayer,
@@ -1310,13 +1320,14 @@
     Layer_ReduceLayer,
     Layer_CastLayer,
     Layer_ShapeLayer,
-    Layer_UnidirectionalSequenceLstmLayer
+    Layer_UnidirectionalSequenceLstmLayer,
+    Layer_ChannelShuffleLayer
   };
   return values;
 }
 
 inline const char * const *EnumNamesLayer() {
-  static const char * const names[66] = {
+  static const char * const names[67] = {
     "NONE",
     "ActivationLayer",
     "AdditionLayer",
@@ -1382,13 +1393,14 @@
     "CastLayer",
     "ShapeLayer",
     "UnidirectionalSequenceLstmLayer",
+    "ChannelShuffleLayer",
     nullptr
   };
   return names;
 }
 
 inline const char *EnumNameLayer(Layer e) {
-  if (flatbuffers::IsOutRange(e, Layer_NONE, Layer_UnidirectionalSequenceLstmLayer)) return "";
+  if (flatbuffers::IsOutRange(e, Layer_NONE, Layer_ChannelShuffleLayer)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesLayer()[index];
 }
@@ -1653,6 +1665,10 @@
   static const Layer enum_value = Layer_UnidirectionalSequenceLstmLayer;
 };
 
+template<> struct LayerTraits<armnnSerializer::ChannelShuffleLayer> {
+  static const Layer enum_value = Layer_ChannelShuffleLayer;
+};
+
 bool VerifyLayer(flatbuffers::Verifier &verifier, const void *obj, Layer type);
 bool VerifyLayerVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types);
 
@@ -2747,6 +2763,112 @@
   return builder_.Finish();
 }
 
+struct ChannelShuffleLayer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  typedef ChannelShuffleLayerBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_BASE = 4,
+    VT_DESCRIPTOR = 6
+  };
+  const armnnSerializer::LayerBase *base() const {
+    return GetPointer<const armnnSerializer::LayerBase *>(VT_BASE);
+  }
+  const armnnSerializer::ChannelShuffleDescriptor *descriptor() const {
+    return GetPointer<const armnnSerializer::ChannelShuffleDescriptor *>(VT_DESCRIPTOR);
+  }
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_BASE) &&
+           verifier.VerifyTable(base()) &&
+           VerifyOffset(verifier, VT_DESCRIPTOR) &&
+           verifier.VerifyTable(descriptor()) &&
+           verifier.EndTable();
+  }
+};
+
+struct ChannelShuffleLayerBuilder {
+  typedef ChannelShuffleLayer Table;
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  void add_base(flatbuffers::Offset<armnnSerializer::LayerBase> base) {
+    fbb_.AddOffset(ChannelShuffleLayer::VT_BASE, base);
+  }
+  void add_descriptor(flatbuffers::Offset<armnnSerializer::ChannelShuffleDescriptor> descriptor) {
+    fbb_.AddOffset(ChannelShuffleLayer::VT_DESCRIPTOR, descriptor);
+  }
+  explicit ChannelShuffleLayerBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ChannelShuffleLayerBuilder &operator=(const ChannelShuffleLayerBuilder &);
+  flatbuffers::Offset<ChannelShuffleLayer> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<ChannelShuffleLayer>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<ChannelShuffleLayer> CreateChannelShuffleLayer(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    flatbuffers::Offset<armnnSerializer::LayerBase> base = 0,
+    flatbuffers::Offset<armnnSerializer::ChannelShuffleDescriptor> descriptor = 0) {
+  ChannelShuffleLayerBuilder builder_(_fbb);
+  builder_.add_descriptor(descriptor);
+  builder_.add_base(base);
+  return builder_.Finish();
+}
+
+struct ChannelShuffleDescriptor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  typedef ChannelShuffleDescriptorBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_AXIS = 4,
+    VT_NUMGROUPS = 6
+  };
+  uint32_t axis() const {
+    return GetField<uint32_t>(VT_AXIS, 0);
+  }
+  uint32_t numGroups() const {
+    return GetField<uint32_t>(VT_NUMGROUPS, 0);
+  }
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyField<uint32_t>(verifier, VT_AXIS) &&
+           VerifyField<uint32_t>(verifier, VT_NUMGROUPS) &&
+           verifier.EndTable();
+  }
+};
+
+struct ChannelShuffleDescriptorBuilder {
+  typedef ChannelShuffleDescriptor Table;
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  void add_axis(uint32_t axis) {
+    fbb_.AddElement<uint32_t>(ChannelShuffleDescriptor::VT_AXIS, axis, 0);
+  }
+  void add_numGroups(uint32_t numGroups) {
+    fbb_.AddElement<uint32_t>(ChannelShuffleDescriptor::VT_NUMGROUPS, numGroups, 0);
+  }
+  explicit ChannelShuffleDescriptorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ChannelShuffleDescriptorBuilder &operator=(const ChannelShuffleDescriptorBuilder &);
+  flatbuffers::Offset<ChannelShuffleDescriptor> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<ChannelShuffleDescriptor>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<ChannelShuffleDescriptor> CreateChannelShuffleDescriptor(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    uint32_t axis = 0,
+    uint32_t numGroups = 0) {
+  ChannelShuffleDescriptorBuilder builder_(_fbb);
+  builder_.add_numGroups(numGroups);
+  builder_.add_axis(axis);
+  return builder_.Finish();
+}
+
 struct ComparisonDescriptor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
   typedef ComparisonDescriptorBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -9838,6 +9960,9 @@
   const armnnSerializer::UnidirectionalSequenceLstmLayer *layer_as_UnidirectionalSequenceLstmLayer() const {
     return layer_type() == armnnSerializer::Layer_UnidirectionalSequenceLstmLayer ? static_cast<const armnnSerializer::UnidirectionalSequenceLstmLayer *>(layer()) : nullptr;
   }
+  const armnnSerializer::ChannelShuffleLayer *layer_as_ChannelShuffleLayer() const {
+    return layer_type() == armnnSerializer::Layer_ChannelShuffleLayer ? static_cast<const armnnSerializer::ChannelShuffleLayer *>(layer()) : nullptr;
+  }
   bool Verify(flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<uint8_t>(verifier, VT_LAYER_TYPE) &&
@@ -10103,6 +10228,10 @@
   return layer_as_UnidirectionalSequenceLstmLayer();
 }
 
+template<> inline const armnnSerializer::ChannelShuffleLayer *AnyLayer::layer_as<armnnSerializer::ChannelShuffleLayer>() const {
+  return layer_as_ChannelShuffleLayer();
+}
+
 struct AnyLayerBuilder {
   typedef AnyLayer Table;
   flatbuffers::FlatBufferBuilder &fbb_;
@@ -10589,6 +10718,10 @@
       auto ptr = reinterpret_cast<const armnnSerializer::UnidirectionalSequenceLstmLayer *>(obj);
       return verifier.VerifyTable(ptr);
     }
+    case Layer_ChannelShuffleLayer: {
+      auto ptr = reinterpret_cast<const armnnSerializer::ChannelShuffleLayer *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
     default: return true;
   }
 }
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 195b416..9a3a270 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -302,6 +302,19 @@
     CreateAnyLayer(fbCastLayer.o, serializer::Layer::Layer_CastLayer);
 }
 
+void SerializerStrategy::SerializeChannelShuffleLayer(const armnn::IConnectableLayer* layer,
+                                                      const armnn::ChannelShuffleDescriptor& descriptor,
+                                                      const char* name)
+{
+    IgnoreUnused(name);
+    auto fbDescriptor = CreateChannelShuffleDescriptor(m_flatBufferBuilder,
+                                                       descriptor.m_Axis,
+                                                       descriptor.m_NumGroups);
+    auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_ChannelShuffle);
+    auto fbChannelShuffleLayer = serializer::CreateChannelShuffleLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
+    CreateAnyLayer(fbChannelShuffleLayer.o, serializer::Layer::Layer_ChannelShuffleLayer);
+}
+
 void SerializerStrategy::SerializeComparisonLayer(const armnn::IConnectableLayer* layer,
                                              const armnn::ComparisonDescriptor& descriptor,
                                              const char* name)
@@ -1997,6 +2010,15 @@
             SerializeCastLayer(layer, name);
             break;
         }
+        case armnn::LayerType::ChannelShuffle :
+        {
+            const armnn::ChannelShuffleDescriptor& layerDescriptor =
+                                                     static_cast<const armnn::ChannelShuffleDescriptor&>(descriptor);
+            SerializeChannelShuffleLayer(layer,
+                                         layerDescriptor,
+                                         name);
+            break;
+        }
         case armnn::LayerType::Comparison :
         {
             const armnn::ComparisonDescriptor& layerDescriptor =
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 18b2cc7..43fb0f4 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -131,6 +131,10 @@
     void SerializeCastLayer(const armnn::IConnectableLayer* layer,
                             const char* name = nullptr);
 
+    void SerializeChannelShuffleLayer(const armnn::IConnectableLayer* layer,
+                                      const armnn::ChannelShuffleDescriptor& descriptor,
+                                      const char* name = nullptr);
+
     void SerializeComparisonLayer(const armnn::IConnectableLayer* layer,
                                   const armnn::ComparisonDescriptor& descriptor,
                                   const char* name = nullptr);
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 9e9df0d..cd7fd5c 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -202,6 +202,34 @@
         deserializedNetwork->ExecuteStrategy(verifier);
 }
 
+TEST_CASE("SerializeChannelShuffle")
+{
+    const std::string layerName("channelShuffle");
+    const armnn::TensorInfo inputInfo({1, 9}, armnn::DataType::Float32);
+    const armnn::TensorInfo outputInfo({1, 9}, armnn::DataType::Float32);
+
+    armnn::ChannelShuffleDescriptor descriptor({3, 1});
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+    // lowerCamel local name; avoids shadowing the armnn::ChannelShuffleLayer type.
+    armnn::IConnectableLayer* const channelShuffleLayer =
+            network->AddChannelShuffleLayer(descriptor, layerName.c_str());
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+    inputLayer->GetOutputSlot(0).Connect(channelShuffleLayer->GetInputSlot(0));
+    channelShuffleLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+    channelShuffleLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    CHECK(deserializedNetwork);
+
+    LayerVerifierBaseWithDescriptor<armnn::ChannelShuffleDescriptor> verifier(
+            layerName, {inputInfo}, {outputInfo}, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
+}
+
 TEST_CASE("SerializeComparison")
 {
     const std::string layerName("comparison");
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 138d453..2753c92 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -95,6 +95,14 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsChannelShuffleSupported(const TensorInfo&, //input
+                                                 const TensorInfo&, //output
+                                                 const ChannelShuffleDescriptor&, //descriptor
+                                                 Optional<std::string &> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool LayerSupportBase::IsComparisonSupported(const TensorInfo&, // input0
                                              const TensorInfo&, // input1
                                              const TensorInfo&, // output
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 533a2c6..cc68a22 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -51,6 +51,11 @@
                          const TensorInfo& output,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsChannelShuffleSupported(const TensorInfo& input,
+                                   const TensorInfo& output,
+                                   const ChannelShuffleDescriptor& descriptor,
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsComparisonSupported(const TensorInfo& input0,
                                const TensorInfo& input1,
                                const TensorInfo& output,
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index d87f858..a6def84 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -2971,6 +2971,19 @@
     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
 }
 
+void ChannelShuffleQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    // Name must match this descriptor so validation errors identify the right layer.
+    const std::string descriptorName{"ChannelShuffleQueueDescriptor"};
+
+    ValidateNumInputs(workloadInfo, descriptorName, 1);
+    ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
+    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+}
+
 void QLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
 {
     const std::string descriptorName{"QLstmQueueDescriptor"};
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 78da00b..b90c29c 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -747,4 +747,9 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct ChannelShuffleQueueDescriptor : QueueDescriptorWithParameters<ChannelShuffleDescriptor>
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 } // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 3f5972d..00263ec 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -152,6 +152,21 @@
                                                         reason);
             break;
         }
+        case LayerType::ChannelShuffle:
+        {
+            auto cLayer = PolymorphicDowncast<const ChannelShuffleLayer*>(&layer);
+
+            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            // Output info comes from this layer's own output slot, not the input connection.
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+            const ChannelShuffleDescriptor descriptor = cLayer->GetParameters();
+
+            result = layerSupportObject.IsChannelShuffleSupported(OverrideDataType(input, dataType),
+                                                                  OverrideDataType(output, dataType),
+                                                                  descriptor,
+                                                                  reason);
+            break;
+        }
         case LayerType::Comparison:
         {
             auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);
@@ -1501,6 +1516,12 @@
     return std::unique_ptr<IWorkload>();
 }
 
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& /*descriptor*/,
+                                                                  const WorkloadInfo& /*info*/) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
                                                               const WorkloadInfo& /*info*/) const
 {
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index efb8d99..e84657e 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -90,6 +90,9 @@
     virtual std::unique_ptr<IWorkload> CreateCast(const CastQueueDescriptor& descriptor,
                                                   const WorkloadInfo& Info) const;
 
+    virtual std::unique_ptr<IWorkload> CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
+                                                            const WorkloadInfo& info) const;
+
     virtual std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
                                                         const WorkloadInfo& Info) const;
 
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 7ebc997..7d3558c 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -45,6 +45,7 @@
     test/layerTests/ArgMinMaxTestImpl.cpp \
     test/layerTests/BatchNormalizationTestImpl.cpp \
     test/layerTests/CastTestImpl.cpp \
+    test/layerTests/ChannelShuffleTestImpl.cpp \
     test/layerTests/ComparisonTestImpl.cpp \
     test/layerTests/ConcatTestImpl.cpp \
     test/layerTests/ConstantTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index c9bc5e7..292ec0e 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -64,6 +64,8 @@
     layerTests/BatchToSpaceNdTestImpl.hpp
     layerTests/CastTestImpl.cpp
     layerTests/CastTestImpl.hpp
+    layerTests/ChannelShuffleTestImpl.cpp
+    layerTests/ChannelShuffleTestImpl.hpp
     layerTests/ComparisonTestImpl.cpp
     layerTests/ComparisonTestImpl.hpp
     layerTests/ConcatTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 21b33d2..c2d2184 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -620,6 +620,8 @@
 
 DECLARE_LAYER_POLICY_1_PARAM(Cast)
 
+DECLARE_LAYER_POLICY_2_PARAM(ChannelShuffle)
+
 DECLARE_LAYER_POLICY_2_PARAM(Comparison)
 
 DECLARE_LAYER_POLICY_2_PARAM(Concat)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 0690637..9f1fa88 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -12,6 +12,7 @@
 #include <backendsCommon/test/layerTests/BatchNormalizationTestImpl.hpp>
 #include <backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp>
 #include <backendsCommon/test/layerTests/CastTestImpl.hpp>
+#include <backendsCommon/test/layerTests/ChannelShuffleTestImpl.hpp>
 #include <backendsCommon/test/layerTests/ComparisonTestImpl.hpp>
 #include <backendsCommon/test/layerTests/ConcatTestImpl.hpp>
 #include <backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp
new file mode 100644
index 0000000..46ee7d9
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp
@@ -0,0 +1,269 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ChannelShuffleTestImpl.hpp"
+
+#include <backendsCommon/test/DataTypeUtils.hpp>
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+namespace
+{
+
+template<typename T, size_t NumDims>
+LayerTestResult<T, NumDims> ChannelShuffleTestImpl(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
+        armnn::ChannelShuffleDescriptor descriptor,
+        armnn::TensorInfo inputTensorInfo,
+        armnn::TensorInfo outputTensorInfo,
+        const std::vector<T>& inputData,
+        const std::vector<T>& outputExpectedData)
+{
+    IgnoreUnused(memoryManager);
+    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ChannelShuffleQueueDescriptor data;
+    data.m_Parameters = descriptor;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateChannelShuffle(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), inputData.data());
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+    return LayerTestResult<T, NumDims>(actualOutput,
+                                       outputExpectedData,
+                                       outputHandle->GetShape(),
+                                       outputTensorInfo.GetShape());
+}
+} // anonymous namespace
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> SimpleChannelShuffleTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[] = { 1,9,1,1 };
+    unsigned int outputShape[] = { 1,9,1,1 };
+
+    armnn::ChannelShuffleDescriptor descriptor;
+    descriptor.m_Axis = 1;
+    descriptor.m_NumGroups = 3;
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+    inputTensorInfo.SetQuantizationScale(1.0f);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+    outputTensorInfo.SetQuantizationScale(1.0f);
+
+    auto input = ConvertToDataType<ArmnnType>(
+            {
+                0.0f, 1.0f, 2.0f,   3.0f, 4.0f, 5.0f,   6.0f, 7.0f, 8.0f
+            },
+            inputTensorInfo);
+    auto outputExpected = ConvertToDataType<ArmnnType>(
+            {
+                0.0f, 3.0f, 6.0f, 1.0f, 4.0f, 7.0f, 2.0f, 5.0f, 8.0f
+            },
+            outputTensorInfo);
+
+    return ChannelShuffleTestImpl<T, 4>(
+                workloadFactory,
+                memoryManager,
+                tensorHandleFactory,
+                descriptor,
+                inputTensorInfo,
+                outputTensorInfo,
+                input,
+                outputExpected);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> ChannelShuffle2DTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[] = { 3, 12 };
+    unsigned int outputShape[] = { 3, 12 };
+
+    armnn::ChannelShuffleDescriptor descriptor;
+    descriptor.m_Axis = 1;
+    descriptor.m_NumGroups = 3;
+
+    inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
+    inputTensorInfo.SetQuantizationScale(1.0f);
+    outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
+    outputTensorInfo.SetQuantizationScale(1.0f);
+
+    auto input = ConvertToDataType<ArmnnType>(
+            {
+                0, 1, 2, 3,       4, 5, 6, 7,       8, 9, 10, 11,
+               12, 13, 14, 15,   16, 17, 18, 19,   20, 21, 22, 23,
+               24, 25, 26, 27,   28, 29, 30, 31,   32, 33, 34, 35
+            },
+            inputTensorInfo);
+
+    auto outputExpected = ConvertToDataType<ArmnnType>(
+            {
+                0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11,
+                12, 16, 20, 13, 17, 21, 14, 18, 22, 15, 19, 23,
+                24, 28, 32, 25, 29, 33, 26, 30, 34, 27, 31, 35
+            },
+            outputTensorInfo);
+
+    return ChannelShuffleTestImpl<T, 2>(
+            workloadFactory,
+            memoryManager,
+            tensorHandleFactory,
+            descriptor,
+            inputTensorInfo,
+            outputTensorInfo,
+            input,
+            outputExpected);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ChannelShuffle4DTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[] = { 2, 9, 1, 2 };
+    unsigned int outputShape[] = { 2, 9, 1, 2 };
+
+    armnn::ChannelShuffleDescriptor descriptor;
+    descriptor.m_Axis = 1;
+    descriptor.m_NumGroups = 3;
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+    inputTensorInfo.SetQuantizationScale(1.0f);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+    outputTensorInfo.SetQuantizationScale(1.0f);
+
+    auto input = ConvertToDataType<ArmnnType>(
+            {
+                    0.0f, 1.0f,
+                    2.0f, 3.0f,
+                    4.0f, 5.0f,
+
+                    6.0f, 7.0f,
+                    8.0f, 9.0f,
+                    10.0f, 11.0f,
+
+                    12.0f, 13.0f,
+                    14.0f, 15.0f,
+                    16.0f, 17.0f,
+
+                    18.0f, 19.0f,
+                    20.0f, 21.0f,
+                    22.0f, 23.0f,
+
+                    24.0f, 25.0f,
+                    26.0f, 27.0f,
+                    28.0f, 29.0f,
+
+                    30.0f, 31.0f,
+                    32.0f, 33.0f,
+                    34.0f, 35.0f
+            },
+            inputTensorInfo);
+
+    auto outputExpected = ConvertToDataType<ArmnnType>(
+            {
+                    0.0f, 1.0f,
+                    6.0f, 7.0f,
+                    12.0f, 13.0f,
+                    2.0f, 3.0f,
+                    8.0f, 9.0f,
+                    14.0f, 15.0f,
+                    4.0f, 5.0f,
+                    10.0f, 11.0f,
+                    16.0f, 17.0f,
+
+                    18.0f, 19.0f,
+                    24.0f, 25.0f,
+                    30.0f, 31.0f,
+                    20.0f, 21.0f,
+                    26.0f, 27.0f,
+                    32.0f, 33.0f,
+                    22.0f, 23.0f,
+                    28.0f, 29.0f,
+                    34.0f, 35.0f
+            },
+            outputTensorInfo);
+
+    return ChannelShuffleTestImpl<T, 4>(
+            workloadFactory,
+            memoryManager,
+            tensorHandleFactory,
+            descriptor,
+            inputTensorInfo,
+            outputTensorInfo,
+            input,
+            outputExpected);
+}
+
+//
+// Explicit template specializations
+//
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+SimpleChannelShuffleTest<armnn::DataType::Float32>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+SimpleChannelShuffleTest<armnn::DataType::QAsymmU8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+ChannelShuffle2DTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+ChannelShuffle2DTest<armnn::DataType::QAsymmU8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+ChannelShuffle4DTest<armnn::DataType::Float32>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ChannelShuffle4DTest<armnn::DataType::QAsymmU8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.hpp
new file mode 100644
index 0000000..3500e72
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.hpp
@@ -0,0 +1,34 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+// ChannelShuffle layer test over a 4D input tensor.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> SimpleChannelShuffleTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// ChannelShuffle layer test over a 2D input tensor.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> ChannelShuffle2DTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// ChannelShuffle layer test over a 4D input tensor.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ChannelShuffle4DTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 5eba3e5..aaf9aa0 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -333,6 +333,38 @@
     return supported;
 }
 
+bool RefLayerSupport::IsChannelShuffleSupported(const TensorInfo& input,
+                                                const TensorInfo& output,
+                                                const ChannelShuffleDescriptor& descriptor,
+                                                Optional<std::string&> reasonIfUnsupported) const
+{
+    IgnoreUnused(descriptor);
+    bool supported = true;
+
+    // Define supported input and output types.
+    std::array<DataType, 7> supportedTypes =
+    {
+        DataType::BFloat16,
+        DataType::Float32,
+        DataType::Float16,
+        DataType::QAsymmS8,
+        DataType::QAsymmU8,
+        DataType::QSymmS8,
+        DataType::QSymmS16
+    };
+
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+                                  "Reference ChannelShuffle: input is not a supported type.");
+
+    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+                                  "Reference ChannelShuffle: output is not a supported type.");
+
+    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+                                  "Reference ChannelShuffle: input and output types are mismatched.");
+
+    return supported;
+}
+
 bool RefLayerSupport::IsComparisonSupported(const TensorInfo& input0,
                                             const TensorInfo& input1,
                                             const TensorInfo& output,
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index a1b4dc7..2693dc1 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -50,6 +50,11 @@
                          const TensorInfo& output,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsChannelShuffleSupported(const TensorInfo& input,
+                                   const TensorInfo& output,
+                                   const ChannelShuffleDescriptor& descriptor,
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsComparisonSupported(const TensorInfo& input0,
                                const TensorInfo& input1,
                                const TensorInfo& output,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 16cf17c..681b73a 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -183,6 +183,12 @@
     return std::make_unique<RefCastWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
+                                                                    const WorkloadInfo& info) const
+{
+    return std::make_unique<RefChannelShuffleWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
 {
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 113aca7..fe3eb54 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -88,6 +88,9 @@
     std::unique_ptr<IWorkload> CreateCast(const CastQueueDescriptor& descriptor,
                                           const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
+                                                    const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 17ddbe0..2dc2bc4 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -49,6 +49,7 @@
         workloads/RefBatchNormalizationWorkload.cpp \
         workloads/RefBatchToSpaceNdWorkload.cpp \
         workloads/RefCastWorkload.cpp \
+        workloads/RefChannelShuffleWorkload.cpp \
         workloads/RefComparisonWorkload.cpp \
         workloads/RefConcatWorkload.cpp \
         workloads/RefConstantWorkload.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 18490e2..b085515 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1398,6 +1398,13 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(Reshape5d, Reshape5dTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ReshapeBoolean, ReshapeBooleanTest)
 
+// ChannelShuffle
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleChannelShuffleFloat32, SimpleChannelShuffleTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleChannelShuffleQAsymmU8, SimpleChannelShuffleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ChannelShuffle2DFloat32, ChannelShuffle2DTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ChannelShuffle2DQAsymmU8, ChannelShuffle2DTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ChannelShuffle4DFloat32, ChannelShuffle4DTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ChannelShuffle4DQAsymmU8, ChannelShuffle4DTest<DataType::QAsymmU8>)
 
 // Rsqrt
 ARMNN_AUTO_TEST_CASE_WITH_THF(Rsqrt2d, Rsqrt2dTest<DataType::Float32>)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index b9f477c..0ab8c6b 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -68,6 +68,8 @@
     RefBatchToSpaceNdWorkload.hpp
     RefCastWorkload.cpp
     RefCastWorkload.hpp
+    RefChannelShuffleWorkload.cpp
+    RefChannelShuffleWorkload.hpp
     RefComparisonWorkload.cpp
     RefComparisonWorkload.hpp
     RefConcatWorkload.cpp
diff --git a/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp b/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp
new file mode 100644
index 0000000..6571715
--- /dev/null
+++ b/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp
@@ -0,0 +1,88 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefChannelShuffleWorkload.hpp"
+#include "RefWorkloadUtils.hpp"
+#include "Profiling.hpp"
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+
+#include <armnn/backends/ITensorHandleFactory.hpp>
+
+namespace armnn
+{
+void RefChannelShuffleWorkload::Execute() const
+{
+    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
+}
+
+void RefChannelShuffleWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
+{
+    Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+}
+
+// Reference implementation for channel shuffle taken from
+// https://android.googlesource.com/platform/frameworks/ml/+/refs/heads/master/nn/common/operations/ChannelShuffle.cpp
+void RefChannelShuffleWorkload::Execute(std::vector<ITensorHandle*> inputs,
+                                        std::vector<ITensorHandle*> outputs) const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefChannelShuffleWorkload_Execute");
+
+    const TensorInfo& inputInfo  = GetTensorInfo(inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
+    std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputInfo, inputs[0]->Map());
+    Decoder<float>& decoder = *decoderPtr;
+
+    std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputInfo, outputs[0]->Map());
+    Encoder<float>& encoder = *encoderPtr;
+
+    // Product of the dimensions in the half-open range [startAxis, lastAxis).
+    auto getNumberOfElements = [](const TensorShape& tensorShape, uint32_t startAxis, uint32_t lastAxis)
+    {
+        uint32_t count = 1;
+        for (uint32_t i = startAxis; i < lastAxis; i++)
+        {
+            count *= tensorShape[i];
+        }
+        return count;
+    };
+    const TensorShape& tensorShape = inputInfo.GetShape();
+    uint32_t channelsAxis = m_Data.m_Parameters.m_Axis; // channelsAxis to perform channel shuffle on
+
+    const uint32_t numGroups = m_Data.m_Parameters.m_NumGroups;
+    const uint32_t groupSize = tensorShape[channelsAxis] / numGroups;
+
+    // View the tensor as [outerSize, channels, innerSize], whatever its actual rank.
+    uint32_t outerSize = getNumberOfElements(tensorShape, 0, channelsAxis);
+    uint32_t innerSize = getNumberOfElements(tensorShape, channelsAxis + 1, tensorShape.GetNumDimensions());
+
+    for (uint32_t outer = 0; outer < outerSize; ++outer)
+    {
+        for (uint32_t inner = 0; inner < innerSize; ++inner)
+        {
+            // Position both iterators at the first channel of this (outer, inner) slice.
+            uint32_t decoderStep1 = outer * tensorShape[channelsAxis] * innerSize + inner;
+            decoder += decoderStep1;
+            uint32_t encoderStep1 = outer * tensorShape[channelsAxis] * innerSize + inner;
+            encoder += encoderStep1;
+            // Write output channels sequentially while reading input channels
+            // group-interleaved: out channel (i * numGroups + j) <- in channel (j * groupSize + i).
+            for (uint32_t i = 0; i < groupSize; i++)
+            {
+                for (uint32_t j = 0; j < numGroups; j++, encoder += innerSize, encoderStep1 += innerSize)
+                {
+                    decoder += innerSize * (i + j * groupSize);
+                    float decoded = decoder.Get();
+                    encoder.Set(decoded);
+                    decoder -= innerSize * (i + j * groupSize);
+                }
+            }
+            // encoderStep1 has advanced past the whole slice; rewind both to offset 0.
+            decoder -= decoderStep1;
+            encoder -= encoderStep1;
+        }
+    }
+}
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefChannelShuffleWorkload.hpp b/src/backends/reference/workloads/RefChannelShuffleWorkload.hpp
new file mode 100644
index 0000000..0e4c454
--- /dev/null
+++ b/src/backends/reference/workloads/RefChannelShuffleWorkload.hpp
@@ -0,0 +1,27 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+// Reference (CpuRef) workload implementing the ChannelShuffle layer: shuffles
+// the input tensor's channels, in groups given by the queue descriptor.
+class RefChannelShuffleWorkload : public BaseWorkload<ChannelShuffleQueueDescriptor>
+{
+public:
+    using BaseWorkload<ChannelShuffleQueueDescriptor>::BaseWorkload;
+    void Execute() const override;
+    void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+
+private:
+    void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
+};
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index d3ae58e..1cf84ee 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -19,6 +19,7 @@
 #include "RefBatchNormalizationWorkload.hpp"
 #include "RefBatchToSpaceNdWorkload.hpp"
 #include "RefCastWorkload.hpp"
+#include "RefChannelShuffleWorkload.hpp"
 #include "RefComparisonWorkload.hpp"
 #include "RefConvolution2dWorkload.hpp"
 #include "RefConstantWorkload.hpp"