IVGCVSW-3707 Add Channel Shuffle Workload to CpuAcc backend

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I2e3dee3c73fe58c7cfcb3ce3667884202f46e6aa
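Note: the new workload maps the descriptor's axis onto an ACL data layout (axis 1 -> NCHW,
axis 3 -> NHWC) and only accepts 4D tensors. The snippet below is a minimal, illustrative
sketch of how a caller might query CpuAcc support for the new layer; it is not part of the
patch, and the <neon/...> include path, tensor shapes and group count are assumptions made
for the example.

    // Illustrative sketch only, not part of this change.
    #include <armnn/Descriptors.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>
    #include <neon/NeonLayerSupport.hpp>   // assumed include path within the armnn build tree
    #include <string>

    int main()
    {
        // 4D NCHW tensors: axis 1 is the channel dimension, so the workload internally
        // selects the NCHW data layout (axis 3 would select NHWC instead).
        armnn::TensorInfo input({1, 8, 4, 4}, armnn::DataType::Float32);
        armnn::TensorInfo output({1, 8, 4, 4}, armnn::DataType::Float32);

        armnn::ChannelShuffleDescriptor descriptor;
        descriptor.m_NumGroups = 2; // shuffle the 8 channels in 2 groups of 4
        descriptor.m_Axis      = 1; // channel dimension of an NCHW tensor

        std::string reason;
        armnn::NeonLayerSupport layerSupport;
        bool supported = layerSupport.IsChannelShuffleSupported(input, output, descriptor, reason);
        return supported ? 0 : 1;
    }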
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 154108e..ec64f90 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -28,13 +28,15 @@
 #include "workloads/NeonBatchNormalizationWorkload.hpp"
 #include "workloads/NeonBatchToSpaceNdWorkload.hpp"
 #include "workloads/NeonCastWorkload.hpp"
-#include "workloads/NeonExpWorkload.hpp"
+#include "workloads/NeonChannelShuffleWorkload.hpp"
 #include "workloads/NeonComparisonWorkload.hpp"
+#include "workloads/NeonConcatWorkload.hpp"
 #include "workloads/NeonConstantWorkload.hpp"
 #include "workloads/NeonConvolution2dWorkload.hpp"
 #include "workloads/NeonDepthToSpaceWorkload.hpp"
 #include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
 #include "workloads/NeonDequantizeWorkload.hpp"
+#include "workloads/NeonExpWorkload.hpp"
 #include "workloads/NeonInstanceNormalizationWorkload.hpp"
 #include "workloads/NeonL2NormalizationFloatWorkload.hpp"
 #include "workloads/NeonLogWorkload.hpp"
@@ -45,7 +47,6 @@
 #include "workloads/NeonLstmFloatWorkload.hpp"
 #include "workloads/NeonMaximumWorkload.hpp"
 #include "workloads/NeonMeanWorkload.hpp"
-#include "workloads/NeonConcatWorkload.hpp"
 #include "workloads/NeonMinimumWorkload.hpp"
 #include "workloads/NeonMultiplicationWorkload.hpp"
 #include "workloads/NeonDivisionWorkload.hpp"
@@ -233,6 +234,18 @@
                                    output);
 }
 
+bool NeonLayerSupport::IsChannelShuffleSupported(const TensorInfo& input,
+                                                 const TensorInfo& output,
+                                                 const ChannelShuffleDescriptor& descriptor,
+                                                 Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonChannelShuffleValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor);
+}
+
 bool NeonLayerSupport::IsComparisonSupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index c2c81f4..fc1e1f6 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -57,6 +57,11 @@
                          const TensorInfo& output,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsChannelShuffleSupported(const TensorInfo& input,
+                                   const TensorInfo& output,
+                                   const ChannelShuffleDescriptor& descriptor,
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsComparisonSupported(const TensorInfo& input0,
                                const TensorInfo& input1,
                                const TensorInfo& output,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 5ccec62..9ec7583 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -178,6 +178,12 @@
     return std::make_unique<NeonCastWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
+                                                                     const WorkloadInfo& info) const
+{
+    return std::make_unique<NeonChannelShuffleWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
 {
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index e791bbc..41fc506 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -74,6 +74,9 @@
     std::unique_ptr<IWorkload> CreateCast(const CastQueueDescriptor& descriptor,
                                           const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
+                                                    const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
                                                 const WorkloadInfo& Info) const override;
 
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 9906c80..9869af0 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -29,6 +29,7 @@
         workloads/NeonBatchNormalizationWorkload.cpp \
         workloads/NeonBatchToSpaceNdWorkload.cpp \
         workloads/NeonCastWorkload.cpp \
+        workloads/NeonChannelShuffleWorkload.cpp \
         workloads/NeonComparisonWorkload.cpp \
         workloads/NeonConcatWorkload.cpp \
         workloads/NeonConstantWorkload.cpp \
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 75f9648..65870a3 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -1419,6 +1419,9 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloatToIn8, CastFloat32ToInt82dTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloatToUInt8, CastFloat32ToUInt82dTest)
 
+// ChannelShuffle
+ARMNN_AUTO_TEST_CASE_WITH_THF(ChannelShuffle4DFloat32, ChannelShuffle4DTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ChannelShuffle4DQAsymmU8, ChannelShuffle4DTest<DataType::QAsymmU8>)
 
 #if defined(ARMNNREF_ENABLED)
 
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index d08dd7e..6451f4c 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -18,6 +18,8 @@
     NeonBatchToSpaceNdWorkload.hpp
     NeonCastWorkload.cpp
     NeonCastWorkload.hpp
+    NeonChannelShuffleWorkload.cpp
+    NeonChannelShuffleWorkload.hpp
     NeonComparisonWorkload.cpp
     NeonComparisonWorkload.hpp
     NeonConcatWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp b/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
new file mode 100644
index 0000000..b28ee44
--- /dev/null
+++ b/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
@@ -0,0 +1,94 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonChannelShuffleWorkload.hpp"
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status NeonChannelShuffleValidate(const TensorInfo& input,
+                                               const TensorInfo& output,
+                                               const ChannelShuffleDescriptor& descriptor)
+{
+    arm_compute::TensorInfo aclInputInfo  = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    // In Arm NN and in NNAPI, the channel shuffle implementation is data-layout agnostic and takes the axis as a
+    // parameter. The Neon implementation depends on the data layout and has no axis parameter: it only supports
+    // channel shuffle for 4D tensors along the channel dimension, i.e. axis 1 (NCHW) or axis 3 (NHWC).
+    arm_compute::DataLayout aclDataLayout;
+    if (input.GetNumDimensions() == 4)
+    {
+        switch (descriptor.m_Axis)
+        {
+            case 1:
+                aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW);
+                break;
+            case 3:
+                aclDataLayout = ConvertDataLayout(armnn::DataLayout::NHWC);
+                break;
+            default:
+                return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported axis"};
+        }
+        aclInputInfo.set_data_layout(aclDataLayout);
+        aclOutputInfo.set_data_layout(aclDataLayout);
+        return arm_compute::NEChannelShuffleLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_NumGroups);
+    }
+    else
+    {
+        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported number of dimensions"};
+    }
+}
+
+NeonChannelShuffleWorkload::NeonChannelShuffleWorkload(const ChannelShuffleQueueDescriptor& descriptor,
+                                                       const WorkloadInfo& info)
+    : BaseWorkload<ChannelShuffleQueueDescriptor>(descriptor, info)
+{
+    // Report Profiling Details
+    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonChannelShuffleWorkload_Construct",
+                                         descriptor.m_Parameters,
+                                         info,
+                                         this->GetGuid());
+
+    m_Data.ValidateInputsOutputs("NeonChannelShuffleWorkload", 1, 1);
+
+    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    // In Arm NN and in NNAPI, the channel shuffle implementation is data-layout agnostic and takes the axis as a
+    // parameter. The Neon implementation depends on the data layout and has no axis parameter: it only supports
+    // channel shuffle for 4D tensors along the channel dimension, i.e. axis 1 (NCHW) or axis 3 (NHWC).
+    arm_compute::DataLayout aclDataLayout;
+    switch (descriptor.m_Parameters.m_Axis)
+    {
+        case 1:
+            aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW);
+            break;
+        case 3:
+            aclDataLayout = ConvertDataLayout(armnn::DataLayout::NHWC);
+            break;
+        default:
+            ARMNN_ASSERT_MSG(false, "Unsupported axis");
+            break;
+    }
+    input.info()->set_data_layout(aclDataLayout);
+    output.info()->set_data_layout(aclDataLayout);
+
+    m_ChannelShuffleLayer.configure(&input, &output, descriptor.m_Parameters.m_NumGroups);
+}
+
+void NeonChannelShuffleWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonChannelShuffleWorkload_Execute", this->GetGuid());
+    m_ChannelShuffleLayer.run();
+}
+
+} // namespace armnn
diff --git a/src/backends/neon/workloads/NeonChannelShuffleWorkload.hpp b/src/backends/neon/workloads/NeonChannelShuffleWorkload.hpp
new file mode 100644
index 0000000..f0f20ae
--- /dev/null
+++ b/src/backends/neon/workloads/NeonChannelShuffleWorkload.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/NEON/functions/NEChannelShuffleLayer.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonChannelShuffleValidate(const TensorInfo& input,
+                                               const TensorInfo& output,
+                                               const ChannelShuffleDescriptor& descriptor);
+
+class NeonChannelShuffleWorkload : public BaseWorkload<ChannelShuffleQueueDescriptor>
+{
+public:
+    NeonChannelShuffleWorkload(const ChannelShuffleQueueDescriptor& descriptor, const WorkloadInfo& info);
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::NEChannelShuffleLayer m_ChannelShuffleLayer;
+};
+
+} // namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 2fb4b17..4d51d18 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -11,6 +11,7 @@
 #include "NeonBatchNormalizationWorkload.hpp"
 #include "NeonBatchToSpaceNdWorkload.hpp"
 #include "NeonCastWorkload.hpp"
+#include "NeonChannelShuffleWorkload.hpp"
 #include "NeonComparisonWorkload.hpp"
 #include "NeonConcatWorkload.hpp"
 #include "NeonConstantWorkload.hpp"