IVGCVSW-4375 Add support for Transpose

 * Added TransposeLayer
 * Added CL, Neon and Ref Workloads
 * Added Transpose utilities
 * Added Serializer and Deserializer support
 * Added Quantizer support

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I04c755ba7cb5b1edf72b3c9f3c0314878032e3c7
diff --git a/Android.mk b/Android.mk
index d1fa1b0..e423f25 100644
--- a/Android.mk
+++ b/Android.mk
@@ -120,6 +120,7 @@
         src/armnnUtils/NetworkSockets.cpp \
         src/armnnUtils/Filesystem.cpp \
         src/armnnUtils/Processes.cpp \
+        src/armnnUtils/Transpose.cpp \
         src/armnn/layers/ActivationLayer.cpp \
         src/armnn/layers/AdditionLayer.cpp \
         src/armnn/layers/ArgMinMaxLayer.cpp \
@@ -177,6 +178,7 @@
         src/armnn/layers/SubtractionLayer.cpp \
         src/armnn/layers/SwitchLayer.cpp \
         src/armnn/layers/TransposeConvolution2dLayer.cpp \
+        src/armnn/layers/TransposeLayer.cpp \
         src/profiling/BufferManager.cpp \
         src/profiling/CommandHandler.cpp \
         src/profiling/CommandHandlerFunctor.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 563de1a..9396316 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -40,6 +40,7 @@
     include/armnnUtils/Permute.hpp
     include/armnnUtils/FloatingPointConverter.hpp
     include/armnnUtils/TensorUtils.hpp
+    include/armnnUtils/Transpose.hpp
     src/armnnUtils/Filesystem.hpp
     src/armnnUtils/Filesystem.cpp
     src/armnnUtils/Processes.hpp
@@ -69,6 +70,7 @@
     src/armnnUtils/QuantizeHelper.hpp
     src/armnnUtils/TensorIOUtils.hpp
     src/armnnUtils/TensorUtils.cpp
+    src/armnnUtils/Transpose.cpp
     src/armnnUtils/NetworkSockets.hpp
     src/armnnUtils/NetworkSockets.cpp
     )
@@ -360,6 +362,8 @@
     src/armnn/layers/SwitchLayer.hpp
     src/armnn/layers/TransposeConvolution2dLayer.cpp
     src/armnn/layers/TransposeConvolution2dLayer.hpp
+    src/armnn/layers/TransposeLayer.hpp
+    src/armnn/layers/TransposeLayer.cpp
     src/armnn/BackendRegistry.cpp
     src/armnn/BackendSettings.hpp
     src/armnn/BackendHelper.cpp
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 2d7b17e..f1b29cc 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -1119,4 +1119,25 @@
     DataLayout m_DataLayout;
 };
 
+/// A TransposeDescriptor for the TransposeLayer.
+struct TransposeDescriptor
+{
+    TransposeDescriptor()
+            : m_DimMappings{}
+    {}
+
+    TransposeDescriptor(const PermutationVector& dimMappings)
+            : m_DimMappings(dimMappings)
+    {}
+
+    bool operator ==(const TransposeDescriptor &rhs) const
+    {
+        return m_DimMappings.IsEqual(rhs.m_DimMappings);
+    }
+
+    /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
+    /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
+    PermutationVector m_DimMappings;
+};
+
 } // namespace armnn
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index d03c61d..144c1ef 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -40,6 +40,7 @@
 struct StandInDescriptor;
 struct StridedSliceDescriptor;
 struct TransposeConvolution2dDescriptor;
+struct TransposeDescriptor;
 struct ViewsDescriptor;
 
 using ConcatDescriptor       = OriginsDescriptor;
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index d1bbf99..af91e87 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -369,6 +369,11 @@
         const Optional<TensorInfo>& biases,
         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
+    virtual bool IsTransposeSupported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      const TransposeDescriptor& descriptor,
+                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
 }; // class ILayerSupport
 
 using ILayerSupportSharedPtr = std::shared_ptr<ILayerSupport>;
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index 46f9e56..972915d 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -494,6 +494,14 @@
                                                   const Optional<ConstTensor>& biases,
                                                   const char* name = nullptr) = 0;
 
+    /// Function that a transpose layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+    /// @param layer - pointer to the layer which is calling back to this visit function.
+    /// @param transposeDescriptor - TransposeDescriptor to configure the transpose.
+    /// @param name - Optional name for the layer.
+    virtual void VisitTransposeLayer(const IConnectableLayer* layer,
+                                     const TransposeDescriptor& transposeDescriptor,
+                                     const char* name = nullptr) = 0;
+
     virtual void StartVisit() {}
     virtual void FinishVisit() {}
 
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 1b1c874..71eb9ff 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -511,6 +511,13 @@
                                                               const Optional<ConstTensor>& biases,
                                                               const char* name = nullptr) = 0;
 
+    /// Adds a transpose layer to the network.
+    /// @param transposeDescriptor - TransposeDescriptor to configure the transpose.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
+                                                 const char* name = nullptr) = 0;
+
     /// Adds a stack layer to the network.
     /// @param descriptor - Description of the stack layer.
     /// @param name - Optional name for the layer.
@@ -518,7 +525,6 @@
     virtual IConnectableLayer* AddStackLayer(const StackDescriptor& descriptor,
                                              const char* name = nullptr) = 0;
 
-
     /// Add a stand-in layer for a type unknown to the Arm NN framework.
     /// Note: Due to the nature of this layer, no validation can be performed by the framework.
     /// Furthermore, Any model containing this layer cannot make use of dynamic tensors since the
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
index 6fd9a66..511917c 100644
--- a/include/armnn/LayerVisitorBase.hpp
+++ b/include/armnn/LayerVisitorBase.hpp
@@ -246,6 +246,10 @@
                                           const Optional<ConstTensor>&,
                                           const char*) override { DefaultPolicy::Apply(__func__); }
 
+    void VisitTransposeLayer(const IConnectableLayer*,
+                             const TransposeDescriptor&,
+                             const char*) override { DefaultPolicy::Apply(__func__); }
+
 };
 
 } // namespace armnn
diff --git a/include/armnnUtils/Transpose.hpp b/include/armnnUtils/Transpose.hpp
new file mode 100644
index 0000000..0a1ba7d
--- /dev/null
+++ b/include/armnnUtils/Transpose.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/TensorFwd.hpp>
+#include <armnn/Types.hpp>
+
+namespace armnnUtils
+{
+
+armnn::TensorShape TransposeTensorShape(const armnn::TensorShape& srcShape, const armnn::PermutationVector& mappings);
+
+armnn::TensorInfo TransposeTensorShape(const armnn::TensorInfo& info, const armnn::PermutationVector& mappings);
+
+void Transpose(const armnn::TensorShape& dstShape, const armnn::PermutationVector& mappings,
+               const void* src, void* dst, size_t dataTypeSize);
+
+} // namespace armnnUtils
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index 10e7f50..c032e44 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -70,6 +70,7 @@
         case LayerType::Subtraction: return "Subtraction";
         case LayerType::Switch: return "Switch";
         case LayerType::TransposeConvolution2d: return "TransposeConvolution2d";
+        case LayerType::Transpose: return "Transpose";
         default:
             BOOST_ASSERT_MSG(false, "Unknown layer type");
             return "Unknown";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 2d7be3c..351f12c 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -69,9 +69,10 @@
     StridedSlice,
     Subtraction,
     Switch,
+    TransposeConvolution2d,
     // Last layer goes here.
     LastLayer,
-    TransposeConvolution2d = LastLayer
+    Transpose = LastLayer
 };
 
 const char* GetLayerTypeAsCString(LayerType type);
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 2d486f4..f3ce7e6 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -62,6 +62,7 @@
 #include "layers/SubtractionLayer.hpp"
 #include "layers/SwitchLayer.hpp"
 #include "layers/TransposeConvolution2dLayer.hpp"
+#include "layers/TransposeLayer.hpp"
 
 namespace armnn
 {
@@ -145,6 +146,7 @@
 DECLARE_LAYER(StridedSlice)
 DECLARE_LAYER(Subtraction)
 DECLARE_LAYER(Switch)
+DECLARE_LAYER(Transpose)
 DECLARE_LAYER(TransposeConvolution2d)
 
 }
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 7edc624..b405a77 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1526,6 +1526,12 @@
     return layer;
 }
 
+IConnectableLayer* Network::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
+                                              const char* name)
+{
+    return m_Graph->AddLayer<TransposeLayer>(transposeDescriptor, name);
+}
+
 IConnectableLayer* Network::AddStackLayer(const StackDescriptor& stackDescriptor,
                                           const char* name)
 {
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 23a8e47..5da6813 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -226,6 +226,9 @@
                                                       const Optional<ConstTensor>& biases,
                                                       const char* name = nullptr) override;
 
+    IConnectableLayer* AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
+                                         const char* name = nullptr) override;
+
     IConnectableLayer* AddStackLayer(const StackDescriptor& stackDescriptor,
                                      const char* name = nullptr) override;
 
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index 51818eb..8e7c45f 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -561,4 +561,13 @@
     SetQuantizedInputConnections(layer, newLayer);
 }
 
+void QuantizerVisitor::VisitTransposeLayer(const IConnectableLayer* layer,
+                                           const TransposeDescriptor& transposeDescriptor,
+                                           const char* name)
+{
+    IConnectableLayer* newLayer = m_QuantizedNetwork->AddTransposeLayer(transposeDescriptor, name);
+    RecordLayer(layer, newLayer);
+    SetQuantizedInputConnections(layer, newLayer);
+}
+
 } //namespace armnn
diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp
index 4013033..29500ab 100644
--- a/src/armnn/QuantizerVisitor.hpp
+++ b/src/armnn/QuantizerVisitor.hpp
@@ -187,6 +187,10 @@
                                           const Optional<ConstTensor>& biases,
                                           const char* name = nullptr) override;
 
+    void VisitTransposeLayer(const IConnectableLayer* layer,
+                             const TransposeDescriptor& descriptor,
+                             const char* name = nullptr) override;
+
     /// Extract the quantized network
     INetworkPtr RetrieveFinalNetwork() { return std::move(m_QuantizedNetwork); }
 
diff --git a/src/armnn/SerializeLayerParameters.cpp b/src/armnn/SerializeLayerParameters.cpp
index 544e389..76b92f3 100644
--- a/src/armnn/SerializeLayerParameters.cpp
+++ b/src/armnn/SerializeLayerParameters.cpp
@@ -491,4 +491,24 @@
     fn("DataLayout", GetDataLayoutName(desc.m_DataLayout));
 }
 
+void StringifyLayerParameters<TransposeDescriptor>::Serialize(ParameterStringifyFunction& fn,
+                                                              const TransposeDescriptor& desc)
+{
+    std::stringstream ss;
+    ss <<  "[";
+    bool addComma = false;
+    for (auto it : desc.m_DimMappings)
+    {
+        if (addComma)
+        {
+            ss << ",";
+        }
+        ss << it;
+        addComma = true;
+    }
+    ss << "]";
+
+    fn("DimMappings",ss.str());
+}
+
 } // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/SerializeLayerParameters.hpp b/src/armnn/SerializeLayerParameters.hpp
index 76ca0a5..ae921c4 100644
--- a/src/armnn/SerializeLayerParameters.hpp
+++ b/src/armnn/SerializeLayerParameters.hpp
@@ -160,4 +160,9 @@
     static void Serialize(ParameterStringifyFunction& fn, const TransposeConvolution2dDescriptor& desc);
 };
 
+template <> struct StringifyLayerParameters<TransposeDescriptor>
+{
+    static void Serialize(ParameterStringifyFunction& fn, const TransposeDescriptor& desc);
+};
+
 } // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp
new file mode 100644
index 0000000..3c22b54
--- /dev/null
+++ b/src/armnn/layers/TransposeLayer.cpp
@@ -0,0 +1,62 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "TransposeLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+
+#include <armnnUtils/Transpose.hpp>
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+TransposeLayer::TransposeLayer(const TransposeDescriptor& param, const char* name)
+    : LayerWithParameters(1, 1, LayerType::Transpose, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> TransposeLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+    TransposeQueueDescriptor descriptor;
+    return factory.CreateTranspose(descriptor, PrepInfoAndDesc(descriptor));
+}
+
+TransposeLayer* TransposeLayer::Clone(Graph& graph) const
+{
+    return CloneBase<TransposeLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> TransposeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    BOOST_ASSERT(inputShapes.size() == 1);
+    const TensorShape& inShape = inputShapes[0];
+    return std::vector<TensorShape> ({armnnUtils::TransposeTensorShape(inShape, m_Param.m_DimMappings)});
+}
+
+void TransposeLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "TransposeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShapes[0]);
+}
+
+void TransposeLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitTransposeLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/TransposeLayer.hpp b/src/armnn/layers/TransposeLayer.hpp
new file mode 100644
index 0000000..4906bc9
--- /dev/null
+++ b/src/armnn/layers/TransposeLayer.hpp
@@ -0,0 +1,70 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a transpose operation.
+class TransposeLayer : public LayerWithParameters<TransposeDescriptor>
+{
+public:
+    /// Makes a workload for the Transpose type.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    TransposeLayer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref TransposeLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+    /// Infers the output shapes from given input shapes and the permutation vector.
+    /// @param [in] inputShapes The input shapes layer has.
+    /// @return A vector containing the inferred output shape.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+    /// @return a permutation vector describing the permutation for the dimensions of the input tensor.
+    const PermutationVector& GetPermutation() const
+    {
+        return m_Param.m_DimMappings;
+    }
+
+    /// Indicates if the other layer received is inverse of this one.
+    /// @param [in] other The other layer to be compared with.
+    /// @return true if other layer is inverse of this, false otherwise.
+    bool IsInverse(const Layer& other) const
+    {
+        return (other.GetType() == LayerType::Transpose) &&
+            GetPermutation().IsInverse(boost::polymorphic_downcast<const TransposeLayer*>(&other)->GetPermutation());
+    }
+
+    /// Indicates if the other layer received is equal to this one.
+    /// @param [in] other The other layer to be compared with.
+    /// @return true if other layer is equal to this, false otherwise.
+    bool IsEqual(const Layer& other) const
+    {
+        return (other.GetType() == LayerType::Transpose) &&
+               GetPermutation().IsEqual(boost::polymorphic_downcast<const TransposeLayer*>(&other)->GetPermutation());
+    }
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create a TransposeLayer.
+    /// @param [in] param TransposeDescriptor to configure the transpose operation.
+    /// @param [in] name Optional name for the layer.
+    TransposeLayer(const TransposeDescriptor& param, const char* name);
+
+    /// Default destructor
+    ~TransposeLayer() = default;
+};
+
+} // namespace
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 61a38f9..0d81649 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -12,6 +12,7 @@
 #include <armnn/QuantizedLstmParams.hpp>
 
 #include <armnnUtils/Permute.hpp>
+#include <armnnUtils/Transpose.hpp>
 
 #include <ParserHelper.hpp>
 #include <VerificationHelpers.hpp>
@@ -241,6 +242,7 @@
     m_ParserFunctions[Layer_SubtractionLayer]            = &Deserializer::ParseSubtraction;
     m_ParserFunctions[Layer_SwitchLayer]                 = &Deserializer::ParseSwitch;
     m_ParserFunctions[Layer_TransposeConvolution2dLayer] = &Deserializer::ParseTransposeConvolution2d;
+    m_ParserFunctions[Layer_TransposeLayer]              = &Deserializer::ParseTranspose;
 }
 
 Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPtr, unsigned int layerIndex)
@@ -357,6 +359,8 @@
             return graphPtr->layers()->Get(layerIndex)->layer_as_SwitchLayer()->base();
         case Layer::Layer_TransposeConvolution2dLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_TransposeConvolution2dLayer()->base();
+        case Layer::Layer_TransposeLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_TransposeLayer()->base();
         case Layer::Layer_NONE:
         default:
             throw ParseException(boost::str(
@@ -2721,6 +2725,29 @@
     RegisterOutputSlots(graph, layerIndex, layer);
 }
 
+void Deserializer::ParseTranspose(GraphPtr graph, unsigned int layerIndex)
+{
+    CHECK_LAYERS(graph, 0, layerIndex);
+
+    auto dimsMapping = graph->layers()->Get(layerIndex)->layer_as_TransposeLayer()->descriptor()->dimMappings();
+
+    auto inputs = GetInputs(graph, layerIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    auto outputs = GetOutputs(graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+    auto outputInfo = ToTensorInfo(outputs[0]);
+
+    auto layerName = GetLayerName(graph, layerIndex);
+    const armnn::TransposeDescriptor descriptor(armnn::PermutationVector(dimsMapping->data(), dimsMapping->Length()));
+
+    IConnectableLayer* layer = m_Network->AddTransposeLayer(descriptor, layerName.c_str());
+    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    RegisterInputSlots(graph, layerIndex, layer);
+    RegisterOutputSlots(graph, layerIndex, layer);
+}
+
 void Deserializer::ParseTransposeConvolution2d(GraphPtr graph, unsigned int layerIndex)
 {
     CHECK_LAYERS(graph, 0, layerIndex);
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
index 8e8fe1a..f7e47cc 100644
--- a/src/armnnDeserializer/Deserializer.hpp
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -128,6 +128,7 @@
     void ParseStridedSlice(GraphPtr graph, unsigned int layerIndex);
     void ParseSubtraction(GraphPtr graph, unsigned int layerIndex);
     void ParseSwitch(GraphPtr graph, unsigned int layerIndex);
+    void ParseTranspose(GraphPtr graph, unsigned int layerIndex);
     void ParseTransposeConvolution2d(GraphPtr graph, unsigned int layerIndex);
 
     void RegisterInputSlots(GraphPtr graph, uint32_t layerIndex,
diff --git a/src/armnnDeserializer/test/DeserializeTranspose.cpp b/src/armnnDeserializer/test/DeserializeTranspose.cpp
new file mode 100644
index 0000000..bf0f043
--- /dev/null
+++ b/src/armnnDeserializer/test/DeserializeTranspose.cpp
@@ -0,0 +1,137 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../Deserializer.hpp"
+
+#include <string>
+
+BOOST_AUTO_TEST_SUITE(Deserializer)
+
+struct TransposeFixture : public ParserFlatbuffersSerializeFixture
+{
+    explicit TransposeFixture(const std::string &inputShape,
+                              const std::string &dimMappings,
+                              const std::string &outputShape,
+                              const std::string &dataType)
+    {
+        m_JsonString = R"(
+            {
+                inputIds: [0],
+                outputIds: [2],
+                layers: [
+                    {
+                        layer_type: "InputLayer",
+                        layer: {
+                            base: {
+                                layerBindingId: 0,
+                                base: {
+                                    index: 0,
+                                    layerName: "InputLayer",
+                                    layerType: "Input",
+                                    inputSlots: [{
+                                        index: 0,
+                                        connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                                    }],
+                                    outputSlots: [{
+                                        index: 0,
+                                        tensorInfo: {
+                                            dimensions: )" + inputShape + R"(,
+                                            dataType: )" + dataType + R"(
+                                        }
+                                    }]
+                                }
+                            }
+                        }
+                    },
+                    {
+                        layer_type: "TransposeLayer",
+                        layer: {
+                            base: {
+                                index: 1,
+                                layerName: "TransposeLayer",
+                                layerType: "Transpose",
+                                inputSlots: [{
+                                    index: 0,
+                                    connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                                }],
+                                outputSlots: [{
+                                    index: 0,
+                                    tensorInfo: {
+                                        dimensions: )" + outputShape + R"(,
+                                        dataType: )" + dataType + R"(
+                                    }
+                                }]
+                            },
+                            descriptor: {
+                                dimMappings: )" + dimMappings + R"(,
+                            }
+                        }
+                    },
+                    {
+                        layer_type: "OutputLayer",
+                        layer: {
+                            base:{
+                                layerBindingId: 2,
+                                base: {
+                                    index: 2,
+                                    layerName: "OutputLayer",
+                                    layerType: "Output",
+                                    inputSlots: [{
+                                        index: 0,
+                                        connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+                                    }],
+                                    outputSlots: [{
+                                        index: 0,
+                                        tensorInfo: {
+                                            dimensions: )" + outputShape + R"(,
+                                            dataType: )" + dataType + R"(
+                                        },
+                                    }],
+                                }
+                            }
+                        },
+                    }
+                ]
+            }
+        )";
+        SetupSingleInputSingleOutput("InputLayer", "OutputLayer");
+    }
+};
+
+struct SimpleTranspose2DFixture : TransposeFixture
+{
+    SimpleTranspose2DFixture() : TransposeFixture("[ 2, 3 ]",
+                                                  "[ 1, 0 ]",
+                                                  "[ 3, 2 ]",
+                                                  "QuantisedAsymm8") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(SimpleTranspose2DQuantisedAsymm8, SimpleTranspose2DFixture)
+{
+    RunTest<2, armnn::DataType::QAsymmU8>(0,
+                                                 { 1, 2, 3, 4, 5, 6 },
+                                                 { 1, 4, 2, 5, 3, 6 });
+}
+
+struct SimpleTranspose4DFixture : TransposeFixture
+{
+    SimpleTranspose4DFixture() : TransposeFixture("[ 1, 2, 3, 4 ]",
+                                                  "[ 3, 2, 1, 0 ]",
+                                                  "[ 4, 3, 2, 1 ]",
+                                                  "QuantisedAsymm8") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(SimpleTranspose4DQuantisedAsymm8, SimpleTranspose4DFixture)
+{
+    RunTest<4, armnn::DataType::QAsymmU8>(0,
+                                                 {  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,
+                                                   13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
+                                                 {  1, 13,  5, 17,  9, 21,  2, 14,  6, 18, 10, 22,
+                                                    3, 15,  7, 19, 11, 23,  4, 16,  8, 20, 12, 24 });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index 0697517..d175d41 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -149,7 +149,8 @@
     LogSoftmax = 51,
     Comparison = 52,
     StandIn = 53,
-    ElementwiseUnary = 54
+    ElementwiseUnary = 54,
+    Transpose = 55
 }
 
 // Base layer table to be used as part of other layers
@@ -732,6 +733,15 @@
     dataLayout:DataLayout = NCHW;
 }
 
+table TransposeLayer {
+    base:LayerBase;
+    descriptor:TransposeDescriptor;
+}
+
+table TransposeDescriptor {
+    dimMappings:[uint];
+}
+
 table ResizeLayer {
     base:LayerBase;
     descriptor:ResizeDescriptor;
@@ -820,7 +830,8 @@
     LogSoftmaxLayer,
     ComparisonLayer,
     StandInLayer,
-    ElementwiseUnaryLayer
+    ElementwiseUnaryLayer,
+    TransposeLayer
 }
 
 table AnyLayer {
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 3c01842..a3fdcf8 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -1301,6 +1301,33 @@
     CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_TransposeConvolution2dLayer);
 }
 
+void SerializerVisitor::VisitTransposeLayer(const armnn::IConnectableLayer* layer,
+                                            const armnn::TransposeDescriptor& descriptor,
+                                            const char* name)
+{
+    boost::ignore_unused(name);
+
+    // Create FlatBuffer BaseLayer
+    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Transpose);
+
+    std::vector<unsigned int> dimMappings;
+    for (unsigned int i=0; i<descriptor.m_DimMappings.GetSize(); ++i)
+    {
+        dimMappings.push_back(descriptor.m_DimMappings[i]);
+    }
+
+    auto flatBufferDesc = serializer::CreateTransposeDescriptor(m_flatBufferBuilder,
+                                                                m_flatBufferBuilder.CreateVector(dimMappings));
+
+    // Create the FlatBuffer TransposeLayer
+    auto flatBufferLayer = serializer::CreateTransposeLayer(m_flatBufferBuilder,
+                                                            flatBufferBaseLayer,
+                                                            flatBufferDesc);
+
+    // Add the AnyLayer to the FlatBufferLayers
+    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_TransposeLayer);
+}
+
 void SerializerVisitor::VisitQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
                                                 const armnn::QuantizedLstmInputParams& params,
                                                 const char* name)
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 14d2776..737cf3b 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -270,6 +270,10 @@
                                           const armnn::Optional<armnn::ConstTensor>& biases,
                                           const char* = nullptr) override;
 
+    void VisitTransposeLayer(const armnn::IConnectableLayer* layer,
+                             const armnn::TransposeDescriptor& descriptor,
+                             const char* name = nullptr) override;
+
 private:
 
     /// Creates the Input Slots and Output Slots and LayerBase for the layer.
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 47804fe..8c9c92b 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -2501,6 +2501,34 @@
     deserializedNetwork->Accept(verifier);
 }
 
+BOOST_AUTO_TEST_CASE(SerializeTranspose)
+{
+    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Transpose)
+
+    const std::string layerName("transpose");
+    const armnn::TensorInfo inputTensorInfo({4, 3, 2, 1}, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo({1, 2, 3, 4}, armnn::DataType::Float32);
+
+    armnn::TransposeDescriptor descriptor(armnn::PermutationVector({3, 2, 1, 0}));
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const transposeLayer = network->AddTransposeLayer(descriptor, layerName.c_str());
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+    inputLayer->GetOutputSlot(0).Connect(transposeLayer->GetInputSlot(0));
+    transposeLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+    transposeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    TransposeLayerVerifier verifier(layerName, {inputTensorInfo}, {outputTensorInfo}, descriptor);
+    deserializedNetwork->Accept(verifier);
+}
+
 BOOST_AUTO_TEST_CASE(SerializeTransposeConvolution2d)
 {
     using Descriptor = armnn::TransposeConvolution2dDescriptor;
diff --git a/src/armnnUtils/Transpose.cpp b/src/armnnUtils/Transpose.cpp
new file mode 100644
index 0000000..3f3837c
--- /dev/null
+++ b/src/armnnUtils/Transpose.cpp
@@ -0,0 +1,126 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/Tensor.hpp>
+
+#include <armnnUtils/Transpose.hpp>
+
+#include "Half.hpp"
+
+#include <cassert>
+#include <cstring>
+
+namespace
+{
+
+class TransposeLoop
+{
+public:
+    using size_type = unsigned int;
+
+    TransposeLoop(const armnn::TensorShape& srcShape, const armnn::PermutationVector& mappings)
+        : m_SrcShape(srcShape)
+    {
+        assert(srcShape.GetNumDimensions() == mappings.GetSize());
+
+        const size_type numDims = srcShape.GetNumDimensions();
+
+        size_type srcStride = 1U;
+        size_type dstStride = 1U;
+
+        for (size_type i = numDims - 1U, k = 0U; k < numDims; ++k, --i)
+        {
+            m_SrcStrides[i] = srcStride;
+            m_DstStrides[mappings[i]] = dstStride;
+
+            srcStride *= srcShape[i];
+            dstStride *= srcShape[mappings[i]];
+        }
+    }
+
+    void Unroll(const void* srcData, void* dstData, size_t dataTypeSize)
+    {
+        assert(srcData);
+        assert(dstData);
+        assert(dataTypeSize > 0);
+
+        const unsigned char* srcDataPtr = reinterpret_cast<const unsigned char*>(srcData);
+        unsigned char* dstDataPtr       = reinterpret_cast<unsigned char*>(dstData);
+
+        const unsigned char* const srcEndPtr = srcDataPtr + m_SrcShape.GetNumElements() * dataTypeSize;
+        unsigned char* const       dstEndPtr = dstDataPtr + m_SrcShape.GetNumElements() * dataTypeSize;
+
+        Unroll(0, srcDataPtr, dstDataPtr, srcEndPtr, dstEndPtr, dataTypeSize);
+    }
+
+private:
+    void Unroll(size_type dimension,
+                const unsigned char* srcData, unsigned char* dstData,
+                const unsigned char* srcEnd, unsigned char* dstEnd,
+                size_t dataTypeSize)
+    {
+        assert(srcData);
+        assert(dstData);
+        assert(srcEnd);
+        assert(dstEnd);
+        assert(srcData < srcEnd);
+        assert(dstData < dstEnd);
+        assert(dataTypeSize > 0);
+
+        if (dimension >= m_SrcShape.GetNumDimensions())
+        {
+            ::memcpy(dstData, srcData, dataTypeSize);
+        }
+        else
+        {
+            for (size_type i = 0; i < m_SrcShape[dimension]; i++)
+            {
+                Unroll(dimension + 1, srcData, dstData, srcEnd, dstEnd, dataTypeSize);
+
+                srcData += m_SrcStrides[dimension] * dataTypeSize;
+                dstData += m_DstStrides[dimension] * dataTypeSize;
+            }
+        }
+    }
+
+    armnn::TensorShape m_SrcShape;
+    std::array<size_type, armnn::MaxNumOfTensorDimensions> m_SrcStrides;
+    std::array<size_type, armnn::MaxNumOfTensorDimensions> m_DstStrides;
+};
+
+} // namespace
+
+namespace armnnUtils
+{
+
+armnn::TensorShape TransposeTensorShape(const armnn::TensorShape& srcShape, const armnn::PermutationVector& mappings)
+{
+    assert(srcShape.GetNumDimensions() == mappings.GetSize());
+
+    const unsigned int numDims = mappings.GetSize();
+    unsigned int outDims[armnn::MaxNumOfTensorDimensions];
+
+    for (unsigned int i = 0U; i < numDims; ++i)
+    {
+        outDims[i] = srcShape[mappings[i]];
+    }
+    armnn::TensorShape permutedShape(numDims, outDims);
+    return permutedShape;
+}
+
+armnn::TensorInfo TransposeTensorShape(const armnn::TensorInfo& info, const armnn::PermutationVector& mappings)
+{
+    armnn::TensorInfo outInfo(info);
+    outInfo.SetShape(TransposeTensorShape(info.GetShape(), mappings));
+    return outInfo;
+}
+
+void Transpose(const armnn::TensorShape& srcShape, const armnn::PermutationVector& mappings,
+             const void* src, void* dst, size_t dataTypeSize)
+{
+    TransposeLoop(srcShape, mappings).Unroll(src, dst, dataTypeSize);
+}
+
+} // namespace armnnUtils
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index 49fef5b..84091e8 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -214,7 +214,34 @@
     {
         aclPerm.set(i - start, perm[i] - start);
     }
+    return aclPerm;
+}
 
+arm_compute::PermutationVector BuildArmComputeTransposeVector(const armnn::PermutationVector& perm)
+{
+    arm_compute::PermutationVector aclPerm;
+    std::map<unsigned int, unsigned int> permuteMappings;
+    for (unsigned int i = 0; i < perm.GetSize(); ++i)
+    {
+        permuteMappings[perm[i]] = i;
+    }
+
+    std::vector<unsigned int> permuteVector;
+    for (unsigned int i = 0; i < perm.GetSize(); ++i)
+    {
+        permuteVector.push_back(permuteMappings.at(i));
+    }
+
+    unsigned int start = 0;
+    while ((start < perm.GetSize()) && (start == permuteVector[start]))
+    {
+        ++start;
+    }
+
+    for (unsigned int i = start; i < perm.GetSize(); ++i)
+    {
+        aclPerm.set(i - start, permuteVector[i] - start);
+    }
     return aclPerm;
 }
 
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.hpp b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
index b4ff0f7..9b236e1 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
@@ -60,6 +60,9 @@
 /// Utility function used to setup an arm_compute::PermutationVector object from an armnn::PermutationVector.
 arm_compute::PermutationVector BuildArmComputePermutationVector(const armnn::PermutationVector& vector);
 
+/// Utility function used to setup an arm_compute::PermutationVector for Transpose by inverting an armnn::PermutationVector.
+arm_compute::PermutationVector BuildArmComputeTransposeVector(const armnn::PermutationVector& vector);
+
 /// Utility function used to setup an arm_compute::Size2D object from width and height values.
 arm_compute::Size2D BuildArmComputeSize2D(const unsigned int width, const unsigned int height);
 
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 449b809..1279134 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -579,4 +579,12 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsTransposeSupported(const TensorInfo& /*input*/,
+                                            const TensorInfo& /*output*/,
+                                            const TransposeDescriptor& /*descriptor*/,
+                                            Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 } // namespace armnn
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 459ac03..888bef5 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -353,6 +353,12 @@
         const TensorInfo& weights,
         const Optional<TensorInfo>& biases,
         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsTransposeSupported(const TensorInfo& input,
+                              const TensorInfo& output,
+                              const TransposeDescriptor& descriptor,
+                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
 };
 
 } // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 410469e..9b7a242 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -2680,6 +2680,35 @@
     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
 }
 
+void TransposeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    const std::string descriptorName{"TransposeQueueDescriptor"};
+
+    ValidateNumInputs(workloadInfo,  descriptorName, 1);
+    ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+    const PermutationVector& mapping = m_Parameters.m_DimMappings;
+
+    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
+    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+    ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, mapping.GetSize(), "input");
+    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
+
+    for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
+    {
+        if (inputTensorInfo.GetShape()[mapping[i]] != outputTensorInfo.GetShape()[i])
+        {
+            throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(mapping[i]) +
+                                           " (=" + to_string(inputTensorInfo.GetShape()[mapping[i]]) + ") " +
+                                           "must match dst dimension " + to_string(i) +
+                                           " (=" + to_string(outputTensorInfo.GetShape()[i]) + ")");
+        }
+    }
+
+    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+}
+
 void QuantizedLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
 {
     const std::string descriptorName{"QuantizedLstmQueueDescriptor"};
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 46681e9..06289fa 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -504,6 +504,11 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct TransposeQueueDescriptor : QueueDescriptorWithParameters<TransposeDescriptor>
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 struct QuantizedLstmQueueDescriptor : QueueDescriptor
 {
     QuantizedLstmQueueDescriptor()
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 23ff70a..6ac76ec 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -1023,6 +1023,17 @@
                                                           reason);
             break;
         }
+        case LayerType::Transpose:
+        {
+            auto cLayer = boost::polymorphic_downcast<const TransposeLayer*>(&layer);
+            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+            result = layerSupportObject->IsTransposeSupported(OverrideDataType(input, dataType),
+                                                              OverrideDataType(output, dataType),
+                                                              cLayer->GetParameters(),
+                                                              reason);
+            break;
+        }
         case LayerType::TransposeConvolution2d:
         {
             auto cLayer = boost::polymorphic_downcast<const TransposeConvolution2dLayer*>(&layer);
@@ -1315,7 +1326,7 @@
 }
 
 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
-                                                           const WorkloadInfo&/**/ /*info*/) const
+                                                           const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
@@ -1379,7 +1390,7 @@
 {
     return std::unique_ptr<IWorkload>();
 }
-/**/
+
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*info*/) const
 {
@@ -1428,6 +1439,12 @@
     return std::unique_ptr<IWorkload>();
 }
 
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& /*descriptor*/,
+                                                             const WorkloadInfo& /*info*/) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
     const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
     const WorkloadInfo& /*info*/) const
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index e1cdff6..dae58b6 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -235,6 +235,9 @@
     virtual std::unique_ptr<IWorkload> CreateSwitch(const SwitchQueueDescriptor& descriptor,
                                                     const WorkloadInfo& Info) const;
 
+    virtual std::unique_ptr<IWorkload> CreateTranspose(const TransposeQueueDescriptor& descriptor,
+                                                       const WorkloadInfo& info) const;
+
     virtual std::unique_ptr<IWorkload> CreateTransposeConvolution2d(
         const TransposeConvolution2dQueueDescriptor& descriptor,
         const WorkloadInfo& info) const;
diff --git a/src/backends/backendsCommon/WorkloadFactoryBase.hpp b/src/backends/backendsCommon/WorkloadFactoryBase.hpp
index 9602cc3..960dbd3 100644
--- a/src/backends/backendsCommon/WorkloadFactoryBase.hpp
+++ b/src/backends/backendsCommon/WorkloadFactoryBase.hpp
@@ -266,6 +266,10 @@
                                             const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
+    std::unique_ptr<IWorkload> CreateTranspose(const TransposeQueueDescriptor& /*descriptor*/,
+                                               const WorkloadInfo& /*info*/) const override
+    { return nullptr; }
+
     std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
                                                             const WorkloadInfo& /*info*/) const override
     { return nullptr; }
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 1a899aa..395a63d 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -537,6 +537,8 @@
 
 DECLARE_LAYER_POLICY_1_PARAM(Switch)
 
+DECLARE_LAYER_POLICY_2_PARAM(Transpose)
+
 DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)
 
 
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index eba7944..62a66df 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -53,3 +53,4 @@
 #include <backendsCommon/test/layerTests/StridedSliceTestImpl.hpp>
 #include <backendsCommon/test/layerTests/SubtractionTestImpl.hpp>
 #include <backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp>
+#include <backendsCommon/test/layerTests/TransposeTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
new file mode 100644
index 0000000..3949dcc
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
@@ -0,0 +1,240 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <ResolveType.hpp>
+
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+template<typename T>
+LayerTestResult<T, 4> SimpleTransposeTestImpl(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        armnn::TransposeDescriptor descriptor,
+        armnn::TensorInfo inputTensorInfo,
+        armnn::TensorInfo outputTensorInfo,
+        const std::vector<T>& inputData,
+        const std::vector<T>& outputExpectedData)
+{
+    boost::ignore_unused(memoryManager);
+    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+    LayerTestResult<T, 4> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::TransposeQueueDescriptor data;
+    data.m_Parameters = descriptor;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateTranspose(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> SimpleTransposeTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[] = { 1, 2, 2, 2 };
+    unsigned int outputShape[] = { 1, 2, 2, 2 };
+
+    armnn::TransposeDescriptor descriptor;
+    descriptor.m_DimMappings = {0U, 2U, 3U, 1U};
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(0.5f);
+        inputTensorInfo.SetQuantizationOffset(5);
+        outputTensorInfo.SetQuantizationScale(0.5f);
+        outputTensorInfo.SetQuantizationOffset(5);
+    }
+
+    std::vector<T> input = std::vector<T>(
+    {
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8
+    });
+
+    std::vector<T> outputExpected = std::vector<T>(
+    {
+        1, 5, 2, 6,
+        3, 7, 4, 8
+    });
+
+    return SimpleTransposeTestImpl<T>(workloadFactory, memoryManager,
+                                    descriptor, inputTensorInfo,
+                                    outputTensorInfo, input, outputExpected);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> TransposeValueSet1Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[]  = { 1, 2, 2, 3 };
+    unsigned int outputShape[] = { 1, 3, 2, 2 };
+
+    armnn::TransposeDescriptor descriptor;
+    descriptor.m_DimMappings = {0U, 3U, 1U, 2U};
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(0.5f);
+        inputTensorInfo.SetQuantizationOffset(5);
+        outputTensorInfo.SetQuantizationScale(0.5f);
+        outputTensorInfo.SetQuantizationOffset(5);
+    }
+
+    std::vector<T> input = std::vector<T>(
+    {
+         1,  2,  3,
+        11, 12, 13,
+        21, 22, 23,
+        31, 32, 33
+    });
+
+    std::vector<T> outputExpected = std::vector<T>(
+    {
+        1, 11, 21, 31,
+        2, 12, 22, 32,
+        3, 13, 23, 33
+    });
+
+    return SimpleTransposeTestImpl<T>(workloadFactory, memoryManager,
+                                    descriptor, inputTensorInfo,
+                                    outputTensorInfo, input, outputExpected);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> TransposeValueSet2Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[]  = { 1, 3, 2, 2 };
+    unsigned int outputShape[] = { 1, 2, 2, 3 };
+
+    armnn::TransposeDescriptor descriptor;
+    descriptor.m_DimMappings = {0U, 2U, 3U, 1U};
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(0.5f);
+        inputTensorInfo.SetQuantizationOffset(5);
+        outputTensorInfo.SetQuantizationScale(0.5f);
+        outputTensorInfo.SetQuantizationOffset(5);
+    }
+
+    std::vector<T> input = std::vector<T>(
+    {
+        1, 11, 21, 31,
+        2, 12, 22, 32,
+        3, 13, 23, 33
+    });
+
+    std::vector<T> outputExpected = std::vector<T>(
+    {
+         1,  2,  3,
+        11, 12, 13,
+        21, 22, 23,
+        31, 32, 33,
+    });
+
+    return SimpleTransposeTestImpl<T>(workloadFactory, memoryManager,
+                                    descriptor, inputTensorInfo,
+                                    outputTensorInfo, input, outputExpected);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> TransposeValueSet3Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[]  = { 1, 2, 3, 3 };
+    unsigned int outputShape[] = { 1, 3, 2, 3 };
+
+    armnn::TransposeDescriptor descriptor;
+    descriptor.m_DimMappings = {0U, 3U, 1U, 2U};
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(0.5f);
+        inputTensorInfo.SetQuantizationOffset(5);
+        outputTensorInfo.SetQuantizationScale(0.5f);
+        outputTensorInfo.SetQuantizationOffset(5);
+    }
+
+    std::vector<T> input = std::vector<T>(
+    {
+         1,  2,  3,
+        11, 12, 13,
+        21, 22, 23,
+        31, 32, 33,
+        41, 42, 43,
+        51, 52, 53
+    });
+
+    std::vector<T> outputExpected = std::vector<T>(
+    {
+        1, 11, 21, 31, 41, 51,
+        2, 12, 22, 32, 42, 52,
+        3, 13, 23, 33, 43, 53
+    });
+
+    return SimpleTransposeTestImpl<T>(workloadFactory, memoryManager,
+                                      descriptor, inputTensorInfo,
+                                      outputTensorInfo, input, outputExpected);
+}
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index e8548e4..d3ac986 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -60,6 +60,7 @@
 #include "workloads/ClStridedSliceWorkload.hpp"
 #include "workloads/ClSubtractionWorkload.hpp"
 #include "workloads/ClTransposeConvolution2dWorkload.hpp"
+#include "workloads/ClTransposeWorkload.hpp"
 #endif
 
 using namespace boost;
@@ -819,4 +820,12 @@
                                    biases);
 }
 
+bool ClLayerSupport::IsTransposeSupported(const TensorInfo& input,
+                                          const TensorInfo& output,
+                                          const TransposeDescriptor& descriptor,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
+}
+
 } // namespace armnn
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 819d086..60899d0 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -286,6 +286,12 @@
                                            const TensorInfo& weights,
                                            const Optional<TensorInfo>& biases,
                                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsTransposeSupported(const TensorInfo& input,
+                              const TensorInfo& output,
+                              const TransposeDescriptor& descriptor,
+                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
 };
 
 } // namespace armnn
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 4bb2e2a..21c2629 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -534,6 +534,12 @@
     return MakeWorkload<ClSubtractionWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& descriptor,
+                                                              const WorkloadInfo& info) const
+{
+    return MakeWorkload<ClTransposeWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateTransposeConvolution2d(
     const TransposeConvolution2dQueueDescriptor& descriptor,
     const WorkloadInfo& info) const
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 980be91..a716801 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -210,6 +210,9 @@
     std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateTranspose(const TransposeQueueDescriptor& descriptor,
+                                               const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const override;
 
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index 4182b94..e326add 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -67,7 +67,8 @@
         workloads/ClStackWorkload.cpp \
         workloads/ClStridedSliceWorkload.cpp \
         workloads/ClSubtractionWorkload.cpp \
-        workloads/ClTransposeConvolution2dWorkload.cpp
+        workloads/ClTransposeConvolution2dWorkload.cpp \
+        workloads/ClTransposeWorkload.cpp
 else
 
 # ARMNN_COMPUTE_CL_ENABLED == 0
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index cfec81a..d8b0fd1 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -770,6 +770,20 @@
 ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8ToFp16, DequantizeSimpleUint8ToFp16Test)
 ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16ToFp16, DequantizeSimpleInt16ToFp16Test)
 
+// Transpose
+ARMNN_AUTO_TEST_CASE(SimpleTransposeFloat32, SimpleTransposeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet1Test, TransposeValueSet1Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet2Test, TransposeValueSet2Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet3Test, TransposeValueSet3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymm8, SimpleTransposeTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet1Test, TransposeValueSet1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet2Test, TransposeValueSet2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet3Test, TransposeValueSet3Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeQSymm16, SimpleTransposeTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(TransposeQSymm16ValueSet1Test, TransposeValueSet1Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(TransposeQSymm16ValueSet2Test, TransposeValueSet2Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(TransposeQSymm16ValueSet3Test, TransposeValueSet3Test<DataType::QSymmS16>)
+
 // TransposeConvolution2d
 ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNchw,
                      SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index de62ca9..17d69b1 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -96,6 +96,8 @@
     ClSubtractionWorkload.hpp
     ClTransposeConvolution2dWorkload.cpp
     ClTransposeConvolution2dWorkload.hpp
+    ClTransposeWorkload.cpp
+    ClTransposeWorkload.hpp
     ClWorkloads.hpp
     ClWorkloadUtils.hpp
 )
diff --git a/src/backends/cl/workloads/ClTransposeWorkload.cpp b/src/backends/cl/workloads/ClTransposeWorkload.cpp
new file mode 100644
index 0000000..b276b22
--- /dev/null
+++ b/src/backends/cl/workloads/ClTransposeWorkload.cpp
@@ -0,0 +1,49 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClTransposeWorkload.hpp"
+#include <cl/ClTensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <arm_compute/core/Error.h>
+
+#include "ClWorkloadUtils.hpp"
+
+namespace armnn
+{
+
+arm_compute::Status ClTransposeWorkloadValidate(const TensorInfo& input,
+                                                const TensorInfo& output,
+                                                const TransposeDescriptor& descriptor)
+{
+    const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+    const armnn::PermutationVector& mappings = descriptor.m_DimMappings;
+
+    return arm_compute::CLPermute::validate(&aclInputInfo, &aclOutputInfo,
+                                            armcomputetensorutils::BuildArmComputeTransposeVector(mappings));
+}
+
+ClTransposeWorkload::ClTransposeWorkload(const TransposeQueueDescriptor& descriptor,
+                                         const WorkloadInfo& info)
+    : BaseWorkload<TransposeQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs(GetName(), 1, 1);
+
+    const arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    const armnn::PermutationVector& mappings = m_Data.m_Parameters.m_DimMappings;
+    // Run the layer.
+    m_PermuteFunction.configure(&input, &output,
+                                armcomputetensorutils::BuildArmComputeTransposeVector(mappings));
+}
+
+void ClTransposeWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_CL(GetName() + "_Execute");
+    RunClFunction(m_PermuteFunction, CHECK_LOCATION());
+}
+
+} // namespace armnn
diff --git a/src/backends/cl/workloads/ClTransposeWorkload.hpp b/src/backends/cl/workloads/ClTransposeWorkload.hpp
new file mode 100644
index 0000000..c1bed93
--- /dev/null
+++ b/src/backends/cl/workloads/ClTransposeWorkload.hpp
@@ -0,0 +1,40 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+#include <armnn/TypesUtils.hpp>
+#include <arm_compute/runtime/CL/functions/CLPermute.h>
+
+#include <string>
+
+namespace armnn
+{
+
+arm_compute::Status ClTransposeWorkloadValidate(const TensorInfo& input,
+                                                const TensorInfo& output,
+                                                const TransposeDescriptor& descriptor);
+
+class ClTransposeWorkload : public BaseWorkload<TransposeQueueDescriptor>
+{
+public:
+    static const std::string& GetName()
+    {
+        static const std::string name = std::string("ClTransposeWorkload");
+        return name;
+    }
+
+    ClTransposeWorkload(const TransposeQueueDescriptor& descriptor, const WorkloadInfo& info);
+    void Execute() const override;
+
+private:
+    using BaseWorkload<TransposeQueueDescriptor>::m_Data;
+    mutable arm_compute::CLPermute m_PermuteFunction;
+};
+
+} // namespace armnn
diff --git a/src/backends/cl/workloads/ClWorkloads.hpp b/src/backends/cl/workloads/ClWorkloads.hpp
index 014dc3f..ec193d5 100644
--- a/src/backends/cl/workloads/ClWorkloads.hpp
+++ b/src/backends/cl/workloads/ClWorkloads.hpp
@@ -49,3 +49,4 @@
 #include "ClConvertFp16ToFp32Workload.hpp"
 #include "ClConvertFp32ToFp16Workload.hpp"
 #include "ClTransposeConvolution2dWorkload.hpp"
+#include "ClTransposeWorkload.hpp"
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 3c161d5..7e58dab 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -59,6 +59,7 @@
 #include "workloads/NeonStridedSliceWorkload.hpp"
 #include "workloads/NeonSubtractionWorkload.hpp"
 #include "workloads/NeonTransposeConvolution2dWorkload.hpp"
+#include "workloads/NeonTransposeWorkload.hpp"
 #endif
 
 using namespace boost;
@@ -803,4 +804,12 @@
                                    biases);
 }
 
+bool NeonLayerSupport::IsTransposeSupported(const TensorInfo& input,
+                                            const TensorInfo& output,
+                                            const TransposeDescriptor& descriptor,
+                                            Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
+}
+
 } // namespace armnn
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 9cb64ea..f45db35 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -288,6 +288,11 @@
                                            const Optional<TensorInfo>& biases,
                                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsTransposeSupported(const TensorInfo& input,
+                              const TensorInfo& output,
+                              const TransposeDescriptor& descriptor,
+                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
 }; // class NeonLayerSupport
 
 } // namespace armnn
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index c3e0dc8..dc3ee84 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -503,6 +503,12 @@
     return std::make_unique<NeonSubtractionWorkload>(descriptor, info);
 }
 
+std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& descriptor,
+                                                                       const WorkloadInfo& info) const
+{
+    return std::make_unique<NeonTransposeWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateTransposeConvolution2d(
     const TransposeConvolution2dQueueDescriptor &descriptor,
     const WorkloadInfo &info) const
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 95271e2..bc4107d 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -213,6 +213,9 @@
     std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateTranspose(const TransposeQueueDescriptor& descriptor,
+                                               const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const override;
 
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 1c572e6..d9a5405 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -67,7 +67,8 @@
         workloads/NeonStackWorkload.cpp \
         workloads/NeonStridedSliceWorkload.cpp \
         workloads/NeonSubtractionWorkload.cpp \
-        workloads/NeonTransposeConvolution2dWorkload.cpp
+        workloads/NeonTransposeConvolution2dWorkload.cpp \
+        workloads/NeonTransposeWorkload.cpp
 
 else
 
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 18658a3..482bc25 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -891,6 +891,20 @@
 ARMNN_AUTO_TEST_CASE(StackOutput3DInputs3, StackOutput3DInputs3Float32Test)
 ARMNN_AUTO_TEST_CASE(StackOutput5D,        StackOutput5DFloat32Test)
 
+// Transpose
+ARMNN_AUTO_TEST_CASE(SimpleTransposeFloat32, SimpleTransposeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet1Test, TransposeValueSet1Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet2Test, TransposeValueSet2Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet3Test, TransposeValueSet3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymm8, SimpleTransposeTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet1Test, TransposeValueSet1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet2Test, TransposeValueSet2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet3Test, TransposeValueSet3Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeQSymm16, SimpleTransposeTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(TransposeQSymm16ValueSet1Test, TransposeValueSet1Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(TransposeQSymm16ValueSet2Test, TransposeValueSet2Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(TransposeQSymm16ValueSet3Test, TransposeValueSet3Test<DataType::QSymmS16>)
+
 // TransposeConvolution2d
 ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNchw,
                      SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 02ffedc..a932f8b 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -98,6 +98,8 @@
     NeonSubtractionWorkload.hpp
     NeonTransposeConvolution2dWorkload.cpp
     NeonTransposeConvolution2dWorkload.hpp
+    NeonTransposeWorkload.cpp
+    NeonTransposeWorkload.hpp
     NeonWorkloads.hpp
     NeonWorkloadUtils.hpp
 )
diff --git a/src/backends/neon/workloads/NeonTransposeWorkload.cpp b/src/backends/neon/workloads/NeonTransposeWorkload.cpp
new file mode 100644
index 0000000..c11f2df
--- /dev/null
+++ b/src/backends/neon/workloads/NeonTransposeWorkload.cpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonTransposeWorkload.hpp"
+#include <neon/NeonTensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <arm_compute/core/Error.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo& input,
+                                                  const TensorInfo& output,
+                                                  const TransposeDescriptor& descriptor)
+{
+    const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+    const armnn::PermutationVector& mappings = descriptor.m_DimMappings;
+
+    return arm_compute::NEPermute::validate(&aclInputInfo, &aclOutputInfo,
+                                            armcomputetensorutils::BuildArmComputeTransposeVector(mappings));
+}
+
+NeonTransposeWorkload::NeonTransposeWorkload(const TransposeQueueDescriptor& descriptor,
+                                             const WorkloadInfo& info)
+    : BaseWorkload<TransposeQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs(GetName(), 1, 1);
+
+    const arm_compute::ITensor& input = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    const armnn::PermutationVector& mappings = m_Data.m_Parameters.m_DimMappings;
+
+    // Run the layer.
+    m_PermuteFunction.configure(&input, &output,
+                                armcomputetensorutils::BuildArmComputeTransposeVector(mappings));
+}
+
+void NeonTransposeWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON(GetName() + "_Execute");
+    m_PermuteFunction.run();
+}
+
+} // namespace armnn
diff --git a/src/backends/neon/workloads/NeonTransposeWorkload.hpp b/src/backends/neon/workloads/NeonTransposeWorkload.hpp
new file mode 100644
index 0000000..aab7b70
--- /dev/null
+++ b/src/backends/neon/workloads/NeonTransposeWorkload.hpp
@@ -0,0 +1,39 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <neon/workloads/NeonWorkloadUtils.hpp>
+
+#include <armnn/TypesUtils.hpp>
+#include <arm_compute/runtime/NEON/functions/NEPermute.h>
+
+#include <string>
+
+namespace armnn
+{
+arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo& input, const TensorInfo& output,
+                                                  const TransposeDescriptor& descriptor);
+
+class NeonTransposeWorkload : public BaseWorkload<TransposeQueueDescriptor>
+{
+public:
+    static const std::string& GetName()
+    {
+        static const std::string name = std::string("NeonTransposeWorkload");
+        return name;
+    }
+
+    NeonTransposeWorkload(const TransposeQueueDescriptor& descriptor, const WorkloadInfo& info);
+    void Execute() const override;
+
+private:
+    using BaseWorkload<TransposeQueueDescriptor>::m_Data;
+    mutable arm_compute::NEPermute m_PermuteFunction;
+};
+
+} // namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index b08483c..52cd76f 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -50,3 +50,4 @@
 #include "NeonStridedSliceWorkload.hpp"
 #include "NeonSubtractionWorkload.hpp"
 #include "NeonTransposeConvolution2dWorkload.hpp"
+#include "NeonTransposeWorkload.hpp"
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 8f1f170..25334c3 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1388,9 +1388,10 @@
     bool supported = true;
 
     // Define supported output and inputs types.
-    std::array<DataType,3> supportedTypes =
+    std::array<DataType, 4> supportedTypes =
     {
         DataType::Float32,
+        DataType::Float16,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1912,4 +1913,33 @@
     return supported;
 }
 
+bool RefLayerSupport::IsTransposeSupported(const TensorInfo& input,
+                                           const TensorInfo& output,
+                                           const TransposeDescriptor& descriptor,
+                                           Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(descriptor);
+    bool supported = true;
+
+    // Define supported output and inputs types.
+    std::array<DataType, 4> supportedTypes =
+    {
+        DataType::Float32,
+        DataType::Float16,
+        DataType::QAsymmU8,
+        DataType::QSymmS16
+    };
+
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+                                  "Reference transpose: input is not a supported type.");
+
+    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+                                  "Reference transpose: output is not a supported type.");
+
+    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+                                  "Reference transpose: input and output types are mismatched.");
+
+    return supported;
+}
+
 } // namespace armnn
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 1551a55..27f3f81 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -318,6 +318,12 @@
         const TensorInfo& weights,
         const Optional<TensorInfo>& biases,
         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsTransposeSupported(const TensorInfo& input,
+                              const TensorInfo& output,
+                              const TransposeDescriptor& descriptor,
+                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
 };
 
 } // namespace armnn
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 02dbbab..2a415bf 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -561,6 +561,17 @@
     return std::make_unique<RefSubtractionWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& descriptor,
+                                                               const WorkloadInfo& info) const
+{
+    if (IsQSymmS16(info))
+    {
+        return std::make_unique<RefTransposeQSymm16Workload>(descriptor, info);
+    }
+    return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload, RefTransposeQAsymm8Workload,
+            NullWorkload, NullWorkload, NullWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTransposeConvolution2d(
     const TransposeConvolution2dQueueDescriptor& descriptor,
     const WorkloadInfo& info) const
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index b5b9b0f..030ce6f 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -236,6 +236,9 @@
     std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateTranspose(const TransposeQueueDescriptor& descriptor,
+                                               const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const override;
 
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 1987bd5..010d548 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -85,6 +85,7 @@
         workloads/RefStridedSliceWorkload.cpp \
         workloads/RefSplitterWorkload.cpp \
         workloads/RefTransposeConvolution2dWorkload.cpp \
+        workloads/RefTransposeWorkload.cpp \
         workloads/Resize.cpp \
         workloads/Slice.cpp \
         workloads/SpaceToBatchNd.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index d5c67ef..ed2b995 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1460,6 +1460,20 @@
 ARMNN_AUTO_TEST_CASE(Slice2dInt16, Slice2dInt16Test)
 ARMNN_AUTO_TEST_CASE(Slice1dInt16, Slice1dInt16Test)
 
+// Transpose
+ARMNN_AUTO_TEST_CASE(SimpleTransposeFloat32, SimpleTransposeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet1Test, TransposeValueSet1Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet2Test, TransposeValueSet2Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet3Test, TransposeValueSet3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymm8, SimpleTransposeTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet1Test, TransposeValueSet1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet2Test, TransposeValueSet2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet3Test, TransposeValueSet3Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeQSymm16, SimpleTransposeTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(TransposeQSymm16ValueSet1Test, TransposeValueSet1Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(TransposeQSymm16ValueSet2Test, TransposeValueSet2Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(TransposeQSymm16ValueSet3Test, TransposeValueSet3Test<DataType::QSymmS16>)
+
 // TransposeConvolution2d
 ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNchw,
                      SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 6795204..b2d8938 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -141,6 +141,8 @@
     RefStridedSliceWorkload.hpp
     RefTransposeConvolution2dWorkload.cpp
     RefTransposeConvolution2dWorkload.hpp
+    RefTransposeWorkload.cpp
+    RefTransposeWorkload.hpp
     RefWorkloads.hpp
     RefWorkloadUtils.hpp
     Resize.cpp
diff --git a/src/backends/reference/workloads/RefTransposeWorkload.cpp b/src/backends/reference/workloads/RefTransposeWorkload.cpp
new file mode 100644
index 0000000..6bdfb21
--- /dev/null
+++ b/src/backends/reference/workloads/RefTransposeWorkload.cpp
@@ -0,0 +1,35 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefTransposeWorkload.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include <armnnUtils/Transpose.hpp>
+
+#include <ResolveType.hpp>
+
+namespace armnn
+{
+
+template <armnn::DataType DataType>
+void RefTransposeWorkload<DataType>::Execute() const
+{
+    using T = ResolveType<DataType>;
+
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, GetName() + "_Execute");
+
+    const ITensorHandle*     src      = m_Data.m_Inputs[0];
+    ITensorHandle*           dst      = m_Data.m_Outputs[0];
+    const PermutationVector& mappings = m_Data.m_Parameters.m_DimMappings;
+
+    armnnUtils::Transpose(GetTensorInfo(src).GetShape(), mappings, src->Map(), dst->Map(), sizeof(T));
+}
+
+template class RefTransposeWorkload<DataType::Float16>;
+template class RefTransposeWorkload<DataType::Float32>;
+template class RefTransposeWorkload<DataType::QAsymmU8>;
+template class RefTransposeWorkload<DataType::QSymmS16>;
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefTransposeWorkload.hpp b/src/backends/reference/workloads/RefTransposeWorkload.hpp
new file mode 100644
index 0000000..4b1c3d3
--- /dev/null
+++ b/src/backends/reference/workloads/RefTransposeWorkload.hpp
@@ -0,0 +1,35 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <armnn/TypesUtils.hpp>
+
+namespace armnn
+{
+
+template <armnn::DataType DataType>
+class RefTransposeWorkload : public TypedWorkload<TransposeQueueDescriptor, DataType>
+{
+public:
+    static const std::string& GetName()
+    {
+        static const std::string name = std::string("RefTranspose") + GetDataTypeName(DataType) + "Workload";
+        return name;
+    }
+
+    using TypedWorkload<TransposeQueueDescriptor, DataType>::m_Data;
+    using TypedWorkload<TransposeQueueDescriptor, DataType>::TypedWorkload;
+    void Execute() const override;
+};
+
+using RefTransposeFloat16Workload = RefTransposeWorkload<DataType::Float16>;
+using RefTransposeFloat32Workload = RefTransposeWorkload<DataType::Float32>;
+using RefTransposeQAsymm8Workload = RefTransposeWorkload<DataType::QAsymmU8>;
+using RefTransposeQSymm16Workload = RefTransposeWorkload<DataType::QSymmS16>;
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 7034b67..a0558ff 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -58,6 +58,7 @@
 #include "RefStridedSliceWorkload.hpp"
 #include "RefSpaceToDepthWorkload.hpp"
 #include "RefTransposeConvolution2dWorkload.hpp"
+#include "RefTransposeWorkload.hpp"
 #include "RefWorkloadUtils.hpp"
 #include "Resize.hpp"
 #include "Softmax.hpp"