IVGCVSW-6088 Add Sin and Log to ElementWiseUnary

* Ref workload
* Cl workload
* Neon workload
* Serializer
* Deserializer

* Remove boost include from TensorTest.cpp
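
An illustrative sketch (not part of the patch) of how a client network would exercise
the new operations through the public API, mirroring the pattern used in the new
SerializeElementwiseUnary test below; the tensor shape and layer name are placeholders:

    // Build a network containing a single Sin elementwise unary layer (Log is analogous).
    armnn::INetworkPtr network = armnn::INetwork::Create();
    armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Sin);

    armnn::IConnectableLayer* input  = network->AddInputLayer(0);
    armnn::IConnectableLayer* sin    = network->AddElementwiseUnaryLayer(descriptor, "sin");
    armnn::IConnectableLayer* output = network->AddOutputLayer(0);

    // Connect input -> sin -> output and set a Float32 tensor info on each output slot.
    input->GetOutputSlot(0).Connect(sin->GetInputSlot(0));
    sin->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    const armnn::TensorInfo info({ 1, 2, 2, 2 }, armnn::DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(info);
    sin->GetOutputSlot(0).SetTensorInfo(info);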

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I498548169cc77609c55cf3105f1de5a7429772cf
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index d829bfa..ac4dd31 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -108,7 +108,9 @@
     Sqrt       = 2,
     Rsqrt      = 3,
     Neg        = 4,
-    LogicalNot = 5
+    LogicalNot = 5,
+    Log        = 6,
+    Sin        = 7
 };
 
 enum class PoolingAlgorithm
diff --git a/src/armnn/test/TensorTest.cpp b/src/armnn/test/TensorTest.cpp
index fd2d784..1ecad50 100644
--- a/src/armnn/test/TensorTest.cpp
+++ b/src/armnn/test/TensorTest.cpp
@@ -2,7 +2,7 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#include <boost/test/unit_test.hpp>
+
 #include <armnn/Tensor.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 7951589..b5bf9da 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -550,6 +550,10 @@
             return armnn::UnaryOperation::Neg;
         case armnnSerializer::UnaryOperation::UnaryOperation_LogicalNot:
             return armnn::UnaryOperation::LogicalNot;
+        case armnnSerializer::UnaryOperation::UnaryOperation_Log:
+            return armnn::UnaryOperation::Log;
+        case armnnSerializer::UnaryOperation::UnaryOperation_Sin:
+            return armnn::UnaryOperation::Sin;
         default:
             throw armnn::InvalidArgumentException("Unary operation unknown");
     }
diff --git a/src/armnnDeserializer/test/DeserializeElementwiseUnary.cpp b/src/armnnDeserializer/test/DeserializeElementwiseUnary.cpp
new file mode 100644
index 0000000..0a89f48
--- /dev/null
+++ b/src/armnnDeserializer/test/DeserializeElementwiseUnary.cpp
@@ -0,0 +1,173 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include <armnnDeserializer/IDeserializer.hpp>
+
+#include <string>
+
+TEST_SUITE("Deserializer")
+{
+struct ElementwiseUnaryFixture : public ParserFlatbuffersSerializeFixture
+{
+    explicit ElementwiseUnaryFixture(const std::string& inputShape,
+                                     const std::string& outputShape,
+                                     const std::string& dataType,
+                                     const std::string& unaryOperation = "Abs")
+    {
+        m_JsonString = R"(
+            {
+                inputIds: [0],
+                outputIds: [2],
+                layers: [
+                    {
+                        layer_type: "InputLayer",
+                        layer: {
+                            base: {
+                                layerBindingId: 0,
+                                base: {
+                                    index: 0,
+                                    layerName: "InputLayer",
+                                    layerType: "Input",
+                                    inputSlots: [{
+                                        index: 0,
+                                        connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                                    }],
+                                    outputSlots: [{
+                                        index: 0,
+                                        tensorInfo: {
+                                            dimensions: )" + inputShape + R"(,
+                                            dataType: )" + dataType + R"(
+                                        }
+                                    }]
+                                }
+                            }
+                        }
+                    },
+                    {
+                        layer_type: "ElementwiseUnaryLayer",
+                        layer: {
+                            base: {
+                                index: 1,
+                                layerName: "ElementwiseUnaryLayer",
+                                layerType: "ElementwiseUnary",
+                                inputSlots: [{
+                                    index: 0,
+                                    connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                                }],
+                                outputSlots: [{
+                                    index: 0,
+                                    tensorInfo: {
+                                        dimensions: )" + outputShape + R"(,
+                                        dataType: )" + dataType + R"(
+                                    }
+                                }]
+                            },
+                            descriptor: {
+                                operation: )" + unaryOperation + R"(
+                            },
+                        }
+                    },
+                    {
+                        layer_type: "OutputLayer",
+                        layer: {
+                            base:{
+                                layerBindingId: 2,
+                                base: {
+                                    index: 2,
+                                    layerName: "OutputLayer",
+                                    layerType: "Output",
+                                    inputSlots: [{
+                                        index: 0,
+                                        connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+                                    }],
+                                    outputSlots: [{
+                                        index: 0,
+                                        tensorInfo: {
+                                            dimensions: )" + outputShape + R"(,
+                                            dataType: )" + dataType + R"(
+                                        },
+                                    }],
+                                }
+                            }
+                        },
+                    }
+                ]
+            }
+        )";
+        Setup();
+    }
+};
+
+struct SimpleAbsFixture : ElementwiseUnaryFixture
+{
+    SimpleAbsFixture() : ElementwiseUnaryFixture("[ 1, 2, 2, 2 ]", // inputShape
+                                                 "[ 1, 2, 2, 2 ]", // outputShape
+                                                 "Float32",        // dataType
+                                                 "Abs")            // unaryOperation
+    {}
+};
+
+FIXTURE_TEST_CASE(SimpleAbsTest, SimpleAbsFixture)
+{
+    RunTest<4, armnn::DataType::Float32>(
+        0,
+        {{"InputLayer", {-100.0f, -50.5f, -25.9999f, -0.5f, 0.0f, 1.5555f, 25.5f, 100.0f}}},
+        {{"OutputLayer", {100.0f, 50.5f, 25.9999f, 0.5f, 0.0f, 1.5555f, 25.5f, 100.0f}}});
+}
+
+struct SimpleLogFixture : ElementwiseUnaryFixture
+{
+    SimpleLogFixture() : ElementwiseUnaryFixture("[ 1, 2, 2, 2 ]", // inputShape
+                                                 "[ 1, 2, 2, 2 ]", // outputShape
+                                                 "Float32",        // dataType
+                                                 "Log")            // unaryOperation
+    {}
+};
+
+FIXTURE_TEST_CASE(SimpleLogTest, SimpleLogFixture)
+{
+    RunTest<4, armnn::DataType::Float32>(
+        0,
+        {{"InputLayer", {1.0f, 2.1f, 3.2f, 4.3f, 10.f, 100.f, 25.5f, 200.0f}}},
+        {{"OutputLayer", {0.f, 0.74193734472f, 1.16315080981f, 1.4586150227f,
+                          2.30258509299f, 4.60517018599f, 3.23867845216f, 5.29831736655f}}});
+}
+
+struct SimpleNegFixture : ElementwiseUnaryFixture
+{
+    SimpleNegFixture() : ElementwiseUnaryFixture("[ 1, 2, 2, 2 ]", // inputShape
+                                                 "[ 1, 2, 2, 2 ]", // outputShape
+                                                 "Float32",        // dataType
+                                                 "Neg")            // unaryOperation
+    {}
+};
+
+FIXTURE_TEST_CASE(SimpleNegTest, SimpleNegFixture)
+{
+    RunTest<4, armnn::DataType::Float32>(
+        0,
+        {{"InputLayer", {100.0f, 50.5f, 25.9999f, 0.5f, 0.0f, -1.5555f, -25.5f, -100.0f}}},
+        {{"OutputLayer", {-100.0f, -50.5f, -25.9999f, -0.5f, 0.0f, 1.5555f, 25.5f, 100.0f}}});
+}
+
+struct SimpleSinFixture : ElementwiseUnaryFixture
+{
+    SimpleSinFixture() : ElementwiseUnaryFixture("[ 1, 2, 2, 2 ]", // inputShape
+                                                 "[ 1, 2, 2, 2 ]", // outputShape
+                                                 "Float32",        // dataType
+                                                 "Sin")            // unaryOperation
+    {}
+};
+
+FIXTURE_TEST_CASE(SimpleSinTest, SimpleSinFixture)
+{
+    RunTest<4, armnn::DataType::Float32>(
+        0,
+        {{"InputLayer", {-100.0f, -50.5f, -25.9999f, -0.5f, 0.0f, 1.5555f, 25.5f, 100.0f}}},
+        {{"OutputLayer", {0.50636564111f, -0.23237376165f, -0.76249375473f, -0.4794255386f,
+                          0.0f, 0.99988301347f, 0.35905835402f, -0.50636564111f}}});
+}
+}
\ No newline at end of file
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index 1c9a1de..753c244 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -287,7 +287,9 @@
     Sqrt = 2,
     Exp = 3,
     Neg = 4,
-    LogicalNot = 5
+    LogicalNot = 5,
+    Log = 6,
+    Sin = 7
 }
 
 table ElementwiseUnaryDescriptor {
diff --git a/src/armnnSerializer/ArmnnSchema_generated.h b/src/armnnSerializer/ArmnnSchema_generated.h
index fc55d9b..675fcc6 100644
--- a/src/armnnSerializer/ArmnnSchema_generated.h
+++ b/src/armnnSerializer/ArmnnSchema_generated.h
@@ -931,37 +931,43 @@
   UnaryOperation_Exp = 3,
   UnaryOperation_Neg = 4,
   UnaryOperation_LogicalNot = 5,
+  UnaryOperation_Log = 6,
+  UnaryOperation_Sin = 7,
   UnaryOperation_MIN = UnaryOperation_Abs,
-  UnaryOperation_MAX = UnaryOperation_LogicalNot
+  UnaryOperation_MAX = UnaryOperation_Sin
 };
 
-inline const UnaryOperation (&EnumValuesUnaryOperation())[6] {
+inline const UnaryOperation (&EnumValuesUnaryOperation())[8] {
   static const UnaryOperation values[] = {
     UnaryOperation_Abs,
     UnaryOperation_Rsqrt,
     UnaryOperation_Sqrt,
     UnaryOperation_Exp,
     UnaryOperation_Neg,
-    UnaryOperation_LogicalNot
+    UnaryOperation_LogicalNot,
+    UnaryOperation_Log,
+    UnaryOperation_Sin
   };
   return values;
 }
 
 inline const char * const *EnumNamesUnaryOperation() {
-  static const char * const names[7] = {
+  static const char * const names[9] = {
     "Abs",
     "Rsqrt",
     "Sqrt",
     "Exp",
     "Neg",
     "LogicalNot",
+    "Log",
+    "Sin",
     nullptr
   };
   return names;
 }
 
 inline const char *EnumNameUnaryOperation(UnaryOperation e) {
-  if (flatbuffers::IsOutRange(e, UnaryOperation_Abs, UnaryOperation_LogicalNot)) return "";
+  if (flatbuffers::IsOutRange(e, UnaryOperation_Abs, UnaryOperation_Sin)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesUnaryOperation()[index];
 }
diff --git a/src/armnnSerializer/SerializerUtils.cpp b/src/armnnSerializer/SerializerUtils.cpp
index 936fb53..1df8d4e 100644
--- a/src/armnnSerializer/SerializerUtils.cpp
+++ b/src/armnnSerializer/SerializerUtils.cpp
@@ -119,6 +119,10 @@
             return armnnSerializer::UnaryOperation::UnaryOperation_Neg;
         case armnn::UnaryOperation::LogicalNot:
             return armnnSerializer::UnaryOperation::UnaryOperation_LogicalNot;
+        case armnn::UnaryOperation::Log:
+            return armnnSerializer::UnaryOperation::UnaryOperation_Log;
+        case armnn::UnaryOperation::Sin:
+            return armnnSerializer::UnaryOperation::UnaryOperation_Sin;
         default:
             throw armnn::InvalidArgumentException("Unary operation unknown");
     }
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 4cb1a81..8e7ca37 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -23,31 +23,6 @@
 
 TEST_SUITE("SerializerTests")
 {
-TEST_CASE("SerializeAbs")
-{
-    const std::string layerName("abs");
-    const armnn::TensorInfo tensorInfo({1, 2, 3}, armnn::DataType::Float32);
-
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
-
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    armnn::IConnectableLayer* const absLayer = network->AddAbsLayer(layerName.c_str());
-    ARMNN_NO_DEPRECATE_WARN_END
-    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
-
-    inputLayer->GetOutputSlot(0).Connect(absLayer->GetInputSlot(0));
-    absLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-
-    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-    absLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    CHECK(deserializedNetwork);
-
-    LayerVerifierBase verifier(layerName, {tensorInfo}, {tensorInfo});
-    deserializedNetwork->ExecuteStrategy(verifier);
-}
 
 TEST_CASE("SerializeAddition")
 {
@@ -719,6 +694,51 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
+void SerializeElementwiseUnaryTest(armnn::UnaryOperation unaryOperation)
+{
+    auto layerName = GetUnaryOperationAsCString(unaryOperation);
+
+    const armnn::TensorShape shape{2, 1, 2, 2};
+
+    const armnn::TensorInfo inputInfo  = armnn::TensorInfo(shape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputInfo = armnn::TensorInfo(shape, armnn::DataType::Float32);
+
+    armnn::ElementwiseUnaryDescriptor descriptor(unaryOperation);
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const elementwiseUnaryLayer =
+                                network->AddElementwiseUnaryLayer(descriptor, layerName);
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+    inputLayer->GetOutputSlot(0).Connect(elementwiseUnaryLayer->GetInputSlot(0));
+    elementwiseUnaryLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+    elementwiseUnaryLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+
+    CHECK(deserializedNetwork);
+
+    LayerVerifierBaseWithDescriptor<armnn::ElementwiseUnaryDescriptor>
+        verifier(layerName, { inputInfo }, { outputInfo }, descriptor);
+
+    deserializedNetwork->ExecuteStrategy(verifier);
+}
+
+TEST_CASE("SerializeElementwiseUnary")
+{
+    using op = armnn::UnaryOperation;
+    std::initializer_list<op> allUnaryOperations = {op::Abs, op::Exp, op::Sqrt, op::Rsqrt, op::Neg,
+                                                    op::LogicalNot, op::Log, op::Sin};
+
+    for (auto unaryOperation : allUnaryOperations)
+    {
+        SerializeElementwiseUnaryTest(unaryOperation);
+    }
+}
+
 TEST_CASE("SerializeFill")
 {
     const std::string layerName("fill");
@@ -1114,39 +1134,6 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-TEST_CASE("SerializeLogicalUnary")
-{
-    const std::string layerName("elementwiseUnaryLogicalNot");
-
-    const armnn::TensorShape shape{2, 1, 2, 2};
-
-    const armnn::TensorInfo inputInfo  = armnn::TensorInfo(shape, armnn::DataType::Boolean);
-    const armnn::TensorInfo outputInfo = armnn::TensorInfo(shape, armnn::DataType::Boolean);
-
-    armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::LogicalNot);
-
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const elementwiseUnaryLayer =
-        network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
-    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
-
-    inputLayer->GetOutputSlot(0).Connect(elementwiseUnaryLayer->GetInputSlot(0));
-    elementwiseUnaryLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-
-    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
-    elementwiseUnaryLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-
-    CHECK(deserializedNetwork);
-
-    LayerVerifierBaseWithDescriptor<armnn::ElementwiseUnaryDescriptor> verifier(
-            layerName, { inputInfo }, { outputInfo }, descriptor);
-
-    deserializedNetwork->ExecuteStrategy(verifier);
-}
-
 TEST_CASE("SerializeLogSoftmax")
 {
     const std::string layerName("log_softmax");
@@ -2462,4 +2449,4 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-}
+}
\ No newline at end of file
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 63d768e..73a16d0 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -65,6 +65,7 @@
     test/layerTests/GatherTestImpl.cpp \
     test/layerTests/InstanceNormalizationTestImpl.cpp \
     test/layerTests/L2NormalizationTestImpl.cpp \
+    test/layerTests/LogTestImpl.cpp \
     test/layerTests/LogicalTestImpl.cpp \
     test/layerTests/LogSoftmaxTestImpl.cpp \
     test/layerTests/LstmTestImpl.cpp \
@@ -83,6 +84,7 @@
     test/layerTests/RsqrtTestImpl.cpp \
     test/layerTests/SliceTestImpl.cpp \
     test/layerTests/QuantizeTestImpl.cpp \
+    test/layerTests/SinTestImpl.cpp \
     test/layerTests/SoftmaxTestImpl.cpp \
     test/layerTests/SpaceToBatchNdTestImpl.cpp \
     test/layerTests/SpaceToDepthTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index d0c95c5..82381a8 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -116,6 +116,8 @@
     layerTests/L2NormalizationTestImpl.cpp
     layerTests/L2NormalizationTestImpl.hpp
     layerTests/LayerTestResult.hpp
+    layerTests/LogTestImpl.cpp
+    layerTests/LogTestImpl.hpp
     layerTests/LogicalTestImpl.cpp
     layerTests/LogicalTestImpl.hpp
     layerTests/LogSoftmaxTestImpl.cpp
@@ -153,6 +155,8 @@
     layerTests/ResizeTestImpl.hpp
     layerTests/RsqrtTestImpl.cpp
     layerTests/RsqrtTestImpl.hpp
+    layerTests/SinTestImpl.cpp
+    layerTests/SinTestImpl.hpp
     layerTests/SliceTestImpl.cpp
     layerTests/SliceTestImpl.hpp
     layerTests/SoftmaxTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index c1b4b46..4ae6553 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -34,6 +34,7 @@
 #include <backendsCommon/test/layerTests/GatherTestImpl.hpp>
 #include <backendsCommon/test/layerTests/InstanceNormalizationTestImpl.hpp>
 #include <backendsCommon/test/layerTests/L2NormalizationTestImpl.hpp>
+#include <backendsCommon/test/layerTests/LogTestImpl.hpp>
 #include <backendsCommon/test/layerTests/LogicalTestImpl.hpp>
 #include <backendsCommon/test/layerTests/LogSoftmaxTestImpl.hpp>
 #include <backendsCommon/test/layerTests/LstmTestImpl.hpp>
@@ -54,6 +55,7 @@
 #include <backendsCommon/test/layerTests/ReshapeTestImpl.hpp>
 #include <backendsCommon/test/layerTests/ResizeTestImpl.hpp>
 #include <backendsCommon/test/layerTests/RsqrtTestImpl.hpp>
+#include <backendsCommon/test/layerTests/SinTestImpl.hpp>
 #include <backendsCommon/test/layerTests/SliceTestImpl.hpp>
 #include <backendsCommon/test/layerTests/SoftmaxTestImpl.hpp>
 #include <backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/LogTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogTestImpl.cpp
new file mode 100644
index 0000000..eb73ddf
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/LogTestImpl.cpp
@@ -0,0 +1,205 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "LogTestImpl.hpp"
+#include "ElementwiseUnaryTestImpl.hpp"
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> Log2dTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    const unsigned int inputShape[] = { 2, 2 };
+
+    std::vector<float> inputValues
+    {
+        3.0f, 2.7182818284f,
+        1.0f, 1.1f
+    };
+
+    std::vector<float> expectedOutputValues
+    {
+        1.09861228867f, 0.99999999997f,
+        0.0f, 0.0953101798f
+    };
+
+    return ElementwiseUnaryTestHelper<2, ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        armnn::UnaryOperation::Log,
+        inputShape,
+        inputValues,
+        inputShape,
+        expectedOutputValues,
+        tensorHandleFactory);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 3> Log3dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    const unsigned int inputShape[] = { 3, 1, 2 };
+
+    std::vector<float> inputValues
+    {
+        5.0f, 4.0f,
+        3.0f, 2.7182818284f,
+        1.0f, 1.1f
+    };
+
+    std::vector<float> expectedOutputValues
+    {
+        1.60943791243f, 1.38629436112f,
+        1.09861228867f, 0.99999999997f,
+        0.0f, 0.0953101798f
+    };
+
+    return ElementwiseUnaryTestHelper<3, ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        armnn::UnaryOperation::Log,
+        inputShape,
+        inputValues,
+        inputShape,
+        expectedOutputValues,
+        tensorHandleFactory);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> LogZeroTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    const unsigned int inputShape[] = { 1, 2 };
+
+    std::vector<float> inputValues
+    {
+        0.f, 0.f
+    };
+
+    std::vector<float> expectedOutputValues
+    {
+        -std::numeric_limits<float>::infinity(), -std::numeric_limits<float>::infinity()
+    };
+
+    return ElementwiseUnaryTestHelper<2, ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        armnn::UnaryOperation::Log,
+        inputShape,
+        inputValues,
+        inputShape,
+        expectedOutputValues,
+        tensorHandleFactory);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> LogNegativeTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    const unsigned int inputShape[] = { 1, 2 };
+
+    std::vector<float> inputValues
+    {
+        -5.9f, -5.8f
+    };
+
+    std::vector<float> expectedOutputValues
+    {
+        -std::numeric_limits<float>::quiet_NaN(), -std::numeric_limits<float>::quiet_NaN()
+    };
+
+    return ElementwiseUnaryTestHelper<2, ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        armnn::UnaryOperation::Log,
+        inputShape,
+        inputValues,
+        inputShape,
+        expectedOutputValues,
+        tensorHandleFactory);
+}
+
+//
+// Explicit template specializations
+//
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+Log2dTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 2>
+Log2dTest<armnn::DataType::Float16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
+Log2dTest<armnn::DataType::QAsymmS8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+Log2dTest<armnn::DataType::QAsymmU8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
+Log2dTest<armnn::DataType::QSymmS16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
+Log3dTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
+Log3dTest<armnn::DataType::Float16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
+Log3dTest<armnn::DataType::QAsymmS8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
+Log3dTest<armnn::DataType::QAsymmU8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
+Log3dTest<armnn::DataType::QSymmS16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+LogZeroTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+LogNegativeTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/LogTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/LogTestImpl.hpp
new file mode 100644
index 0000000..e7e14b8
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/LogTestImpl.hpp
@@ -0,0 +1,37 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> Log2dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> Log3dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> LogZeroTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> LogNegativeTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/backendsCommon/test/layerTests/SinTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SinTestImpl.cpp
new file mode 100644
index 0000000..be3f626
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/SinTestImpl.cpp
@@ -0,0 +1,205 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SinTestImpl.hpp"
+#include "ElementwiseUnaryTestImpl.hpp"
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> Sin2dTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    const unsigned int inputShape[] = { 2, 2 };
+
+    std::vector<float> inputValues
+    {
+        3.0f, 2.0f,
+        1.0f, 1.1f
+    };
+
+    std::vector<float> expectedOutputValues
+    {
+        0.14112000806f, 0.90929742682f,
+        0.8414709848f, 0.89120736006f
+    };
+
+    return ElementwiseUnaryTestHelper<2, ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        armnn::UnaryOperation::Sin,
+        inputShape,
+        inputValues,
+        inputShape,
+        expectedOutputValues,
+        tensorHandleFactory);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 3> Sin3dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    const unsigned int inputShape[] = { 3, 1, 2 };
+
+    std::vector<float> inputValues
+    {
+        5.0f, 4.0f,
+        3.0f, 2.0f,
+        1.0f, 1.1f
+    };
+
+    std::vector<float> expectedOutputValues
+    {
+        -0.95892427466f, -0.7568024953f,
+        0.14112000806f, 0.90929742682f,
+        0.8414709848f, 0.89120736006f
+    };
+
+    return ElementwiseUnaryTestHelper<3, ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        armnn::UnaryOperation::Sin,
+        inputShape,
+        inputValues,
+        inputShape,
+        expectedOutputValues,
+        tensorHandleFactory);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> SinZeroTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    const unsigned int inputShape[] = { 1, 2 };
+
+    std::vector<float> inputValues
+    {
+        0.f, 0.f
+    };
+
+    std::vector<float> expectedOutputValues
+    {
+        0.f, 0.f
+    };
+
+    return ElementwiseUnaryTestHelper<2, ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        armnn::UnaryOperation::Sin,
+        inputShape,
+        inputValues,
+        inputShape,
+        expectedOutputValues,
+        tensorHandleFactory);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> SinNegativeTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    const unsigned int inputShape[] = { 1, 2 };
+
+    std::vector<float> inputValues
+    {
+        -5.9f, -5.8f
+    };
+
+    std::vector<float> expectedOutputValues
+    {
+        0.37387666483f, 0.46460217941f,
+    };
+
+    return ElementwiseUnaryTestHelper<2, ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        armnn::UnaryOperation::Sin,
+        inputShape,
+        inputValues,
+        inputShape,
+        expectedOutputValues,
+        tensorHandleFactory);
+}
+
+//
+// Explicit template specializations
+//
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+Sin2dTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 2>
+Sin2dTest<armnn::DataType::Float16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
+Sin2dTest<armnn::DataType::QAsymmS8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+Sin2dTest<armnn::DataType::QAsymmU8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
+Sin2dTest<armnn::DataType::QSymmS16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
+Sin3dTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
+Sin3dTest<armnn::DataType::Float16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
+Sin3dTest<armnn::DataType::QAsymmS8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
+Sin3dTest<armnn::DataType::QAsymmU8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
+Sin3dTest<armnn::DataType::QSymmS16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+SinZeroTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+SinNegativeTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/SinTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SinTestImpl.hpp
new file mode 100644
index 0000000..b04d75a
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/SinTestImpl.hpp
@@ -0,0 +1,37 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> Sin2dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> Sin3dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> SinZeroTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> SinNegativeTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index aa83826..f9848ff 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -42,6 +42,7 @@
 #include "workloads/ClGatherWorkload.hpp"
 #include "workloads/ClInstanceNormalizationWorkload.hpp"
 #include "workloads/ClL2NormalizationFloatWorkload.hpp"
+#include "workloads/ClLogWorkload.hpp"
 #include "workloads/ClLogSoftmaxWorkload.hpp"
 #include "workloads/ClLogicalAndWorkload.hpp"
 #include "workloads/ClLogicalNotWorkload.hpp"
@@ -65,6 +66,7 @@
 #include "workloads/ClReshapeWorkload.hpp"
 #include "workloads/ClResizeWorkload.hpp"
 #include "workloads/ClRsqrtWorkload.hpp"
+#include "workloads/ClSinWorkload.hpp"
 #include "workloads/ClSliceWorkload.hpp"
 #include "workloads/ClSoftmaxWorkload.hpp"
 #include "workloads/ClSpaceToBatchNdWorkload.hpp"
@@ -465,6 +467,16 @@
                                            reasonIfUnsupported,
                                            input,
                                            output);
+        case UnaryOperation::Log:
+            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogWorkloadValidate,
+                                           reasonIfUnsupported,
+                                           input,
+                                           output);
+        case UnaryOperation::LogicalNot:
+            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalNotWorkloadValidate,
+                                           reasonIfUnsupported,
+                                           input,
+                                           output);
         case UnaryOperation::Neg:
             FORWARD_WORKLOAD_VALIDATE_FUNC(ClNegWorkloadValidate,
                                            reasonIfUnsupported,
@@ -475,8 +487,8 @@
                                            reasonIfUnsupported,
                                            input,
                                            output);
-        case UnaryOperation::LogicalNot:
-            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalNotWorkloadValidate,
+        case UnaryOperation::Sin:
+            FORWARD_WORKLOAD_VALIDATE_FUNC(ClSinWorkloadValidate,
                                            reasonIfUnsupported,
                                            input,
                                            output);
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 0d8d0a7..6ca8c1b 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -349,6 +349,10 @@
         }
         case UnaryOperation::Exp:
             return std::make_unique<ClExpWorkload>(descriptor, info, m_CLCompileContext);
+        case UnaryOperation::Log:
+            return std::make_unique<ClLogWorkload>(descriptor, info, m_CLCompileContext);
+        case UnaryOperation::LogicalNot:
+            return std::make_unique<ClLogicalNotWorkload>(descriptor, info, m_CLCompileContext);
         case UnaryOperation::Neg:
             return std::make_unique<ClNegWorkload>(descriptor, info, m_CLCompileContext);
         case UnaryOperation::Rsqrt:
@@ -359,8 +363,8 @@
 
             return std::make_unique<ClRsqrtWorkload>(rsqrtQueueDescriptor, info, m_CLCompileContext);
         }
-        case UnaryOperation::LogicalNot:
-            return std::make_unique<ClLogicalNotWorkload>(descriptor, info, m_CLCompileContext);
+        case UnaryOperation::Sin:
+            return std::make_unique<ClSinWorkload>(descriptor, info, m_CLCompileContext);
         default:
             return nullptr;
     }
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index e6c289c..16748cf 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -50,6 +50,7 @@
         workloads/ClGatherWorkload.cpp \
         workloads/ClInstanceNormalizationWorkload.cpp \
         workloads/ClL2NormalizationFloatWorkload.cpp \
+        workloads/ClLogWorkload.cpp \
         workloads/ClLogicalAndWorkload.cpp \
         workloads/ClLogicalNotWorkload.cpp \
         workloads/ClLogicalOrWorkload.cpp \
@@ -72,6 +73,7 @@
         workloads/ClReshapeWorkload.cpp \
         workloads/ClResizeWorkload.cpp \
         workloads/ClRsqrtWorkload.cpp \
+        workloads/ClSinWorkload.cpp \
         workloads/ClSliceWorkload.cpp \
         workloads/ClSoftmaxWorkload.cpp \
         workloads/ClSpaceToBatchNdWorkload.cpp \
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 1c3c831..f5b26d3 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -1847,12 +1847,28 @@
 
 // Exp
 ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Exp2d, ClContextControlFixture, Exp2dTest<DataType::Float32>)
-ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Exo3d, ClContextControlFixture, Exp3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Exp3d, ClContextControlFixture, Exp3dTest<DataType::Float32>)
 ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ExpZero, ClContextControlFixture, ExpZeroTest<DataType::Float32>)
 ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ExpNegative, ClContextControlFixture, ExpNegativeTest<DataType::Float32>)
 ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Exp2dFloat16, ClContextControlFixture, Exp2dTest<DataType::Float16>)
 ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Exp3dFloat16, ClContextControlFixture, Exp3dTest<DataType::Float16>)
 
+// Sin
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Sin2d, ClContextControlFixture, Sin2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Sin3d, ClContextControlFixture, Sin3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SinZero, ClContextControlFixture, SinZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SinNegative, ClContextControlFixture, SinNegativeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Sin2dFloat16, ClContextControlFixture, Sin2dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Sin3dFloat16, ClContextControlFixture, Sin3dTest<DataType::Float16>)
+
+// Log
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Log2d, ClContextControlFixture, Log2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Log3d, ClContextControlFixture, Log3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogZero, ClContextControlFixture, LogZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogNegative, ClContextControlFixture, LogNegativeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Log2dFloat16, ClContextControlFixture, Log2dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Log3dFloat16, ClContextControlFixture, Log3dTest<DataType::Float16>)
+
 // Logical
 ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalNot, ClContextControlFixture, LogicalNotTest)
 ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalNotInt, ClContextControlFixture, LogicalNotIntTest)
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index 9f1a02f..a351f73 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -36,8 +36,8 @@
     ClDepthwiseConvolutionWorkload.hpp
     ClDequantizeWorkload.cpp
     ClDequantizeWorkload.hpp
-        ClDivisionWorkload.cpp
-        ClDivisionWorkload.hpp
+    ClDivisionWorkload.cpp
+    ClDivisionWorkload.hpp
     ClExpWorkload.cpp
     ClExpWorkload.hpp
     ClFillWorkload.cpp
@@ -50,6 +50,8 @@
     ClGatherWorkload.hpp
     ClInstanceNormalizationWorkload.cpp
     ClInstanceNormalizationWorkload.hpp
+    ClLogWorkload.cpp
+    ClLogWorkload.hpp
     ClL2NormalizationFloatWorkload.cpp
     ClL2NormalizationFloatWorkload.hpp
     ClLogicalAndWorkload.cpp
@@ -97,6 +99,8 @@
     ClResizeWorkload.hpp
     ClRsqrtWorkload.cpp
     ClRsqrtWorkload.hpp
+    ClSinWorkload.cpp
+    ClSinWorkload.hpp
     ClSliceWorkload.cpp
     ClSliceWorkload.hpp
     ClSoftmaxWorkload.cpp
diff --git a/src/backends/cl/workloads/ClLogWorkload.cpp b/src/backends/cl/workloads/ClLogWorkload.cpp
new file mode 100644
index 0000000..b35345f
--- /dev/null
+++ b/src/backends/cl/workloads/ClLogWorkload.cpp
@@ -0,0 +1,45 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClLogWorkload.hpp"
+
+#include "ClWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+#include <cl/ClTensorHandle.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status ClLogWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInput  = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    return arm_compute::CLLogLayer::validate(&aclInput, &aclOutput);
+}
+
+ClLogWorkload::ClLogWorkload(const ElementwiseUnaryQueueDescriptor& descriptor,
+                             const WorkloadInfo& info,
+                             const arm_compute::CLCompileContext& clCompileContext)
+    : BaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("ClLogWorkload", 1, 1);
+
+    arm_compute::ICLTensor& input  = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    m_LogLayer.configure(clCompileContext, &input, &output);
+}
+
+void ClLogWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_CL("ClLogWorkload_Execute");
+    RunClFunction(m_LogLayer, CHECK_LOCATION());
+}
+
+} // namespace armnn
diff --git a/src/backends/cl/workloads/ClLogWorkload.hpp b/src/backends/cl/workloads/ClLogWorkload.hpp
new file mode 100644
index 0000000..4339ab7
--- /dev/null
+++ b/src/backends/cl/workloads/ClLogWorkload.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/CL/functions/CLElementWiseUnaryLayer.h>
+
+namespace armnn
+{
+
+arm_compute::Status ClLogWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
+class ClLogWorkload : public BaseWorkload<ElementwiseUnaryQueueDescriptor>
+{
+public:
+    ClLogWorkload(const ElementwiseUnaryQueueDescriptor& descriptor,
+                  const WorkloadInfo& info,
+                  const arm_compute::CLCompileContext& clCompileContext);
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::CLLogLayer m_LogLayer;
+};
+
+} // namespace armnn
diff --git a/src/backends/cl/workloads/ClSinWorkload.cpp b/src/backends/cl/workloads/ClSinWorkload.cpp
new file mode 100644
index 0000000..17572c6
--- /dev/null
+++ b/src/backends/cl/workloads/ClSinWorkload.cpp
@@ -0,0 +1,45 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClSinWorkload.hpp"
+
+#include "ClWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+#include <cl/ClTensorHandle.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status ClSinWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInput  = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    return arm_compute::CLSinLayer::validate(&aclInput, &aclOutput);
+}
+
+ClSinWorkload::ClSinWorkload(const ElementwiseUnaryQueueDescriptor& descriptor,
+                             const WorkloadInfo& info,
+                             const arm_compute::CLCompileContext& clCompileContext)
+    : BaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("ClSinWorkload", 1, 1);
+
+    arm_compute::ICLTensor& input  = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    m_SinLayer.configure(clCompileContext, &input, &output);
+}
+
+void ClSinWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_CL("ClSinWorkload_Execute");
+    RunClFunction(m_SinLayer, CHECK_LOCATION());
+}
+
+} // namespace armnn
diff --git a/src/backends/cl/workloads/ClSinWorkload.hpp b/src/backends/cl/workloads/ClSinWorkload.hpp
new file mode 100644
index 0000000..5eb3b45
--- /dev/null
+++ b/src/backends/cl/workloads/ClSinWorkload.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/CL/functions/CLElementWiseUnaryLayer.h>
+
+namespace armnn
+{
+
+arm_compute::Status ClSinWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
+class ClSinWorkload : public BaseWorkload<ElementwiseUnaryQueueDescriptor>
+{
+public:
+    ClSinWorkload(const ElementwiseUnaryQueueDescriptor& descriptor,
+                  const WorkloadInfo& info,
+                  const arm_compute::CLCompileContext& clCompileContext);
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::CLSinLayer m_SinLayer;
+};
+
+} // namespace armnn
diff --git a/src/backends/cl/workloads/ClWorkloads.hpp b/src/backends/cl/workloads/ClWorkloads.hpp
index 5488fcd..88d1c1b 100644
--- a/src/backends/cl/workloads/ClWorkloads.hpp
+++ b/src/backends/cl/workloads/ClWorkloads.hpp
@@ -25,6 +25,7 @@
 #include "ClGatherWorkload.hpp"
 #include "ClInstanceNormalizationWorkload.hpp"
 #include "ClL2NormalizationFloatWorkload.hpp"
+#include "ClLogWorkload.hpp"
 #include "ClLogicalAndWorkload.hpp"
 #include "ClLogicalNotWorkload.hpp"
 #include "ClLogicalOrWorkload.hpp"
@@ -49,6 +50,7 @@
 #include "ClReshapeWorkload.hpp"
 #include "ClResizeWorkload.hpp"
 #include "ClRsqrtWorkload.hpp"
+#include "ClSinWorkload.hpp"
 #include "ClSliceWorkload.hpp"
 #include "ClSoftmaxWorkload.hpp"
 #include "ClSpaceToBatchNdWorkload.hpp"
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index a1519cd..d9bde26 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -37,6 +37,7 @@
 #include "workloads/NeonDequantizeWorkload.hpp"
 #include "workloads/NeonInstanceNormalizationWorkload.hpp"
 #include "workloads/NeonL2NormalizationFloatWorkload.hpp"
+#include "workloads/NeonLogWorkload.hpp"
 #include "workloads/NeonLogSoftmaxWorkload.hpp"
 #include "workloads/NeonLogicalAndWorkload.hpp"
 #include "workloads/NeonLogicalNotWorkload.hpp"
@@ -63,6 +64,7 @@
 #include "workloads/NeonReshapeWorkload.hpp"
 #include "workloads/NeonResizeWorkload.hpp"
 #include "workloads/NeonRsqrtWorkload.hpp"
+#include "workloads/NeonSinWorkload.hpp"
 #include "workloads/NeonSliceWorkload.hpp"
 #include "workloads/NeonSoftmaxWorkload.hpp"
 #include "workloads/NeonSpaceToBatchNdWorkload.hpp"
@@ -439,6 +441,16 @@
                                            reasonIfUnsupported,
                                            input,
                                            output);
+        case UnaryOperation::LogicalNot:
+            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalNotWorkloadValidate,
+                                           reasonIfUnsupported,
+                                           input,
+                                           output);
+        case UnaryOperation::Log:
+            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogWorkloadValidate,
+                                           reasonIfUnsupported,
+                                           input,
+                                           output);
         case UnaryOperation::Neg:
             FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNegWorkloadValidate,
                                            reasonIfUnsupported,
@@ -449,8 +461,8 @@
                                            reasonIfUnsupported,
                                            input,
                                            output);
-        case UnaryOperation::LogicalNot:
-            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalNotWorkloadValidate,
+        case UnaryOperation::Sin:
+            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSinWorkloadValidate,
                                            reasonIfUnsupported,
                                            input,
                                            output);
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 8751d8c..5ccec62 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -294,6 +294,14 @@
 
             return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
         }
+        case UnaryOperation::Exp:
+            return std::make_unique<NeonExpWorkload>(descriptor, info);
+        case UnaryOperation::LogicalNot:
+            return std::make_unique<NeonLogicalNotWorkload>(descriptor, info);
+        case UnaryOperation::Log:
+            return std::make_unique<NeonLogWorkload>(descriptor, info);
+        case UnaryOperation::Neg:
+            return std::make_unique<NeonNegWorkload>(descriptor, info);
         case UnaryOperation::Rsqrt:
         {
             RsqrtQueueDescriptor rsqrtQueueDescriptor;
@@ -302,12 +310,8 @@
 
             return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
         }
-        case UnaryOperation::Neg:
-            return std::make_unique<NeonNegWorkload>(descriptor, info);
-        case UnaryOperation::Exp:
-            return std::make_unique<NeonExpWorkload>(descriptor, info);
-        case UnaryOperation::LogicalNot:
-            return std::make_unique<NeonLogicalNotWorkload>(descriptor, info);
+        case UnaryOperation::Sin:
+            return std::make_unique<NeonSinWorkload>(descriptor, info);
         default:
             return nullptr;
     }
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 21a1770..9906c80 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -48,6 +48,7 @@
         workloads/NeonGatherWorkload.cpp \
         workloads/NeonInstanceNormalizationWorkload.cpp \
         workloads/NeonL2NormalizationFloatWorkload.cpp \
+        workloads/NeonLogWorkload.cpp \
         workloads/NeonLogicalAndWorkload.cpp \
         workloads/NeonLogicalNotWorkload.cpp \
         workloads/NeonLogicalOrWorkload.cpp \
@@ -71,6 +72,7 @@
         workloads/NeonReshapeWorkload.cpp \
         workloads/NeonResizeWorkload.cpp \
         workloads/NeonRsqrtWorkload.cpp \
+        workloads/NeonSinWorkload.cpp \
         workloads/NeonSliceWorkload.cpp \
         workloads/NeonSoftmaxWorkload.cpp \
         workloads/NeonSpaceToBatchNdWorkload.cpp \
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 62864f8..6985776 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -1354,10 +1354,22 @@
 
 // Exp
 ARMNN_AUTO_TEST_CASE_WITH_THF(Exp2d, Exp2dTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Exo3d, Exp3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Exp3d, Exp3dTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ExpZero, ExpZeroTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ExpNegative, ExpNegativeTest<DataType::Float32>)
 
+// Log
+ARMNN_AUTO_TEST_CASE_WITH_THF(Log2d, Log2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Log3d, Log3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogZero, LogZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogNegative, LogNegativeTest<DataType::Float32>)
+
+// Sin
+ARMNN_AUTO_TEST_CASE_WITH_THF(Sin2d, Sin2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Sin3d, Sin3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SinZero, SinZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SinNegative, SinNegativeTest<DataType::Float32>)
+
 // Fill
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFill, SimpleFillTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFillF16, SimpleFillTest<DataType::Float16>)
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index f8fc18f..d08dd7e 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -56,6 +56,8 @@
     NeonInstanceNormalizationWorkload.hpp
     NeonL2NormalizationFloatWorkload.cpp
     NeonL2NormalizationFloatWorkload.hpp
+    NeonLogWorkload.cpp
+    NeonLogWorkload.hpp
     NeonLogicalAndWorkload.cpp
     NeonLogicalAndWorkload.hpp
     NeonLogicalNotWorkload.cpp
@@ -103,6 +105,8 @@
     NeonResizeWorkload.hpp
     NeonRsqrtWorkload.cpp
     NeonRsqrtWorkload.hpp
+    NeonSinWorkload.cpp
+    NeonSinWorkload.hpp
     NeonSliceWorkload.cpp
     NeonSliceWorkload.hpp
     NeonSoftmaxWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonLogWorkload.cpp b/src/backends/neon/workloads/NeonLogWorkload.cpp
new file mode 100644
index 0000000..460f5b3
--- /dev/null
+++ b/src/backends/neon/workloads/NeonLogWorkload.cpp
@@ -0,0 +1,42 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonLogWorkload.hpp"
+
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status NeonLogWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInput  = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    return arm_compute::NELogLayer::validate(&aclInput, &aclOutput);
+}
+
+NeonLogWorkload::NeonLogWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info)
+    : BaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonLogWorkload", 1, 1);
+
+    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    m_LogLayer.configure(&input, &output);
+}
+
+void NeonLogWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonLogWorkload_Execute");
+    m_LogLayer.run();
+}
+
+} // namespace armnn
diff --git a/src/backends/neon/workloads/NeonLogWorkload.hpp b/src/backends/neon/workloads/NeonLogWorkload.hpp
new file mode 100644
index 0000000..965a845
--- /dev/null
+++ b/src/backends/neon/workloads/NeonLogWorkload.hpp
@@ -0,0 +1,27 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonLogWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
+class NeonLogWorkload : public BaseWorkload<ElementwiseUnaryQueueDescriptor>
+{
+public:
+    NeonLogWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::NELogLayer m_LogLayer;
+};
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/workloads/NeonSinWorkload.cpp b/src/backends/neon/workloads/NeonSinWorkload.cpp
new file mode 100644
index 0000000..ac2bd49
--- /dev/null
+++ b/src/backends/neon/workloads/NeonSinWorkload.cpp
@@ -0,0 +1,42 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonSinWorkload.hpp"
+
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status NeonSinWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInput  = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    return arm_compute::NESinLayer::validate(&aclInput, &aclOutput);
+}
+
+NeonSinWorkload::NeonSinWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info)
+    : BaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonSinWorkload", 1, 1);
+
+    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    m_SinLayer.configure(&input, &output);
+}
+
+void NeonSinWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSinWorkload_Execute");
+    m_SinLayer.run();
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/workloads/NeonSinWorkload.hpp b/src/backends/neon/workloads/NeonSinWorkload.hpp
new file mode 100644
index 0000000..9405c3c
--- /dev/null
+++ b/src/backends/neon/workloads/NeonSinWorkload.hpp
@@ -0,0 +1,27 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonSinWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
+class NeonSinWorkload : public BaseWorkload<ElementwiseUnaryQueueDescriptor>
+{
+public:
+    NeonSinWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::NESinLayer m_SinLayer;
+};
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 16035e0..2fb4b17 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -31,6 +31,7 @@
 #include "NeonGatherWorkload.hpp"
 #include "NeonInstanceNormalizationWorkload.hpp"
 #include "NeonL2NormalizationFloatWorkload.hpp"
+#include "NeonLogWorkload.hpp"
 #include "NeonLogicalAndWorkload.hpp"
 #include "NeonLogicalNotWorkload.hpp"
 #include "NeonLogicalOrWorkload.hpp"
@@ -54,6 +55,7 @@
 #include "NeonReshapeWorkload.hpp"
 #include "NeonResizeWorkload.hpp"
 #include "NeonRsqrtWorkload.hpp"
+#include "NeonSinWorkload.hpp"
 #include "NeonSliceWorkload.hpp"
 #include "NeonSoftmaxWorkload.hpp"
 #include "NeonSpaceToBatchNdWorkload.hpp"
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 6bc6f8a..1cc6fa8 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -2235,6 +2235,35 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(Exp2dQuantisedSymm16, Exp2dTest<DataType::QSymmS16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(Exp3dQuantisedSymm16, Exp3dTest<DataType::QSymmS16>)
 
+// Log
+ARMNN_AUTO_TEST_CASE_WITH_THF(Log2d, Log2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Log3d, Log3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogZero, LogZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogNegative, LogNegativeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Log2dFloat16, Log2dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Log3dFloat16, Log3dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Log2dQuantisedAsymmS8, Log2dTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Log3dQuantisedAsymmS8, Log3dTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Log2dQuantisedAsymm8, Log2dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Log3dQuantisedAsymm8, Log3dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Log2dQuantisedSymm16, Log2dTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Log3dQuantisedSymm16, Log3dTest<DataType::QSymmS16>)
+
+// Sin
+ARMNN_AUTO_TEST_CASE_WITH_THF(Sin2d, Sin2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Sin3d, Sin3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SinZero, SinZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SinNegative, SinNegativeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Sin2dFloat16, Sin2dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Sin3dFloat16, Sin3dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Sin2dQuantisedAsymmS8, Sin2dTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Sin3dQuantisedAsymmS8, Sin3dTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Sin2dQuantisedAsymm8, Sin2dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Sin3dQuantisedAsymm8, Sin3dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Sin2dQuantisedSymm16, Sin2dTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Sin3dQuantisedSymm16, Sin3dTest<DataType::QSymmS16>)
+
+// Logical
 ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalNot, LogicalNotTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalNotInt, LogicalNotIntTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAnd, LogicalAndTest)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index dadedf9..09e02e6 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -39,6 +39,7 @@
     Gather.hpp
     InstanceNorm.cpp
     InstanceNorm.hpp
+    Log.hpp
     LogSoftmax.cpp
     LogSoftmax.hpp
     LstmUtils.hpp
@@ -165,6 +166,7 @@
     Resize.cpp
     Resize.hpp
     Rsqrt.hpp
+    Sin.hpp
     Slice.cpp
     Slice.hpp
     Softmax.cpp
diff --git a/src/backends/reference/workloads/ElementwiseFunction.cpp b/src/backends/reference/workloads/ElementwiseFunction.cpp
index d6f3f42..82bcf99 100644
--- a/src/backends/reference/workloads/ElementwiseFunction.cpp
+++ b/src/backends/reference/workloads/ElementwiseFunction.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -10,7 +10,9 @@
 #include "Maximum.hpp"
 #include "Abs.hpp"
 #include "Exp.hpp"
+#include "Log.hpp"
 #include "Rsqrt.hpp"
+#include "Sin.hpp"
 #include "Sqrt.hpp"
 
 
@@ -84,8 +86,10 @@
 // Unary
 template struct armnn::ElementwiseUnaryFunction<armnn::abs<float>>;
 template struct armnn::ElementwiseUnaryFunction<armnn::exp<float>>;
+template struct armnn::ElementwiseUnaryFunction<armnn::log<float>>;
 template struct armnn::ElementwiseUnaryFunction<std::negate<float>>;
 template struct armnn::ElementwiseUnaryFunction<armnn::rsqrt<float>>;
+template struct armnn::ElementwiseUnaryFunction<armnn::sin<float>>;
 template struct armnn::ElementwiseUnaryFunction<armnn::sqrt<float>>;
 
 // Logical Unary
diff --git a/src/backends/reference/workloads/Log.hpp b/src/backends/reference/workloads/Log.hpp
new file mode 100644
index 0000000..98b6b82
--- /dev/null
+++ b/src/backends/reference/workloads/Log.hpp
@@ -0,0 +1,24 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <cmath>
+#include <functional>
+
+namespace armnn
+{
+    template<typename T>
+    struct log : public std::unary_function<T, T>
+    {
+        T
+        operator()(const T& inputData) const
+        {
+            // Computes the natural logarithm of inputData.
+            return std::log(inputData);
+        }
+    };
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp
index b442f25..be15363 100644
--- a/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -11,7 +11,9 @@
 #include "RefWorkloadUtils.hpp"
 #include "Abs.hpp"
 #include "Exp.hpp"
+#include "Log.hpp"
 #include "Rsqrt.hpp"
+#include "Sin.hpp"
 #include "Sqrt.hpp"
 
 #include <Profiling.hpp>
@@ -54,8 +56,10 @@
 
     using AbsFunction   = ElementwiseUnaryFunction<abs<InType>>;
     using ExpFunction   = ElementwiseUnaryFunction<exp<InType>>;
+    using LogFunction   = ElementwiseUnaryFunction<log<InType>>;
     using NegFunction   = ElementwiseUnaryFunction<std::negate<InType>>;
     using RsqrtFunction = ElementwiseUnaryFunction<rsqrt<InType>>;
+    using SinFunction   = ElementwiseUnaryFunction<sin<InType>>;
     using SqrtFunction  = ElementwiseUnaryFunction<sqrt<InType>>;
 
     switch (m_Data.m_Parameters.m_Operation)
@@ -70,6 +74,11 @@
             ExpFunction(inShape, outShape, *input, *output);
             break;
         }
+        case UnaryOperation::Log:
+        {
+            LogFunction(inShape, outShape, *input, *output);
+            break;
+        }
         case UnaryOperation::Neg:
         {
             NegFunction(inShape, outShape, *input, *output);
@@ -80,6 +89,11 @@
             RsqrtFunction(inShape, outShape, *input, *output);
             break;
         }
+        case UnaryOperation::Sin:
+        {
+            SinFunction(inShape, outShape, *input, *output);
+            break;
+        }
         case UnaryOperation::Sqrt:
         {
             SqrtFunction(inShape, outShape, *input, *output);
diff --git a/src/backends/reference/workloads/Sin.hpp b/src/backends/reference/workloads/Sin.hpp
new file mode 100644
index 0000000..b71c33b
--- /dev/null
+++ b/src/backends/reference/workloads/Sin.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <cmath>
+#include <functional>
+
+namespace armnn
+{
+    template<typename T>
+    struct sin : public std::unary_function<T, T>
+    {
+        T
+        operator()(const T& inputData) const
+        {
+            return std::sin(inputData); // Computes the sine of inputData.
+        }
+    };
+
+} //namespace armnn