IVGCVSW-8159 Fixed issues building with NDK r26

* The compiler shipped with NDK r26 has stricter rules around certain
  warnings and deprecation notices.
  * Fixed warnings for unqualified call to 'std::move'
  * Fixed error where the half values weren't being cast to a float
    when calling 'std::isnan'
  * Removed unnecessary subtensor unit tests for neon

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I4ceb46e55ff5f2a754452e3a43de2188d58bf927
diff --git a/include/armnn/utility/TransformIterator.hpp b/include/armnn/utility/TransformIterator.hpp
index b038447..e37c8a7 100644
--- a/include/armnn/utility/TransformIterator.hpp
+++ b/include/armnn/utility/TransformIterator.hpp
@@ -8,7 +8,6 @@
 
 namespace armnn
 {
-
 template<typename Function,
         typename Iterator,
         typename Category = typename std::iterator_traits<Iterator>::iterator_category,
@@ -16,7 +15,7 @@
         typename Distance = typename std::iterator_traits<Iterator>::difference_type,
         typename Pointer = typename std::iterator_traits<Iterator>::pointer,
         typename Reference =
-        typename std::result_of<const Function(typename std::iterator_traits<Iterator>::reference)>::type
+                typename std::invoke_result<const Function, typename std::iterator_traits<Iterator>::reference>::type
 >
 class TransformIterator
 {
@@ -73,7 +72,7 @@
     bool operator<=(const TransformIterator& rhs) const {return m_it <= rhs.m_it;}
 
     bool operator==(TransformIterator other) const {return (m_it == other.m_it);}
-    bool operator!=(TransformIterator other) const {return !(m_it == other.m_it);}
+    bool operator!=(TransformIterator other) const {return (m_it != other.m_it);}
 
     Reference operator*() const {return m_fn(*m_it);}
 
diff --git a/include/armnnTestUtils/TensorHelpers.hpp b/include/armnnTestUtils/TensorHelpers.hpp
index fa9c970..14c5061 100644
--- a/include/armnnTestUtils/TensorHelpers.hpp
+++ b/include/armnnTestUtils/TensorHelpers.hpp
@@ -47,7 +47,7 @@
             return true;
         }
 
-        if (std::isnan(a) && std::isnan(b))
+        if (std::isnan(static_cast<float>(a)) && std::isnan(static_cast<float>(b)))
         {
             return true;
         }
diff --git a/shim/sl/canonical/ArmnnDriverImpl.cpp b/shim/sl/canonical/ArmnnDriverImpl.cpp
index 060dd5a..0063149 100644
--- a/shim/sl/canonical/ArmnnDriverImpl.cpp
+++ b/shim/sl/canonical/ArmnnDriverImpl.cpp
@@ -233,7 +233,7 @@
     auto numOutputs = getMainModel(model).outputIndexes.size();
     try
     {
-        if (runtime->LoadNetwork(netId, move(optNet), msg, networkProperties) != armnn::Status::Success)
+        if (runtime->LoadNetwork(netId, std::move(optNet), msg, networkProperties) != armnn::Status::Success)
         {
             return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Network could not be loaded";
         }
@@ -530,7 +530,7 @@
                                                 options.IsGpuProfilingEnabled());
     try
     {
-        if (runtime->LoadNetwork(netId, move(optNet), msg, networkProperties) != armnn::Status::Success)
+        if (runtime->LoadNetwork(netId, std::move(optNet), msg, networkProperties) != armnn::Status::Success)
         {
             return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Network could not be loaded";
         }
diff --git a/src/armnn/ExecutionFrame.cpp b/src/armnn/ExecutionFrame.cpp
index 92a7990..118fa7e 100644
--- a/src/armnn/ExecutionFrame.cpp
+++ b/src/armnn/ExecutionFrame.cpp
@@ -39,7 +39,7 @@
 
 void ExecutionFrame::AddWorkloadToQueue(std::unique_ptr<IWorkload> workload)
 {
-    m_WorkloadQueue.push_back(move(workload));
+    m_WorkloadQueue.push_back(std::move(workload));
 }
 
 void ExecutionFrame::SetNextExecutionFrame(IExecutionFrame* nextExecutionFrame)
diff --git a/src/armnn/ExecutionFrame.hpp b/src/armnn/ExecutionFrame.hpp
index 20a5da0..3f2407b 100644
--- a/src/armnn/ExecutionFrame.hpp
+++ b/src/armnn/ExecutionFrame.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
diff --git a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
index 0b717bc..415ffeb 100644
--- a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
+++ b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
@@ -66,7 +66,7 @@
                                   m_Runtime->GetDeviceSpec());
 
         std::string errorMessage;
-        armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
+        armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optimized), errorMessage);
 
         if (ret != armnn::Status::Success)
         {
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
index 9e98774..a3fb3d0 100644
--- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -135,7 +135,7 @@
                                   m_Runtime->GetDeviceSpec());
         std::string errorMessage;
 
-        armnn::Status ret = m_Runtime->LoadNetwork(networkId, move(optimized), errorMessage);
+        armnn::Status ret = m_Runtime->LoadNetwork(networkId, std::move(optimized), errorMessage);
 
         if (ret != armnn::Status::Success)
         {
diff --git a/src/armnnUtils/ParserPrototxtFixture.hpp b/src/armnnUtils/ParserPrototxtFixture.hpp
index ccb99be..a12a66e 100644
--- a/src/armnnUtils/ParserPrototxtFixture.hpp
+++ b/src/armnnUtils/ParserPrototxtFixture.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -127,7 +127,7 @@
     armnn::INetworkPtr network =
         m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes, requestedOutputs);
     auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
-    armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
+    armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optimized), errorMessage);
     if (ret != armnn::Status::Success)
     {
         throw armnn::Exception(fmt::format("LoadNetwork failed with error: '{0}' {1}",
@@ -144,7 +144,7 @@
     armnn::INetworkPtr network =
         m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes);
     auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
-    armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
+    armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optimized), errorMessage);
     if (ret != armnn::Status::Success)
     {
         throw armnn::Exception(fmt::format("LoadNetwork failed with error: '{0}' {1}",
@@ -161,7 +161,7 @@
     armnn::INetworkPtr network =
         m_Parser->CreateNetworkFromString(m_Prototext.c_str());
     auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
-    armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
+    armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optimized), errorMessage);
     if (ret != armnn::Status::Success)
     {
         throw armnn::Exception(fmt::format("LoadNetwork failed with error: '{0}' {1}",
diff --git a/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
index 10e8363..c6d49b1 100644
--- a/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -106,7 +106,7 @@
 
     float tolerance = GetActivationTolerance(descriptor.m_Function, ArmnnType);
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net),
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net),
                                                 inputTensorData,
                                                 expectedOutputTensorData,
                                                 backends,
diff --git a/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
index 87fccd8..1936af6 100644
--- a/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -72,7 +72,7 @@
     std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
     std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput } };
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 template<armnn::DataType ArmnnType>
@@ -113,7 +113,7 @@
     std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
     std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput } };
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 } // anonymous namespace
diff --git a/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp
index 27907f1..9e8a42d 100644
--- a/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -65,7 +65,7 @@
     std::map<int, std::vector<T>> inputTensorData = {{ 0, inputData }};
     std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 } // anonymous namespace
diff --git a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
index 4bdf3f8..e2a0d66 100644
--- a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -72,7 +72,8 @@
     std::map<int, std::vector<TInput>>  inputTensorData    = {{ 0, input0 }, { 1, input1 }};
     std::map<int, std::vector<uint8_t>> expectedOutputData = {{ 0, expectedOutput }};
 
-    EndToEndLayerTestImpl<ArmnnInType, DataType::Boolean>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnInType, DataType::Boolean>(std::move(net), inputTensorData, expectedOutputData,
+                                                          backends);
 }
 
 template<armnn::DataType ArmnnInType,
@@ -97,7 +98,8 @@
     std::map<int, std::vector<TInput>>  inputTensorData    = {{ 0, input0 }, { 1, input1 }};
     std::map<int, std::vector<uint8_t>> expectedOutputData = {{ 0, expectedOutput }};
 
-    EndToEndLayerTestImpl<ArmnnInType, DataType::Boolean>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnInType, DataType::Boolean>(std::move(net), inputTensorData, expectedOutputData,
+                                                          backends);
 }
 
 } // anonymous namespace
diff --git a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
index 439c083..82fceb8 100644
--- a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019,2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -54,7 +54,7 @@
     std::map<int, std::vector<float>> expectedOutputData = { { 0, expectedOutput } };
 
     EndToEndLayerTestImpl<ArmnnType, armnn::DataType::Float32>(
-            move(net), inputTensorData, expectedOutputData, backends);
+            std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
diff --git a/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
index 0f6d2c0..9d6c0ba 100644
--- a/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019,2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -103,7 +103,7 @@
                                                             { 3, expectedNumDetections }};
 
     EndToEndLayerTestImpl<ArmnnType, armnn::DataType::Float32>(
-        move(net), inputTensorData, expectedOutputData, backends);
+        std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
diff --git a/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
index 53722e1..c3d031c 100644
--- a/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -62,7 +62,7 @@
     std::map<int, std::vector<int32_t>> inputTensorData    = {{ 0, inputData }};
     std::map<int, std::vector<T>> expectedOutputTensorData = {{ 0, expectedOutputData }};
 
-    EndToEndLayerTestImpl<DataType::Signed32, ArmnnType>(move(network),
+    EndToEndLayerTestImpl<DataType::Signed32, ArmnnType>(std::move(network),
                                                          inputTensorData,
                                                          expectedOutputTensorData,
                                                          backends);
diff --git a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
index 0d2d2cb..a65f3b4 100644
--- a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -210,7 +210,7 @@
     std::map<int, std::vector<T>> inputTensorData    = {{ 0, inputData }, {1, weightsData}};
     std::map<int, std::vector<T>> expectedOutputTensorData = {{ 0, expectedOutputData }};
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(network),
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
                                                 inputTensorData,
                                                 expectedOutputTensorData,
                                                 backends,
@@ -305,7 +305,7 @@
         std::map<int, std::vector<T>> inputTensorData    = {{ 0, input }, {1, weights}};
         std::map<int, std::vector<T>> expectedOutputTensorData = {{ 0, expectedOutput }};
 
-        EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(network),
+        EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
                                                     inputTensorData,
                                                     expectedOutputTensorData,
                                                     backends,
@@ -327,7 +327,7 @@
         std::map<int, std::vector<T>> inputTensorData    = {{ 0, input }, {2, biasValues}};
         std::map<int, std::vector<T>> expectedOutputTensorData = {{ 0, expectedOutput }};
 
-        EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(network),
+        EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
                                                     inputTensorData,
                                                     expectedOutputTensorData,
                                                     backends,
diff --git a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
index cf42947..45b1f39 100644
--- a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -68,7 +68,7 @@
     std::map<int, std::vector<T>> inputTensorData = {{ 0, paramsData }};
     std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -124,7 +124,7 @@
     std::map<int, std::vector<T>> inputTensorData = {{ 0, paramsData }};
     std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 } // anonymous namespace
diff --git a/src/backends/backendsCommon/test/GatherNdEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/GatherNdEndToEndTestImpl.hpp
index 0eea911..6adaa5b 100644
--- a/src/backends/backendsCommon/test/GatherNdEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/GatherNdEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -87,7 +87,7 @@
     std::map<int, std::vector<T>> inputTensorData = {{ 0, paramsData }};
     std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -155,7 +155,7 @@
     std::map<int, std::vector<T>> inputTensorData = {{ 0, paramsData }};
     std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 } // anonymous namespace
diff --git a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
index 846aa76..7b32170 100644
--- a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019,2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -87,7 +87,7 @@
     std::map<int, std::vector<float>> inputTensorData = { { 0, inputData } };
     std::map<int, std::vector<float>> expectedOutputTensorData = { { 0, expectedOutputData } };
 
-    EndToEndLayerTestImpl<DataType::Float32, DataType::Float32>(move(net),
+    EndToEndLayerTestImpl<DataType::Float32, DataType::Float32>(std::move(net),
                                                                 inputTensorData,
                                                                 expectedOutputTensorData,
                                                                 backends);
diff --git a/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
index 9ffa2a6..46cd33c 100644
--- a/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019,2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -65,7 +65,7 @@
     std::map<int, std::vector<float>> inputTensorData = { {0, inputData} };
     std::map<int, std::vector<float>> expectedOutputTensorData = { {0, expectedOutputData} };
 
-    EndToEndLayerTestImpl<DataType::Float32, DataType::Float32>(move(net),
+    EndToEndLayerTestImpl<DataType::Float32, DataType::Float32>(std::move(net),
                                                                 inputTensorData,
                                                                 expectedOutputTensorData,
                                                                 backends);
diff --git a/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
index b361511..4988e74 100644
--- a/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019,2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -65,7 +65,7 @@
     std::map<int, std::vector<T>> inputTensorData          = { { 0, inputData }, { 1, alphaData} };
     std::map<int, std::vector<T>> expectedOutputTensorData = { { 0, expectedOutputData } };
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net),
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net),
                                                 inputTensorData,
                                                 expectedOutputTensorData,
                                                 backends);
diff --git a/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
index 9dcf705..035b2e9 100644
--- a/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -57,7 +57,7 @@
     std::map<int, std::vector<T>> inputTensorData   = {{ 0, inputData }};
     std::map<int, std::vector<int32_t>> expectedOutputTensorData = {{ 0, expectedOutputData }};
 
-    EndToEndLayerTestImpl<ArmnnType, DataType::Signed32>(move(network),
+    EndToEndLayerTestImpl<ArmnnType, DataType::Signed32>(std::move(network),
                                                          inputTensorData,
                                                          expectedOutputTensorData,
                                                          backends);
diff --git a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
index b868ba3..a1bd755 100644
--- a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019,2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -87,7 +87,7 @@
     std::map<int, std::vector<float>> expectedOutputTensorData = { { 0, expectedOutputData } };
 
     EndToEndLayerTestImpl<DataType::Float32, DataType::Float32>(
-            move(net),
+            std::move(net),
             inputTensorData,
             expectedOutputTensorData,
             backends);
diff --git a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
index b750a7a..da4c6a6 100644
--- a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -97,7 +97,7 @@
     std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
     std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput0 }, {1, expectedOutput1} };
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 template<armnn::DataType ArmnnType>
@@ -132,7 +132,7 @@
     std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
     std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput0 }, {1, expectedOutput1} };
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 template<armnn::DataType ArmnnType>
@@ -170,7 +170,7 @@
                                                          { 1, expectedOutput1 },
                                                          { 2, expectedOutput2 } };
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 template<armnn::DataType ArmnnType>
@@ -218,7 +218,7 @@
     std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput0 },
                                                          { 1, expectedOutput1 } };
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 template<armnn::DataType ArmnnType>
@@ -266,7 +266,7 @@
     std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput0 },
                                                          { 1, expectedOutput1 } };
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 template<armnn::DataType ArmnnType>
@@ -306,7 +306,7 @@
                                                          { 1, expectedOutput1 },
                                                          { 2, expectedOutput2 } };
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 template<armnn::DataType ArmnnType>
@@ -386,7 +386,7 @@
     std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }};
     std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput0 }, { 1, expectedOutput1 }};
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 template<armnn::DataType ArmnnType>
@@ -466,7 +466,7 @@
     std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }};
     std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput0 }, { 1, expectedOutput1 }};
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 template<armnn::DataType ArmnnType>
@@ -546,7 +546,7 @@
     std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }};
     std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput0 }, { 1, expectedOutput1 }};
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -613,7 +613,7 @@
     std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }};
     std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput0 }, { 1, expectedOutput1 }};
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 } // anonymous namespace
diff --git a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
index c6bfc5d..9ba9057 100644
--- a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
+++ b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -323,7 +323,7 @@
     std::map<int, std::vector<T>> inputTensorData = {{0, inputData}};
     std::map<int, std::vector<T>> expectedOutputData = {{0, outputExpected}};
 
-    AsyncEndToEndTestImpl<ArmnnType, ArmnnType>(move(net),
+    AsyncEndToEndTestImpl<ArmnnType, ArmnnType>(std::move(net),
                                                 inputTensorData,
                                                 expectedOutputData,
                                                 backends,
@@ -392,7 +392,7 @@
     outputTensors.push_back(std::map<int, std::vector<T>> {{0, outputExpected1}});
     outputTensors.push_back(std::map<int, std::vector<T>> {{0, outputExpected2}});
 
-    AsyncThreadedEndToEndTestImpl<ArmnnType, ArmnnType>(move(net), inputTensors, outputTensors, backends, 2);
+    AsyncThreadedEndToEndTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensors, outputTensors, backends, 2);
 }
 
 } // experimental namespace
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index a94e4dd..bc8ad5d 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -89,266 +89,6 @@
     CHECK(capabilities[0].m_Value);
 }
 
-TEST_CASE("ConcatonXorYPaddingRequiredTest")
-{
-    armnn::INetworkPtr net(armnn::INetwork::Create());
-
-    // Set up tensor infos
-    const armnn::TensorInfo inputInfo = armnn::TensorInfo({2, 3, 2, 2}, armnn::DataType::Float32);
-    const armnn::TensorInfo intermediateInfo = armnn::TensorInfo({2, 3, 2, 2}, armnn::DataType::Float32);
-    const armnn::TensorInfo outputInfo = armnn::TensorInfo({2, 3, 4, 2}, armnn::DataType::Float32);
-
-    armnn::Pooling2dDescriptor descriptor;
-    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
-    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
-    descriptor.m_StrideX = descriptor.m_StrideY = 1;
-    descriptor.m_PadLeft = 1;
-    descriptor.m_PadRight = 1;
-    descriptor.m_PadTop = 1;
-    descriptor.m_PadBottom = 1;
-    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
-
-    // Create the network
-    armnn::IConnectableLayer* const input0Layer = net->AddInputLayer(0, "input_0");
-    input0Layer->GetOutputSlot(0).SetTensorInfo(inputInfo);
-    armnn::IConnectableLayer* pooling2dLayer0 = net->AddPooling2dLayer(descriptor, "pooling2d_0");
-    pooling2dLayer0->GetOutputSlot(0).SetTensorInfo(intermediateInfo);
-    input0Layer->GetOutputSlot(0).Connect(pooling2dLayer0->GetInputSlot(0));
-
-    armnn::IConnectableLayer* const input1Layer = net->AddInputLayer(1, "input_1");
-    input1Layer->GetOutputSlot(0).SetTensorInfo(inputInfo);
-    armnn::IConnectableLayer* pooling2dLayer1 = net->AddPooling2dLayer(descriptor, "pooling2d_1");
-    pooling2dLayer1->GetOutputSlot(0).SetTensorInfo(intermediateInfo);
-    input1Layer->GetOutputSlot(0).Connect(pooling2dLayer1->GetInputSlot(0));
-
-    std::array<armnn::TensorShape, 2> concatInputShapes = { intermediateInfo.GetShape(), intermediateInfo.GetShape() };
-    armnn::IConnectableLayer* const concatLayer = net->AddConcatLayer(armnn::CreateDescriptorForConcatenation(
-        concatInputShapes.begin(), concatInputShapes.end(), 2), "concatenation");
-    concatLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
-    pooling2dLayer0->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0));
-    pooling2dLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1));
-
-    armnn::IConnectableLayer* const outputLayer = net->AddOutputLayer(0, "output");
-    concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-
-    armnn::IRuntime::CreationOptions options;
-    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
-
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-
-    const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
-
-    // Load graph into runtime
-    armnn::NetworkId networkIdentifier;
-    runtime->LoadNetwork(networkIdentifier, std::move(optimizedNet));
-
-    // now check the concat how many sub-tensors it is using..
-    auto TraceSubTensorHandleAncestry = [](armnn::ITensorHandle* const subTensorHandle)
-    {
-        if (subTensorHandle && subTensorHandle->GetParent())
-        {
-            return true;
-        }
-        return false;
-    };
-
-    unsigned int numberOfSubTensors = 0;
-    for (auto&& layer : theGraph)
-    {
-        if(layer->GetType() == armnn::LayerType::Concat)
-        {
-            for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
-            {
-                const armnn::OutputSlot* slot = layer->GetInputSlot(i).GetConnectedOutputSlot();
-                if (TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData()))
-                {
-                    ++numberOfSubTensors;
-                }
-            }
-        }
-    }
-    // sub-tensors should not be supported in this configuration
-    ARMNN_ASSERT(numberOfSubTensors == 0);
-}
-
-TEST_CASE("SplitteronXorYPaddingRequiredTest")
-{
-    using namespace armnn;
-
-    unsigned int splitAxis = 2;
-    unsigned int numSplit = 2;
-
-    const TensorShape& inputShape = { 1, 1, 4, 4 };
-    const armnn::TensorInfo intermediateInfo = armnn::TensorInfo({ 1, 1, 2, 4 }, armnn::DataType::Float32);
-    const std::vector<TensorShape> outputShapes{{ 1, 1, 2, 4 },
-                                                { 1, 1, 2, 4 }};
-
-    const float qScale = 1.0f;
-    const int32_t qOffset = 0;
-
-    // Creates structures for input & output.
-    std::vector<float> inputData{
-        9.0f,   27.0f,  18.0f,  36.0f,
-        18.0f,   9.0f,  18.0f,   9.0f,
-        27.0f,  18.0f,   9.0f,  27.0f,
-        9.0f,   27.0f,   9.0f,  18.0f,
-    };
-
-    std::vector<float> expectedOutput0{
-         7.0f,  11.0f,  13.0f, 9.0f,
-         7.0f,  11.0f,  13.0f, 9.0f
-    };
-
-    std::vector<float> expectedOutput1{
-        9.0f,  11.0f,  12.0f, 7.0f,
-        9.0f,  11.0f,  12.0f, 7.0f
-    };
-
-    // Builds up the structure of the network.
-    INetworkPtr net(INetwork::Create());
-
-    TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32, qScale, qOffset);
-
-    // Pooling
-    armnn::Pooling2dDescriptor descriptor;
-    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
-    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
-    descriptor.m_StrideX = descriptor.m_StrideY = 1;
-    descriptor.m_PadLeft = 1;
-    descriptor.m_PadRight = 1;
-    descriptor.m_PadTop = 1;
-    descriptor.m_PadBottom = 1;
-    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
-
-    // Splitter
-    std::vector<unsigned int> splitterDimSizes(inputShape.GetNumDimensions());
-
-    // Add current input shape to splitterDimSizes
-    for (unsigned int i = 0; i < inputShape.GetNumDimensions(); ++i)
-    {
-        splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
-    }
-
-    if (splitterDimSizes[splitAxis] % numSplit != 0)
-    {
-        throw ParseException("Number of splits must evenly divide the dimension");
-    }
-
-    splitterDimSizes[splitAxis] /= numSplit;
-
-    SplitterDescriptor splitDesc(numSplit, inputShape.GetNumDimensions());
-
-    for (unsigned int g = 0; g < numSplit; ++g)
-    {
-        // Set the size of the views.
-        for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
-        {
-            splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
-        }
-        splitDesc.SetViewOriginCoord(g, splitAxis, splitterDimSizes[splitAxis] * g);
-    }
-
-    IConnectableLayer* input = net->AddInputLayer(0, "input");
-    IConnectableLayer* pooling2d0 = net->AddPooling2dLayer(descriptor, "pooling2d_0");
-    IConnectableLayer* pooling2d1 = net->AddPooling2dLayer(descriptor, "pooling2d_1");
-    IConnectableLayer* splitter = net->AddSplitterLayer(splitDesc, "splitter");
-
-    // Connections
-    Connect(input, splitter, inputTensorInfo, 0, 0);
-    Connect(splitter, pooling2d0, intermediateInfo, 0, 0);
-    Connect(splitter, pooling2d1, intermediateInfo, 1, 0);
-
-    std::vector<IConnectableLayer*> pooling2dLayers{pooling2d0, pooling2d1};
-
-    for (unsigned int i = 0; i < outputShapes.size(); ++i)
-    {
-        TensorInfo outputTensorInfo(outputShapes[i], armnn::DataType::Float32, qScale, qOffset);
-        IConnectableLayer* output = net->AddOutputLayer(armnn::numeric_cast<LayerBindingId>(i));
-        Connect(pooling2dLayers[i], output, outputTensorInfo, 0, 0);
-    }
-
-    std::map<int, std::vector<float>> inputTensorData = {{ 0,inputData }};
-    std::map<int, std::vector<float>> expectedOutputData = {{ 0, expectedOutput0 }, { 1, expectedOutput1 }};
-
-    armnn::IRuntime::CreationOptions options;
-    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
-
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
-    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-
-    const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
-
-    // Load graph into runtime
-    armnn::NetworkId networkIdentifier;
-    runtime->LoadNetwork(networkIdentifier, std::move(optimizedNet));
-
-    // now check the concat how many sub-tensors it is using..
-    auto TraceSubTensorHandleAncestry = [](armnn::ITensorHandle* const subTensorHandle)
-    {
-        if (subTensorHandle && subTensorHandle->GetParent())
-        {
-            return true;
-        }
-        return false;
-    };
-
-    for (auto&& layer : theGraph)
-    {
-        if(layer->GetType() == armnn::LayerType::Pooling2d)
-        {
-            unsigned int numberOfSubTensors = 0;
-            for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
-            {
-                const armnn::OutputSlot* slot = layer->GetInputSlot(i).GetConnectedOutputSlot();
-                if (TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData()))
-                {
-                    ++numberOfSubTensors;
-                }
-            }
-            // sub-tensors should be supported in this configuration
-            ARMNN_ASSERT(numberOfSubTensors == 0);
-        }
-    }
-
-    InputTensors inputTensors;
-    inputTensors.reserve(inputTensorData.size());
-    for (auto&& it : inputTensorData)
-    {
-        TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkIdentifier, it.first);
-        inputTensorInfo.SetConstant(true);
-        inputTensors.push_back({it.first,
-                                ConstTensor(inputTensorInfo, it.second.data())});
-    }
-    OutputTensors outputTensors;
-    outputTensors.reserve(expectedOutputData.size());
-    std::map<int, std::vector<float>> outputStorage;
-    for (auto&& it : expectedOutputData)
-    {
-        std::vector<float> out(it.second.size());
-        outputStorage.emplace(it.first, out);
-        outputTensors.push_back({it.first,
-                                 Tensor(runtime->GetOutputTensorInfo(networkIdentifier, it.first),
-                                               outputStorage.at(it.first).data())});
-    }
-
-    // Does the inference.
-    runtime->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
-
-    // Checks the results.
-    float tolerance = 0.000001f;
-    for (auto&& it : expectedOutputData)
-    {
-        std::vector<float> out = outputStorage.at(it.first);
-        for (unsigned int i = 0; i < out.size(); ++i)
-        {
-            CHECK_MESSAGE(Compare<armnn::DataType::Float32>(it.second[i], out[i], tolerance) == true,
-                    "Actual output: " << out[i] << ". Expected output:" << it.second[i]);
-
-        }
-    }
-}
-
 TEST_CASE("NeonTensorHandleFactoryMemoryManaged")
 {
     std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>(