IVGCVSW-5846 Remove TODO statements from Armnn code

 * Removed all TODO statements from comments
 * The removed statements are tracked under IVGCVSW-5846
 * Removed ProtoxtFixture.cpp from the ONNX parser tests as it is not used

Signed-off-by: David Monahan <david.monahan@arm.com>
Change-Id: Ia0a15f8a0d4123c8831638634eaa0d1018c40e2c
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a20543b..59059a9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -761,7 +761,6 @@
             src/armnnOnnxParser/test/OnnxParserTestUtils.cpp
             src/armnnOnnxParser/test/OnnxParserTestUtils.hpp
             src/armnnOnnxParser/test/Pooling.cpp
-            src/armnnOnnxParser/test/ProtoxtFixture.cpp
             src/armnnOnnxParser/test/Relu.cpp
             src/armnnOnnxParser/test/Reshape.cpp
             src/armnnOnnxParser/test/Shape.cpp
diff --git a/shim/sl/canonical/ArmnnPreparedModel.cpp b/shim/sl/canonical/ArmnnPreparedModel.cpp
index 79cd241..91bd04e 100644
--- a/shim/sl/canonical/ArmnnPreparedModel.cpp
+++ b/shim/sl/canonical/ArmnnPreparedModel.cpp
@@ -232,7 +232,6 @@
             return ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
         }
 
-        //TODO: Need to check for Request::Argument::LifeTime::POINTER
         if (outputArg.lifetime == Request::Argument::LifeTime::POOL)
         {
             size_t bufferSize = memPools.at(outputArg.location.poolIndex).getSize();
@@ -599,7 +598,6 @@
 
 GeneralResult<SharedBurst> ArmnnPreparedModel::configureExecutionBurst() const
 {
-    // TODO: Implement BURST
     return nullptr;
 }
 
diff --git a/shim/sl/canonical/CanonicalUtils.cpp b/shim/sl/canonical/CanonicalUtils.cpp
index 059b5ca..622d4b1 100644
--- a/shim/sl/canonical/CanonicalUtils.cpp
+++ b/shim/sl/canonical/CanonicalUtils.cpp
@@ -497,7 +497,7 @@
     return !tensorInfo.GetShape().AreAllDimensionsSpecified();
 }
 
-bool AreDynamicTensorsSupported() //TODO
+bool AreDynamicTensorsSupported()
 {
     return true;
 }
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index 701327b..e992721 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.cpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -950,7 +950,6 @@
     layer->ExecuteStrategy(visitor);
 }
 
-// TODO add one with peephole
 TEST_CASE("CheckLstmLayerPeephole")
 {
     LstmDescriptor descriptor;
@@ -1275,7 +1274,6 @@
     layer->ExecuteStrategy(visitor);
 }
 
-// TODO add one with projection
 TEST_CASE("CheckLstmLayerProjection")
 {
     LstmDescriptor descriptor;
diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp
index 26e2cee..c0b42d9 100644
--- a/src/armnnOnnxParser/OnnxParser.cpp
+++ b/src/armnnOnnxParser/OnnxParser.cpp
@@ -1528,8 +1528,7 @@
 
     VALID_INPUTS(node, STR_LIST(onnx::TensorProto::FLOAT));
 
-    // TODO: unify broadcast validation code across layers
-    // tracked by: IVGCVSW-1576
+    // IVGCVSW-1576: unify broadcast validation code across layers
 
     // Checking broadcast compatibility : only scalar or 1D tensors
     auto inputs = AddPrepareBroadcast(node.input(0), node.input(1));
@@ -1858,8 +1857,6 @@
         }
         else
         {
-            // TODO: split the input by channels into channels/groups separate convolutions
-            //  and concatenate the results afterwards
             throw ParseException(fmt::format("Error parsing Convolution node: {}. "
                                              "The 'group'={} parameter should be 1 or be equal to the "
                                              "channel of the input shape={} (in NCHW format). {}",
diff --git a/src/armnnOnnxParser/test/ProtoxtFixture.cpp b/src/armnnOnnxParser/test/ProtoxtFixture.cpp
deleted file mode 100644
index 067b440..0000000
--- a/src/armnnOnnxParser/test/ProtoxtFixture.cpp
+++ /dev/null
@@ -1,80 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include  "armnnOnnxParser/IOnnxParser.hpp"
-#include  "ParserPrototxtFixture.hpp"
-
-TEST_SUITE("OnnxParser_PrototxtFixture")
-{
-struct ProtoxtTestFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
-{
-    ProtoxtTestFixture()
-    {
-        m_Prototext = R"(
-                   ir_version: 3
-                   producer_name:  "CNTK "
-                   producer_version:  "2.5.1 "
-                   domain:  "ai.cntk "
-                   model_version: 1
-                   graph {
-                     name:  "CNTKGraph "
-                     node {
-                        input:  "Input"
-                        output:  "Output"
-                        name:  "Plus112"
-                        op_type:  "Add "
-                      }
-                      input {
-                          name:  "Input"
-                          type {
-                            tensor_type {
-                              elem_type: 1
-                              shape {
-                                dim {
-                                  dim_value: 2
-                                }
-                              }
-                            }
-                          }
-                      }
-                      output {
-                          name:  "Output"
-                          type {
-                             tensor_type {
-                               elem_type: 1
-                               shape {
-                                 dim {
-                                    dim_value: 1
-                                 }
-                                 dim {
-                                    dim_value: 10
-                                 }
-                               }
-                             }
-                          }
-                      }
-                   }
-                   opset_import {
-                      version: 7
-                    })";
-       // Setup();
-    }
-};
-
-
-TEST_CASE_FIXTURE(ProtoxtTestFixture, "ProtoxtTest")
-{
-    //TODO : add a test to check if the inputs and outputs are correctly inferred.
-}
-
-TEST_CASE_FIXTURE(ProtoxtTestFixture, "ProtoxtTestWithBadInputs")
-{
-
-   // CHECK_THROWS_AS(RunTest<4>({{ "InexistantInput" , {0.0, 1.0, 2.0, 3.0}}},
-   //                              {{ "InexistantOutput" , {0.0, 1.0, 2.0, 3.0}}}),
-   //                   armnn::InvalidArgumentException );
-}
-
-}
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 51bc3e6..d4e3fb7 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -1561,7 +1561,6 @@
     return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
 }
 
-// TODO merge with defaulted modelOptions above
 bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
                                         Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported,
diff --git a/src/backends/cl/test/ClRuntimeTests.cpp b/src/backends/cl/test/ClRuntimeTests.cpp
index db01fa7..8426c5f 100644
--- a/src/backends/cl/test/ClRuntimeTests.cpp
+++ b/src/backends/cl/test/ClRuntimeTests.cpp
@@ -138,7 +138,7 @@
     CHECK(leakedBefore == leakedAfter);
 
     // Add a reasonable threshold before and after running valgrind with the ACL clear cache function.
-    // TODO Threshold set to 80k until the root cause of the memory leakage is found and fixed. Revert threshold
+    // Threshold set to 80k until the root cause of the memory leakage is found and fixed. Revert threshold
     // value to 1024 when fixed.
     CHECK(static_cast<long>(reachableAfter) - static_cast<long>(reachableBefore) < 81920);
 
diff --git a/src/timelineDecoder/JSONTimelineDecoder.cpp b/src/timelineDecoder/JSONTimelineDecoder.cpp
index 2f99979..ee7bdb4 100644
--- a/src/timelineDecoder/JSONTimelineDecoder.cpp
+++ b/src/timelineDecoder/JSONTimelineDecoder.cpp
@@ -74,9 +74,6 @@
     }
     else
     {
-        /*
-         * TODO Handle DataLink
-         */
         m_Model.relationships.insert({relationship.m_Guid, relationship});
     }
 
@@ -99,9 +96,6 @@
     }
     else
     {
-        /*
-         * TODO Add some protection against packet ordering issues
-         */
         m_Model.relationships.insert({relationship.m_Guid, relationship});
     }
 }
@@ -128,16 +122,10 @@
         }
         else
         {
-            /*
-             * TODO Add some protection against packet ordering issues
-             */
             m_Model.relationships.insert({relationship.m_Guid, relationship});
         }
     } else
     {
-        /*
-         * TODO Add some protection against packet ordering issues
-         */
         m_Model.relationships.insert({relationship.m_Guid, relationship});
     }
 }
@@ -156,9 +144,6 @@
     }
     else
     {
-        /*
-        * TODO Add some protection against packet ordering issues
-        */
         m_Model.relationships.insert({relationship.m_Guid, relationship});
     }
 }
@@ -174,9 +159,6 @@
     }
     else
     {
-        /*
-        * TODO Add some protection against packet ordering issues
-        */
         m_Model.relationships.insert({relationship.m_Guid, relationship});
     }
 }
@@ -192,9 +174,6 @@
     }
     else
     {
-        /*
-        * TODO Add some protection against packet ordering issues
-        */
         m_Model.relationships.insert({relationship.m_Guid, relationship});
     }
 }
@@ -210,9 +189,6 @@
     }
     else
     {
-        /*
-        * TODO Add some protection against packet ordering issues
-        */
         m_Model.relationships.insert({relationship.m_Guid, relationship});
     }
 }
@@ -230,9 +206,6 @@
     }
     else
     {
-        /*
-        * TODO Add some protection against packet ordering issues
-        */
         m_Model.relationships.insert({relationship.m_Guid, relationship});
     }
 }