IVGCVSW-5587 Remove Tensorflow requirement from Arm NN TfLite delegate

 * Added support for building the delegate with an external armnn path
 * Replaced potentially troublesome package manager
 * Explicitly set the privacy levels of delegate libraries
 * Fixed some error handling in ExecuteNetwork

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I2a7abc099796012cbb043c5b319f81778c9f3b56
diff --git a/CMakeLists.txt b/CMakeLists.txt
index cee3c2a..763c010 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -41,10 +41,8 @@
 
 
 if (BUILD_ARMNN_TFLITE_DELEGATE)
-
-    list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/delegate/cmake/Modules)
+    set(ARMNN_SUB_PROJECT ON)
     add_subdirectory(delegate)
-
     add_definitions(-DARMNN_TF_LITE_DELEGATE)
 endif()
 
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 677a38e..aa2f360 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -8,7 +8,7 @@
 
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -Wall -Wextra -Werror -Wold-style-cast -Wno-missing-braces -Wconversion -Wsign-conversion")
 
-set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/Modules/")
+set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${PROJECT_SOURCE_DIR}/cmake/Modules/")
 
 set(armnnDelegate_sources)
 list(APPEND armnnDelegate_sources
@@ -54,24 +54,15 @@
 include(GNUInstallDirs)
 
 ## Add Armnn as a Dependency
-find_package(Armnn REQUIRED)
-target_link_libraries(armnnDelegate Armnn::Armnn)
-
-## Add Tensorflow v2.3.1 dependency
-find_package(Tensorflow 2.3.1 REQUIRED MODULE)
-
-target_link_libraries(armnnDelegate
-        ${Tensorflow_LIB})
-
-target_include_directories(armnnDelegate
-        PRIVATE
-            ${Tensorflow_INCLUDE_DIR})
+if(NOT ARMNN_SUB_PROJECT)
+    find_package(Armnn REQUIRED CONFIG HINTS ${Armnn_DIR})
+endif()
+target_link_libraries(armnnDelegate PUBLIC Armnn::Armnn)
 
 ## Add TfLite v2.3.1 dependency
 find_package(TfLite REQUIRED MODULE)
 
-target_link_libraries(armnnDelegate
-        ${TfLite_LIB})
+target_link_libraries(armnnDelegate PUBLIC ${TfLite_LIB})
 
 # Various tflite header files are not warning clean
 # We can't change compilation flags on header files directly, so we need to add them to an interface library first
@@ -84,12 +75,12 @@
                                                                          -Wno-unused-parameter
                                                                          -Wno-unused-function>)
 
-target_link_libraries(armnnDelegate tflite_headers)
+target_link_libraries(armnnDelegate PUBLIC tflite_headers)
 
 ## Add Flatbuffers dependency
 find_package(Flatbuffers REQUIRED MODULE)
 
-target_link_libraries(armnnDelegate
+target_link_libraries(armnnDelegate PRIVATE
         ${Flatbuffers_LIB})
 
 # include/flatbuffers/flatbuffers.h is not warning clean
@@ -99,7 +90,7 @@
                                                  $<INSTALL_INTERFACE:include/flatbuffer_headers>)
 target_compile_options(flatbuffer_headers INTERFACE $<$<CXX_COMPILER_ID:GNU>:-Wno-sign-conversion>)
 
-target_link_libraries(armnnDelegate flatbuffer_headers)
+target_link_libraries(armnnDelegate PUBLIC flatbuffer_headers)
 
 option(BUILD_UNIT_TESTS "Build unit tests" ON)
 if(BUILD_UNIT_TESTS)
@@ -146,12 +137,11 @@
     # Add half library from armnn third-party libraries
     target_include_directories(DelegateUnitTests PRIVATE ${ARMNN_SOURCE_DIR}/third-party)
 
-    target_link_libraries(DelegateUnitTests armnnDelegate)
-    target_link_libraries(DelegateUnitTests Armnn::armnnUtils)
+    target_link_libraries(DelegateUnitTests PRIVATE armnnDelegate)
+    target_link_libraries(DelegateUnitTests PRIVATE Armnn::armnnUtils)
 
-target_link_libraries(DelegateUnitTests tflite_headers)
-
-target_link_libraries(DelegateUnitTests flatbuffer_headers)
+    target_link_libraries(DelegateUnitTests PRIVATE tflite_headers)
+    target_link_libraries(DelegateUnitTests PRIVATE flatbuffer_headers)
 
 endif()
 
@@ -187,10 +177,13 @@
 set(INSTALL_CONFIGDIR ${CMAKE_INSTALL_LIBDIR})
 message(STATUS "CMAKE_CURRENT_LIST_DIR ${CMAKE_CURRENT_LIST_DIR}" )
 message(STATUS "CMAKE_CURRENT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}" )
+SET(Armnn_DIR "${Armnn_DIR}")
+
 configure_package_config_file(
         ${CMAKE_CURRENT_LIST_DIR}/cmake/Modules/ArmnnDelegateConfig.cmake.in
         ${CMAKE_CURRENT_BINARY_DIR}/ArmnnDelegateConfig.cmake
-        INSTALL_DESTINATION ${INSTALL_CONFIGDIR})
+        INSTALL_DESTINATION ${INSTALL_CONFIGDIR}
+        PATH_VARS  Armnn_DIR)
 
 ## Install ArmNN Delegate config file
 install(
diff --git a/delegate/cmake/Modules/ArmnnDelegateConfig.cmake.in b/delegate/cmake/Modules/ArmnnDelegateConfig.cmake.in
index c403068..c878c46 100644
--- a/delegate/cmake/Modules/ArmnnDelegateConfig.cmake.in
+++ b/delegate/cmake/Modules/ArmnnDelegateConfig.cmake.in
@@ -9,10 +9,12 @@
 
 include(CMakeFindDependencyMacro)
 
-find_dependency(Armnn REQUIRED CONFIG)
-
 list(APPEND CMAKE_MODULE_PATH ${ARMNN_DELEGATE_CMAKE_DIR})
 
+@PACKAGE_INIT@
+set_and_check(Armnn_DIR "@Armnn_DIR@")
+find_dependency(Armnn REQUIRED CONFIG HINTS ${Armnn_DIR})
+
 if(NOT TARGET ArmnnDelegate::ArmnnDelegate)
     MESSAGE(STATUS "ArmnnDelegate Import: ${ARMNN_DELEGATE_CMAKE_DIR}/ArmnnDelegateTargets.cmake")
     include("${ARMNN_DELEGATE_CMAKE_DIR}/ArmnnDelegateTargets.cmake")
diff --git a/delegate/cmake/Modules/FindTensorflow.cmake b/delegate/cmake/Modules/FindTensorflow.cmake
deleted file mode 100644
index 8f90011..0000000
--- a/delegate/cmake/Modules/FindTensorflow.cmake
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-# SPDX-License-Identifier: MIT
-#
-
-include(FindPackageHandleStandardArgs)
-unset(TENSORFLOW_FOUND)
-
-find_path(Tensorflow_INCLUDE_DIR
-        NAMES
-            tensorflow/core
-            tensorflow/cc
-            third_party
-        HINTS
-            ${TENSORFLOW_ROOT})
-
-find_library(Tensorflow_LIB
-        NAMES
-            tensorflow_all
-        HINTS
-            ${TENSORFLOW_LIB_DIR})
-
-## Set TENSORFLOW_FOUND
-find_package_handle_standard_args(Tensorflow DEFAULT_MSG Tensorflow_INCLUDE_DIR Tensorflow_LIB)
-
-## Set external variables for usage in CMakeLists.txt
-if(TENSORFLOW_FOUND)
-    set(Tensorflow_LIB ${Tensorflow_LIB})
-    set(Tensorflow_INCLUDE_DIRS ${Tensorflow_INCLUDE_DIR})
-endif()
\ No newline at end of file
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index be341b6..00507e0 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -88,6 +88,14 @@
         if (params.m_InputTypes[inputIndex].compare("float") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<float>(input);
+
+            if(inputData == NULL)
+            {
+                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+                return EXIT_FAILURE;
+            }
+
             std::vector<float> tensorData;
             PopulateTensorWithDataGeneric<float>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
@@ -100,6 +108,14 @@
         else if (params.m_InputTypes[inputIndex].compare("int8") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);
+
+            if(inputData == NULL)
+            {
+                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+                return EXIT_FAILURE;
+            }
+
             std::vector<int8_t> tensorData;
             PopulateTensorWithDataGeneric<int8_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
@@ -112,6 +128,14 @@
         else if (params.m_InputTypes[inputIndex].compare("int") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);
+
+            if(inputData == NULL)
+            {
+                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+                return EXIT_FAILURE;
+            }
+
             std::vector<int32_t> tensorData;
             PopulateTensorWithDataGeneric<int32_t>(tensorData,
                                                    params.m_InputTensorShapes[inputIndex]->GetNumElements(),
@@ -124,6 +148,14 @@
         else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);
+
+            if(inputData == NULL)
+            {
+                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+                return EXIT_FAILURE;
+            }
+
             std::vector<uint8_t> tensorData;
             PopulateTensorWithDataGeneric<uint8_t>(tensorData,
                                                    params.m_InputTensorShapes[inputIndex]->GetNumElements(),
@@ -468,7 +500,7 @@
         #if defined(ARMNN_TF_LITE_DELEGATE)
             return TfLiteDelegateMainImpl(ProgramOptions.m_ExNetParams, runtime);
         #else
-            ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support.";
+            ARMNN_LOG(fatal) << "Not built with Arm NN Tensorflow-Lite delegate support.";
             return EXIT_FAILURE;
         #endif
         }