IVGCVSW-6181 patch to allow building against tflite > v2.3
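
For reference, a minimal standalone sketch of the version-gated opcode
resolution this patch applies in TfLiteParser.cpp; the helper name
GetBuiltinCode is illustrative only, while TF_MAJOR_VERSION /
TF_MINOR_VERSION come from tensorflow/lite/version.h and OperatorCodeT
comes from the generated flatbuffers schema:

    #include <algorithm>
    #include <tensorflow/lite/schema/schema_generated.h>
    #include <tensorflow/lite/version.h>

    // From tflite 2.4 onwards the old 8-bit code is kept in
    // deprecated_builtin_code and a wider builtin_code field is added;
    // std::max() picks whichever field actually carries the operator.
    static tflite::BuiltinOperator GetBuiltinCode(const tflite::OperatorCodeT* opCode)
    {
    #if TF_MAJOR_VERSION > 2 || (TF_MAJOR_VERSION == 2 && TF_MINOR_VERSION > 3)
        return std::max(opCode->builtin_code,
                        static_cast<tflite::BuiltinOperator>(opCode->deprecated_builtin_code));
    #else
        return opCode->builtin_code;
    #endif
    }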

Change-Id: I4199239228f7d5f4c819a7fe04cca059a830bf1f
Signed-off-by: Jim Flynn <jim.flynn@arm.com>
diff --git a/src/armnnTfLiteParser/CMakeLists.txt b/src/armnnTfLiteParser/CMakeLists.txt
index 6a02c94..1fbd8b8 100755
--- a/src/armnnTfLiteParser/CMakeLists.txt
+++ b/src/armnnTfLiteParser/CMakeLists.txt
@@ -13,16 +13,44 @@
 
     add_library_ex(armnnTfLiteParser SHARED ${armnn_tf_lite_parser_sources})
 
+    # NOTE: even though the tensorflow sources contain a ./tensorflow/lite/schema/schema_generated.h
+    #       file, we cannot use it directly because we have to take packaging for Linux distros into
+    #       account. Ubuntu 20.04 ships flatbuffers 1.11 and 21.10 ships 1.12; despite the minor
+    #       version difference the two are not backward compatible. The current tensorflow lite
+    #       source (v2.3-v2.5) is generated with 1.12, so we need to regenerate the header from
+    #       ./tensorflow/lite/schema/schema.fbs in the tensorflow lite source using the flatc that
+    #       matches the target platform, and then use ./tensorflow/lite/version.h to determine which
+    #       version of tensorflow lite the header was generated from.
     include_directories(SYSTEM "${FLATBUFFERS_INCLUDE_PATH}")
     set_target_properties(armnnTfLiteParser PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR})
     target_include_directories(armnnTfLiteParser PRIVATE ../armnn)
     target_include_directories(armnnTfLiteParser PRIVATE ../armnnUtils)
     target_include_directories(armnnTfLiteParser SYSTEM PRIVATE "${TF_LITE_SCHEMA_INCLUDE_PATH}")
 
+
+    # Use armnn/delegate/cmake/Modules/FindTfLite.cmake to locate the TfLite sources so that
+    # tensorflow/lite/version.h can be used to determine which version of tensorflow lite
+    # we are compiling against.
+    find_package(TfLite REQUIRED MODULE)
+
+    # Various tflite header files are not warning-clean.
+    # We can't change compilation flags on header files directly, so we add them to an interface library first.
+    add_library(tflite_version_headers INTERFACE)
+    target_include_directories(tflite_version_headers INTERFACE $<BUILD_INTERFACE:${TfLite_INCLUDE_DIR}>
+                                                                $<INSTALL_INTERFACE:include/tflite_version_headers>)
+
+    target_compile_options(tflite_version_headers INTERFACE -Wno-conversion
+                                                            -Wno-sign-conversion
+                                                            -Wno-unused-parameter
+                                                            -Wno-unused-function)
+
     # If user has explicitly specified flatbuffers lib then use that,
     # otherwise search for it based on FLATBUFFERS_BUILD_DIR
     if (FLATBUFFERS_LIBRARY)
-        target_link_libraries(armnnTfLiteParser armnn ${FLATBUFFERS_LIBRARY})
+        target_link_libraries(armnnTfLiteParser
+                              armnn
+                              tflite_version_headers
+                              ${FLATBUFFERS_LIBRARY})
     else()
         # Use PATH_SUFFIXES to help find separate libs for debug/release on Windows builds
         find_library(FLATBUFFERS_LIBRARY_DEBUG NAMES flatbuffers
@@ -31,7 +59,11 @@
         find_library(FLATBUFFERS_LIBRARY_RELEASE NAMES flatbuffers
                      HINTS ${FLATBUFFERS_BUILD_DIR}
                      PATH_SUFFIXES "Release")
-        target_link_libraries(armnnTfLiteParser armnn debug ${FLATBUFFERS_LIBRARY_DEBUG} optimized ${FLATBUFFERS_LIBRARY_RELEASE})
+        target_link_libraries(armnnTfLiteParser
+                              armnn
+                              tflite_version_headers
+                              debug ${FLATBUFFERS_LIBRARY_DEBUG}
+                              optimized ${FLATBUFFERS_LIBRARY_RELEASE})
     endif()
 
     set_target_properties(armnnTfLiteParser PROPERTIES VERSION ${TFLITE_PARSER_LIB_VERSION} SOVERSION ${TFLITE_PARSER_LIB_SOVERSION} )
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 305f769..8c85d30 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -32,6 +32,8 @@
 
 #include <fmt/format.h>
 
+#include <tensorflow/lite/version.h>
+
 #include <algorithm>
 #include <fstream>
 #include <iostream>
@@ -767,7 +769,14 @@
             for (OperatorPtr const& op : subgraph->operators)
             {
                 auto const& opCodePtr = m_Model->operator_codes[op->opcode_index];
+
+// Work around the deprecated_builtin_code field introduced in tflite 2.4 in a backwards compatible manner
+#if TF_MAJOR_VERSION > 2 || (TF_MAJOR_VERSION == 2 && TF_MINOR_VERSION > 3)
+                auto builtinCode = std::max(opCodePtr->builtin_code,
+                        static_cast<tflite::BuiltinOperator>(opCodePtr->deprecated_builtin_code));
+#else
                 auto builtinCode = opCodePtr->builtin_code;
+#endif
 
                 if (builtinCode > tflite::BuiltinOperator_MAX)
                 {
@@ -887,7 +896,14 @@
     const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
 
     auto opcodeIndex = operatorPtr->opcode_index;
+
+// Work around the deprecated_builtin_code field introduced in tflite 2.4 in a backwards compatible manner
+#if TF_MAJOR_VERSION > 2 || (TF_MAJOR_VERSION == 2 && TF_MINOR_VERSION > 3)
+    auto opcode      = std::max(m_Model->operator_codes[opcodeIndex]->builtin_code,
+            static_cast<tflite::BuiltinOperator>(m_Model->operator_codes[opcodeIndex]->deprecated_builtin_code));
+#else
     auto opcode      = m_Model->operator_codes[opcodeIndex]->builtin_code;
+#endif
 
     if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
     {