MLECO-3620 MLECO-3619: Update to 22.11 dependencies

* CMSIS-NN is now a separate dependency
* Added inclusive language commitment
* TensorFlow Lite Micro dependency moved ahead of the 22.11 release to fix a compilation issue
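* tflite::ErrorReporter is dropped throughout, following the updated TensorFlow
  Lite Micro API: the allocator and interpreter are now created without an
  error reporter and failures are reported via printf_err. A minimal sketch of
  the resulting set-up path is shown below; the opResolver name is a
  placeholder for the resolver returned by Model::GetOpResolver() and is not
  part of this patch:

      /* Requires the TFLite Micro headers pulled in via TensorFlowLiteMicro.hpp. */
      const tflite::Model* model = ::tflite::GetModel(nnModelAddr);
      if (model->version() != TFLITE_SCHEMA_VERSION) {
          printf_err("Schema version mismatch\n");   /* reported via printf_err now */
      }

      /* MicroAllocator::Create no longer takes an error reporter argument. */
      tflite::MicroAllocator* allocator =
          tflite::MicroAllocator::Create(tensorArenaAddr, tensorArenaSize);

      /* Interpreter is constructed from model, op resolver and allocator only. */
      tflite::MicroInterpreter* interpreter =
          new tflite::MicroInterpreter(model, opResolver, allocator);
      interpreter->AllocateTensors();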

Change-Id: I60e3311ff7da2ce064cbcdca054a86bdd1f620d8
diff --git a/source/application/api/common/include/Model.hpp b/source/application/api/common/include/Model.hpp
index 8b64f10..ed2b4c1 100644
--- a/source/application/api/common/include/Model.hpp
+++ b/source/application/api/common/include/Model.hpp
@@ -133,7 +133,6 @@
         size_t GetActivationBufferSize();
 
     private:
-        tflite::ErrorReporter* m_pErrorReporter{nullptr};  /* Pointer to the error reporter. */
         const tflite::Model* m_pModel{nullptr};            /* Tflite model pointer. */
         tflite::MicroInterpreter* m_pInterpreter{nullptr}; /* Tflite interpreter. */
         tflite::MicroAllocator* m_pAllocator{nullptr};     /* Tflite micro allocator. */
diff --git a/source/application/api/common/include/TensorFlowLiteMicro.hpp b/source/application/api/common/include/TensorFlowLiteMicro.hpp
index 9826dfa..944ed4a 100644
--- a/source/application/api/common/include/TensorFlowLiteMicro.hpp
+++ b/source/application/api/common/include/TensorFlowLiteMicro.hpp
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright 2021 Arm Limited and/or its affiliates <open-source-office@arm.com>
+ * SPDX-FileCopyrightText: Copyright 2021-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
  * SPDX-License-Identifier: Apache-2.0
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -30,7 +30,6 @@
     #pragma clang diagnostic ignored "-Wunused-parameter"
     #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
     #include "tensorflow/lite/micro/micro_interpreter.h"
-    #include "tensorflow/lite/micro/micro_error_reporter.h"
     #include "tensorflow/lite/micro/all_ops_resolver.h"
     #pragma clang diagnostic pop
 #elif defined(__GNUC__)
@@ -38,18 +37,17 @@
     #pragma GCC diagnostic ignored "-Wunused-parameter"
     #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
     #include "tensorflow/lite/micro/micro_interpreter.h"
-    #include "tensorflow/lite/micro/micro_error_reporter.h"
     #include "tensorflow/lite/micro/all_ops_resolver.h"
     #pragma GCC diagnostic pop
 #else
     #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
     #include "tensorflow/lite/micro/micro_interpreter.h"
-    #include "tensorflow/lite/micro/micro_error_reporter.h"
     #include "tensorflow/lite/micro/all_ops_resolver.h"
 #endif
 
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/micro/kernels/micro_ops.h"
+#include "tensorflow/lite/micro/tflite_bridge/op_resolver_bridge.h"
 #include "tensorflow/lite/schema/schema_generated.h"
 #include "tensorflow/lite/schema/schema_utils.h"
 
diff --git a/source/application/api/common/source/Model.cc b/source/application/api/common/source/Model.cc
index 1dbef1d..8467d71 100644
--- a/source/application/api/common/source/Model.cc
+++ b/source/application/api/common/source/Model.cc
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright 2021 Arm Limited and/or its affiliates <open-source-office@arm.com>
+ * SPDX-FileCopyrightText: Copyright 2021-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
  * SPDX-License-Identifier: Apache-2.0
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -31,9 +31,7 @@
 arm::app::Model::Model() :
     m_inited (false),
     m_type(kTfLiteNoType)
-{
-    this->m_pErrorReporter = tflite::GetMicroErrorReporter();
-}
+{}
 
 bool arm::app::Model::Init(uint8_t* tensorArenaAddr,
                            uint32_t tensorArenaSize,
@@ -50,8 +48,8 @@
     this->m_pModel = ::tflite::GetModel(nnModelAddr);
 
     if (this->m_pModel->version() != TFLITE_SCHEMA_VERSION) {
-        this->m_pErrorReporter->Report(
-            "[ERROR] model's schema version %d is not equal "
+        printf_err(
+            "Model's schema version %d is not equal "
             "to supported version %d.",
             this->m_pModel->version(), TFLITE_SCHEMA_VERSION);
         return false;
@@ -79,8 +77,7 @@
 
         this->m_pAllocator = tflite::MicroAllocator::Create(
                                         tensorArenaAddr,
-                                        tensorArenaSize,
-                                        this->m_pErrorReporter);
+                                        tensorArenaSize);
 
         if (!this->m_pAllocator) {
             printf_err("Failed to create allocator\n");
@@ -93,7 +90,7 @@
 
     this->m_pInterpreter = new ::tflite::MicroInterpreter(
         this->m_pModel, this->GetOpResolver(),
-        this->m_pAllocator, this->m_pErrorReporter);
+        this->m_pAllocator);
 
     if (!this->m_pInterpreter) {
         printf_err("Failed to allocate interpreter\n");
@@ -211,8 +208,7 @@
         const tflite::OperatorCode* opcode = opcodes->Get(op->opcode_index());
         const TfLiteRegistration* reg = nullptr;
 
-        tflite::GetRegistrationFromOpCode(opcode, this->GetOpResolver(),
-                                          this->m_pErrorReporter, &reg);
+        tflite::GetRegistrationFromOpCode(opcode, this->GetOpResolver(), &reg);
         std::string opName;
 
         if (reg) {