Open-source ML embedded evaluation kit

Change-Id: I12e807f19f5cacad7cef82572b6dd48252fd61fd
diff --git a/source/application/tensorflow-lite-micro/Model.cc b/source/application/tensorflow-lite-micro/Model.cc
new file mode 100644
index 0000000..0775467
--- /dev/null
+++ b/source/application/tensorflow-lite-micro/Model.cc
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Model.hpp"
+
+#include "hal.h"
+
+#include <cassert>
+#include <cstdint>
+#include <cstring>
+#include <string>
+
+/* Destructor: clean up the interpreter instance created during initialisation. */
+arm::app::Model::~Model()
+{
+    if (this->_m_pInterpreter) {
+        delete this->_m_pInterpreter;
+    }
+
+    /**
+     * No clean-up function available for allocator in TensorFlow Lite Micro yet.
+     **/
+}
+
+arm::app::Model::Model() :
+    _m_inited (false),
+    _m_type(kTfLiteNoType)
+{
+    this->_m_pErrorReporter = &this->_m_uErrorReporter;
+}
+
+bool arm::app::Model::Init(tflite::MicroAllocator* allocator)
+{
+    /* Following the TensorFlow Lite Micro example:
+     * map the model into a usable data structure. This doesn't involve any
+     * copying or parsing; it's a very lightweight operation. */
+    const uint8_t* model_addr = ModelPointer();
+    debug("loading model from @ 0x%p\n", model_addr);
+    this->_m_pModel = ::tflite::GetModel(model_addr);
+
+    if (this->_m_pModel->version() != TFLITE_SCHEMA_VERSION) {
+        this->_m_pErrorReporter->Report(
+            "[ERROR] model's schema version %d is not equal "
+            "to supported version %d.",
+            this->_m_pModel->version(), TFLITE_SCHEMA_VERSION);
+        return false;
+    }
+
+    /* Pull in only the operation implementations we need.
+     * This relies on a complete list of all the ops needed by this graph.
+     * An easier approach is to just use the AllOpsResolver, but this will
+     * incur some penalty in code space for op implementations that are not
+     * needed by this graph.
+     * static ::tflite::ops::micro::AllOpsResolver resolver; */
+    /* NOLINTNEXTLINE(runtime-global-variables) */
+    debug("loading op resolver\n");
+
+    this->EnlistOperations();
+
+    /* Create allocator instance, if it doesn't exist */
+    this->_m_pAllocator = allocator;
+    if (!this->_m_pAllocator) {
+        /* Create an allocator instance */
+        info("Creating allocator using tensor arena in %s\n",
+            ACTIVATION_BUF_SECTION_NAME);
+
+        this->_m_pAllocator = tflite::MicroAllocator::Create(
+                                        this->GetTensorArena(),
+                                        this->GetActivationBufferSize(),
+                                        this->_m_pErrorReporter);
+
+        if (!this->_m_pAllocator) {
+            printf_err("Failed to create allocator\n");
+            return false;
+        }
+        debug("Created new allocator @ 0x%p\n", this->_m_pAllocator);
+    } else {
+        debug("Using existing allocator @ 0x%p\n", this->_m_pAllocator);
+    }
+
+    this->_m_pInterpreter = new ::tflite::MicroInterpreter(
+        this->_m_pModel, this->GetOpResolver(),
+        this->_m_pAllocator, this->_m_pErrorReporter);
+
+    if (!this->_m_pInterpreter) {
+        printf_err("Failed to allocate interpreter\n");
+        return false;
+    }
+
+    /* Allocate memory from the tensor_arena for the model's tensors. */
+    info("Allocating tensors\n");
+    TfLiteStatus allocate_status = this->_m_pInterpreter->AllocateTensors();
+
+    if (allocate_status != kTfLiteOk) {
+        this->_m_pErrorReporter->Report("[ERROR] AllocateTensors() failed");
+        printf_err("tensor allocation failed!\n");
+        delete this->_m_pInterpreter;
+        this->_m_pInterpreter = nullptr;
+        return false;
+    }
+
+    /* Get information about the memory area to use for the model's input. */
+    this->_m_input.resize(this->GetNumInputs());
+    for (size_t inIndex = 0; inIndex < this->GetNumInputs(); inIndex++) {
+        this->_m_input[inIndex] = this->_m_pInterpreter->input(inIndex);
+    }
+
+    this->_m_output.resize(this->GetNumOutputs());
+    for (size_t outIndex = 0; outIndex < this->GetNumOutputs(); outIndex++) {
+        this->_m_output[outIndex] = this->_m_pInterpreter->output(outIndex);
+    }
+
+    if (this->_m_input.empty() || this->_m_output.empty()) {
+        printf_err("failed to get tensors\n");
+        return false;
+    } else {
+        this->_m_type = this->_m_input[0]->type;  /* Input 0 should be the main input */
+
+        /* Clear the input & output tensors */
+        for (size_t inIndex = 0; inIndex < this->GetNumInputs(); inIndex++) {
+            std::memset(this->_m_input[inIndex]->data.data, 0, this->_m_input[inIndex]->bytes);
+        }
+        for (size_t outIndex = 0; outIndex < this->GetNumOutputs(); outIndex++) {
+            std::memset(this->_m_output[outIndex]->data.data, 0, this->_m_output[outIndex]->bytes);
+        }
+
+        this->LogInterpreterInfo();
+    }
+
+    this->_m_inited = true;
+    return true;
+}
+
+tflite::MicroAllocator* arm::app::Model::GetAllocator()
+{
+    if (this->IsInited()) {
+        return this->_m_pAllocator;
+    }
+    return nullptr;
+}
+
+void arm::app::Model::LogTensorInfo(TfLiteTensor* tensor)
+{
+    if (!tensor) {
+        printf_err("Invalid tensor\n");
+        assert(tensor);
+        return;
+    }
+
+    debug("\ttensor is assigned to 0x%p\n", tensor);
+    info("\ttensor type is %s\n", TfLiteTypeGetName(tensor->type));
+    info("\ttensor occupies %u bytes with dimensions\n",
+         (uint32_t)tensor->bytes);
+    for (int i = 0 ; i < tensor->dims->size; ++i) {
+        info ("\t\t%d: %3d\n", i, tensor->dims->data[i]);
+    }
+
+    TfLiteQuantization quant = tensor->quantization;
+    if (kTfLiteAffineQuantization == quant.type) {
+        auto* quantParams = (TfLiteAffineQuantization*)quant.params;
+        info("Quant dimension: %u\n", quantParams->quantized_dimension);
+        for (int i = 0; i < quantParams->scale->size; ++i) {
+            info("Scale[%d] = %f\n", i, quantParams->scale->data[i]);
+        }
+        for (int i = 0; i < quantParams->zero_point->size; ++i) {
+            info("ZeroPoint[%d] = %d\n", i, quantParams->zero_point->data[i]);
+        }
+    }
+}
+
+void arm::app::Model::LogInterpreterInfo()
+{
+    if (!this->_m_pInterpreter) {
+        printf_err("Invalid interpreter\n");
+        return;
+    }
+
+    info("Model INPUT tensors: \n");
+    for (auto input : this->_m_input) {
+        this->LogTensorInfo(input);
+    }
+
+    info("Model OUTPUT tensors: \n");
+    for (auto output : this->_m_output) {
+        this->LogTensorInfo(output);
+    }
+
+    info("Activation buffer (a.k.a tensor arena) size used: %zu\n",
+        this->_m_pInterpreter->arena_used_bytes());
+
+    const uint32_t nOperators = this->_m_pInterpreter->operators_size();
+    info("Number of operators: %u\n", nOperators);
+
+    /* For each operator, display registration information */
+    for (uint32_t i = 0 ; i < nOperators; ++i) {
+        const tflite::NodeAndRegistration nodeReg =
+            this->_m_pInterpreter->node_and_registration(i);
+        const TfLiteRegistration* reg = nodeReg.registration;
+        std::string opName{""};
+
+        if (reg) {
+            if (tflite::BuiltinOperator_CUSTOM == reg->builtin_code) {
+                opName = std::string(reg->custom_name);
+            } else {
+                opName = std::string(EnumNameBuiltinOperator(
+                            tflite::BuiltinOperator(reg->builtin_code)));
+            }
+        }
+        info("\tOperator %u: %s\n", i, opName.c_str());
+    }
+}
+
+bool arm::app::Model::IsInited() const
+{
+    return this->_m_inited;
+}
+
+bool arm::app::Model::IsDataSigned() const
+{
+    return this->GetType() == kTfLiteInt8;
+}
+
+bool arm::app::Model::RunInference()
+{
+    bool inference_state = false;
+    if (this->_m_pModel && this->_m_pInterpreter) {
+        if (kTfLiteOk != this->_m_pInterpreter->Invoke()) {
+            printf_err("Invoke failed.\n");
+        } else {
+            inference_state = true;
+        }
+    } else {
+        printf_err("Error: No interpreter!\n");
+    }
+    return inference_state;
+}
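+
+/* A minimal usage sketch (illustrative only; UseCaseModel and srcData are
+ * hypothetical names standing in for a concrete use-case subclass and its
+ * input buffer):
+ *
+ *     arm::app::UseCaseModel model;
+ *     if (model.Init()) {
+ *         TfLiteTensor* input = model.GetInputTensor(0);
+ *         std::memcpy(input->data.data, srcData, input->bytes);
+ *         if (model.RunInference()) {
+ *             TfLiteTensor* output = model.GetOutputTensor(0);
+ *             // process output->data here
+ *         }
+ *     }
+ */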
+
+TfLiteTensor* arm::app::Model::GetInputTensor(size_t index) const
+{
+    if (index < this->GetNumInputs()) {
+        return this->_m_input.at(index);
+    }
+    return nullptr;
+}
+
+TfLiteTensor* arm::app::Model::GetOutputTensor(size_t index) const
+{
+    if (index < this->GetNumOutputs()) {
+        return this->_m_output.at(index);
+    }
+    return nullptr;
+}
+
+size_t arm::app::Model::GetNumInputs() const
+{
+    if (this->_m_pModel && this->_m_pInterpreter) {
+        return this->_m_pInterpreter->inputs_size();
+    }
+    return 0;
+}
+
+size_t arm::app::Model::GetNumOutputs() const
+{
+    if (this->_m_pModel && this->_m_pInterpreter) {
+        return this->_m_pInterpreter->outputs_size();
+    }
+    return 0;
+}
+
+
+TfLiteType arm::app::Model::GetType() const
+{
+    return this->_m_type;
+}
+
+TfLiteIntArray* arm::app::Model::GetInputShape(size_t index) const
+{
+    if (index < this->GetNumInputs()) {
+        return this->_m_input.at(index)->dims;
+    }
+    return nullptr;
+}
+
+TfLiteIntArray* arm::app::Model::GetOutputShape(size_t index) const
+{
+    if (index < this->GetNumOutputs()) {
+        return this->_m_output.at(index)->dims;
+    }
+    return nullptr;
+}
+
+bool arm::app::Model::ShowModelInfoHandler()
+{
+    if (!this->IsInited()) {
+        printf_err("Model is not initialised! Terminating processing.\n");
+        return false;
+    }
+
+    PrintTensorFlowVersion();
+    info("Model info:\n");
+    this->LogInterpreterInfo();
+
+#if defined(ARM_NPU)
+    info("Use of Arm uNPU is enabled\n");
+#else   /* ARM_NPU */
+    info("Use of Arm uNPU is disabled\n");
+#endif  /* ARM_NPU */
+
+    return true;
+}
+namespace arm {
+namespace app {
+    static uint8_t  _tensor_arena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+} /* namespace app */
+} /* namespace arm */
+
+size_t arm::app::Model::GetActivationBufferSize()
+{
+    return ACTIVATION_BUF_SZ;
+}
+
+uint8_t *arm::app::Model::GetTensorArena()
+{
+    return _tensor_arena;
+}
\ No newline at end of file
diff --git a/source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc b/source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc
new file mode 100644
index 0000000..ce36a8f
--- /dev/null
+++ b/source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "TensorFlowLiteMicro.hpp"
+
+#include "hal.h"
+
+void PrintTensorFlowVersion()
+{
+    info("uTFL version: %u.%u.%u\n", TF_MAJOR_VERSION, TF_MINOR_VERSION,
+        TF_PATCH_VERSION);
+}
+
+arm::app::QuantParams arm::app::GetTensorQuantParams(TfLiteTensor* tensor)
+{
+    arm::app::QuantParams params;
+    if (kTfLiteAffineQuantization == tensor->quantization.type) {
+        auto* quantParams = (TfLiteAffineQuantization*) (tensor->quantization.params);
+        if (quantParams && 0 == quantParams->quantized_dimension) {
+            if (quantParams->scale->size) {
+                params.scale = quantParams->scale->data[0];
+            }
+            if (quantParams->zero_point->size) {
+                params.offset = quantParams->zero_point->data[0];
+            }
+        } else if (tensor->params.scale != 0.0) {
+            /* Legacy TensorFlow quantisation parameters */
+            params.scale = tensor->params.scale;
+            params.offset = tensor->params.zero_point;
+        }
+    }
+    return params;
+}
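+
+/* Illustrative sketch (not part of this file's API): the returned parameters
+ * can be used to dequantise a raw int8 value read from a tensor, e.g.
+ *
+ *     arm::app::QuantParams q = arm::app::GetTensorQuantParams(outputTensor);
+ *     float real = q.scale * (static_cast<int>(quantisedValue) - q.offset);
+ *
+ * where outputTensor and quantisedValue are hypothetical variables. */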
+
diff --git a/source/application/tensorflow-lite-micro/include/BufAttributes.hpp b/source/application/tensorflow-lite-micro/include/BufAttributes.hpp
new file mode 100644
index 0000000..126172b
--- /dev/null
+++ b/source/application/tensorflow-lite-micro/include/BufAttributes.hpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BUF_ATTRIBUTES_HPP
+#define BUF_ATTRIBUTES_HPP
+
+#ifdef __has_attribute
+#define HAVE_ATTRIBUTE(x) __has_attribute(x)
+#else   /* __has_attribute */
+#define HAVE_ATTRIBUTE(x) 0
+#endif  /* __has_attribute */
+
+#if HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))
+
+/* We want all buffers/sections to be aligned to a 16 byte boundary. */
+#define ALIGNMENT_REQ               aligned(16)
+
+/* Model data section name. */
+#define MODEL_SECTION               section("nn_model")
+
+/* Label section name */
+#define LABEL_SECTION               section("labels")
+
+#ifndef ACTIVATION_BUF_SZ
+    #warning  "ACTIVATION_BUF_SZ needs to be defined. Using default value"
+    #define ACTIVATION_BUF_SZ       0x00200000
+#endif  /* ACTIVATION_BUF_SZ */
+
+#ifndef ACTIVATION_BUF_SRAM_SZ
+    #warning  "ACTIVATION_BUF_SRAM_SZ needs to be defined. Using default value = 0"
+    #define ACTIVATION_BUF_SRAM_SZ  0x00000000
+#endif /* ACTIVATION_BUF_SRAM_SZ */
+
+/**
+ * Activation buffer (a.k.a. tensor arena) section name.
+ * The tensor arena is placed in a different memory region depending on its
+ * size: if it fits in the SRAM, it is placed there and marked with a
+ * dedicated section name. The scatter file places the zero-initialised (ZI)
+ * data in DDR and the uninitialised (NoInit) region in the SRAM.
+ **/
+#define ACTIVATION_BUF_SECTION_SRAM section(".bss.NoInit.activation_buf")
+#define ACTIVATION_BUF_SECTION_DRAM section("activation_buf")
+
+#if     ACTIVATION_BUF_SZ > ACTIVATION_BUF_SRAM_SZ /* Buffer does not fit in SRAM */
+    #define ACTIVATION_BUF_SECTION      ACTIVATION_BUF_SECTION_DRAM
+    #define ACTIVATION_BUF_SECTION_NAME ("DDR")
+#else   /* ACTIVATION_BUF_SZ > ACTIVATION_BUF_SRAM_SZ */
+    #define ACTIVATION_BUF_SECTION      ACTIVATION_BUF_SECTION_SRAM
+    #define ACTIVATION_BUF_SECTION_NAME ("SRAM")
+#endif  /* ACTIVATION_BUF_SZ > ACTIVATION_BUF_SRAM_SZ */
+
+/* IFM section name. */
+#define IFM_BUF_SECTION             section("ifm")
+
+/* Form the attributes, alignment is mandatory. */
+#define MAKE_ATTRIBUTE(x)           __attribute__((ALIGNMENT_REQ, x))
+#define MODEL_TFLITE_ATTRIBUTE      MAKE_ATTRIBUTE(MODEL_SECTION)
+#define ACTIVATION_BUF_ATTRIBUTE    MAKE_ATTRIBUTE(ACTIVATION_BUF_SECTION)
+#define IFM_BUF_ATTRIBUTE           MAKE_ATTRIBUTE(IFM_BUF_SECTION)
+#define LABELS_ATTRIBUTE            MAKE_ATTRIBUTE(LABEL_SECTION)
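+
+/* Usage sketch (illustrative; the array name and initialiser are hypothetical):
+ *
+ *     const uint8_t nn_model[] MODEL_TFLITE_ATTRIBUTE = { ... };
+ *
+ * This places the model data in the "nn_model" section, aligned to 16 bytes. */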
+
+#else /* HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__)) */
+
+#define MODEL_TFLITE_ATTRIBUTE
+#define ACTIVATION_BUF_ATTRIBUTE
+#define IFM_BUF_ATTRIBUTE
+#define LABELS_ATTRIBUTE
+
+#endif /* HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__)) */
+
+#endif /* BUF_ATTRIBUTES_HPP */
\ No newline at end of file
diff --git a/source/application/tensorflow-lite-micro/include/Model.hpp b/source/application/tensorflow-lite-micro/include/Model.hpp
new file mode 100644
index 0000000..70cf9ca
--- /dev/null
+++ b/source/application/tensorflow-lite-micro/include/Model.hpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MODEL_HPP
+#define MODEL_HPP
+
+#include "TensorFlowLiteMicro.hpp"
+#include "BufAttributes.hpp"
+
+#include <cstdint>
+#include <vector>
+
+namespace arm {
+namespace app {
+
+    /**
+     * @brief   NN model class wrapping the underlying TensorFlow-Lite-Micro API.
+     */
+    class Model {
+    public:
+        /** @brief Constructor. */
+        Model();
+
+        /** @brief Destructor. */
+        ~Model();
+
+        /** @brief  Gets the pointer to the model's input tensor at given input index. */
+        TfLiteTensor* GetInputTensor(size_t index) const;
+
+        /** @brief  Gets the pointer to the model's output tensor at given output index. */
+        TfLiteTensor* GetOutputTensor(size_t index) const;
+
+        /** @brief  Gets the model's data type. */
+        TfLiteType GetType() const;
+
+        /** @brief  Gets the pointer to the model's input shape. */
+        TfLiteIntArray* GetInputShape(size_t index) const;
+
+        /** @brief  Gets the pointer to the model's output shape at given output index. */
+        TfLiteIntArray* GetOutputShape(size_t index) const;
+
+        /** @brief  Gets the number of input tensors the model has. */
+        size_t GetNumInputs() const;
+
+        /** @brief  Gets the number of output tensors the model has. */
+        size_t GetNumOutputs() const;
+
+        /** @brief  Logs the tensor information to stdout. */
+        void LogTensorInfo(TfLiteTensor* tensor);
+
+        /** @brief  Logs the interpreter information to stdout. */
+        void LogInterpreterInfo();
+
+        /** @brief      Initialise the model class object.
+         *  @param[in]  allocator   Optional: a pre-initialised micro allocator pointer,
+         *                          if available. If supplied, this allocator will be used
+         *                          to create the interpreter instance.
+         *  @return     true if initialisation succeeds, false otherwise.
+        **/
+        bool Init(tflite::MicroAllocator* allocator = nullptr);
+
+        /**
+         * @brief       Gets the allocator pointer for this instance.
+         * @return      Pointer to a tflite::MicroAllocator object, if
+         *              available; nullptr otherwise.
+         **/
+        tflite::MicroAllocator* GetAllocator();
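+
+        /* Note: a sketch of allocator re-use (model1 and model2 below are
+         * hypothetical use-case instances): a second model can share the
+         * first model's tensor arena allocator via
+         *     model2.Init(model1.GetAllocator());
+         */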
+
+        /** @brief  Checks if this object has been initialised. */
+        bool IsInited() const;
+
+        /** @brief  Checks if the model uses signed data. */
+        bool IsDataSigned() const;
+
+        /** @brief  Runs the inference (invokes the interpreter). */
+        bool RunInference();
+
+        /** @brief   Model information handler common to all models.
+         *  @return  true or false based on execution success.
+         **/
+        bool ShowModelInfoHandler();
+
+        /** @brief   Gets a pointer to the tensor arena. */
+        uint8_t* GetTensorArena();
+
+    protected:
+        /** @brief      Gets the pointer to the NN model data array.
+         *  @return     Pointer of uint8_t type.
+         **/
+        virtual const uint8_t* ModelPointer() = 0;
+
+        /** @brief      Gets the model size.
+         *  @return     size_t, size in bytes.
+         **/
+        virtual size_t ModelSize() = 0;
+
+        /**
+         * @brief       Gets the op resolver for the model instance.
+         * @return      const reference to a tflite::MicroOpResolver object.
+         **/
+        virtual const tflite::MicroOpResolver& GetOpResolver() = 0;
+
+        /**
+         * @brief       Add all the operators required for the given model.
+         *              Implementation of this should come from the use case.
+         * @return      true if ops are successfully added, false otherwise.
+         **/
+        virtual bool EnlistOperations() = 0;
+
+        /** @brief   Gets the total size of tensor arena available for use. */
+        size_t GetActivationBufferSize();
+
+    private:
+        tflite::MicroErrorReporter      _m_uErrorReporter;                     /* Error reporter object. */
+        tflite::ErrorReporter*          _m_pErrorReporter      = nullptr;      /* Pointer to the error reporter. */
+        const tflite::Model*            _m_pModel              = nullptr;      /* Tflite model pointer. */
+        tflite::MicroInterpreter*       _m_pInterpreter        = nullptr;      /* Tflite interpreter. */
+        tflite::MicroAllocator*         _m_pAllocator          = nullptr;      /* Tflite micro allocator. */
+        bool                            _m_inited              = false;        /* Indicates whether this object has been initialised. */
+
+        std::vector<TfLiteTensor*>      _m_input              = {};           /* Model's input tensor pointers. */
+        std::vector<TfLiteTensor*>      _m_output             = {};           /* Model's output tensor pointers. */
+        TfLiteType                      _m_type               = kTfLiteNoType;/* Model's data type. */
+
+    };
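+
+    /* A minimal sketch of a use-case specialisation (illustrative only; the
+     * class, member and symbol names below are hypothetical and not part of
+     * this header):
+     *
+     *     class UseCaseModel : public Model {
+     *     protected:
+     *         const uint8_t* ModelPointer() override { return networkModelData; }
+     *         size_t ModelSize() override { return networkModelSize; }
+     *         const tflite::MicroOpResolver& GetOpResolver() override { return _m_opResolver; }
+     *         bool EnlistOperations() override {
+     *             _m_opResolver.AddConv2D();
+     *             _m_opResolver.AddFullyConnected();
+     *             _m_opResolver.AddSoftmax();
+     *             return true;
+     *         }
+     *     private:
+     *         tflite::MicroMutableOpResolver<3> _m_opResolver;
+     *     };
+     */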
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* MODEL_HPP */
diff --git a/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp b/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp
new file mode 100644
index 0000000..677b4ba
--- /dev/null
+++ b/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef TENSORFLOW_LITE_MICRO_LOCAL_HPP
+#define TENSORFLOW_LITE_MICRO_LOCAL_HPP
+
+/* We include all our TensorFlow Lite Micro headers here */
+
+/**
+ * TensorFlow Lite Micro sources can generate a lot of warnings from the usage
+ * of a single macro (TF_LITE_REMOVE_VIRTUAL_DELETE). Suppress the known ones
+ * here to prevent them from masking warnings that might be generated by our
+ * application sources.
+ */
+#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+    #pragma clang diagnostic push
+    #pragma clang diagnostic ignored "-Wunused-parameter"
+    #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
+    #include "tensorflow/lite/micro/micro_interpreter.h"
+    #include "tensorflow/lite/micro/micro_error_reporter.h"
+    #include "tensorflow/lite/micro/all_ops_resolver.h"
+    #pragma clang diagnostic pop
+#elif defined(__GNUC__)
+    #pragma GCC diagnostic push
+    #pragma GCC diagnostic ignored "-Wunused-parameter"
+    #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
+    #include "tensorflow/lite/micro/micro_interpreter.h"
+    #include "tensorflow/lite/micro/micro_error_reporter.h"
+    #include "tensorflow/lite/micro/all_ops_resolver.h"
+    #pragma GCC diagnostic pop
+#else
+    #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
+    #include "tensorflow/lite/micro/micro_interpreter.h"
+    #include "tensorflow/lite/micro/micro_error_reporter.h"
+    #include "tensorflow/lite/micro/all_ops_resolver.h"
+#endif
+
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/micro/kernels/micro_ops.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+#include "tensorflow/lite/version.h"
+
+#if defined (TESTS)
+    #include "tensorflow/lite/micro/test_helpers.h"
+#endif /* defined (TESTS) */
+
+namespace arm {
+namespace app {
+
+    struct QuantParams {
+        float   scale   = 1.0;
+        int     offset  = 0;
+    };
+
+    QuantParams GetTensorQuantParams(TfLiteTensor* tensor);
+
+} /* namespace app */
+} /* namespace arm */
+
+/**
+ * @brief Prints the TensorFlow Lite Micro version in use to stdout.
+ */
+void PrintTensorFlowVersion();
+
+#endif /* TENSORFLOW_LITE_MICRO_LOCAL_HPP */