MLECO-3183: Refactoring application sources

Platform-agnostic application sources are moved into the application
API module with their own independent CMake projects.

Changes for MLECO-3080 are also included - they create CMake projects
for the individual APIs (again, platform agnostic) that depend on the
common logic. The "joint" KWS_API has been removed; the use case now
relies on the individual KWS and ASR API libraries.
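
As a rough sketch of how a platform build is expected to consume one
of these libraries (the my_app target below is a placeholder; the
img_class_api and common_api targets are the ones defined and used in
this patch):

    # Sketch only: "my_app" stands in for a platform-specific
    # application target defined elsewhere.
    add_subdirectory(source/application/api/use_case/img_class)
    target_link_libraries(my_app PRIVATE img_class_api)
    # common_api and the API's include/ directory propagate
    # transitively, since img_class_api declares them PUBLIC.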

Change-Id: I1f7748dc767abb3904634a04e0991b74ac7b756d
Signed-off-by: Kshitij Sisodia <kshitij.sisodia@arm.com>
diff --git a/source/application/api/use_case/img_class/CMakeLists.txt b/source/application/api/use_case/img_class/CMakeLists.txt
new file mode 100644
index 0000000..f4818d8
--- /dev/null
+++ b/source/application/api/use_case/img_class/CMakeLists.txt
@@ -0,0 +1,39 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2022 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+#########################################################
+#                IMG CLASS API library                  #
+#########################################################
+cmake_minimum_required(VERSION 3.15.6)
+
+set(IMG_CLASS_API_TARGET img_class_api)
+project(${IMG_CLASS_API_TARGET}
+        DESCRIPTION     "Image classification use case API library"
+        LANGUAGES       C CXX)
+
+# Create static library
+add_library(${IMG_CLASS_API_TARGET} STATIC
+    src/ImgClassProcessing.cc
+    src/MobileNetModel.cc)
+
+target_include_directories(${IMG_CLASS_API_TARGET} PUBLIC include)
+
+target_link_libraries(${IMG_CLASS_API_TARGET} PUBLIC common_api)
+
+message(STATUS "*******************************************************")
+message(STATUS "Library                                : " ${IMG_CLASS_API_TARGET})
+message(STATUS "CMAKE_SYSTEM_PROCESSOR                 : " ${CMAKE_SYSTEM_PROCESSOR})
+message(STATUS "*******************************************************")
diff --git a/source/application/api/use_case/img_class/include/ImgClassProcessing.hpp b/source/application/api/use_case/img_class/include/ImgClassProcessing.hpp
new file mode 100644
index 0000000..55b5ce1
--- /dev/null
+++ b/source/application/api/use_case/img_class/include/ImgClassProcessing.hpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef IMG_CLASS_PROCESSING_HPP
+#define IMG_CLASS_PROCESSING_HPP
+
+#include "BaseProcessing.hpp"
+#include "Classifier.hpp"
+
+namespace arm {
+namespace app {
+
+    /**
+     * @brief   Pre-processing class for Image Classification use case.
+     *          Implements methods declared by BasePreProcess and anything else needed
+     *          to populate input tensors ready for inference.
+     */
+    class ImgClassPreProcess : public BasePreProcess {
+
+    public:
+        /**
+         * @brief       Constructor
+         * @param[in]   inputTensor     Pointer to the TFLite Micro input Tensor.
+         * @param[in]   convertToInt8   Whether the image should be converted to the int8 range.
+         **/
+        explicit ImgClassPreProcess(TfLiteTensor* inputTensor, bool convertToInt8);
+
+        /**
+         * @brief       Performs pre-processing of the 'raw' input image data and loads it into
+         *              the TFLite Micro input tensor, ready for inference.
+         * @param[in]   input      Pointer to the data that pre-processing will work on.
+         * @param[in]   inputSize  Size of the input data.
+         * @return      true if successful, false otherwise.
+         **/
+        bool DoPreProcess(const void* input, size_t inputSize) override;
+
+    private:
+        TfLiteTensor* m_inputTensor;
+        bool m_convertToInt8;
+    };
+
+    /**
+     * @brief   Post-processing class for Image Classification use case.
+     *          Implements methods declared by BasePostProcess and anything else needed
+     *          to populate result vector.
+     */
+    class ImgClassPostProcess : public BasePostProcess {
+
+    public:
+        /**
+         * @brief       Constructor
+         * @param[in]   outputTensor  Pointer to the TFLite Micro output Tensor.
+         * @param[in]   classifier    Classifier object used to get top N results from classification.
+         * @param[in]   labels        Vector of string labels to identify each output of the model.
+         * @param[in]   results       Vector of classification results to store decoded outputs.
+         **/
+        ImgClassPostProcess(TfLiteTensor* outputTensor, Classifier& classifier,
+                            const std::vector<std::string>& labels,
+                            std::vector<ClassificationResult>& results);
+
+        /**
+         * @brief       Performs post-processing of the inference result and populates the
+         *              classification result data for later use.
+         * @return      true if successful, false otherwise.
+         **/
+        bool DoPostProcess() override;
+
+    private:
+        TfLiteTensor* m_outputTensor;
+        Classifier& m_imgClassifier;
+        const std::vector<std::string>& m_labels;
+        std::vector<ClassificationResult>& m_results;
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* IMG_CLASS_PROCESSING_HPP */
\ No newline at end of file
diff --git a/source/application/api/use_case/img_class/include/MobileNetModel.hpp b/source/application/api/use_case/img_class/include/MobileNetModel.hpp
new file mode 100644
index 0000000..adaa9c2
--- /dev/null
+++ b/source/application/api/use_case/img_class/include/MobileNetModel.hpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef IMG_CLASS_MOBILENETMODEL_HPP
+#define IMG_CLASS_MOBILENETMODEL_HPP
+
+#include "Model.hpp"
+
+namespace arm {
+namespace app {
+
+    class MobileNetModel : public Model {
+
+    public:
+        /* Indices for the expected model - based on input tensor shape */
+        static constexpr uint32_t ms_inputRowsIdx     = 1;
+        static constexpr uint32_t ms_inputColsIdx     = 2;
+        static constexpr uint32_t ms_inputChannelsIdx = 3;
+
+    protected:
+        /** @brief   Gets the reference to op resolver interface class. */
+        const tflite::MicroOpResolver& GetOpResolver() override;
+
+        /** @brief   Adds operations to the op resolver instance. */
+        bool EnlistOperations() override;
+
+    private:
+        /* Maximum number of individual operations that can be enlisted. */
+        static constexpr int ms_maxOpCnt = 7;
+
+        /* A mutable op resolver instance. */
+        tflite::MicroMutableOpResolver<ms_maxOpCnt> m_opResolver;
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* IMG_CLASS_MOBILENETMODEL_HPP */
diff --git a/source/application/api/use_case/img_class/src/ImgClassProcessing.cc b/source/application/api/use_case/img_class/src/ImgClassProcessing.cc
new file mode 100644
index 0000000..491e751
--- /dev/null
+++ b/source/application/api/use_case/img_class/src/ImgClassProcessing.cc
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "ImgClassProcessing.hpp"
+
+#include "ImageUtils.hpp"
+#include "log_macros.h"
+
+namespace arm {
+namespace app {
+
+    ImgClassPreProcess::ImgClassPreProcess(TfLiteTensor* inputTensor, bool convertToInt8)
+    :m_inputTensor{inputTensor},
+     m_convertToInt8{convertToInt8}
+    {}
+
+    bool ImgClassPreProcess::DoPreProcess(const void* data, size_t inputSize)
+    {
+        if (data == nullptr) {
+            printf_err("Data pointer is null");
+            return false;
+        }
+
+        auto input = static_cast<const uint8_t*>(data);
+
+        std::memcpy(this->m_inputTensor->data.data, input, inputSize);
+        debug("Input tensor populated \n");
+
+        if (this->m_convertToInt8) {
+            image::ConvertImgToInt8(this->m_inputTensor->data.data, this->m_inputTensor->bytes);
+        }
+
+        return true;
+    }
+
+    ImgClassPostProcess::ImgClassPostProcess(TfLiteTensor* outputTensor, Classifier& classifier,
+                                             const std::vector<std::string>& labels,
+                                             std::vector<ClassificationResult>& results)
+            :m_outputTensor{outputTensor},
+             m_imgClassifier{classifier},
+             m_labels{labels},
+             m_results{results}
+    {}
+
+    bool ImgClassPostProcess::DoPostProcess()
+    {
+        return this->m_imgClassifier.GetClassificationResults(
+                this->m_outputTensor, this->m_results,
+                this->m_labels, 5, false);
+    }
+
+} /* namespace app */
+} /* namespace arm */
\ No newline at end of file
diff --git a/source/application/api/use_case/img_class/src/MobileNetModel.cc b/source/application/api/use_case/img_class/src/MobileNetModel.cc
new file mode 100644
index 0000000..b700d70
--- /dev/null
+++ b/source/application/api/use_case/img_class/src/MobileNetModel.cc
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "MobileNetModel.hpp"
+#include "log_macros.h"
+
+const tflite::MicroOpResolver& arm::app::MobileNetModel::GetOpResolver()
+{
+    return this->m_opResolver;
+}
+
+bool arm::app::MobileNetModel::EnlistOperations()
+{
+    this->m_opResolver.AddDepthwiseConv2D();
+    this->m_opResolver.AddConv2D();
+    this->m_opResolver.AddAveragePool2D();
+    this->m_opResolver.AddAdd();
+    this->m_opResolver.AddReshape();
+    this->m_opResolver.AddSoftmax();
+
+    if (kTfLiteOk == this->m_opResolver.AddEthosU()) {
+        info("Added %s support to op resolver\n",
+            tflite::GetString_ETHOSU());
+    } else {
+        printf_err("Failed to add Arm NPU support to op resolver.");
+        return false;
+    }
+    return true;
+}