MLECO-2599: Replace DSCNN with MicroNet for KWS

Added a SoftMax function to MathUtils so MicroNet output can be
converted to probabilities, as the model does not natively include
this layer.
Minor refactoring to accommodate the softmax calculation.
Extensive renaming and updates to documentation and the resource
download script.
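The SoftmaxF32 helper itself is added in PlatformMath and is not part of
this diff. As a rough sketch only (signature assumed from the call site
math::MathUtils::SoftmaxF32(tensorData) below, i.e. it takes the float
vector by reference and normalises it in place), a numerically stable
softmax could look like:

    #include <algorithm>
    #include <cmath>
    #include <vector>

    /* Sketch only: the real helper lives in PlatformMath. Subtracting the
     * maximum element before exp() keeps intermediate values from
     * overflowing for large logits. Assumes a non-empty vector. */
    static void SoftmaxF32(std::vector<float>& vec)
    {
        const float maxVal = *std::max_element(vec.begin(), vec.end());
        float sum = 0.0f;
        for (auto& v : vec) {
            v = std::exp(v - maxVal);
            sum += v;
        }
        for (auto& v : vec) {
            v /= sum;
        }
    }

With this in place, GetClassificationResults de-quantizes the output
tensor into a float vector and optionally normalises it with softmax
before ranking the top N results.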

Change-Id: I7cbbda1024d14b85c9ac1beea7ca8fbffd0b6eb5
Signed-off-by: Liam Barry <liam.barry@arm.com>
diff --git a/source/application/main/Classifier.cc b/source/application/main/Classifier.cc
index c5519fb..a6ff532 100644
--- a/source/application/main/Classifier.cc
+++ b/source/application/main/Classifier.cc
@@ -24,61 +24,40 @@
 #include <set>
 #include <cstdint>
 #include <inttypes.h>
+#include "PlatformMath.hpp"
 
 namespace arm {
 namespace app {
 
-    template<typename T>
-    void SetVectorResults(std::set<std::pair<T, uint32_t>>& topNSet,
+    void Classifier::SetVectorResults(std::set<std::pair<float, uint32_t>>& topNSet,
                           std::vector<ClassificationResult>& vecResults,
-                          TfLiteTensor* tensor,
-                          const std::vector <std::string>& labels) {
-
-        /* For getting the floating point values, we need quantization parameters. */
-        QuantParams quantParams = GetTensorQuantParams(tensor);
+                          const std::vector <std::string>& labels)
+    {
 
         /* Reset the iterator to the largest element - use reverse iterator. */
-        auto topNIter = topNSet.rbegin();
-        for (size_t i = 0; i < vecResults.size() && topNIter != topNSet.rend(); ++i, ++topNIter) {
-            T score = topNIter->first;
-            vecResults[i].m_normalisedVal = quantParams.scale * (score - quantParams.offset);
-            vecResults[i].m_label = labels[topNIter->second];
-            vecResults[i].m_labelIdx = topNIter->second;
-        }
 
-    }
-
-    template<>
-    void SetVectorResults<float>(std::set<std::pair<float, uint32_t>>& topNSet,
-                          std::vector<ClassificationResult>& vecResults,
-                          TfLiteTensor* tensor,
-                          const std::vector <std::string>& labels) {
-        UNUSED(tensor);
-        /* Reset the iterator to the largest element - use reverse iterator. */
         auto topNIter = topNSet.rbegin();
         for (size_t i = 0; i < vecResults.size() && topNIter != topNSet.rend(); ++i, ++topNIter) {
             vecResults[i].m_normalisedVal = topNIter->first;
             vecResults[i].m_label = labels[topNIter->second];
             vecResults[i].m_labelIdx = topNIter->second;
         }
-
     }
 
-    template<typename T>
-    bool Classifier::GetTopNResults(TfLiteTensor* tensor,
+    bool Classifier::GetTopNResults(const std::vector<float>& tensor,
                                     std::vector<ClassificationResult>& vecResults,
                                     uint32_t topNCount,
                                     const std::vector <std::string>& labels)
     {
-        std::set<std::pair<T, uint32_t>> sortedSet;
+
+        std::set<std::pair<float, uint32_t>> sortedSet;
 
         /* NOTE: inputVec's size verification against labels should be
          *       checked by the calling/public function. */
-        T* tensorData = tflite::GetTensorData<T>(tensor);
 
         /* Set initial elements. */
         for (uint32_t i = 0; i < topNCount; ++i) {
-            sortedSet.insert({tensorData[i], i});
+            sortedSet.insert({tensor[i], i});
         }
 
         /* Initialise iterator. */
@@ -86,33 +65,26 @@
 
         /* Scan through the rest of elements with compare operations. */
         for (uint32_t i = topNCount; i < labels.size(); ++i) {
-            if (setFwdIter->first < tensorData[i]) {
+            if (setFwdIter->first < tensor[i]) {
                 sortedSet.erase(*setFwdIter);
-                sortedSet.insert({tensorData[i], i});
+                sortedSet.insert({tensor[i], i});
                 setFwdIter = sortedSet.begin();
             }
         }
 
         /* Final results' container. */
         vecResults = std::vector<ClassificationResult>(topNCount);
-
-        SetVectorResults<T>(sortedSet, vecResults, tensor, labels);
+        SetVectorResults(sortedSet, vecResults, labels);
 
         return true;
     }
 
-    template bool  Classifier::GetTopNResults<uint8_t>(TfLiteTensor* tensor,
-                                                       std::vector<ClassificationResult>& vecResults,
-                                                       uint32_t topNCount, const std::vector <std::string>& labels);
-
-    template bool  Classifier::GetTopNResults<int8_t>(TfLiteTensor* tensor,
-                                                      std::vector<ClassificationResult>& vecResults,
-                                                      uint32_t topNCount, const std::vector <std::string>& labels);
-
     bool  Classifier::GetClassificationResults(
         TfLiteTensor* outputTensor,
         std::vector<ClassificationResult>& vecResults,
-        const std::vector <std::string>& labels, uint32_t topNCount)
+        const std::vector <std::string>& labels,
+        uint32_t topNCount,
+        bool useSoftmax)
     {
         if (outputTensor == nullptr) {
             printf_err("Output vector is null pointer.\n");
@@ -120,7 +92,7 @@
         }
 
         uint32_t totalOutputSize = 1;
-        for (int inputDim = 0; inputDim < outputTensor->dims->size; inputDim++){
+        for (int inputDim = 0; inputDim < outputTensor->dims->size; inputDim++) {
             totalOutputSize *= outputTensor->dims->data[inputDim];
         }
 
@@ -139,22 +111,52 @@
         bool resultState;
         vecResults.clear();
 
-        /* Get the top N results. */
+        /* De-quantize the output tensor. */
+        QuantParams quantParams = GetTensorQuantParams(outputTensor);
+
+        /* Floating point tensor data to be populated
+         * NOTE: The assumption here is that the output tensor size isn't too
+         * big and therefore there's negligible impact on heap usage. */
+        std::vector<float> tensorData(totalOutputSize);
+
+        /* Populate the floating point buffer */
         switch (outputTensor->type) {
-            case kTfLiteUInt8:
-                resultState = GetTopNResults<uint8_t>(outputTensor, vecResults, topNCount, labels);
+            case kTfLiteUInt8: {
+                uint8_t *tensor_buffer = tflite::GetTensorData<uint8_t>(outputTensor);
+                for (size_t i = 0; i < totalOutputSize; ++i) {
+                    tensorData[i] = quantParams.scale *
+                        (static_cast<float>(tensor_buffer[i]) - quantParams.offset);
+                }
                 break;
-            case kTfLiteInt8:
-                resultState = GetTopNResults<int8_t>(outputTensor, vecResults, topNCount, labels);
+            }
+            case kTfLiteInt8: {
+                int8_t *tensor_buffer = tflite::GetTensorData<int8_t>(outputTensor);
+                for (size_t i = 0; i < totalOutputSize; ++i) {
+                    tensorData[i] = quantParams.scale *
+                        (static_cast<float>(tensor_buffer[i]) - quantParams.offset);
+                }
                 break;
-            case kTfLiteFloat32:
-                resultState = GetTopNResults<float>(outputTensor, vecResults, topNCount, labels);
+            }
+            case kTfLiteFloat32: {
+                float *tensor_buffer = tflite::GetTensorData<float>(outputTensor);
+                for (size_t i = 0; i < totalOutputSize; ++i) {
+                    tensorData[i] = tensor_buffer[i];
+                }
                 break;
+            }
             default:
-                printf_err("Tensor type %s not supported by classifier\n", TfLiteTypeGetName(outputTensor->type));
+                printf_err("Tensor type %s not supported by classifier\n",
+                    TfLiteTypeGetName(outputTensor->type));
                 return false;
         }
 
+        if (useSoftmax) {
+            math::MathUtils::SoftmaxF32(tensorData);
+        }
+
+        /* Get the top N results. */
+        resultState = GetTopNResults(tensorData, vecResults, topNCount, labels);
+
         if (!resultState) {
             printf_err("Failed to get top N results set\n");
             return false;
@@ -162,6 +164,5 @@
 
         return true;
     }
-
 } /* namespace app */
 } /* namespace arm */
\ No newline at end of file