core_software - Move TensorArena

Moved TensorArena in inference_process.cpp to application level.

The InferenceProcess class now takes a TensorArena pointer and TensorArenaSize as parameters. These must be set by the application before runJob() is called.

Change-Id: I530b96039868305fa903ae7f93419d9d00f9c16f
diff --git a/applications/inference_process/src/inference_process.cpp b/applications/inference_process/src/inference_process.cpp
index b5ed5c4..cc2b378 100644
--- a/applications/inference_process/src/inference_process.cpp
+++ b/applications/inference_process/src/inference_process.cpp
@@ -30,14 +30,8 @@
 
 #include <inttypes.h>
 
-#ifndef TENSOR_ARENA_SIZE
-#define TENSOR_ARENA_SIZE (1024)
-#endif
-
 using namespace std;
 
-__attribute__((section(".bss.NoInit"), aligned(16))) uint8_t inferenceProcessTensorArena[TENSOR_ARENA_SIZE];
-
 namespace {
 
 void tflu_debug_log(const char *s) {
@@ -151,8 +145,6 @@
     }
 }
 
-InferenceProcess::InferenceProcess() : lock(0) {}
-
 // NOTE: Adding code for get_lock & free_lock with some corrections from
 // http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHEJCHB.html
 // TODO: check correctness?
@@ -216,9 +208,7 @@
                                     ethosu_pmu_event_type(job.pmuEventConfig[2]),
                                     ethosu_pmu_event_type(job.pmuEventConfig[3]));
 #endif
-
-    tflite::MicroInterpreter interpreter(
-        model, resolver, inferenceProcessTensorArena, TENSOR_ARENA_SIZE, reporter, &profiler);
+    tflite::MicroInterpreter interpreter(model, resolver, tensorArena, tensorArenaSize, reporter, &profiler);
 
     // Allocate tensors
     TfLiteStatus allocate_status = interpreter.AllocateTensors();
@@ -236,7 +226,6 @@
             inputTensors.push_back(tensor);
         }
     }
-
     if (job.input.size() != inputTensors.size()) {
         printf("Number of input buffers does not match number of non empty network tensors. input=%zu, network=%zu\n",
                job.input.size(),