blob: c77c758cc11e9adf557440fd6d3ff5073712016e [file] [log] [blame]
Lior Dekel489e40b2021-08-02 12:03:55 +03001/*
Kristofer Jonssone0b078b2023-02-13 10:56:17 +01002 * SPDX-FileCopyrightText: Copyright 2019-2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
Lior Dekel489e40b2021-08-02 12:03:55 +03003 * SPDX-License-Identifier: Apache-2.0
4 *
5 * Licensed under the Apache License, Version 2.0 (the License); you may
6 * not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
13 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18/****************************************************************************
19 * Includes
20 ****************************************************************************/
Kristofer Jonssone0b078b2023-02-13 10:56:17 +010021
22#include <tx_api.h>
23#include <tx_thread.h>
Lior Dekel489e40b2021-08-02 12:03:55 +030024
25#include <inttypes.h>
26#include <stdio.h>
27#include <vector>
28
29#include "inference_process.hpp"
30
31// Model data (Defined & changeable by modifying compile definition in CMakeLists.txt)
32#include "input.h"
33#include "model.h"
34#include "output.h"
35
36using namespace std;
37using namespace InferenceProcess;
38
39/****************************************************************************
40 * Defines
41 ****************************************************************************/
42// Nr. of threads to process inferences with. Thread reserves driver & runs inference (Normally 1 per NPU, but not a
43// must)
44#define NUM_INFERENCE_THREADS 1
 45// Nr. of threads to create jobs and receive responses
46#define NUM_JOB_THREADS 2
47// Nr. of jobs to create per job thread
48#define NUM_JOBS_PER_THREAD 1
49
Davide Grohmann41dc3412022-02-15 17:19:56 +010050#define MAX_THREAD_NAME_SIZE 128
51
Kristofer Jonsson089b31e2022-08-10 16:04:26 +020052#define PROCESS_THREAD_STACK_SIZE (32 * 1024)
Lior Dekel489e40b2021-08-02 12:03:55 +030053#define SENDER_THREAD_STACK_SIZE (2 * 1024)
54#define PROCESS_THREAD_CONTEXT_SIZE (sizeof(TX_THREAD))
55#define SENDER_THREAD_CONTEXT_SIZE (sizeof(TX_THREAD))
56
57// Tensor arena size
58#ifdef TENSOR_ARENA_SIZE // If defined in model.h
59#define TENSOR_ARENA_SIZE_PER_INFERENCE TENSOR_ARENA_SIZE
60#else // If not defined, use maximum available - 2M
61#define TENSOR_ARENA_SIZE 2000000
62#define TENSOR_ARENA_SIZE_PER_INFERENCE (TENSOR_ARENA_SIZE / NUM_INFERENCE_THREADS)
63#endif
64
65#define PROCESS_QUEUE_SIZE (NUM_JOBS_PER_THREAD * NUM_JOB_THREADS * sizeof(xInferenceJob *))
66#define SENDER_QUEUE_SIZE (NUM_JOBS_PER_THREAD * sizeof(xInferenceJob *))
67
68/* BYTE_POOL_SIZE_OVERHEAD is used to increase the memory byte pool size, as the number of
69 allocatable bytes in a memory byte pool is slightly less than what was specified during creation */
70#define BYTE_POOL_SIZE_OVERHEAD (512)
71#define BYTE_POOL_SIZE \
72 (((PROCESS_THREAD_CONTEXT_SIZE + PROCESS_THREAD_STACK_SIZE) * NUM_INFERENCE_THREADS) + \
73 (SENDER_THREAD_CONTEXT_SIZE + SENDER_THREAD_STACK_SIZE + SENDER_QUEUE_SIZE) * NUM_JOB_THREADS + \
74 PROCESS_QUEUE_SIZE + BYTE_POOL_SIZE_OVERHEAD)
75
76/****************************************************************************
77 * Structures
78 ****************************************************************************/
79struct ProcessThreadParams {
80 ProcessThreadParams() : queueHandle(nullptr), tensorArena(nullptr), arenaSize(0) {}
81 ProcessThreadParams(TX_QUEUE *_queue, uint8_t *_tensorArena, size_t _arenaSize) :
82 queueHandle(_queue), tensorArena(_tensorArena), arenaSize(_arenaSize) {}
83
84 TX_QUEUE *queueHandle;
85 uint8_t *tensorArena;
86 size_t arenaSize;
87};
88
89// Wrapper around InferenceProcess::InferenceJob. Adds responseQueue and status for ThreadX multi-threaded purposes.
90struct xInferenceJob : public InferenceJob {
91 TX_QUEUE *responseQueue;
92 bool status;
93
94 xInferenceJob() : InferenceJob(), responseQueue(nullptr), status(false) {}
95 xInferenceJob(const string &_name,
96 const DataPtr &_networkModel,
97 const vector<DataPtr> &_input,
98 const vector<DataPtr> &_output,
99 const vector<DataPtr> &_expectedOutput,
100 const size_t _numBytesToPrint,
Lior Dekel4882dbe2022-02-09 17:18:27 +0200101 void *_userArg,
Lior Dekel489e40b2021-08-02 12:03:55 +0300102 TX_QUEUE *_queue) :
Lior Dekel4882dbe2022-02-09 17:18:27 +0200103 InferenceJob(_name, _networkModel, _input, _output, _expectedOutput, _numBytesToPrint, _userArg),
Lior Dekel489e40b2021-08-02 12:03:55 +0300104 responseQueue(_queue), status(false) {}
105};
106
107/****************************************************************************
108 * Global and static variables
109 ****************************************************************************/
110namespace {
111// Number of total completed jobs, needed to exit application correctly if NUM_JOB_THREADS > 1
112int totalCompletedJobs = 0;
113
114// TensorArena static initialisation
115const size_t arenaSize = TENSOR_ARENA_SIZE_PER_INFERENCE;
116
117TX_QUEUE inferenceProcessQueue;
Davide Grohmann41dc3412022-02-15 17:19:56 +0100118char inferenceProcessQueueName[] = "inferenceProcessQueue";
Lior Dekel489e40b2021-08-02 12:03:55 +0300119
120ProcessThreadParams threadParams[NUM_INFERENCE_THREADS];
121
122TX_BYTE_POOL bytePool;
Davide Grohmann41dc3412022-02-15 17:19:56 +0100123char bytePoolName[] = "byte pool";
124
Lior Dekel489e40b2021-08-02 12:03:55 +0300125ULONG memoryArea[BYTE_POOL_SIZE / sizeof(ULONG)];
126} // namespace
127
128__attribute__((section(".bss.tensor_arena"), aligned(16)))
129uint8_t inferenceProcessTensorArena[NUM_INFERENCE_THREADS][arenaSize];
130
131/****************************************************************************
132 * Mutex & Semaphore
133 * Overrides weak-linked symbols in ethosu_driver.c to implement thread handling
134 ****************************************************************************/
135extern "C" {
136void *ethosu_mutex_create(void) {
137 UINT status;
138 TX_MUTEX *mutex;
139
140 mutex = new TX_MUTEX;
Davide Grohmann41dc3412022-02-15 17:19:56 +0100141 status = tx_mutex_create(mutex, nullptr, TX_NO_INHERIT);
Lior Dekel489e40b2021-08-02 12:03:55 +0300142 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100143 printf("mutex create failed, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300144 }
145 return (void *)mutex;
146}
147
Ledion Daja60c57372022-04-05 15:04:11 +0200148int ethosu_mutex_lock(void *mutex) {
Lior Dekel489e40b2021-08-02 12:03:55 +0300149 UINT status;
Kristofer Jonssone0b078b2023-02-13 10:56:17 +0100150
151 // Skip during initialization phase
152 if (TX_THREAD_GET_SYSTEM_STATE() != 0) {
153 return 0;
154 }
155
Lior Dekel489e40b2021-08-02 12:03:55 +0300156 status = tx_mutex_get(reinterpret_cast<TX_MUTEX *>(mutex), TX_WAIT_FOREVER);
157 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100158 printf("mutex get failed, error - %u\n", status);
Ledion Daja60c57372022-04-05 15:04:11 +0200159 return -1;
Lior Dekel489e40b2021-08-02 12:03:55 +0300160 }
Ledion Daja60c57372022-04-05 15:04:11 +0200161 return 0;
Lior Dekel489e40b2021-08-02 12:03:55 +0300162}
163
Ledion Daja60c57372022-04-05 15:04:11 +0200164int ethosu_mutex_unlock(void *mutex) {
Lior Dekel489e40b2021-08-02 12:03:55 +0300165 UINT status;
Kristofer Jonssone0b078b2023-02-13 10:56:17 +0100166
167 // Skip during initialization phase
168 if (TX_THREAD_GET_SYSTEM_STATE() != 0) {
169 return 0;
170 }
171
Lior Dekel489e40b2021-08-02 12:03:55 +0300172 status = tx_mutex_put(reinterpret_cast<TX_MUTEX *>(mutex));
173 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100174 printf("mutex put failed, error - %u\n", status);
Ledion Daja60c57372022-04-05 15:04:11 +0200175 return -1;
Lior Dekel489e40b2021-08-02 12:03:55 +0300176 }
Ledion Daja60c57372022-04-05 15:04:11 +0200177 return 0;
Lior Dekel489e40b2021-08-02 12:03:55 +0300178}
179
180void *ethosu_semaphore_create(void) {
181 UINT status;
182 TX_SEMAPHORE *semaphore;
183
184 semaphore = new TX_SEMAPHORE;
Davide Grohmann06ebcbc2022-04-04 14:27:49 +0200185 status = tx_semaphore_create(semaphore, nullptr, 0);
Lior Dekel489e40b2021-08-02 12:03:55 +0300186
187 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100188 printf("Semaphore create failed, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300189 }
190
191 return (void *)semaphore;
192}
193
Jonny Svärd244a3102023-12-18 18:26:44 +0100194int ethosu_semaphore_take(void *sem, uint64_t timeout) {
Lior Dekel489e40b2021-08-02 12:03:55 +0300195 UINT status;
196
Jonny Svärd244a3102023-12-18 18:26:44 +0100197 status = tx_semaphore_get(reinterpret_cast<TX_SEMAPHORE *>(sem), (ULONG)timeout);
Lior Dekel489e40b2021-08-02 12:03:55 +0300198
199 if (status != TX_SUCCESS) {
Jonny Svärd244a3102023-12-18 18:26:44 +0100200 if (status != TX_NO_INSTANCE) {
201 printf("Semaphore get/take, error - %u\n", status);
202 }
Ledion Daja60c57372022-04-05 15:04:11 +0200203 return -1;
Lior Dekel489e40b2021-08-02 12:03:55 +0300204 }
205
Ledion Daja60c57372022-04-05 15:04:11 +0200206 return 0;
Lior Dekel489e40b2021-08-02 12:03:55 +0300207}
208
Ledion Daja60c57372022-04-05 15:04:11 +0200209int ethosu_semaphore_give(void *sem) {
Lior Dekel489e40b2021-08-02 12:03:55 +0300210 UINT status;
211
212 status = tx_semaphore_put(reinterpret_cast<TX_SEMAPHORE *>(sem));
213
214 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100215 printf("Semaphore put/give, error - %u\n", status);
Ledion Daja60c57372022-04-05 15:04:11 +0200216 return -1;
Lior Dekel489e40b2021-08-02 12:03:55 +0300217 }
218
Ledion Daja60c57372022-04-05 15:04:11 +0200219 return 0;
Lior Dekel489e40b2021-08-02 12:03:55 +0300220}
221}
222
223/****************************************************************************
224 * Functions
225 ****************************************************************************/
226// inferenceProcessThread - Run jobs from queue with available driver
227void inferenceProcessThread(ULONG pvParameters) {
228 ProcessThreadParams params = *reinterpret_cast<ProcessThreadParams *>(pvParameters);
229 UINT tx_status = TX_QUEUE_ERROR;
230
231 class InferenceProcess inferenceProcess(params.tensorArena, params.arenaSize);
232
233 for (;;) {
234 xInferenceJob *xJob;
235
236 // Get the job details from the process queue
237 tx_status = tx_queue_receive(params.queueHandle, &xJob, TX_WAIT_FOREVER);
238 if (tx_status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100239 printf("process failed to receive from Queue, error - %u\n", tx_status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300240 exit(1);
241 }
242
243 // run the job
244 bool status = inferenceProcess.runJob(*xJob);
245 xJob->status = status;
246
247 // Send response for the job in the response queue
248 tx_status = tx_queue_send(xJob->responseQueue, &xJob, TX_WAIT_FOREVER);
249 if (tx_status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100250 printf("process inferenceProcessThread failed to send to Queue, error - %u\n", tx_status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300251 exit(1);
252 }
253 }
254
255 tx_status = tx_thread_terminate(nullptr);
256 if (tx_status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100257 printf("process inferenceProcessThread failed to terminate thread, error - %u\n", tx_status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300258 exit(1);
259 }
260}
261
262// inferenceSenderThread - Creates NUM_INFERNECE_JOBS jobs, queues them, and then listens for completion status
263void inferenceSenderThread(ULONG pvParameters) {
264 int ret = 0;
265 TX_QUEUE senderQueue;
266 UINT status = TX_QUEUE_ERROR;
267 TX_QUEUE *inferenceProcessQueueLocal = reinterpret_cast<TX_QUEUE *>(pvParameters);
268 xInferenceJob jobs[NUM_JOBS_PER_THREAD];
Davide Grohmann41dc3412022-02-15 17:19:56 +0100269 CHAR *senderQueuePtr = nullptr;
270 char senderQueueName[] = "senderQueue";
Lior Dekel489e40b2021-08-02 12:03:55 +0300271
272 /* Allocate memory for this inference sender thread responses queue */
273 status = tx_byte_allocate(&bytePool, reinterpret_cast<VOID **>(&senderQueuePtr), SENDER_QUEUE_SIZE, TX_NO_WAIT);
274 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100275 printf("Sender thread failed to allocate bytes for Queue, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300276 exit(1);
277 }
278
279 /* Create responses queue for this inference sender thread */
280 status = tx_queue_create(
Davide Grohmann41dc3412022-02-15 17:19:56 +0100281 &senderQueue, senderQueueName, sizeof(xInferenceJob *) / sizeof(uint32_t), senderQueuePtr, SENDER_QUEUE_SIZE);
Lior Dekel489e40b2021-08-02 12:03:55 +0300282
283 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100284 printf("Sender thread failed to create Queue, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300285 exit(1);
286 }
287
288 /* Create the jobs and queue them in the inference process queue */
289 for (int n = 0; n < NUM_JOBS_PER_THREAD; n++) {
290
291 // Create job
292 xInferenceJob *job = &jobs[n];
293 job->name = string(modelName);
294 job->networkModel = DataPtr(networkModelData, sizeof(networkModelData));
295 job->input.push_back(DataPtr(inputData, sizeof(inputData)));
296 job->expectedOutput.push_back(DataPtr(expectedOutputData, sizeof(expectedOutputData)));
297 job->responseQueue = &senderQueue;
298
299 // queue job
300 status = tx_queue_send(inferenceProcessQueueLocal, &job, TX_WAIT_FOREVER);
301 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100302 printf("Sender thread failed to send to Queue, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300303 exit(1);
304 }
305 }
306
307 /* Listen for completion status on the response queue */
308 do {
309 xInferenceJob *pSendJob;
310
311 status = tx_queue_receive(&senderQueue, &pSendJob, TX_WAIT_FOREVER);
312 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100313 printf("Sender thread failed to receive from Queue, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300314 exit(1);
315 }
316
317 totalCompletedJobs++;
318 ret = (pSendJob->status);
319 if (pSendJob->status != 0) {
320 break;
321 }
322 } while (totalCompletedJobs < NUM_JOBS_PER_THREAD * NUM_JOB_THREADS);
323
324 /* delete the response queue */
325 status = tx_queue_delete(&senderQueue);
326 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100327 printf("Sender thread failed to delete Queue, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300328 exit(1);
329 }
330
331 exit(ret);
332}
333
334/****************************************************************************
335 * Application
336 ****************************************************************************/
337int main() {
338 /* Enter the ThreadX kernel. */
339 tx_kernel_enter();
340 return 0;
341}
342
343void tx_application_define(void *first_unused_memory) {
344 UINT status;
345 CHAR *senderThreadStackPtr[NUM_JOB_THREADS] = {nullptr};
346 CHAR *processThreadStackPtr[NUM_INFERENCE_THREADS] = {nullptr};
347 CHAR *processQueuePtr = nullptr;
348 CHAR *senderThreadPtr[NUM_JOB_THREADS] = {nullptr};
Davide Grohmann41dc3412022-02-15 17:19:56 +0100349 CHAR senderThreadNames[NUM_JOB_THREADS][MAX_THREAD_NAME_SIZE];
350 CHAR *processThreadPtr[NUM_INFERENCE_THREADS] = {nullptr};
351 CHAR processThreadNames[NUM_JOB_THREADS][MAX_THREAD_NAME_SIZE];
Lior Dekel489e40b2021-08-02 12:03:55 +0300352
353 /* Create a byte memory pool from which to allocate the threads stacks and queues. */
Davide Grohmann41dc3412022-02-15 17:19:56 +0100354 status = tx_byte_pool_create(&bytePool, bytePoolName, memoryArea, BYTE_POOL_SIZE);
Lior Dekel489e40b2021-08-02 12:03:55 +0300355 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100356 printf("Main failed to allocate pool of bytes, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300357 exit(1);
358 }
359
360 /* Allocate memory for the inference process queue */
361 status = tx_byte_allocate(&bytePool, reinterpret_cast<VOID **>(&processQueuePtr), PROCESS_QUEUE_SIZE, TX_NO_WAIT);
362 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100363 printf("Main failed to allocate bytes for process queue, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300364 exit(1);
365 }
366
367 status = tx_queue_create(&inferenceProcessQueue,
Davide Grohmann41dc3412022-02-15 17:19:56 +0100368 inferenceProcessQueueName,
Lior Dekel489e40b2021-08-02 12:03:55 +0300369 sizeof(xInferenceJob *) / sizeof(uint32_t),
370 processQueuePtr,
371 PROCESS_QUEUE_SIZE);
372 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100373 printf("Main failed to create Queue, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300374 exit(1);
375 }
376
377 /* inferenceSender threads to create and queue the jobs */
378 for (int n = 0; n < NUM_JOB_THREADS; n++) {
379
380 /* Allocate the thread context for the inference sender thread. */
381 status =
382 tx_byte_allocate(&bytePool, reinterpret_cast<VOID **>(&senderThreadPtr[n]), sizeof(TX_THREAD), TX_NO_WAIT);
383 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100384 printf("Main failed to allocate bytes for sender tread, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300385 exit(1);
386 }
387
388 /* Allocate the stack for the inference sender thread. */
389 status = tx_byte_allocate(
390 &bytePool, reinterpret_cast<VOID **>(&senderThreadStackPtr[n]), SENDER_THREAD_STACK_SIZE, TX_NO_WAIT);
391 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100392 printf("Main failed to allocate bytes for sender tread stack, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300393 exit(1);
394 }
395
Davide Grohmann41dc3412022-02-15 17:19:56 +0100396 snprintf(senderThreadNames[n], MAX_THREAD_NAME_SIZE, "senderThread-%d", n);
397
Lior Dekel489e40b2021-08-02 12:03:55 +0300398 /* Create the inference sender thread. */
399 status = tx_thread_create(reinterpret_cast<TX_THREAD *>(senderThreadPtr[n]),
Davide Grohmann41dc3412022-02-15 17:19:56 +0100400 senderThreadNames[n],
Lior Dekel489e40b2021-08-02 12:03:55 +0300401 inferenceSenderThread,
402 reinterpret_cast<ULONG>(&inferenceProcessQueue),
403 senderThreadStackPtr[n],
404 SENDER_THREAD_STACK_SIZE,
405 1,
406 1,
407 TX_NO_TIME_SLICE,
408 TX_AUTO_START);
409 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100410 printf("Main failed to create Thread, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300411 exit(1);
412 }
413 }
414
415 /* Create inferenceProcess threads to process the queued jobs */
416 for (int n = 0; n < NUM_INFERENCE_THREADS; n++) {
417
418 /* Allocate the thread context for the inference process thread. */
419 status =
420 tx_byte_allocate(&bytePool, reinterpret_cast<VOID **>(&processThreadPtr[n]), sizeof(TX_THREAD), TX_NO_WAIT);
421 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100422 printf("Main failed to allocate bytes for process tread, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300423 exit(1);
424 }
425
426 /* Allocate the stack for the inference process thread. */
427 status = tx_byte_allocate(
428 &bytePool, reinterpret_cast<VOID **>(&processThreadStackPtr[n]), PROCESS_THREAD_STACK_SIZE, TX_NO_WAIT);
429 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100430 printf("Main failed to allocate bytes for process stack, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300431 exit(1);
432 }
433
434 threadParams[n] = ProcessThreadParams(
435 &inferenceProcessQueue, inferenceProcessTensorArena[n], reinterpret_cast<size_t>(arenaSize));
Davide Grohmann41dc3412022-02-15 17:19:56 +0100436 snprintf(processThreadNames[n], MAX_THREAD_NAME_SIZE, "processThread-%d", n);
Lior Dekel489e40b2021-08-02 12:03:55 +0300437
438 /* Create the inference process thread. */
439 status = tx_thread_create(reinterpret_cast<TX_THREAD *>(processThreadPtr[n]),
Davide Grohmann41dc3412022-02-15 17:19:56 +0100440 processThreadNames[n],
Lior Dekel489e40b2021-08-02 12:03:55 +0300441 inferenceProcessThread,
442 reinterpret_cast<ULONG>(&threadParams[n]),
443 processThreadStackPtr[n],
444 PROCESS_THREAD_STACK_SIZE,
445 1,
446 1,
447 TX_NO_TIME_SLICE,
448 TX_AUTO_START);
449 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100450 printf("Main failed to create thread, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300451 exit(1);
452 }
453 }
454
455 printf("ThreadX application initialisation - Done \n");
456 return;
457}