blob: 1211c7896b76864a459d8e1bfa2cb512dd85ddc0 [file] [log] [blame]
Lior Dekel489e40b2021-08-02 12:03:55 +03001/*
Kristofer Jonssone0b078b2023-02-13 10:56:17 +01002 * SPDX-FileCopyrightText: Copyright 2019-2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
Lior Dekel489e40b2021-08-02 12:03:55 +03003 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Licensed under the Apache License, Version 2.0 (the License); you may
7 * not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 */
18
19/****************************************************************************
20 * Includes
21 ****************************************************************************/
Kristofer Jonssone0b078b2023-02-13 10:56:17 +010022
23#include <tx_api.h>
24#include <tx_thread.h>
Lior Dekel489e40b2021-08-02 12:03:55 +030025
26#include <inttypes.h>
27#include <stdio.h>
28#include <vector>
29
30#include "inference_process.hpp"
31
// Model data (Defined & changeable by modifying compile definition in CMakeLists.txt)
33#include "input.h"
34#include "model.h"
35#include "output.h"
36
37using namespace std;
38using namespace InferenceProcess;
39
40/****************************************************************************
41 * Defines
42 ****************************************************************************/
43// Nr. of threads to process inferences with. Thread reserves driver & runs inference (Normally 1 per NPU, but not a
44// must)
45#define NUM_INFERENCE_THREADS 1
// Nr. of threads to create jobs and receive responses
47#define NUM_JOB_THREADS 2
48// Nr. of jobs to create per job thread
49#define NUM_JOBS_PER_THREAD 1
50
Davide Grohmann41dc3412022-02-15 17:19:56 +010051#define MAX_THREAD_NAME_SIZE 128
52
Kristofer Jonsson089b31e2022-08-10 16:04:26 +020053#define PROCESS_THREAD_STACK_SIZE (32 * 1024)
Lior Dekel489e40b2021-08-02 12:03:55 +030054#define SENDER_THREAD_STACK_SIZE (2 * 1024)
55#define PROCESS_THREAD_CONTEXT_SIZE (sizeof(TX_THREAD))
56#define SENDER_THREAD_CONTEXT_SIZE (sizeof(TX_THREAD))
57
58// Tensor arena size
59#ifdef TENSOR_ARENA_SIZE // If defined in model.h
60#define TENSOR_ARENA_SIZE_PER_INFERENCE TENSOR_ARENA_SIZE
61#else // If not defined, use maximum available - 2M
62#define TENSOR_ARENA_SIZE 2000000
63#define TENSOR_ARENA_SIZE_PER_INFERENCE (TENSOR_ARENA_SIZE / NUM_INFERENCE_THREADS)
64#endif
65
66#define PROCESS_QUEUE_SIZE (NUM_JOBS_PER_THREAD * NUM_JOB_THREADS * sizeof(xInferenceJob *))
67#define SENDER_QUEUE_SIZE (NUM_JOBS_PER_THREAD * sizeof(xInferenceJob *))
68
69/* BYTE_POOL_SIZE_OVERHEAD is used to increase the memory byte pool size, as the number of
70 allocatable bytes in a memory byte pool is slightly less than what was specified during creation */
71#define BYTE_POOL_SIZE_OVERHEAD (512)
72#define BYTE_POOL_SIZE \
73 (((PROCESS_THREAD_CONTEXT_SIZE + PROCESS_THREAD_STACK_SIZE) * NUM_INFERENCE_THREADS) + \
74 (SENDER_THREAD_CONTEXT_SIZE + SENDER_THREAD_STACK_SIZE + SENDER_QUEUE_SIZE) * NUM_JOB_THREADS + \
75 PROCESS_QUEUE_SIZE + BYTE_POOL_SIZE_OVERHEAD)
76
77/****************************************************************************
78 * Structures
79 ****************************************************************************/
80struct ProcessThreadParams {
81 ProcessThreadParams() : queueHandle(nullptr), tensorArena(nullptr), arenaSize(0) {}
82 ProcessThreadParams(TX_QUEUE *_queue, uint8_t *_tensorArena, size_t _arenaSize) :
83 queueHandle(_queue), tensorArena(_tensorArena), arenaSize(_arenaSize) {}
84
85 TX_QUEUE *queueHandle;
86 uint8_t *tensorArena;
87 size_t arenaSize;
88};
89
90// Wrapper around InferenceProcess::InferenceJob. Adds responseQueue and status for ThreadX multi-threaded purposes.
91struct xInferenceJob : public InferenceJob {
92 TX_QUEUE *responseQueue;
93 bool status;
94
95 xInferenceJob() : InferenceJob(), responseQueue(nullptr), status(false) {}
96 xInferenceJob(const string &_name,
97 const DataPtr &_networkModel,
98 const vector<DataPtr> &_input,
99 const vector<DataPtr> &_output,
100 const vector<DataPtr> &_expectedOutput,
101 const size_t _numBytesToPrint,
Lior Dekel4882dbe2022-02-09 17:18:27 +0200102 void *_userArg,
Lior Dekel489e40b2021-08-02 12:03:55 +0300103 TX_QUEUE *_queue) :
Lior Dekel4882dbe2022-02-09 17:18:27 +0200104 InferenceJob(_name, _networkModel, _input, _output, _expectedOutput, _numBytesToPrint, _userArg),
Lior Dekel489e40b2021-08-02 12:03:55 +0300105 responseQueue(_queue), status(false) {}
106};
107
108/****************************************************************************
109 * Global and static variables
110 ****************************************************************************/
namespace {
// Number of total completed jobs, needed to exit application correctly if NUM_JOB_THREADS > 1.
// NOTE(review): incremented from multiple sender threads without synchronization —
// presumably safe under the chosen equal-priority/no-time-slice setup; verify.
int totalCompletedJobs = 0;

// TensorArena static initialisation
const size_t arenaSize = TENSOR_ARENA_SIZE_PER_INFERENCE;

// Shared queue of jobs waiting for an inference process thread
TX_QUEUE inferenceProcessQueue;
char inferenceProcessQueueName[] = "inferenceProcessQueue";

// One parameter struct per inference process thread
ProcessThreadParams threadParams[NUM_INFERENCE_THREADS];

// Byte pool from which thread contexts, stacks and queue storage are allocated
TX_BYTE_POOL bytePool;
char bytePoolName[] = "byte pool";

// Backing storage for the byte pool
ULONG memoryArea[BYTE_POOL_SIZE / sizeof(ULONG)];
} // namespace
128
// Per-thread tensor arenas, 16-byte aligned and placed in the dedicated
// .bss.tensor_arena linker section.
__attribute__((section(".bss.tensor_arena"), aligned(16)))
uint8_t inferenceProcessTensorArena[NUM_INFERENCE_THREADS][arenaSize];
131
132/****************************************************************************
133 * Mutex & Semaphore
134 * Overrides weak-linked symbols in ethosu_driver.c to implement thread handling
135 ****************************************************************************/
136extern "C" {
137void *ethosu_mutex_create(void) {
138 UINT status;
139 TX_MUTEX *mutex;
140
141 mutex = new TX_MUTEX;
Davide Grohmann41dc3412022-02-15 17:19:56 +0100142 status = tx_mutex_create(mutex, nullptr, TX_NO_INHERIT);
Lior Dekel489e40b2021-08-02 12:03:55 +0300143 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100144 printf("mutex create failed, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300145 }
146 return (void *)mutex;
147}
148
Ledion Daja60c57372022-04-05 15:04:11 +0200149int ethosu_mutex_lock(void *mutex) {
Lior Dekel489e40b2021-08-02 12:03:55 +0300150 UINT status;
Kristofer Jonssone0b078b2023-02-13 10:56:17 +0100151
152 // Skip during initialization phase
153 if (TX_THREAD_GET_SYSTEM_STATE() != 0) {
154 return 0;
155 }
156
Lior Dekel489e40b2021-08-02 12:03:55 +0300157 status = tx_mutex_get(reinterpret_cast<TX_MUTEX *>(mutex), TX_WAIT_FOREVER);
158 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100159 printf("mutex get failed, error - %u\n", status);
Ledion Daja60c57372022-04-05 15:04:11 +0200160 return -1;
Lior Dekel489e40b2021-08-02 12:03:55 +0300161 }
Ledion Daja60c57372022-04-05 15:04:11 +0200162 return 0;
Lior Dekel489e40b2021-08-02 12:03:55 +0300163}
164
Ledion Daja60c57372022-04-05 15:04:11 +0200165int ethosu_mutex_unlock(void *mutex) {
Lior Dekel489e40b2021-08-02 12:03:55 +0300166 UINT status;
Kristofer Jonssone0b078b2023-02-13 10:56:17 +0100167
168 // Skip during initialization phase
169 if (TX_THREAD_GET_SYSTEM_STATE() != 0) {
170 return 0;
171 }
172
Lior Dekel489e40b2021-08-02 12:03:55 +0300173 status = tx_mutex_put(reinterpret_cast<TX_MUTEX *>(mutex));
174 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100175 printf("mutex put failed, error - %u\n", status);
Ledion Daja60c57372022-04-05 15:04:11 +0200176 return -1;
Lior Dekel489e40b2021-08-02 12:03:55 +0300177 }
Ledion Daja60c57372022-04-05 15:04:11 +0200178 return 0;
Lior Dekel489e40b2021-08-02 12:03:55 +0300179}
180
181void *ethosu_semaphore_create(void) {
182 UINT status;
183 TX_SEMAPHORE *semaphore;
184
185 semaphore = new TX_SEMAPHORE;
Davide Grohmann06ebcbc2022-04-04 14:27:49 +0200186 status = tx_semaphore_create(semaphore, nullptr, 0);
Lior Dekel489e40b2021-08-02 12:03:55 +0300187
188 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100189 printf("Semaphore create failed, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300190 }
191
192 return (void *)semaphore;
193}
194
Ledion Daja60c57372022-04-05 15:04:11 +0200195int ethosu_semaphore_take(void *sem) {
Lior Dekel489e40b2021-08-02 12:03:55 +0300196 UINT status;
197
198 status = tx_semaphore_get(reinterpret_cast<TX_SEMAPHORE *>(sem), TX_WAIT_FOREVER);
199
200 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100201 printf("Semaphore get/take, error - %u\n", status);
Ledion Daja60c57372022-04-05 15:04:11 +0200202 return -1;
Lior Dekel489e40b2021-08-02 12:03:55 +0300203 }
204
Ledion Daja60c57372022-04-05 15:04:11 +0200205 return 0;
Lior Dekel489e40b2021-08-02 12:03:55 +0300206}
207
Ledion Daja60c57372022-04-05 15:04:11 +0200208int ethosu_semaphore_give(void *sem) {
Lior Dekel489e40b2021-08-02 12:03:55 +0300209 UINT status;
210
211 status = tx_semaphore_put(reinterpret_cast<TX_SEMAPHORE *>(sem));
212
213 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100214 printf("Semaphore put/give, error - %u\n", status);
Ledion Daja60c57372022-04-05 15:04:11 +0200215 return -1;
Lior Dekel489e40b2021-08-02 12:03:55 +0300216 }
217
Ledion Daja60c57372022-04-05 15:04:11 +0200218 return 0;
Lior Dekel489e40b2021-08-02 12:03:55 +0300219}
220}
221
222/****************************************************************************
223 * Functions
224 ****************************************************************************/
225// inferenceProcessThread - Run jobs from queue with available driver
226void inferenceProcessThread(ULONG pvParameters) {
227 ProcessThreadParams params = *reinterpret_cast<ProcessThreadParams *>(pvParameters);
228 UINT tx_status = TX_QUEUE_ERROR;
229
230 class InferenceProcess inferenceProcess(params.tensorArena, params.arenaSize);
231
232 for (;;) {
233 xInferenceJob *xJob;
234
235 // Get the job details from the process queue
236 tx_status = tx_queue_receive(params.queueHandle, &xJob, TX_WAIT_FOREVER);
237 if (tx_status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100238 printf("process failed to receive from Queue, error - %u\n", tx_status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300239 exit(1);
240 }
241
242 // run the job
243 bool status = inferenceProcess.runJob(*xJob);
244 xJob->status = status;
245
246 // Send response for the job in the response queue
247 tx_status = tx_queue_send(xJob->responseQueue, &xJob, TX_WAIT_FOREVER);
248 if (tx_status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100249 printf("process inferenceProcessThread failed to send to Queue, error - %u\n", tx_status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300250 exit(1);
251 }
252 }
253
254 tx_status = tx_thread_terminate(nullptr);
255 if (tx_status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100256 printf("process inferenceProcessThread failed to terminate thread, error - %u\n", tx_status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300257 exit(1);
258 }
259}
260
261// inferenceSenderThread - Creates NUM_INFERNECE_JOBS jobs, queues them, and then listens for completion status
262void inferenceSenderThread(ULONG pvParameters) {
263 int ret = 0;
264 TX_QUEUE senderQueue;
265 UINT status = TX_QUEUE_ERROR;
266 TX_QUEUE *inferenceProcessQueueLocal = reinterpret_cast<TX_QUEUE *>(pvParameters);
267 xInferenceJob jobs[NUM_JOBS_PER_THREAD];
Davide Grohmann41dc3412022-02-15 17:19:56 +0100268 CHAR *senderQueuePtr = nullptr;
269 char senderQueueName[] = "senderQueue";
Lior Dekel489e40b2021-08-02 12:03:55 +0300270
271 /* Allocate memory for this inference sender thread responses queue */
272 status = tx_byte_allocate(&bytePool, reinterpret_cast<VOID **>(&senderQueuePtr), SENDER_QUEUE_SIZE, TX_NO_WAIT);
273 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100274 printf("Sender thread failed to allocate bytes for Queue, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300275 exit(1);
276 }
277
278 /* Create responses queue for this inference sender thread */
279 status = tx_queue_create(
Davide Grohmann41dc3412022-02-15 17:19:56 +0100280 &senderQueue, senderQueueName, sizeof(xInferenceJob *) / sizeof(uint32_t), senderQueuePtr, SENDER_QUEUE_SIZE);
Lior Dekel489e40b2021-08-02 12:03:55 +0300281
282 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100283 printf("Sender thread failed to create Queue, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300284 exit(1);
285 }
286
287 /* Create the jobs and queue them in the inference process queue */
288 for (int n = 0; n < NUM_JOBS_PER_THREAD; n++) {
289
290 // Create job
291 xInferenceJob *job = &jobs[n];
292 job->name = string(modelName);
293 job->networkModel = DataPtr(networkModelData, sizeof(networkModelData));
294 job->input.push_back(DataPtr(inputData, sizeof(inputData)));
295 job->expectedOutput.push_back(DataPtr(expectedOutputData, sizeof(expectedOutputData)));
296 job->responseQueue = &senderQueue;
297
298 // queue job
299 status = tx_queue_send(inferenceProcessQueueLocal, &job, TX_WAIT_FOREVER);
300 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100301 printf("Sender thread failed to send to Queue, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300302 exit(1);
303 }
304 }
305
306 /* Listen for completion status on the response queue */
307 do {
308 xInferenceJob *pSendJob;
309
310 status = tx_queue_receive(&senderQueue, &pSendJob, TX_WAIT_FOREVER);
311 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100312 printf("Sender thread failed to receive from Queue, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300313 exit(1);
314 }
315
316 totalCompletedJobs++;
317 ret = (pSendJob->status);
318 if (pSendJob->status != 0) {
319 break;
320 }
321 } while (totalCompletedJobs < NUM_JOBS_PER_THREAD * NUM_JOB_THREADS);
322
323 /* delete the response queue */
324 status = tx_queue_delete(&senderQueue);
325 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100326 printf("Sender thread failed to delete Queue, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300327 exit(1);
328 }
329
330 exit(ret);
331}
332
333/****************************************************************************
334 * Application
335 ****************************************************************************/
// Application entry point. Hands control to ThreadX; the kernel invokes
// tx_application_define() during startup to build the threads and queues.
// Per ThreadX convention this call is not expected to return, so the final
// return statement is only reached if the kernel exits.
int main() {
    /* Enter the ThreadX kernel. */
    tx_kernel_enter();
    return 0;
}
341
342void tx_application_define(void *first_unused_memory) {
343 UINT status;
344 CHAR *senderThreadStackPtr[NUM_JOB_THREADS] = {nullptr};
345 CHAR *processThreadStackPtr[NUM_INFERENCE_THREADS] = {nullptr};
346 CHAR *processQueuePtr = nullptr;
347 CHAR *senderThreadPtr[NUM_JOB_THREADS] = {nullptr};
Davide Grohmann41dc3412022-02-15 17:19:56 +0100348 CHAR senderThreadNames[NUM_JOB_THREADS][MAX_THREAD_NAME_SIZE];
349 CHAR *processThreadPtr[NUM_INFERENCE_THREADS] = {nullptr};
350 CHAR processThreadNames[NUM_JOB_THREADS][MAX_THREAD_NAME_SIZE];
Lior Dekel489e40b2021-08-02 12:03:55 +0300351
352 /* Create a byte memory pool from which to allocate the threads stacks and queues. */
Davide Grohmann41dc3412022-02-15 17:19:56 +0100353 status = tx_byte_pool_create(&bytePool, bytePoolName, memoryArea, BYTE_POOL_SIZE);
Lior Dekel489e40b2021-08-02 12:03:55 +0300354 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100355 printf("Main failed to allocate pool of bytes, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300356 exit(1);
357 }
358
359 /* Allocate memory for the inference process queue */
360 status = tx_byte_allocate(&bytePool, reinterpret_cast<VOID **>(&processQueuePtr), PROCESS_QUEUE_SIZE, TX_NO_WAIT);
361 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100362 printf("Main failed to allocate bytes for process queue, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300363 exit(1);
364 }
365
366 status = tx_queue_create(&inferenceProcessQueue,
Davide Grohmann41dc3412022-02-15 17:19:56 +0100367 inferenceProcessQueueName,
Lior Dekel489e40b2021-08-02 12:03:55 +0300368 sizeof(xInferenceJob *) / sizeof(uint32_t),
369 processQueuePtr,
370 PROCESS_QUEUE_SIZE);
371 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100372 printf("Main failed to create Queue, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300373 exit(1);
374 }
375
376 /* inferenceSender threads to create and queue the jobs */
377 for (int n = 0; n < NUM_JOB_THREADS; n++) {
378
379 /* Allocate the thread context for the inference sender thread. */
380 status =
381 tx_byte_allocate(&bytePool, reinterpret_cast<VOID **>(&senderThreadPtr[n]), sizeof(TX_THREAD), TX_NO_WAIT);
382 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100383 printf("Main failed to allocate bytes for sender tread, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300384 exit(1);
385 }
386
387 /* Allocate the stack for the inference sender thread. */
388 status = tx_byte_allocate(
389 &bytePool, reinterpret_cast<VOID **>(&senderThreadStackPtr[n]), SENDER_THREAD_STACK_SIZE, TX_NO_WAIT);
390 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100391 printf("Main failed to allocate bytes for sender tread stack, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300392 exit(1);
393 }
394
Davide Grohmann41dc3412022-02-15 17:19:56 +0100395 snprintf(senderThreadNames[n], MAX_THREAD_NAME_SIZE, "senderThread-%d", n);
396
Lior Dekel489e40b2021-08-02 12:03:55 +0300397 /* Create the inference sender thread. */
398 status = tx_thread_create(reinterpret_cast<TX_THREAD *>(senderThreadPtr[n]),
Davide Grohmann41dc3412022-02-15 17:19:56 +0100399 senderThreadNames[n],
Lior Dekel489e40b2021-08-02 12:03:55 +0300400 inferenceSenderThread,
401 reinterpret_cast<ULONG>(&inferenceProcessQueue),
402 senderThreadStackPtr[n],
403 SENDER_THREAD_STACK_SIZE,
404 1,
405 1,
406 TX_NO_TIME_SLICE,
407 TX_AUTO_START);
408 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100409 printf("Main failed to create Thread, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300410 exit(1);
411 }
412 }
413
414 /* Create inferenceProcess threads to process the queued jobs */
415 for (int n = 0; n < NUM_INFERENCE_THREADS; n++) {
416
417 /* Allocate the thread context for the inference process thread. */
418 status =
419 tx_byte_allocate(&bytePool, reinterpret_cast<VOID **>(&processThreadPtr[n]), sizeof(TX_THREAD), TX_NO_WAIT);
420 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100421 printf("Main failed to allocate bytes for process tread, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300422 exit(1);
423 }
424
425 /* Allocate the stack for the inference process thread. */
426 status = tx_byte_allocate(
427 &bytePool, reinterpret_cast<VOID **>(&processThreadStackPtr[n]), PROCESS_THREAD_STACK_SIZE, TX_NO_WAIT);
428 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100429 printf("Main failed to allocate bytes for process stack, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300430 exit(1);
431 }
432
433 threadParams[n] = ProcessThreadParams(
434 &inferenceProcessQueue, inferenceProcessTensorArena[n], reinterpret_cast<size_t>(arenaSize));
Davide Grohmann41dc3412022-02-15 17:19:56 +0100435 snprintf(processThreadNames[n], MAX_THREAD_NAME_SIZE, "processThread-%d", n);
Lior Dekel489e40b2021-08-02 12:03:55 +0300436
437 /* Create the inference process thread. */
438 status = tx_thread_create(reinterpret_cast<TX_THREAD *>(processThreadPtr[n]),
Davide Grohmann41dc3412022-02-15 17:19:56 +0100439 processThreadNames[n],
Lior Dekel489e40b2021-08-02 12:03:55 +0300440 inferenceProcessThread,
441 reinterpret_cast<ULONG>(&threadParams[n]),
442 processThreadStackPtr[n],
443 PROCESS_THREAD_STACK_SIZE,
444 1,
445 1,
446 TX_NO_TIME_SLICE,
447 TX_AUTO_START);
448 if (status != TX_SUCCESS) {
Ledion Dajab0aacb42023-02-17 09:37:58 +0100449 printf("Main failed to create thread, error - %u\n", status);
Lior Dekel489e40b2021-08-02 12:03:55 +0300450 exit(1);
451 }
452 }
453
454 printf("ThreadX application initialisation - Done \n");
455 return;
456}