/*
 * Copyright (c) 2020 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <message_process.hpp>

#include "cmsis_compiler.h"

#include <cstddef>
#include <cstdio>
#include <cstring>
#include <inttypes.h>

using namespace std;
using namespace InferenceProcess;

namespace MessageProcess {

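// QueueImpl wraps an ethosu_core_queue, a single-producer/single-consumer ring
// buffer shared with the host-side driver. The queue header (read/write
// positions) and the data area are explicitly cleaned and invalidated in the
// data cache around each access so that both sides observe a consistent view
// of the shared memory.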
QueueImpl::QueueImpl(ethosu_core_queue &_queue) : queue(_queue) {
    cleanHeaderData();
}

bool QueueImpl::empty() const {
    invalidateHeaderData();

    return queue.header.read == queue.header.write;
}

size_t QueueImpl::available() const {
    invalidateHeaderData();

    size_t avail = queue.header.write - queue.header.read;

    if (queue.header.read > queue.header.write) {
        avail += queue.header.size;
    }

    return avail;
}

size_t QueueImpl::capacity() const {
    return queue.header.size - available();
}

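// Copy 'length' bytes out of the ring buffer into 'dst', wrapping around the
// end of the data area. Fails without consuming anything if fewer than
// 'length' bytes are available.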
bool QueueImpl::read(uint8_t *dst, uint32_t length) {
    const uint8_t *end = dst + length;

    // Available will invalidate the cache
    if (length > available()) {
        return false;
    }

    uint32_t rpos = queue.header.read;

    while (dst < end) {
        *dst++ = queue.data[rpos];
        rpos   = (rpos + 1) % queue.header.size;
    }

    queue.header.read = rpos;

    cleanHeader();

    return true;
}

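// Gather write: copy the scatter list described by 'vec' into the ring buffer
// as one contiguous byte stream. Fails without writing anything if the total
// length does not fit in the remaining capacity.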
bool QueueImpl::write(const Vec *vec, size_t length) {
    size_t total = 0;

    for (size_t i = 0; i < length; i++) {
        total += vec[i].length;
    }

    invalidateHeader();

    if (total > capacity()) {
        return false;
    }

    uint32_t wpos = queue.header.write;

    for (size_t i = 0; i < length; i++) {
        const uint8_t *src = reinterpret_cast<const uint8_t *>(vec[i].base);
        const uint8_t *end = src + vec[i].length;

        while (src < end) {
            queue.data[wpos] = *src++;
            wpos             = (wpos + 1) % queue.header.size;
        }
    }

    // Update the write position last
    queue.header.write = wpos;

    cleanHeaderData();

    return true;
}

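// Frame and write a single message: an ethosu_core_msg header carrying the
// magic word, message type and payload length, immediately followed by the
// payload itself.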
bool QueueImpl::write(const uint32_t type, const void *src, uint32_t length) {
    ethosu_core_msg msg = {ETHOSU_CORE_MSG_MAGIC, type, length};
    Vec vec[2]          = {{&msg, sizeof(msg)}, {src, length}};

    return write(vec, 2);
}

// Drop all pending data by moving the read position up to the write position
void QueueImpl::reset() {
    invalidateHeader();
    queue.header.read = queue.header.write;
    cleanHeader();
}

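// Cache maintenance helpers. The header and data are cleaned (written back)
// before being handed over to the host and invalidated before reading memory
// the host may have updated. The data address is rounded down to a 4-byte
// boundary to match the uint32_t pointer taken by the CMSIS SCB_*_by_Addr()
// functions, and the size is padded with the remainder.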
void QueueImpl::cleanHeader() const {
#if defined(__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
    SCB_CleanDCache_by_Addr(reinterpret_cast<uint32_t *>(&queue.header), sizeof(queue.header));
#endif
}

void QueueImpl::cleanHeaderData() const {
#if defined(__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
    SCB_CleanDCache_by_Addr(reinterpret_cast<uint32_t *>(&queue.header), sizeof(queue.header));
    uintptr_t queueDataPtr = reinterpret_cast<uintptr_t>(&queue.data[0]);
    SCB_CleanDCache_by_Addr(reinterpret_cast<uint32_t *>(queueDataPtr & ~3), queue.header.size + (queueDataPtr & 3));
#endif
}

void QueueImpl::invalidateHeader() const {
#if defined(__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
    SCB_InvalidateDCache_by_Addr(reinterpret_cast<uint32_t *>(&queue.header), sizeof(queue.header));
#endif
}

void QueueImpl::invalidateHeaderData() const {
#if defined(__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
    SCB_InvalidateDCache_by_Addr(reinterpret_cast<uint32_t *>(&queue.header), sizeof(queue.header));
    uintptr_t queueDataPtr = reinterpret_cast<uintptr_t>(&queue.data[0]);
    SCB_InvalidateDCache_by_Addr(reinterpret_cast<uint32_t *>(queueDataPtr & ~3),
                                 queue.header.size + (queueDataPtr & 3));
#endif
}

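// MessageProcess ties together the incoming and outgoing queues, the mailbox
// used to signal the host, and the InferenceProcess that executes the
// inference requests.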
MessageProcess::MessageProcess(ethosu_core_queue &in,
                               ethosu_core_queue &out,
                               Mailbox::Mailbox &mbox,
                               ::InferenceProcess::InferenceProcess &_inferenceProcess) :
    queueIn(in),
    queueOut(out), mailbox(mbox), inferenceProcess(_inferenceProcess) {
    mailbox.registerCallback(mailboxCallback, reinterpret_cast<void *>(this));
}

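// Main loop: drain the input queue, then sleep with WFE until the mailbox
// callback signals new work with SEV (see handleIrq below).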
void MessageProcess::run() {
    while (true) {
        // Handle all messages in queue
        while (handleMessage())
            ;

        // Wait for event
        __WFE();
    }
}

void MessageProcess::handleIrq() {
    __SEV();
}

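// Read and dispatch one message from the input queue. Every message starts
// with an ethosu_core_msg header (magic, type, length), optionally followed by
// a payload. Returns true if a message was handled and more may follow, false
// if the queue is empty or an error caused the queue to be reset.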
bool MessageProcess::handleMessage() {
    ethosu_core_msg msg;

    if (queueIn.available() == 0) {
        return false;
    }

    // Read msg header
    // Only process a complete message header, else send error message
    // and reset queue
    if (!queueIn.read(msg)) {
        sndErrorRspAndResetQueue(ETHOSU_CORE_MSG_ERR_INVALID_SIZE, "Failed to read a complete header");
        return false;
    }

    printf("Msg: header magic=%" PRIX32 ", type=%" PRIu32 ", length=%" PRIu32 "\n", msg.magic, msg.type, msg.length);

    if (msg.magic != ETHOSU_CORE_MSG_MAGIC) {
        sndErrorRspAndResetQueue(ETHOSU_CORE_MSG_ERR_INVALID_MAGIC, "Invalid magic");
        return false;
    }

    switch (msg.type) {
    case ETHOSU_CORE_MSG_PING:
        printf("Msg: Ping\n");
        sendPong();
        break;
    case ETHOSU_CORE_MSG_ERR: {
        struct ethosu_core_msg_err error = {0};
        if (!queueIn.read(error)) {
            printf("ERROR: Msg: Failed to receive error message\n");
        } else {
            printf("Msg: Received an error response, type=%" PRIu32 ", msg=\"%s\"\n", error.type, error.msg);
        }
        queueIn.reset();
        return false;
    }
    case ETHOSU_CORE_MSG_VERSION_REQ:
        printf("Msg: Version request\n");
        sendVersionRsp();
        break;
    case ETHOSU_CORE_MSG_INFERENCE_REQ: {
        ethosu_core_inference_req req;

        if (!queueIn.read(req)) {
            sndErrorRspAndResetQueue(ETHOSU_CORE_MSG_ERR_INVALID_PAYLOAD, "InferenceReq. Failed to read payload");
            return false;
        }

        printf("Msg: InferenceReq. user_arg=0x%" PRIx64 ", network={0x%" PRIx32 ", %" PRIu32 "}",
               req.user_arg,
               req.network.ptr,
               req.network.size);

        printf(", ifm_count=%" PRIu32 ", ifm=[", req.ifm_count);
        for (uint32_t i = 0; i < req.ifm_count; ++i) {
            if (i > 0) {
                printf(", ");
            }

            printf("{0x%" PRIx32 ", %" PRIu32 "}", req.ifm[i].ptr, req.ifm[i].size);
        }
        printf("]");

        printf(", ofm_count=%" PRIu32 ", ofm=[", req.ofm_count);
        for (uint32_t i = 0; i < req.ofm_count; ++i) {
            if (i > 0) {
                printf(", ");
            }

            printf("{0x%" PRIx32 ", %" PRIu32 "}", req.ofm[i].ptr, req.ofm[i].size);
        }
        printf("]\n");

        DataPtr networkModel(reinterpret_cast<void *>(req.network.ptr), req.network.size);

        vector<DataPtr> ifm;
        for (uint32_t i = 0; i < req.ifm_count; ++i) {
            ifm.push_back(DataPtr(reinterpret_cast<void *>(req.ifm[i].ptr), req.ifm[i].size));
        }

        vector<DataPtr> ofm;
        for (uint32_t i = 0; i < req.ofm_count; ++i) {
            ofm.push_back(DataPtr(reinterpret_cast<void *>(req.ofm[i].ptr), req.ofm[i].size));
        }

        vector<DataPtr> expectedOutput;

        vector<uint8_t> pmuEventConfig(ETHOSU_CORE_PMU_MAX);
        for (uint32_t i = 0; i < ETHOSU_CORE_PMU_MAX; i++) {
            pmuEventConfig[i] = req.pmu_event_config[i];
        }

        InferenceJob job(
            "job", networkModel, ifm, ofm, expectedOutput, -1, pmuEventConfig, req.pmu_cycle_counter_enable);
        job.invalidate();

        bool failed = inferenceProcess.runJob(job);
        job.clean();

        sendInferenceRsp(req.user_arg,
                         job.output,
                         failed,
                         job.pmuEventConfig,
                         job.pmuCycleCounterEnable,
                         job.pmuEventCount,
                         job.pmuCycleCounterCount);
        break;
    }
    default: {
        char errMsg[128] = {0};
        snprintf(&errMsg[0],
                 sizeof(errMsg),
                 "Msg: Unknown type: %" PRIu32 " with payload length %" PRIu32 " bytes\n",
                 msg.type,
                 msg.length);
        sndErrorRspAndResetQueue(ETHOSU_CORE_MSG_ERR_UNSUPPORTED_TYPE, errMsg);
        return false;
    }
    }
    return true;
}

void MessageProcess::sendPong() {
    if (!queueOut.write(ETHOSU_CORE_MSG_PONG)) {
        printf("ERROR: Msg: Failed to write pong response. No mailbox message sent\n");
    } else {
        mailbox.sendMessage();
    }
}

void MessageProcess::sendVersionRsp() {
    struct ethosu_core_msg_version ver = {.major     = ETHOSU_CORE_MSG_VERSION_MAJOR,
                                          .minor     = ETHOSU_CORE_MSG_VERSION_MINOR,
                                          .patch     = ETHOSU_CORE_MSG_VERSION_PATCH,
                                          ._reserved = 0};

    if (!queueOut.write(ETHOSU_CORE_MSG_VERSION_RSP, ver)) {
        printf("ERROR: Failed to write version response. No mailbox message sent\n");
    } else {
        mailbox.sendMessage();
    }
}

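// Log the error, send an error response to the host (truncating the message to
// fit the response buffer), reset the input queue and ring the mailbox.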
void MessageProcess::sndErrorRspAndResetQueue(ethosu_core_msg_err_type type, const char *message) {
    ethosu_core_msg_err error = {0};
    error.type                = type;
    unsigned int i            = 0;

    if (message) {
        for (; i < (sizeof(error.msg) - 1) && message[i]; i++) {
            error.msg[i] = message[i];
        }
    }
    printf("ERROR: Msg: \"%s\"\n", message);
    if (!queueOut.write(ETHOSU_CORE_MSG_ERR, &error)) {
        printf("ERROR: Msg: Failed to write error response. No mailbox message sent\n");
        return;
    }
    queueIn.reset();
    mailbox.sendMessage();
}

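// Pack the inference result into an ethosu_core_inference_rsp: output buffer
// sizes, PMU event configuration and counters, and the overall status, then
// write it to the output queue and notify the host through the mailbox.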
void MessageProcess::sendInferenceRsp(uint64_t userArg,
                                      vector<DataPtr> &ofm,
                                      bool failed,
                                      vector<uint8_t> &pmuEventConfig,
                                      uint32_t pmuCycleCounterEnable,
                                      vector<uint32_t> &pmuEventCount,
                                      uint64_t pmuCycleCounterCount) {
    ethosu_core_inference_rsp rsp = {
        .pmu_event_count =
            {
                0,
            },
    };

    rsp.user_arg  = userArg;
    rsp.ofm_count = ofm.size();
    rsp.status    = failed ? ETHOSU_CORE_STATUS_ERROR : ETHOSU_CORE_STATUS_OK;

    for (size_t i = 0; i < ofm.size(); ++i) {
        rsp.ofm_size[i] = ofm[i].size;
    }

    for (size_t i = 0; i < pmuEventConfig.size(); i++) {
        rsp.pmu_event_config[i] = pmuEventConfig[i];
    }
    rsp.pmu_cycle_counter_enable = pmuCycleCounterEnable;
    for (size_t i = 0; i < pmuEventCount.size(); i++) {
        rsp.pmu_event_count[i] = pmuEventCount[i];
    }
    rsp.pmu_cycle_counter_count = pmuCycleCounterCount;

    printf("Sending inference response. userArg=0x%" PRIx64 ", ofm_count=%" PRIu32 ", status=%" PRIu32 "\n",
           rsp.user_arg,
           rsp.ofm_count,
           rsp.status);

    if (!queueOut.write(ETHOSU_CORE_MSG_INFERENCE_RSP, rsp)) {
        printf("ERROR: Msg: Failed to write inference response. No mailbox message sent\n");
    } else {
        mailbox.sendMessage();
    }
}

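// Static callback registered with the mailbox: recover the MessageProcess
// instance from the user argument and signal the main loop via handleIrq().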
void MessageProcess::mailboxCallback(void *userArg) {
    MessageProcess *_this = reinterpret_cast<MessageProcess *>(userArg);
    _this->handleIrq();
}

} // namespace MessageProcess