/*
 * Copyright 2020-2023 Arm Limited and/or its affiliates
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

/****************************************************************************
 * Includes
 ****************************************************************************/

#include "ethosu_mailbox.h"

#include "ethosu_buffer.h"
#include "ethosu_core_interface.h"
#include "ethosu_device.h"

#include <linux/jiffies.h>
#include <linux/resource.h>
#include <linux/uio.h>

/****************************************************************************
 * Defines
 ****************************************************************************/

#ifndef fallthrough
#if __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough do {} while (0) /* fallthrough */
#endif
#endif

/****************************************************************************
 * Functions
 ****************************************************************************/

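/*
 * Translate a kernel side ethosu_buffer into the ethosu_core_buffer
 * descriptor shared with the firmware. ethosu_core_set_size() describes
 * the populated payload of an input buffer, while
 * ethosu_core_set_capacity() describes the remaining free space of an
 * output buffer that the firmware may write to.
 */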
static void ethosu_core_set_size(struct ethosu_buffer *buf,
                                 struct ethosu_core_buffer *cbuf)
{
        cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset;
        cbuf->size = (uint32_t)buf->size;
}

static void ethosu_core_set_capacity(struct ethosu_buffer *buf,
                                     struct ethosu_core_buffer *cbuf)
{
        cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset + buf->size;
        cbuf->size = (uint32_t)buf->capacity - buf->offset - buf->size;
}

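/*
 * The message queues are byte oriented ring buffers shared with the
 * firmware. 'read' and 'write' are indexes into data[] and the queue is
 * empty when they are equal, which is why one byte is always left
 * unused; see the '- 1' in ethosu_queue_capacity().
 */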
static size_t ethosu_queue_available(struct ethosu_core_queue *queue)
{
        size_t size = queue->header.write - queue->header.read;

        if (queue->header.read > queue->header.write)
                size += queue->header.size;

        return size;
}

static size_t ethosu_queue_capacity(struct ethosu_core_queue *queue)
{
        return max_t(ssize_t,
                     queue->header.size - ethosu_queue_available(queue) - 1, 0);
}

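/*
 * Copy a gather list of buffers into the inbound ring and signal the
 * firmware through the tx mailbox channel. Capacity is checked up front
 * so a message is written either in full or not at all, and the write
 * index is only published after the payload has been copied.
 */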
static int ethosu_queue_write(struct ethosu_mailbox *mbox,
                              const struct kvec *vec,
                              size_t length)
{
        struct ethosu_core_queue *queue = mbox->in_queue;
        uint8_t *dst = &queue->data[0];
        uint32_t wpos = queue->header.write;
        size_t total_size;
        size_t i;
        int ret;

        for (i = 0, total_size = 0; i < length; i++)
                total_size += vec[i].iov_len;

        if (total_size > ethosu_queue_capacity(queue))
                return -EINVAL;

        for (i = 0; i < length; i++) {
                const uint8_t *src = vec[i].iov_base;
                const uint8_t *end = src + vec[i].iov_len;

                while (src < end) {
                        dst[wpos] = *src++;
                        wpos = (wpos + 1) % queue->header.size;
                }
        }

        queue->header.write = wpos;

        ret = mbox_send_message(mbox->tx, queue);
        if (ret < 0)
                return ret;

        return 0;
}

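/*
 * Frame a message as a fixed size header (magic, type, length)
 * immediately followed by the payload and submit both to the ring in a
 * single write.
 */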
static int ethosu_queue_write_msg(struct ethosu_mailbox *mbox,
                                  uint32_t type,
                                  void *data,
                                  size_t length)
{
        struct ethosu_core_msg msg = {
                .magic = ETHOSU_CORE_MSG_MAGIC,
                .type = type,
                .length = length
        };
        const struct kvec vec[2] = {
                { &msg, sizeof(msg) },
                { data, length      }
        };
        int ret;

        ret = ethosu_queue_write(mbox, vec, 2);
        if (ret)
                return ret;

        return 0;
}

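/*
 * Pop 'length' bytes from the outbound ring. Returns -ENOMSG when the
 * ring is empty and -EBADMSG when it holds fewer bytes than requested;
 * in both cases nothing is consumed.
 */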
static int ethosu_queue_read(struct ethosu_mailbox *mbox,
                             void *data,
                             size_t length)
{
        struct ethosu_core_queue *queue = mbox->out_queue;
        uint8_t *src = &queue->data[0];
        uint8_t *dst = (uint8_t *)data;
        const uint8_t *end = dst + length;
        uint32_t rpos = queue->header.read;
        size_t queue_avail = ethosu_queue_available(queue);

        if (length == 0)
                return 0;
        else if (queue_avail == 0)
                return -ENOMSG;
        else if (length > queue_avail)
                return -EBADMSG;

        while (dst < end) {
                *dst++ = src[rpos];
                rpos = (rpos + 1) % queue->header.size;
        }

        queue->header.read = rpos;

        return 0;
}

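/* Discard all pending outbound messages by fast-forwarding the read index. */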
void ethosu_mailbox_reset(struct ethosu_mailbox *mbox)
{
        mbox->out_queue->header.read = mbox->out_queue->header.write;
}

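/*
 * Invalidate the inbound queue header before the firmware is restarted.
 * The 0xffffff markers are values the firmware is not expected to write,
 * so ethosu_mailbox_wait_firmware() can tell when the firmware has
 * re-initialized the header.
 */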
void ethosu_mailbox_wait_prepare(struct ethosu_mailbox *mbox)
{
        mbox->in_queue->header.size = 0;
        mbox->in_queue->header.read = 0xffffff;
        mbox->in_queue->header.write = 0xffffff;
}

int ethosu_mailbox_wait_firmware(struct ethosu_mailbox *mbox)
{
        const unsigned long timeout_ms = 1000;
        const unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);
        volatile struct ethosu_core_queue_header *hdr =
                &mbox->in_queue->header;
        int ret = -ETIMEDOUT;

        /* Spin wait on mailbox initialization */
        while (time_before(jiffies, end))
                if (hdr->size != 0 &&
                    hdr->read != 0xffffff &&
                    hdr->write != 0xffffff) {
                        ret = 0;
                        break;
                }

        dev_info(mbox->dev, "mbox: Wait. ret=%d, size=%u, read=%u, write=%u",
                 ret, hdr->size, hdr->read, hdr->write);

        return ret;
}

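/*
 * Read one complete message from the firmware: fetch and validate the
 * header, verify that the payload fits in the caller's buffer, then
 * read the payload itself.
 */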
int ethosu_mailbox_read(struct ethosu_mailbox *mbox,
                        struct ethosu_core_msg *header,
                        void *data,
                        size_t length)
{
        int ret;

        /* Read message header */
        ret = ethosu_queue_read(mbox, header, sizeof(*header));
        if (ret) {
                if (ret != -ENOMSG)
                        dev_warn(mbox->dev,
                                 "Msg: Failed to read message header\n");

                return ret;
        }

        if (header->magic != ETHOSU_CORE_MSG_MAGIC) {
                dev_warn(mbox->dev,
                         "Msg: Invalid magic. Got: %08X but expected %08X\n",
                         header->magic, ETHOSU_CORE_MSG_MAGIC);

                return -EINVAL;
        }

        dev_info(mbox->dev,
                 "mbox: Read msg header. magic=%08X, type=%u, length=%u",
                 header->magic, header->type, header->length);

        /* Check that payload is not larger than allocated buffer */
        if (header->length > length) {
                dev_warn(mbox->dev,
                         "Msg: Buffer size (%zu) too small for message (%u)\n",
                         length, header->length);

                return -ENOMEM;
        }

        /* Read payload data */
        ret = ethosu_queue_read(mbox, data, header->length);
        if (ret) {
                dev_warn(mbox->dev, "Msg: Failed to read payload data\n");

                return -EBADMSG;
        }

        return 0;
}

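/*
 * Pending messages are tracked in an IDR. The allocated id doubles as
 * the 'user_arg' token carried by requests, so a response can be matched
 * back to its originating message with ethosu_mailbox_find(). A typical
 * request/response round trip looks roughly like:
 *
 *   ethosu_mailbox_register(mbox, msg);
 *   ethosu_mailbox_capabilities_request(mbox, msg);
 *   // rx path: msg = ethosu_mailbox_find(mbox, rsp->user_arg);
 *   ethosu_mailbox_deregister(mbox, msg);
 */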
int ethosu_mailbox_register(struct ethosu_mailbox *mbox,
                            struct ethosu_mailbox_msg *msg)
{
        msg->id = idr_alloc_cyclic(&mbox->msg_idr, msg, 0, INT_MAX, GFP_KERNEL);
        if (msg->id < 0)
                return msg->id;

        return 0;
}

void ethosu_mailbox_deregister(struct ethosu_mailbox *mbox,
                               struct ethosu_mailbox_msg *msg)
{
        idr_remove(&mbox->msg_idr, msg->id);
}

struct ethosu_mailbox_msg *ethosu_mailbox_find(struct ethosu_mailbox *mbox,
                                               int msg_id)
{
        struct ethosu_mailbox_msg *ptr = (struct ethosu_mailbox_msg *)idr_find(
                &mbox->msg_idr, msg_id);

        if (ptr == NULL)
                return ERR_PTR(-EINVAL);

        return ptr;
}

void ethosu_mailbox_fail(struct ethosu_mailbox *mbox)
{
        struct ethosu_mailbox_msg *cur;
        int id;

        idr_for_each_entry(&mbox->msg_idr, cur, id) {
                cur->fail(cur);
        }
}

int ethosu_mailbox_ping(struct ethosu_mailbox *mbox)
{
        return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_PING, NULL, 0);
}

int ethosu_mailbox_pong(struct ethosu_mailbox *mbox)
{
        return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_PONG, NULL, 0);
}

int ethosu_mailbox_version_request(struct ethosu_mailbox *mbox)
{
        return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_VERSION_REQ, NULL,
                                      0);
}

int ethosu_mailbox_capabilities_request(struct ethosu_mailbox *mbox,
                                        struct ethosu_mailbox_msg *msg)
{
        struct ethosu_core_capabilities_req req = {
                .user_arg = msg->id
        };

        return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_CAPABILITIES_REQ,
                                      &req,
                                      sizeof(req));
}

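/*
 * Marshal an inference request. The network is referenced either by
 * buffer, when the network is supplied by user space, or by index, when
 * it is expected to already reside on the firmware side.
 */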
int ethosu_mailbox_inference(struct ethosu_mailbox *mbox,
                             struct ethosu_mailbox_msg *msg,
                             uint32_t ifm_count,
                             struct ethosu_buffer **ifm,
                             uint32_t ofm_count,
                             struct ethosu_buffer **ofm,
                             struct ethosu_buffer *network,
                             uint32_t network_index,
                             uint8_t *pmu_event_config,
                             uint8_t pmu_event_config_count,
                             uint8_t pmu_cycle_counter_enable)
{
        struct ethosu_core_inference_req inf;
        uint32_t i;

        /* Verify that the uapi and core have the same number of PMUs */
        if (pmu_event_config_count != ETHOSU_CORE_PMU_MAX) {
                dev_err(mbox->dev, "PMU count misconfigured.\n");

                return -EINVAL;
        }

        inf.user_arg = msg->id;
        inf.ifm_count = ifm_count;
        inf.ofm_count = ofm_count;
        inf.pmu_cycle_counter_enable = pmu_cycle_counter_enable;

        for (i = 0; i < ifm_count; i++)
                ethosu_core_set_size(ifm[i], &inf.ifm[i]);

        for (i = 0; i < ofm_count; i++)
                ethosu_core_set_capacity(ofm[i], &inf.ofm[i]);

        for (i = 0; i < ETHOSU_CORE_PMU_MAX; i++)
                inf.pmu_event_config[i] = pmu_event_config[i];

        if (network != NULL) {
                inf.network.type = ETHOSU_CORE_NETWORK_BUFFER;
                ethosu_core_set_size(network, &inf.network.buffer);
        } else {
                inf.network.type = ETHOSU_CORE_NETWORK_INDEX;
                inf.network.index = network_index;
        }

        return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_INFERENCE_REQ,
                                      &inf, sizeof(inf));
}

int ethosu_mailbox_network_info_request(struct ethosu_mailbox *mbox,
                                        struct ethosu_mailbox_msg *msg,
                                        struct ethosu_buffer *network,
                                        uint32_t network_index)
{
        struct ethosu_core_network_info_req info;

        info.user_arg = msg->id;

        if (network != NULL) {
                info.network.type = ETHOSU_CORE_NETWORK_BUFFER;
                ethosu_core_set_size(network, &info.network.buffer);
        } else {
                info.network.type = ETHOSU_CORE_NETWORK_INDEX;
                info.network.index = network_index;
        }

        return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_NETWORK_INFO_REQ,
                                      &info, sizeof(info));
}

int ethosu_mailbox_cancel_inference(struct ethosu_mailbox *mbox,
                                    struct ethosu_mailbox_msg *msg,
                                    int inference_handle)
{
        struct ethosu_core_cancel_inference_req req;

        req.user_arg = msg->id;
        req.inference_handle = inference_handle;

        return ethosu_queue_write_msg(mbox,
                                      ETHOSU_CORE_MSG_CANCEL_INFERENCE_REQ,
                                      &req, sizeof(req));
}

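/*
 * The mailbox framework may invoke rx_callback from a context where
 * message processing is not safe, so reception only queues work and the
 * registered callback is run from a single threaded workqueue.
 */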
static void ethosu_mailbox_rx_work(struct work_struct *work)
{
        struct ethosu_mailbox *mbox = container_of(work, typeof(*mbox), work);

        mbox->callback(mbox->user_arg);
}

static void ethosu_mailbox_rx_callback(struct mbox_client *client,
                                       void *message)
{
        struct ethosu_mailbox *mbox =
                container_of(client, typeof(*mbox), client);

        dev_info(mbox->dev, "mbox: Received message.\n");

        queue_work(mbox->wq, &mbox->work);
}

static void ethosu_mailbox_tx_done(struct mbox_client *client,
                                   void *message,
                                   int r)
{
        if (r)
                dev_warn(client->dev, "mbox: Failed sending message (%d)\n", r);
        else
                dev_info(client->dev, "mbox: Message sent\n");
}

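/*
 * Map the shared queue memory, configure the mailbox client and request
 * the tx and rx channels by name (typically resolved via the device
 * tree). If no dedicated rx channel exists, the tx channel is used for
 * both directions.
 */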
int ethosu_mailbox_init(struct ethosu_mailbox *mbox,
                        struct device *dev,
                        struct resource *in_queue,
                        struct resource *out_queue,
                        ethosu_mailbox_cb callback,
                        void *user_arg)
{
        int ret;

        mbox->dev = dev;
        mbox->callback = callback;
        mbox->user_arg = user_arg;
        idr_init(&mbox->msg_idr);

        mbox->client.dev = dev;
        mbox->client.rx_callback = ethosu_mailbox_rx_callback;
        mbox->client.tx_prepare = NULL; /* preparation of data is handled
                                         * through the queue functions */
        mbox->client.tx_done = ethosu_mailbox_tx_done;
        mbox->client.tx_block = true;
        mbox->client.knows_txdone = false;
        mbox->client.tx_tout = 500;

        mbox->in_queue = devm_ioremap_resource(mbox->dev, in_queue);
        if (IS_ERR(mbox->in_queue))
                return PTR_ERR(mbox->in_queue);

        mbox->out_queue = devm_ioremap_resource(mbox->dev, out_queue);
        if (IS_ERR(mbox->out_queue)) {
                ret = PTR_ERR(mbox->out_queue);
                goto unmap_in_queue;
        }

        mbox->wq = create_singlethread_workqueue("ethosu_workqueue");
        if (!mbox->wq) {
                dev_err(mbox->dev, "Failed to create work queue\n");
                ret = -EINVAL;
                goto unmap_out_queue;
        }

        INIT_WORK(&mbox->work, ethosu_mailbox_rx_work);

        mbox->tx = mbox_request_channel_byname(&mbox->client, "tx");
        if (IS_ERR(mbox->tx)) {
                dev_warn(mbox->dev, "mbox: Failed to request tx channel\n");
                ret = PTR_ERR(mbox->tx);
                goto workqueue_destroy;
        }

        mbox->rx = mbox_request_channel_byname(&mbox->client, "rx");
        if (IS_ERR(mbox->rx)) {
                dev_info(dev, "mbox: Using same channel for RX and TX\n");
                mbox->rx = mbox->tx;
        }

        return 0;

workqueue_destroy:
        destroy_workqueue(mbox->wq);

unmap_out_queue:
        devm_iounmap(mbox->dev, mbox->out_queue);

unmap_in_queue:
        devm_iounmap(mbox->dev, mbox->in_queue);

        return ret;
}

void ethosu_mailbox_deinit(struct ethosu_mailbox *mbox)
{
        if (mbox->rx != mbox->tx)
                mbox_free_channel(mbox->rx);

        mbox_free_channel(mbox->tx);
        destroy_workqueue(mbox->wq);
        devm_iounmap(mbox->dev, mbox->out_queue);
        devm_iounmap(mbox->dev, mbox->in_queue);
}