blob: 7753baaf5d046fd4218c4d321d8e4c3cb63f944b [file] [log] [blame]
Kristofer Jonsson116a6352020-08-20 17:25:23 +02001/*
Kristofer Jonsson35de9e62022-03-08 13:25:45 +01002 * Copyright (c) 2020-2022 Arm Limited.
Kristofer Jonsson116a6352020-08-20 17:25:23 +02003 *
4 * This program is free software and is provided to you under the terms of the
5 * GNU General Public License version 2 as published by the Free Software
6 * Foundation, and any use by you of this program is subject to the terms
7 * of such GNU licence.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * SPDX-License-Identifier: GPL-2.0-only
19 */
20
21/****************************************************************************
22 * Includes
23 ****************************************************************************/
24
25#include "ethosu_mailbox.h"
Kristofer Jonssonf5b98c92022-03-14 16:09:12 +010026#include "ethosu_watchdog.h"
Kristofer Jonsson116a6352020-08-20 17:25:23 +020027
28#include "ethosu_buffer.h"
29#include "ethosu_core_interface.h"
30#include "ethosu_device.h"
31
32#include <linux/resource.h>
33#include <linux/uio.h>
34
35/****************************************************************************
36 * Functions
37 ****************************************************************************/
38
Kristofer Jonssonf5b98c92022-03-14 16:09:12 +010039static void ethosu_wd_inc(struct ethosu_mailbox *mbox,
40 enum ethosu_core_msg_type type)
41{
42 switch (type) {
43 case ETHOSU_CORE_MSG_PING:
44 case ETHOSU_CORE_MSG_INFERENCE_REQ:
45 case ETHOSU_CORE_MSG_VERSION_REQ:
46 case ETHOSU_CORE_MSG_CAPABILITIES_REQ:
47 case ETHOSU_CORE_MSG_NETWORK_INFO_REQ:
48 ethosu_watchdog_inc(mbox->wdog);
49 break;
50 default:
51 break;
52 }
53}
54
55static void ethosu_wd_dec(struct ethosu_mailbox *mbox,
56 enum ethosu_core_msg_type type)
57{
58 switch (type) {
59 case ETHOSU_CORE_MSG_PONG:
60 case ETHOSU_CORE_MSG_INFERENCE_RSP:
61 case ETHOSU_CORE_MSG_VERSION_RSP:
62 case ETHOSU_CORE_MSG_CAPABILITIES_RSP:
63 case ETHOSU_CORE_MSG_NETWORK_INFO_RSP:
64 ethosu_watchdog_dec(mbox->wdog);
65 break;
66 default:
67 break;
68 }
69}
70
Kristofer Jonsson116a6352020-08-20 17:25:23 +020071static void ethosu_core_set_size(struct ethosu_buffer *buf,
72 struct ethosu_core_buffer *cbuf)
73{
74 cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset;
75 cbuf->size = (uint32_t)buf->size;
76}
77
78static void ethosu_core_set_capacity(struct ethosu_buffer *buf,
79 struct ethosu_core_buffer *cbuf)
80{
81 cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset + buf->size;
82 cbuf->size = (uint32_t)buf->capacity - buf->offset - buf->size;
83}
84
85static size_t ethosu_queue_available(struct ethosu_core_queue *queue)
86{
87 size_t size = queue->header.write - queue->header.read;
88
89 if (queue->header.read > queue->header.write)
90 size += queue->header.size;
91
92 return size;
93}
94
95static size_t ethosu_queue_capacity(struct ethosu_core_queue *queue)
96{
97 return queue->header.size - ethosu_queue_available(queue);
98}
99
100static int ethosu_queue_write(struct ethosu_mailbox *mbox,
101 const struct kvec *vec,
102 size_t length)
103{
104 struct ethosu_core_queue *queue = mbox->in_queue;
105 uint8_t *dst = &queue->data[0];
106 uint32_t wpos = queue->header.write;
107 size_t total_size;
108 size_t i;
109 int ret;
110
111 for (i = 0, total_size = 0; i < length; i++)
112 total_size += vec[i].iov_len;
113
114 if (total_size > ethosu_queue_capacity(queue))
115 return -EINVAL;
116
117 for (i = 0; i < length; i++) {
118 const uint8_t *src = vec[i].iov_base;
119 const uint8_t *end = src + vec[i].iov_len;
120
121 while (src < end) {
122 dst[wpos] = *src++;
123 wpos = (wpos + 1) % queue->header.size;
124 }
125 }
126
127 queue->header.write = wpos;
128
129 ret = mbox_send_message(mbox->tx, queue);
130 if (ret < 0)
131 return ret;
132
133 return 0;
134}
135
136static int ethosu_queue_write_msg(struct ethosu_mailbox *mbox,
137 uint32_t type,
138 void *data,
139 size_t length)
140{
Jonny Svärd7c24c772021-01-14 19:53:17 +0100141 struct ethosu_core_msg msg = {
142 .magic = ETHOSU_CORE_MSG_MAGIC,
143 .type = type, .length= length
144 };
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200145 const struct kvec vec[2] = {
146 { &msg, sizeof(msg) },
147 { data, length }
148 };
Kristofer Jonssonf5b98c92022-03-14 16:09:12 +0100149 int ret;
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200150
Kristofer Jonssonf5b98c92022-03-14 16:09:12 +0100151 ret = ethosu_queue_write(mbox, vec, 2);
152 if (ret)
153 return ret;
154
155 ethosu_wd_inc(mbox, type);
156
157 return 0;
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200158}
159
160static int ethosu_queue_read(struct ethosu_mailbox *mbox,
161 void *data,
162 size_t length)
163{
164 struct ethosu_core_queue *queue = mbox->out_queue;
165 uint8_t *src = &queue->data[0];
166 uint8_t *dst = (uint8_t *)data;
167 const uint8_t *end = dst + length;
168 uint32_t rpos = queue->header.read;
Jonny Svärd7c24c772021-01-14 19:53:17 +0100169 size_t queue_avail = ethosu_queue_available(queue);
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200170
Davide Grohmann1c26baa2021-06-15 13:21:15 +0200171 if (length == 0)
172 return 0;
173 else if (queue_avail == 0)
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200174 return -ENOMSG;
Jonny Svärd7c24c772021-01-14 19:53:17 +0100175 else if (length > queue_avail)
176 return -EBADMSG;
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200177
178 while (dst < end) {
179 *dst++ = src[rpos];
180 rpos = (rpos + 1) % queue->header.size;
181 }
182
183 queue->header.read = rpos;
184
185 return 0;
186}
187
Jonny Svärd7c24c772021-01-14 19:53:17 +0100188void ethosu_mailbox_reset(struct ethosu_mailbox *mbox)
189{
190 mbox->out_queue->header.read = mbox->out_queue->header.write;
191}
192
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200193int ethosu_mailbox_read(struct ethosu_mailbox *mbox,
194 struct ethosu_core_msg *header,
195 void *data,
196 size_t length)
197{
198 int ret;
199
Jonny Svärd7c24c772021-01-14 19:53:17 +0100200 /* Read message header magic */
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200201 ret = ethosu_queue_read(mbox, header, sizeof(*header));
Jonny Svärd7c24c772021-01-14 19:53:17 +0100202 if (ret) {
203 if (ret != -ENOMSG)
204 dev_warn(mbox->dev,
205 "Msg: Failed to read message header\n");
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200206
Jonny Svärd7c24c772021-01-14 19:53:17 +0100207 return ret;
208 }
209
210 if (header->magic != ETHOSU_CORE_MSG_MAGIC) {
211 dev_warn(mbox->dev,
212 "Msg: Invalid magic. Got: %08X but expected %08X\n",
213 header->magic, ETHOSU_CORE_MSG_MAGIC);
214
215 return -EINVAL;
216 }
217
218 dev_info(mbox->dev,
219 "mbox: Read msg header. magic=%08X, type=%u, length=%u",
220 header->magic, header->type, header->length);
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200221
222 /* Check that payload is not larger than allocated buffer */
Jonny Svärd7c24c772021-01-14 19:53:17 +0100223 if (header->length > length) {
224 dev_warn(mbox->dev,
225 "Msg: Buffer size (%zu) too small for message (%u)\n",
226 sizeof(data), header->length);
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200227
Jonny Svärd7c24c772021-01-14 19:53:17 +0100228 return -ENOMEM;
229 }
230
231 /* Read payload data */
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200232 ret = ethosu_queue_read(mbox, data, header->length);
Jonny Svärd7c24c772021-01-14 19:53:17 +0100233 if (ret) {
234 dev_warn(mbox->dev, "Msg: Failed to read payload data\n");
235
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200236 return -EBADMSG;
Jonny Svärd7c24c772021-01-14 19:53:17 +0100237 }
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200238
Kristofer Jonssonf5b98c92022-03-14 16:09:12 +0100239 ethosu_wd_dec(mbox, header->type);
240
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200241 return 0;
242}
243
244int ethosu_mailbox_ping(struct ethosu_mailbox *mbox)
245{
246 return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_PING, NULL, 0);
247}
248
Jonny Svärd7c24c772021-01-14 19:53:17 +0100249int ethosu_mailbox_pong(struct ethosu_mailbox *mbox)
250{
251 return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_PONG, NULL, 0);
252}
253
254int ethosu_mailbox_version_request(struct ethosu_mailbox *mbox)
255{
256 return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_VERSION_REQ, NULL,
257 0);
258}
259
Davide Grohmann35ce6c82021-06-01 15:03:51 +0200260int ethosu_mailbox_capabilities_request(struct ethosu_mailbox *mbox,
261 void *user_arg)
262{
263 struct ethosu_core_capabilities_req req = {
264 .user_arg = (ptrdiff_t)user_arg
265 };
266
267 return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_CAPABILITIES_REQ,
268 &req,
269 sizeof(req));
270}
271
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200272int ethosu_mailbox_inference(struct ethosu_mailbox *mbox,
273 void *user_arg,
Kristofer Jonssonb74492c2020-09-10 13:26:01 +0200274 uint32_t ifm_count,
275 struct ethosu_buffer **ifm,
276 uint32_t ofm_count,
277 struct ethosu_buffer **ofm,
Per Åstrandf7e407a2020-10-23 21:25:05 +0200278 struct ethosu_buffer *network,
Kristofer Jonsson35de9e62022-03-08 13:25:45 +0100279 uint32_t network_index,
Per Åstrandf7e407a2020-10-23 21:25:05 +0200280 uint8_t *pmu_event_config,
281 uint8_t pmu_event_config_count,
282 uint8_t pmu_cycle_counter_enable)
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200283{
284 struct ethosu_core_inference_req inf;
Kristofer Jonssonb74492c2020-09-10 13:26:01 +0200285 uint32_t i;
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200286
Per Åstrandf7e407a2020-10-23 21:25:05 +0200287 /* Verify that the uapi and core has the same number of pmus */
288 if (pmu_event_config_count != ETHOSU_CORE_PMU_MAX) {
289 dev_err(mbox->dev, "PMU count misconfigured.\n");
290
291 return -EINVAL;
292 }
293
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200294 inf.user_arg = (ptrdiff_t)user_arg;
Kristofer Jonssonb74492c2020-09-10 13:26:01 +0200295 inf.ifm_count = ifm_count;
296 inf.ofm_count = ofm_count;
Per Åstrandf7e407a2020-10-23 21:25:05 +0200297 inf.pmu_cycle_counter_enable = pmu_cycle_counter_enable;
Kristofer Jonssonb74492c2020-09-10 13:26:01 +0200298
299 for (i = 0; i < ifm_count; i++)
300 ethosu_core_set_size(ifm[i], &inf.ifm[i]);
301
302 for (i = 0; i < ofm_count; i++)
303 ethosu_core_set_capacity(ofm[i], &inf.ofm[i]);
304
Per Åstrandf7e407a2020-10-23 21:25:05 +0200305 for (i = 0; i < ETHOSU_CORE_PMU_MAX; i++)
306 inf.pmu_event_config[i] = pmu_event_config[i];
307
Kristofer Jonsson35de9e62022-03-08 13:25:45 +0100308 if (network != NULL) {
309 inf.network.type = ETHOSU_CORE_NETWORK_BUFFER;
310 ethosu_core_set_size(network, &inf.network.buffer);
311 } else {
312 inf.network.type = ETHOSU_CORE_NETWORK_INDEX;
313 inf.network.index = network_index;
314 }
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200315
316 return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_INFERENCE_REQ,
317 &inf, sizeof(inf));
318}
319
Kristofer Jonsson3c6a2602022-03-10 11:17:29 +0100320int ethosu_mailbox_network_info_request(struct ethosu_mailbox *mbox,
321 void *user_arg,
322 struct ethosu_buffer *network,
323 uint32_t network_index)
324{
325 struct ethosu_core_network_info_req info;
326
327 info.user_arg = (ptrdiff_t)user_arg;
328
329 if (network != NULL) {
330 info.network.type = ETHOSU_CORE_NETWORK_BUFFER;
331 ethosu_core_set_size(network, &info.network.buffer);
332 } else {
333 info.network.type = ETHOSU_CORE_NETWORK_INDEX;
334 info.network.index = network_index;
335 }
336
337 return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_NETWORK_INFO_REQ,
338 &info, sizeof(info));
339}
340
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200341static void ethosu_mailbox_rx_work(struct work_struct *work)
342{
343 struct ethosu_mailbox *mbox = container_of(work, typeof(*mbox), work);
344
345 mbox->callback(mbox->user_arg);
346}
347
348static void ethosu_mailbox_rx_callback(struct mbox_client *client,
349 void *message)
350{
351 struct ethosu_mailbox *mbox =
352 container_of(client, typeof(*mbox), client);
353
354 dev_info(mbox->dev, "mbox: Received message.\n");
355
356 queue_work(mbox->wq, &mbox->work);
357}
358
359static void ethosu_mailbox_tx_done(struct mbox_client *client,
360 void *message,
361 int r)
362{
363 if (r)
364 dev_warn(client->dev, "mbox: Failed sending message (%d)\n", r);
365 else
366 dev_info(client->dev, "mbox: Message sent\n");
367}
368
369int ethosu_mailbox_init(struct ethosu_mailbox *mbox,
370 struct device *dev,
371 struct resource *in_queue,
372 struct resource *out_queue,
373 ethosu_mailbox_cb callback,
Kristofer Jonssonf5b98c92022-03-14 16:09:12 +0100374 void *user_arg,
375 struct ethosu_watchdog *wdog)
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200376{
377 int ret;
378
379 mbox->dev = dev;
380 mbox->callback = callback;
381 mbox->user_arg = user_arg;
Kristofer Jonssonf5b98c92022-03-14 16:09:12 +0100382 mbox->wdog = wdog;
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200383
384 mbox->client.dev = dev;
385 mbox->client.rx_callback = ethosu_mailbox_rx_callback;
386 mbox->client.tx_prepare = NULL; /* preparation of data is handled
387 * through the
388 * queue functions */
389 mbox->client.tx_done = ethosu_mailbox_tx_done;
390 mbox->client.tx_block = true;
391 mbox->client.knows_txdone = false;
392 mbox->client.tx_tout = 500;
393
394 mbox->in_queue = devm_ioremap_resource(mbox->dev, in_queue);
395 if (IS_ERR(mbox->in_queue))
396 return PTR_ERR(mbox->in_queue);
397
398 mbox->out_queue = devm_ioremap_resource(mbox->dev, out_queue);
399 if (IS_ERR(mbox->out_queue)) {
400 ret = PTR_ERR(mbox->out_queue);
401 goto unmap_in_queue;
402 }
403
404 mbox->wq = create_singlethread_workqueue("ethosu_workqueue");
405 if (!mbox->wq) {
406 dev_err(mbox->dev, "Failed to create work queue\n");
407 ret = -EINVAL;
408 goto unmap_out_queue;
409 }
410
411 INIT_WORK(&mbox->work, ethosu_mailbox_rx_work);
412
413 mbox->tx = mbox_request_channel_byname(&mbox->client, "tx");
414 if (IS_ERR(mbox->tx)) {
415 dev_warn(mbox->dev, "mbox: Failed to request tx channel\n");
416 ret = PTR_ERR(mbox->tx);
417 goto workqueue_destroy;
418 }
419
420 mbox->rx = mbox_request_channel_byname(&mbox->client, "rx");
421 if (IS_ERR(mbox->rx)) {
422 dev_info(dev, "mbox: Using same channel for RX and TX\n");
423 mbox->rx = mbox->tx;
424 }
425
426 return 0;
427
428workqueue_destroy:
429 destroy_workqueue(mbox->wq);
430
431unmap_out_queue:
432 devm_iounmap(mbox->dev, mbox->out_queue);
433
434unmap_in_queue:
435 devm_iounmap(mbox->dev, mbox->in_queue);
436
437 return ret;
438}
439
/* Tear down the mailbox in reverse order of ethosu_mailbox_init().
 * Channels are freed before the workqueue is destroyed so that no new
 * rx work can be queued; destroy_workqueue() then drains pending work.
 */
void ethosu_mailbox_deinit(struct ethosu_mailbox *mbox)
{
	/* rx may alias tx (see init); free it only if it is distinct. */
	if (mbox->rx != mbox->tx)
		mbox_free_channel(mbox->rx);

	mbox_free_channel(mbox->tx);
	destroy_workqueue(mbox->wq);
	devm_iounmap(mbox->dev, mbox->out_queue);
	devm_iounmap(mbox->dev, mbox->in_queue);
}
449}