/*
 * Copyright (c) 2020-2022 Arm Limited.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

/****************************************************************************
 * Includes
 ****************************************************************************/

#include "ethosu_mailbox.h"

#include "ethosu_buffer.h"
#include "ethosu_core_interface.h"
#include "ethosu_device.h"

#include <linux/resource.h>
#include <linux/uio.h>

/****************************************************************************
 * Functions
 ****************************************************************************/

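/* Helpers translating an ethosu_buffer into the firmware's
 * ethosu_core_buffer view. The layout implied by the fields used here
 * (offset and size are relative to dma_addr):
 *
 *   dma_addr ... [offset][filled data (size)][free space] ... +capacity
 *
 * ethosu_core_set_size() describes the filled region (used below for IFMs
 * and network buffers), while ethosu_core_set_capacity() describes the
 * free space after it (used for OFMs, which the firmware fills in).
 */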
static void ethosu_core_set_size(struct ethosu_buffer *buf,
                                 struct ethosu_core_buffer *cbuf)
{
        cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset;
        cbuf->size = (uint32_t)buf->size;
}

static void ethosu_core_set_capacity(struct ethosu_buffer *buf,
                                     struct ethosu_core_buffer *cbuf)
{
        cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset + buf->size;
        cbuf->size = (uint32_t)buf->capacity - buf->offset - buf->size;
}

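/* The message queues are single-producer ring buffers shared with the
 * firmware: 'header.read' and 'header.write' are byte indices into
 * 'data' and 'header.size' is the ring size in bytes. Worked example,
 * assuming size=1024: write=100 and read=900 give
 * available = (100 - 900) + 1024 = 224 bytes pending, and
 * capacity = 1024 - 224 = 800 bytes free.
 */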
static size_t ethosu_queue_available(struct ethosu_core_queue *queue)
{
        /* Do the index arithmetic in 32 bits so the wrap-around is
         * handled correctly even where size_t is wider than the 32-bit
         * queue indices.
         */
        uint32_t size = queue->header.write - queue->header.read;

        if (queue->header.read > queue->header.write)
                size += queue->header.size;

        return size;
}

static size_t ethosu_queue_capacity(struct ethosu_core_queue *queue)
{
        return queue->header.size - ethosu_queue_available(queue);
}

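/* Gather-write a message into the inbound ring buffer and notify the
 * firmware by sending the queue over the tx mailbox channel. Returns
 * -EINVAL if the message does not fit in the free space of the ring.
 */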
static int ethosu_queue_write(struct ethosu_mailbox *mbox,
                              const struct kvec *vec,
                              size_t length)
{
        struct ethosu_core_queue *queue = mbox->in_queue;
        uint8_t *dst = &queue->data[0];
        uint32_t wpos = queue->header.write;
        size_t total_size;
        size_t i;
        int ret;

        for (i = 0, total_size = 0; i < length; i++)
                total_size += vec[i].iov_len;

        if (total_size > ethosu_queue_capacity(queue))
                return -EINVAL;

        for (i = 0; i < length; i++) {
                const uint8_t *src = vec[i].iov_base;
                const uint8_t *end = src + vec[i].iov_len;

                while (src < end) {
                        dst[wpos] = *src++;
                        wpos = (wpos + 1) % queue->header.size;
                }
        }

        queue->header.write = wpos;

        ret = mbox_send_message(mbox->tx, queue);
        if (ret < 0)
                return ret;

        return 0;
}

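/* Frame a message as a fixed ethosu_core_msg header followed by an
 * optional payload and submit both in one scattered write.
 */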
static int ethosu_queue_write_msg(struct ethosu_mailbox *mbox,
                                  uint32_t type,
                                  void *data,
                                  size_t length)
{
        struct ethosu_core_msg msg = {
                .magic  = ETHOSU_CORE_MSG_MAGIC,
                .type   = type,
                .length = length
        };
        const struct kvec vec[2] = {
                { &msg, sizeof(msg) },
                { data, length }
        };

        return ethosu_queue_write(mbox, vec, 2);
}

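/* Copy 'length' bytes from the outbound ring buffer. Returns 0 on
 * success, -ENOMSG if the ring is empty and -EBADMSG if it holds fewer
 * bytes than requested.
 */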
static int ethosu_queue_read(struct ethosu_mailbox *mbox,
                             void *data,
                             size_t length)
{
        struct ethosu_core_queue *queue = mbox->out_queue;
        uint8_t *src = &queue->data[0];
        uint8_t *dst = (uint8_t *)data;
        const uint8_t *end = dst + length;
        uint32_t rpos = queue->header.read;
        size_t queue_avail = ethosu_queue_available(queue);

        if (length == 0)
                return 0;
        else if (queue_avail == 0)
                return -ENOMSG;
        else if (length > queue_avail)
                return -EBADMSG;

        while (dst < end) {
                *dst++ = src[rpos];
                rpos = (rpos + 1) % queue->header.size;
        }

        queue->header.read = rpos;

        return 0;
}

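/* Drop all pending outbound messages by fast-forwarding the read index
 * to the current write index.
 */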
void ethosu_mailbox_reset(struct ethosu_mailbox *mbox)
{
        mbox->out_queue->header.read = mbox->out_queue->header.write;
}

int ethosu_mailbox_read(struct ethosu_mailbox *mbox,
                        struct ethosu_core_msg *header,
                        void *data,
                        size_t length)
{
        int ret;

        /* Read message header */
        ret = ethosu_queue_read(mbox, header, sizeof(*header));
        if (ret) {
                if (ret != -ENOMSG)
                        dev_warn(mbox->dev,
                                 "Msg: Failed to read message header\n");

                return ret;
        }

        if (header->magic != ETHOSU_CORE_MSG_MAGIC) {
                dev_warn(mbox->dev,
                         "Msg: Invalid magic. Got: %08X but expected %08X\n",
                         header->magic, ETHOSU_CORE_MSG_MAGIC);

                return -EINVAL;
        }

        dev_info(mbox->dev,
                 "mbox: Read msg header. magic=%08X, type=%u, length=%u",
                 header->magic, header->type, header->length);

        /* Check that payload is not larger than allocated buffer */
        if (header->length > length) {
                dev_warn(mbox->dev,
                         "Msg: Buffer size (%zu) too small for message (%u)\n",
                         length, header->length);

                return -ENOMEM;
        }

        /* Read payload data */
        ret = ethosu_queue_read(mbox, data, header->length);
        if (ret) {
                dev_warn(mbox->dev, "Msg: Failed to read payload data\n");

                return -EBADMSG;
        }

        return 0;
}

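/* Zero-payload control messages. */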
int ethosu_mailbox_ping(struct ethosu_mailbox *mbox)
{
        return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_PING, NULL, 0);
}

int ethosu_mailbox_pong(struct ethosu_mailbox *mbox)
{
        return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_PONG, NULL, 0);
}

int ethosu_mailbox_version_request(struct ethosu_mailbox *mbox)
{
        return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_VERSION_REQ, NULL,
                                      0);
}

int ethosu_mailbox_capabilities_request(struct ethosu_mailbox *mbox,
                                        void *user_arg)
{
        struct ethosu_core_capabilities_req req = {
                .user_arg = (ptrdiff_t)user_arg
        };

        return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_CAPABILITIES_REQ,
                                      &req,
                                      sizeof(req));
}

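/* Marshal an inference request: IFMs are described by their filled
 * size, OFMs by their remaining capacity, and the network either as a
 * buffer or as an index resolved by the firmware.
 */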
int ethosu_mailbox_inference(struct ethosu_mailbox *mbox,
                             void *user_arg,
                             uint32_t ifm_count,
                             struct ethosu_buffer **ifm,
                             uint32_t ofm_count,
                             struct ethosu_buffer **ofm,
                             struct ethosu_buffer *network,
                             uint32_t network_index,
                             uint8_t *pmu_event_config,
                             uint8_t pmu_event_config_count,
                             uint8_t pmu_cycle_counter_enable)
{
        struct ethosu_core_inference_req inf;
        uint32_t i;

        /* Verify that the uapi and core have the same number of PMUs */
        if (pmu_event_config_count != ETHOSU_CORE_PMU_MAX) {
                dev_err(mbox->dev, "PMU count misconfigured.\n");

                return -EINVAL;
        }

        inf.user_arg = (ptrdiff_t)user_arg;
        inf.ifm_count = ifm_count;
        inf.ofm_count = ofm_count;
        inf.pmu_cycle_counter_enable = pmu_cycle_counter_enable;

        for (i = 0; i < ifm_count; i++)
                ethosu_core_set_size(ifm[i], &inf.ifm[i]);

        for (i = 0; i < ofm_count; i++)
                ethosu_core_set_capacity(ofm[i], &inf.ofm[i]);

        for (i = 0; i < ETHOSU_CORE_PMU_MAX; i++)
                inf.pmu_event_config[i] = pmu_event_config[i];

        if (network != NULL) {
                inf.network.type = ETHOSU_CORE_NETWORK_BUFFER;
                ethosu_core_set_size(network, &inf.network.buffer);
        } else {
                inf.network.type = ETHOSU_CORE_NETWORK_INDEX;
                inf.network.index = network_index;
        }

        return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_INFERENCE_REQ,
                                      &inf, sizeof(inf));
}

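/* Request information about a network, identified the same way as for
 * an inference: by buffer or by index.
 */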
int ethosu_mailbox_network_info_request(struct ethosu_mailbox *mbox,
                                        void *user_arg,
                                        struct ethosu_buffer *network,
                                        uint32_t network_index)
{
        struct ethosu_core_network_info_req info;

        info.user_arg = (ptrdiff_t)user_arg;

        if (network != NULL) {
                info.network.type = ETHOSU_CORE_NETWORK_BUFFER;
                ethosu_core_set_size(network, &info.network.buffer);
        } else {
                info.network.type = ETHOSU_CORE_NETWORK_INDEX;
                info.network.index = network_index;
        }

        return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_NETWORK_INFO_REQ,
                                      &info, sizeof(info));
}

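/* RX path: the mailbox framework may invoke the callback in interrupt
 * context, so message handling is deferred to a work queue.
 */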
static void ethosu_mailbox_rx_work(struct work_struct *work)
{
        struct ethosu_mailbox *mbox = container_of(work, typeof(*mbox), work);

        mbox->callback(mbox->user_arg);
}

static void ethosu_mailbox_rx_callback(struct mbox_client *client,
                                       void *message)
{
        struct ethosu_mailbox *mbox =
                container_of(client, typeof(*mbox), client);

        dev_info(mbox->dev, "mbox: Received message.\n");

        queue_work(mbox->wq, &mbox->work);
}

static void ethosu_mailbox_tx_done(struct mbox_client *client,
                                   void *message,
                                   int r)
{
        if (r)
                dev_warn(client->dev, "mbox: Failed sending message (%d)\n", r);
        else
                dev_info(client->dev, "mbox: Message sent\n");
}

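/* Map the shared queues, set up the deferred RX work and request the
 * mailbox channels. If no dedicated "rx" channel exists, the tx channel
 * is shared for both directions. Errors unwind in reverse order.
 */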
int ethosu_mailbox_init(struct ethosu_mailbox *mbox,
                        struct device *dev,
                        struct resource *in_queue,
                        struct resource *out_queue,
                        ethosu_mailbox_cb callback,
                        void *user_arg)
{
        int ret;

        mbox->dev = dev;
        mbox->callback = callback;
        mbox->user_arg = user_arg;

        mbox->client.dev = dev;
        mbox->client.rx_callback = ethosu_mailbox_rx_callback;
        mbox->client.tx_prepare = NULL; /* preparation of data is handled
                                         * through the queue functions */
        mbox->client.tx_done = ethosu_mailbox_tx_done;
        mbox->client.tx_block = true;
        mbox->client.knows_txdone = false;
        mbox->client.tx_tout = 500;

        mbox->in_queue = devm_ioremap_resource(mbox->dev, in_queue);
        if (IS_ERR(mbox->in_queue))
                return PTR_ERR(mbox->in_queue);

        mbox->out_queue = devm_ioremap_resource(mbox->dev, out_queue);
        if (IS_ERR(mbox->out_queue)) {
                ret = PTR_ERR(mbox->out_queue);
                goto unmap_in_queue;
        }

        mbox->wq = create_singlethread_workqueue("ethosu_workqueue");
        if (!mbox->wq) {
                dev_err(mbox->dev, "Failed to create work queue\n");
                ret = -EINVAL;
                goto unmap_out_queue;
        }

        INIT_WORK(&mbox->work, ethosu_mailbox_rx_work);

        mbox->tx = mbox_request_channel_byname(&mbox->client, "tx");
        if (IS_ERR(mbox->tx)) {
                dev_warn(mbox->dev, "mbox: Failed to request tx channel\n");
                ret = PTR_ERR(mbox->tx);
                goto workqueue_destroy;
        }

        mbox->rx = mbox_request_channel_byname(&mbox->client, "rx");
        if (IS_ERR(mbox->rx)) {
                dev_info(dev, "mbox: Using same channel for RX and TX\n");
                mbox->rx = mbox->tx;
        }

        return 0;

workqueue_destroy:
        destroy_workqueue(mbox->wq);

unmap_out_queue:
        devm_iounmap(mbox->dev, mbox->out_queue);

unmap_in_queue:
        devm_iounmap(mbox->dev, mbox->in_queue);

        return ret;
}

void ethosu_mailbox_deinit(struct ethosu_mailbox *mbox)
{
        if (mbox->rx != mbox->tx)
                mbox_free_channel(mbox->rx);

        mbox_free_channel(mbox->tx);
        destroy_workqueue(mbox->wq);
        devm_iounmap(mbox->dev, mbox->out_queue);
        devm_iounmap(mbox->dev, mbox->in_queue);
}