/*
 * Copyright 2020-2023 Arm Limited and/or its affiliates
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

/****************************************************************************
 * Includes
 ****************************************************************************/

#include "ethosu_mailbox.h"

#include "ethosu_buffer.h"
#include "ethosu_core_rpmsg.h"
#include "ethosu_device.h"

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/resource.h>
#include <linux/uio.h>

/****************************************************************************
 * Defines
 ****************************************************************************/

#ifndef fallthrough
#if __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough do {} while (0) /* fallthrough */
#endif
#endif

#define MAILBOX_SEND_TIMEOUT_MS 15000

/****************************************************************************
 * Functions
 ****************************************************************************/

/**
 * ethosu_send_locked() - Blocking mailbox message sender
 *
 * Context: Can sleep and must be called with the device mutex locked.
 *
 * Return: 0 on success, else error code.
 */
static int ethosu_send_locked(struct ethosu_mailbox *mbox,
                              void *data,
                              size_t length)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct device *dev = mbox->dev;
        long timeout = msecs_to_jiffies(MAILBOX_SEND_TIMEOUT_MS);
        bool try_send = !wq_has_sleeper(&mbox->send_queue);
        int ret;

        might_sleep();

        /* Exclusive wait to only wake up one task at a time */
        add_wait_queue_exclusive(&mbox->send_queue, &wait);
        for (;;) {
                /* Stop if the mailbox is closing down */
                if (atomic_read(&mbox->done)) {
                        ret = -ENODEV;
                        break;
                }

                /* Attempt to send if queue is empty or task was woken up */
                if (try_send) {
                        ret = rpmsg_trysend(mbox->ept, data, length);
                        if (ret != -ENOMEM)
                                break;
                } else {
                        try_send = true;
                }

                /* Unlock device mutex while waiting to not block other tasks */
                device_unlock(dev);
                timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
                device_lock(dev);

                /* Stop if the wait was interrupted */
                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                if (!timeout) {
                        ret = -ETIME;
                        break;
                }
        }

        remove_wait_queue(&mbox->send_queue, &wait);

        /*
         * If the message was sent successfully, there may be more TX buffers
         * available so wake up the next waiting task.
         */
        if (!ret && wq_has_sleeper(&mbox->send_queue))
                wake_up(&mbox->send_queue);

        return ret;
}

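/**
 * ethosu_core_set_size() - Describe the used region of a buffer
 *
 * Points @cbuf at the populated part of @buf, from offset to offset + size.
 */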
static void ethosu_core_set_size(struct ethosu_buffer *buf,
                                 struct ethosu_core_buffer *cbuf)
{
        cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset;
        cbuf->size = (uint32_t)buf->size;
}

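/**
 * ethosu_core_set_capacity() - Describe the unused region of a buffer
 *
 * Points @cbuf at the free tail of @buf, from offset + size to the end of
 * the buffer's capacity.
 */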
static void ethosu_core_set_capacity(struct ethosu_buffer *buf,
                                     struct ethosu_core_buffer *cbuf)
{
        cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset + buf->size;
        cbuf->size = (uint32_t)buf->capacity - buf->offset - buf->size;
}

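/**
 * ethosu_mailbox_register() - Register a message and allocate a message id
 *
 * Allocates a cyclic id for @msg in the message IDR. The id is carried in
 * the rpmsg header so responses can be matched back to their requests with
 * ethosu_mailbox_find().
 *
 * A typical request flow (sketch only; response handling lives outside this
 * file):
 *
 *   ethosu_mailbox_register(mbox, msg);
 *   ret = ethosu_mailbox_version_request(mbox, msg);
 *   ... wait for the response, then ...
 *   ethosu_mailbox_deregister(mbox, msg);
 *
 * Context: Must be called with the device mutex locked.
 *
 * Return: 0 on success, else error code.
 */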
int ethosu_mailbox_register(struct ethosu_mailbox *mbox,
                            struct ethosu_mailbox_msg *msg)
{
        WARN_ON_ONCE(!mutex_is_locked(&mbox->dev->mutex));
        msg->id = idr_alloc_cyclic(&mbox->msg_idr, msg, 0, INT_MAX, GFP_KERNEL);
        if (msg->id < 0)
                return msg->id;

        return 0;
}

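/**
 * ethosu_mailbox_deregister() - Remove a message from the message IDR
 *
 * Context: Must be called with the device mutex locked.
 */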
void ethosu_mailbox_deregister(struct ethosu_mailbox *mbox,
                               struct ethosu_mailbox_msg *msg)
{
        WARN_ON_ONCE(!mutex_is_locked(&mbox->dev->mutex));
        idr_remove(&mbox->msg_idr, msg->id);
}

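/**
 * ethosu_mailbox_find() - Look up a registered message by id and type
 *
 * Context: Must be called with the device mutex locked.
 *
 * Return: Message pointer on success, ERR_PTR(-ENOENT) if the id is unknown
 *         and ERR_PTR(-EINVAL) if the message type does not match.
 */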
struct ethosu_mailbox_msg *ethosu_mailbox_find(struct ethosu_mailbox *mbox,
                                               int msg_id,
                                               uint32_t msg_type)
{
        struct ethosu_mailbox_msg *ptr;

        WARN_ON_ONCE(!mutex_is_locked(&mbox->dev->mutex));
        ptr = (struct ethosu_mailbox_msg *)idr_find(&mbox->msg_idr, msg_id);

        if (ptr == NULL)
                return ERR_PTR(-ENOENT);

        if (ptr->type != msg_type)
                return ERR_PTR(-EINVAL);

        return ptr;
}

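/**
 * ethosu_mailbox_fail() - Fail all registered messages
 *
 * Invokes the fail() callback of every message still registered in the IDR.
 *
 * Context: Must be called with the device mutex locked.
 */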
void ethosu_mailbox_fail(struct ethosu_mailbox *mbox)
{
        struct ethosu_mailbox_msg *cur;
        int id;

        WARN_ON_ONCE(!mutex_is_locked(&mbox->dev->mutex));
        idr_for_each_entry(&mbox->msg_idr, cur, id) {
                cur->fail(cur);
        }
}

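/**
 * ethosu_mailbox_ping() - Send a ping message to the firmware
 *
 * Return: 0 on success, else error code.
 */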
int ethosu_mailbox_ping(struct ethosu_mailbox *mbox)
{
        struct ethosu_core_rpmsg rpmsg = {
                .header = {
                        .magic = ETHOSU_CORE_MSG_MAGIC,
                        .type = ETHOSU_CORE_MSG_PING,
                }
        };

        return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
}

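/**
 * ethosu_mailbox_pong() - Send a pong message, replying to a firmware ping
 *
 * Return: 0 on success, else error code.
 */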
int ethosu_mailbox_pong(struct ethosu_mailbox *mbox)
{
        struct ethosu_core_rpmsg rpmsg = {
                .header = {
                        .magic = ETHOSU_CORE_MSG_MAGIC,
                        .type = ETHOSU_CORE_MSG_PONG,
                }
        };

        return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
}

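/**
 * ethosu_mailbox_version_request() - Request firmware version information
 *
 * Return: 0 on success, else error code.
 */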
int ethosu_mailbox_version_request(struct ethosu_mailbox *mbox,
                                   struct ethosu_mailbox_msg *msg)
{
        struct ethosu_core_rpmsg rpmsg = {
                .header = {
                        .magic = ETHOSU_CORE_MSG_MAGIC,
                        .type = ETHOSU_CORE_MSG_VERSION_REQ,
                        .msg_id = msg->id
                }
        };

        msg->type = rpmsg.header.type;

        return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
}

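/**
 * ethosu_mailbox_capabilities_request() - Request firmware capabilities
 *
 * Return: 0 on success, else error code.
 */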
int ethosu_mailbox_capabilities_request(struct ethosu_mailbox *mbox,
                                        struct ethosu_mailbox_msg *msg)
{
        struct ethosu_core_rpmsg rpmsg = {
                .header = {
                        .magic = ETHOSU_CORE_MSG_MAGIC,
                        .type = ETHOSU_CORE_MSG_CAPABILITIES_REQ,
                        .msg_id = msg->id
                }
        };

        msg->type = rpmsg.header.type;

        return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
}

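/**
 * ethosu_mailbox_inference() - Send an inference request to the firmware
 *
 * Describes the input and output feature maps, the network (either as a
 * buffer or as a firmware built-in index) and the PMU configuration for
 * the inference.
 *
 * Return: 0 on success, else error code.
 */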
int ethosu_mailbox_inference(struct ethosu_mailbox *mbox,
                             struct ethosu_mailbox_msg *msg,
                             uint32_t ifm_count,
                             struct ethosu_buffer **ifm,
                             uint32_t ofm_count,
                             struct ethosu_buffer **ofm,
                             struct ethosu_buffer *network,
                             uint32_t network_index,
                             uint8_t *pmu_event_config,
                             uint8_t pmu_event_config_count,
                             uint8_t pmu_cycle_counter_enable)
{
        struct ethosu_core_rpmsg rpmsg = {
                .header = {
                        .magic = ETHOSU_CORE_MSG_MAGIC,
                        .type = ETHOSU_CORE_MSG_INFERENCE_REQ,
                        .msg_id = msg->id
                }
        };
        struct ethosu_core_msg_inference_req *inf_req = &rpmsg.inf_req;
        uint32_t i;

        msg->type = rpmsg.header.type;

        /* Verify that the uapi and core have the same number of PMUs */
        if (pmu_event_config_count != ETHOSU_CORE_PMU_MAX) {
                dev_err(mbox->dev, "PMU count misconfigured.\n");

                return -EINVAL;
        }

        inf_req->ifm_count = ifm_count;
        inf_req->ofm_count = ofm_count;
        inf_req->pmu_cycle_counter_enable = pmu_cycle_counter_enable;

        for (i = 0; i < ifm_count; i++)
                ethosu_core_set_size(ifm[i], &inf_req->ifm[i]);

        for (i = 0; i < ofm_count; i++)
                ethosu_core_set_capacity(ofm[i], &inf_req->ofm[i]);

        for (i = 0; i < ETHOSU_CORE_PMU_MAX; i++)
                inf_req->pmu_event_config[i] = pmu_event_config[i];

        if (network != NULL) {
                inf_req->network.type = ETHOSU_CORE_NETWORK_BUFFER;
                ethosu_core_set_size(network, &inf_req->network.buffer);
        } else {
                inf_req->network.type = ETHOSU_CORE_NETWORK_INDEX;
                inf_req->network.index = network_index;
        }

        return ethosu_send_locked(mbox, &rpmsg,
                                  sizeof(rpmsg.header) + sizeof(rpmsg.inf_req));
}

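/**
 * ethosu_mailbox_network_info_request() - Request information about a network
 *
 * The network is either passed as a buffer or referenced by a firmware
 * built-in index.
 *
 * Return: 0 on success, else error code.
 */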
int ethosu_mailbox_network_info_request(struct ethosu_mailbox *mbox,
                                        struct ethosu_mailbox_msg *msg,
                                        struct ethosu_buffer *network,
                                        uint32_t network_index)
{
        struct ethosu_core_rpmsg rpmsg = {
                .header = {
                        .magic = ETHOSU_CORE_MSG_MAGIC,
                        .type = ETHOSU_CORE_MSG_NETWORK_INFO_REQ,
                        .msg_id = msg->id
                }
        };
        struct ethosu_core_msg_network_info_req *info_req = &rpmsg.net_info_req;

        msg->type = rpmsg.header.type;

        if (network != NULL) {
                info_req->network.type = ETHOSU_CORE_NETWORK_BUFFER;
                ethosu_core_set_size(network, &info_req->network.buffer);
        } else {
                info_req->network.type = ETHOSU_CORE_NETWORK_INDEX;
                info_req->network.index = network_index;
        }

        return ethosu_send_locked(mbox, &rpmsg,
                                  sizeof(rpmsg.header) +
                                  sizeof(rpmsg.net_info_req));
}

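/**
 * ethosu_mailbox_cancel_inference() - Request cancellation of an inference
 *
 * Return: 0 on success, else error code.
 */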
int ethosu_mailbox_cancel_inference(struct ethosu_mailbox *mbox,
                                    struct ethosu_mailbox_msg *msg,
                                    int inference_handle)
{
        struct ethosu_core_rpmsg rpmsg = {
                .header = {
                        .magic = ETHOSU_CORE_MSG_MAGIC,
                        .type = ETHOSU_CORE_MSG_CANCEL_INFERENCE_REQ,
                        .msg_id = msg->id
                },
                .cancel_req = {
                        .inference_handle = inference_handle
                }
        };

        msg->type = rpmsg.header.type;

        return ethosu_send_locked(mbox, &rpmsg,
                                  sizeof(rpmsg.header) +
                                  sizeof(rpmsg.cancel_req));
}

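/**
 * ethosu_mailbox_init() - Initialize the mailbox
 *
 * Return: 0 on success, else error code.
 */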
int ethosu_mailbox_init(struct ethosu_mailbox *mbox,
                        struct device *dev,
                        struct rpmsg_endpoint *ept)
{
        mbox->dev = dev;
        mbox->ept = ept;
        idr_init(&mbox->msg_idr);
        init_waitqueue_head(&mbox->send_queue);

        return 0;
}

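/**
 * ethosu_mailbox_deinit() - Shut down the mailbox
 *
 * Marks the mailbox as done and wakes all tasks waiting to send, so pending
 * and future ethosu_send_locked() calls fail with -ENODEV.
 */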
void ethosu_mailbox_deinit(struct ethosu_mailbox *mbox)
{
        atomic_set(&mbox->done, 1);
        wake_up_all(&mbox->send_queue);
}