/*
 * SPDX-FileCopyrightText: Copyright 2020-2024 Arm Limited and/or its affiliates <open-source-office@arm.com>
 * SPDX-License-Identifier: GPL-2.0-only
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 */

/****************************************************************************
 * Includes
 ****************************************************************************/

#include <rpmsg/ethosu_rpmsg_mailbox.h>

#include <common/ethosu_buffer.h>
#include <common/ethosu_device.h>
#include <common/ethosu_dma_mem.h>
#include <rpmsg/ethosu_rpmsg.h>

#include <rpmsg/ethosu_rpmsg_network.h>

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/resource.h>
#include <linux/uio.h>

/****************************************************************************
 * Defines
 ****************************************************************************/

#ifndef fallthrough
#if __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough do {} while (0) /* fallthrough */
#endif
#endif

#define MAILBOX_SEND_TIMEOUT_MS 15000

/****************************************************************************
 * Functions
 ****************************************************************************/

/**
 * ethosu_send_locked() - Blocking mailbox message sender
 *
 * Context: Can sleep and must be called with the device mutex locked.
 *
 * Return: 0 on success, else error code.
 */
static int ethosu_send_locked(struct ethosu_mailbox *mbox,
                              void *data,
                              size_t length)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct device *dev = mbox->dev;
        long timeout = msecs_to_jiffies(MAILBOX_SEND_TIMEOUT_MS);
        bool try_send = !wq_has_sleeper(&mbox->send_queue);
        int ret;

        might_sleep();

        /* Exclusive wait to only wake up one task at a time */
        add_wait_queue_exclusive(&mbox->send_queue, &wait);
        for (;;) {
                /* Stop if the mailbox is closing down */
                if (atomic_read(&mbox->done)) {
                        ret = -ENODEV;
                        break;
                }

                /* Attempt to send if queue is empty or task was woken up */
                if (try_send) {
                        ret = rpmsg_trysend(mbox->ept, data, length);
                        if (ret != -ENOMEM)
                                break;
                } else {
                        try_send = true;
                }

                /* Unlock device mutex while waiting to not block other tasks */
                device_unlock(dev);
                timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
                device_lock(dev);

                /* Stop if the wait was interrupted */
                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                if (!timeout) {
                        ret = -ETIME;
                        break;
                }
        }

        remove_wait_queue(&mbox->send_queue, &wait);

        /*
         * If the message was sent successfully, there may be more TX buffers
         * available so wake up the next waiting task.
         */
        if (!ret && wq_has_sleeper(&mbox->send_queue))
                wake_up(&mbox->send_queue);

        return ret;
}

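/**
 * ethosu_core_buffer_dma_mem_set() - Fill in a core buffer descriptor
 *
 * Copies the DMA address and size of @dma_mem into @cbuf, truncating both
 * values to the 32-bit fields used by the core message protocol.
 */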
static void ethosu_core_buffer_dma_mem_set(struct ethosu_dma_mem *dma_mem,
                                           struct ethosu_core_buffer *cbuf)
{
        cbuf->ptr = (uint32_t)dma_mem->dma_addr;
        cbuf->size = (uint32_t)dma_mem->size;
}

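/**
 * ethosu_mailbox_register() - Register a message with the mailbox
 *
 * Allocates a cyclic message id for @msg so that a response can later be
 * matched to the pending message with ethosu_mailbox_find().
 *
 * Context: Must be called with the device mutex locked.
 *
 * Return: 0 on success, else error code.
 */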
int ethosu_mailbox_register(struct ethosu_mailbox *mbox,
                            struct ethosu_mailbox_msg *msg)
{
        WARN_ON_ONCE(!mutex_is_locked(&mbox->dev->mutex));
        msg->id = idr_alloc_cyclic(&mbox->msg_idr, msg, 0, INT_MAX, GFP_KERNEL);
        if (msg->id < 0)
                return msg->id;

        return 0;
}

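/**
 * ethosu_mailbox_deregister() - Remove a registered message id
 *
 * Context: Must be called with the device mutex locked.
 */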
void ethosu_mailbox_deregister(struct ethosu_mailbox *mbox,
                               struct ethosu_mailbox_msg *msg)
{
        WARN_ON_ONCE(!mutex_is_locked(&mbox->dev->mutex));
        idr_remove(&mbox->msg_idr, msg->id);
}

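/**
 * ethosu_mailbox_find() - Look up a registered message by id and type
 *
 * Context: Must be called with the device mutex locked.
 *
 * Return: Message pointer on success, ERR_PTR(-ENOENT) for an unknown id
 * and ERR_PTR(-EINVAL) for a message type mismatch.
 */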
struct ethosu_mailbox_msg *ethosu_mailbox_find(struct ethosu_mailbox *mbox,
                                               int msg_id,
                                               uint32_t msg_type)
{
        struct ethosu_mailbox_msg *ptr;

        WARN_ON_ONCE(!mutex_is_locked(&mbox->dev->mutex));
        ptr = (struct ethosu_mailbox_msg *)idr_find(&mbox->msg_idr, msg_id);

        if (ptr == NULL)
                return ERR_PTR(-ENOENT);

        if (ptr->type != msg_type)
                return ERR_PTR(-EINVAL);

        return ptr;
}

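/**
 * ethosu_mailbox_fail() - Fail all pending messages
 *
 * Invokes the fail callback of every message still registered with the
 * mailbox.
 *
 * Context: Must be called with the device mutex locked.
 */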
void ethosu_mailbox_fail(struct ethosu_mailbox *mbox)
{
        struct ethosu_mailbox_msg *cur;
        int id;

        WARN_ON_ONCE(!mutex_is_locked(&mbox->dev->mutex));
        idr_for_each_entry(&mbox->msg_idr, cur, id) {
                cur->fail(cur);
        }
}

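/**
 * ethosu_mailbox_ping() - Send a ping message to the firmware
 *
 * Return: 0 on success, else error code.
 */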
int ethosu_mailbox_ping(struct ethosu_mailbox *mbox)
{
        struct ethosu_core_rpmsg rpmsg = {
                .header = {
                        .magic = ETHOSU_CORE_MSG_MAGIC,
                        .type = ETHOSU_CORE_MSG_PING,
                }
        };

        return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
}

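/**
 * ethosu_mailbox_pong() - Send a pong response to the firmware
 *
 * Return: 0 on success, else error code.
 */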
int ethosu_mailbox_pong(struct ethosu_mailbox *mbox)
{
        struct ethosu_core_rpmsg rpmsg = {
                .header = {
                        .magic = ETHOSU_CORE_MSG_MAGIC,
                        .type = ETHOSU_CORE_MSG_PONG,
                }
        };

        return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
}

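/**
 * ethosu_mailbox_version_request() - Request the firmware version
 *
 * Return: 0 on success, else error code.
 */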
int ethosu_mailbox_version_request(struct ethosu_mailbox *mbox,
                                   struct ethosu_mailbox_msg *msg)
{
        struct ethosu_core_rpmsg rpmsg = {
                .header = {
                        .magic = ETHOSU_CORE_MSG_MAGIC,
                        .type = ETHOSU_CORE_MSG_VERSION_REQ,
                        .msg_id = msg->id
                }
        };

        msg->type = rpmsg.header.type;

        return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
}

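/**
 * ethosu_mailbox_capabilities_request() - Request the firmware capabilities
 *
 * Return: 0 on success, else error code.
 */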
int ethosu_mailbox_capabilities_request(struct ethosu_mailbox *mbox,
                                        struct ethosu_mailbox_msg *msg)
{
        struct ethosu_core_rpmsg rpmsg = {
                .header = {
                        .magic = ETHOSU_CORE_MSG_MAGIC,
                        .type = ETHOSU_CORE_MSG_CAPABILITIES_REQ,
                        .msg_id = msg->id
                }
        };

        msg->type = rpmsg.header.type;

        return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
}

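/**
 * ethosu_mailbox_inference() - Send an inference request to the firmware
 *
 * Builds an inference request from the IFM and OFM buffers, the network
 * (given either as a buffer or as a firmware built-in index) and the PMU
 * configuration, and sends it over the mailbox.
 *
 * Return: 0 on success, else error code.
 */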
int ethosu_mailbox_inference(struct ethosu_mailbox *mbox,
                             struct ethosu_mailbox_msg *msg,
                             uint32_t ifm_count,
                             struct ethosu_buffer **ifm,
                             uint32_t ofm_count,
                             struct ethosu_buffer **ofm,
                             struct ethosu_network *network,
                             uint8_t *pmu_event_config,
                             uint8_t pmu_event_config_count,
                             uint8_t pmu_cycle_counter_enable)
{
        struct ethosu_core_rpmsg rpmsg = {
                .header = {
                        .magic = ETHOSU_CORE_MSG_MAGIC,
                        .type = ETHOSU_CORE_MSG_INFERENCE_REQ,
                        .msg_id = msg->id
                }
        };
        struct ethosu_core_msg_inference_req *inf_req = &rpmsg.inf_req;
        uint32_t i;

        msg->type = rpmsg.header.type;

        /* Verify that the UAPI and core have the same number of PMUs */
        if (pmu_event_config_count != ETHOSU_CORE_PMU_MAX) {
                dev_err(mbox->dev, "PMU count misconfigured.\n");

                return -EINVAL;
        }

        inf_req->ifm_count = ifm_count;
        inf_req->ofm_count = ofm_count;
        inf_req->pmu_cycle_counter_enable = pmu_cycle_counter_enable;

        for (i = 0; i < ifm_count; i++)
                ethosu_core_buffer_dma_mem_set(ifm[i]->dma_mem,
                                               &inf_req->ifm[i]);

        for (i = 0; i < ofm_count; i++)
                ethosu_core_buffer_dma_mem_set(ofm[i]->dma_mem,
                                               &inf_req->ofm[i]);

        for (i = 0; i < ETHOSU_CORE_PMU_MAX; i++)
                inf_req->pmu_event_config[i] = pmu_event_config[i];

        if (network->dma_mem != NULL) {
                inf_req->network.type = ETHOSU_CORE_NETWORK_BUFFER;
                ethosu_core_buffer_dma_mem_set(network->dma_mem,
                                               &inf_req->network.buffer);
        } else {
                inf_req->network.type = ETHOSU_CORE_NETWORK_INDEX;
                inf_req->network.index = network->index;
        }

        return ethosu_send_locked(mbox, &rpmsg,
                                  sizeof(rpmsg.header) + sizeof(rpmsg.inf_req));
}

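/**
 * ethosu_mailbox_network_info_request() - Request information about a network
 *
 * Return: 0 on success, else error code.
 */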
int ethosu_mailbox_network_info_request(struct ethosu_mailbox *mbox,
                                        struct ethosu_mailbox_msg *msg,
                                        struct ethosu_network *network)
{
        struct ethosu_core_rpmsg rpmsg = {
                .header = {
                        .magic = ETHOSU_CORE_MSG_MAGIC,
                        .type = ETHOSU_CORE_MSG_NETWORK_INFO_REQ,
                        .msg_id = msg->id
                }
        };
        struct ethosu_core_msg_network_info_req *info_req = &rpmsg.net_info_req;

        msg->type = rpmsg.header.type;

        if (network->dma_mem != NULL) {
                info_req->network.type = ETHOSU_CORE_NETWORK_BUFFER;
                ethosu_core_buffer_dma_mem_set(network->dma_mem,
                                               &info_req->network.buffer);
        } else {
                info_req->network.type = ETHOSU_CORE_NETWORK_INDEX;
                info_req->network.index = network->index;
        }

        return ethosu_send_locked(mbox, &rpmsg,
                                  sizeof(rpmsg.header) +
                                  sizeof(rpmsg.net_info_req));
}

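/**
 * ethosu_mailbox_cancel_inference() - Request cancellation of an inference
 *
 * Return: 0 on success, else error code.
 */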
int ethosu_mailbox_cancel_inference(struct ethosu_mailbox *mbox,
                                    struct ethosu_mailbox_msg *msg,
                                    int inference_handle)
{
        struct ethosu_core_rpmsg rpmsg = {
                .header = {
                        .magic = ETHOSU_CORE_MSG_MAGIC,
                        .type = ETHOSU_CORE_MSG_CANCEL_INFERENCE_REQ,
                        .msg_id = msg->id
                },
                .cancel_req = {
                        .inference_handle = inference_handle
                }
        };

        msg->type = rpmsg.header.type;

        return ethosu_send_locked(mbox, &rpmsg,
                                  sizeof(rpmsg.header) +
                                  sizeof(rpmsg.cancel_req));
}

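/**
 * ethosu_mailbox_init() - Initialize the mailbox instance
 *
 * Return: 0 on success.
 */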
int ethosu_mailbox_init(struct ethosu_mailbox *mbox,
                        struct device *dev,
                        struct rpmsg_endpoint *ept)
{
        mbox->dev = dev;
        mbox->ept = ept;
        idr_init(&mbox->msg_idr);
        init_waitqueue_head(&mbox->send_queue);

        return 0;
}

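/**
 * ethosu_mailbox_deinit() - Shut down the mailbox
 *
 * Marks the mailbox as done and wakes up all tasks waiting to send, which
 * will then fail with -ENODEV.
 */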
void ethosu_mailbox_deinit(struct ethosu_mailbox *mbox)
{
        atomic_set(&mbox->done, 1);
        wake_up_all(&mbox->send_queue);
}