blob: e499860a70f4d3ae2cbfe757f3b90964372b0183 [file] [log] [blame]
Kristofer Jonsson116a6352020-08-20 17:25:23 +02001/*
Mikael Olsson07545152023-10-17 13:05:38 +02002 * SPDX-FileCopyrightText: Copyright 2020-2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
3 * SPDX-License-Identifier: GPL-2.0-only
Kristofer Jonsson116a6352020-08-20 17:25:23 +02004 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, you can access it online at
17 * http://www.gnu.org/licenses/gpl-2.0.html.
Kristofer Jonsson116a6352020-08-20 17:25:23 +020018 */
19
20/****************************************************************************
21 * Includes
22 ****************************************************************************/
23
24#include "ethosu_mailbox.h"
25
26#include "ethosu_buffer.h"
Kristofer Jonssond779a082023-01-04 17:09:47 +010027#include "ethosu_core_rpmsg.h"
Kristofer Jonsson116a6352020-08-20 17:25:23 +020028#include "ethosu_device.h"
29
Mikael Olsson6fb238f2023-08-23 11:02:47 +020030#include <linux/atomic.h>
Kristofer Jonsson442fefb2022-03-17 17:15:52 +010031#include <linux/jiffies.h>
Kristofer Jonsson116a6352020-08-20 17:25:23 +020032#include <linux/resource.h>
33#include <linux/uio.h>
Mikael Olsson529cfad2023-06-14 17:14:14 +020034#include <linux/bug.h>
Kristofer Jonsson116a6352020-08-20 17:25:23 +020035
/****************************************************************************
 * Defines
 ****************************************************************************/
39
40#ifndef fallthrough
41#if __has_attribute(__fallthrough__)
42#define fallthrough __attribute__((__fallthrough__))
43#else
44#define fallthrough do {} while (0) /* fallthrough */
45#endif
46#endif
47
48/****************************************************************************
Mikael Olsson6fb238f2023-08-23 11:02:47 +020049 * Defines
50 ****************************************************************************/
51
52#define MAILBOX_SEND_TIMEOUT_MS 15000
53
54/****************************************************************************
Kristofer Jonsson116a6352020-08-20 17:25:23 +020055 * Functions
56 ****************************************************************************/
57
Mikael Olsson6fb238f2023-08-23 11:02:47 +020058/**
59 * ethosu_send_locked() - Blocking mailbox message sender
60 *
61 * Context: Can sleep and must be called with the device mutex locked.
62 *
63 * Return: 0 on success, else error code.
64 */
65static int ethosu_send_locked(struct ethosu_mailbox *mbox,
66 void *data,
67 size_t length)
68{
69 DEFINE_WAIT_FUNC(wait, woken_wake_function);
70 struct device *dev = mbox->dev;
71 long timeout = msecs_to_jiffies(MAILBOX_SEND_TIMEOUT_MS);
72 bool try_send = !wq_has_sleeper(&mbox->send_queue);
73 int ret;
74
75 might_sleep();
76
77 /* Exclusive wait to only wake up one task at a time */
78 add_wait_queue_exclusive(&mbox->send_queue, &wait);
79 for (;;) {
80 /* Stop if the mailbox is closing down */
81 if (atomic_read(&mbox->done)) {
82 ret = -ENODEV;
83 break;
84 }
85
86 /* Attempt to send if queue is empty or task was woken up */
87 if (try_send) {
88 ret = rpmsg_trysend(mbox->ept, data, length);
89 if (ret != -ENOMEM)
90 break;
91 } else {
92 try_send = true;
93 }
94
95 /* Unlock device mutex while waiting to not block other tasks */
96 device_unlock(dev);
97 timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
98 device_lock(dev);
99
100 /* Stop if the wait was interrupted */
101 if (signal_pending(current)) {
102 ret = -EINTR;
103 break;
104 }
105
106 if (!timeout) {
107 ret = -ETIME;
108 break;
109 }
110 }
111
112 remove_wait_queue(&mbox->send_queue, &wait);
113
114 /*
115 * If the message was sent successfully, there may be more TX buffers
116 * available so wake up the next waiting task.
117 */
118 if (!ret && wq_has_sleeper(&mbox->send_queue))
119 wake_up(&mbox->send_queue);
120
121 return ret;
122}
123
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200124static void ethosu_core_set_size(struct ethosu_buffer *buf,
125 struct ethosu_core_buffer *cbuf)
126{
Mikael Olsson07545152023-10-17 13:05:38 +0200127 cbuf->ptr = (uint32_t)buf->dma_addr;
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200128 cbuf->size = (uint32_t)buf->size;
129}
130
Davide Grohmann32660f92022-04-27 16:49:07 +0200131int ethosu_mailbox_register(struct ethosu_mailbox *mbox,
132 struct ethosu_mailbox_msg *msg)
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100133{
Mikael Olsson529cfad2023-06-14 17:14:14 +0200134 WARN_ON_ONCE(!mutex_is_locked(&mbox->dev->mutex));
Davide Grohmann32660f92022-04-27 16:49:07 +0200135 msg->id = idr_alloc_cyclic(&mbox->msg_idr, msg, 0, INT_MAX, GFP_KERNEL);
136 if (msg->id < 0)
137 return msg->id;
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100138
Davide Grohmann32660f92022-04-27 16:49:07 +0200139 return 0;
140}
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100141
Davide Grohmann32660f92022-04-27 16:49:07 +0200142void ethosu_mailbox_deregister(struct ethosu_mailbox *mbox,
143 struct ethosu_mailbox_msg *msg)
144{
Mikael Olsson529cfad2023-06-14 17:14:14 +0200145 WARN_ON_ONCE(!mutex_is_locked(&mbox->dev->mutex));
Davide Grohmann32660f92022-04-27 16:49:07 +0200146 idr_remove(&mbox->msg_idr, msg->id);
147}
148
149struct ethosu_mailbox_msg *ethosu_mailbox_find(struct ethosu_mailbox *mbox,
Mikael Olsson09965b02023-06-13 12:17:04 +0200150 int msg_id,
151 uint32_t msg_type)
Davide Grohmann32660f92022-04-27 16:49:07 +0200152{
Mikael Olsson529cfad2023-06-14 17:14:14 +0200153 struct ethosu_mailbox_msg *ptr;
154
155 WARN_ON_ONCE(!mutex_is_locked(&mbox->dev->mutex));
156 ptr = (struct ethosu_mailbox_msg *)idr_find(&mbox->msg_idr, msg_id);
Davide Grohmann32660f92022-04-27 16:49:07 +0200157
158 if (ptr == NULL)
Mikael Olsson09965b02023-06-13 12:17:04 +0200159 return ERR_PTR(-ENOENT);
160
161 if (ptr->type != msg_type)
Davide Grohmann32660f92022-04-27 16:49:07 +0200162 return ERR_PTR(-EINVAL);
163
164 return ptr;
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100165}
166
167void ethosu_mailbox_fail(struct ethosu_mailbox *mbox)
168{
Davide Grohmann32660f92022-04-27 16:49:07 +0200169 struct ethosu_mailbox_msg *cur;
170 int id;
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100171
Mikael Olsson529cfad2023-06-14 17:14:14 +0200172 WARN_ON_ONCE(!mutex_is_locked(&mbox->dev->mutex));
Davide Grohmann32660f92022-04-27 16:49:07 +0200173 idr_for_each_entry(&mbox->msg_idr, cur, id) {
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100174 cur->fail(cur);
175 }
176}
177
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200178int ethosu_mailbox_ping(struct ethosu_mailbox *mbox)
179{
Kristofer Jonssond779a082023-01-04 17:09:47 +0100180 struct ethosu_core_rpmsg rpmsg = {
181 .header = {
182 .magic = ETHOSU_CORE_MSG_MAGIC,
183 .type = ETHOSU_CORE_MSG_PING,
184 }
185 };
186
Mikael Olsson6fb238f2023-08-23 11:02:47 +0200187 return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200188}
189
Jonny Svärd7c24c772021-01-14 19:53:17 +0100190int ethosu_mailbox_pong(struct ethosu_mailbox *mbox)
191{
Kristofer Jonssond779a082023-01-04 17:09:47 +0100192 struct ethosu_core_rpmsg rpmsg = {
193 .header = {
194 .magic = ETHOSU_CORE_MSG_MAGIC,
195 .type = ETHOSU_CORE_MSG_PONG,
196 }
197 };
198
Mikael Olsson6fb238f2023-08-23 11:02:47 +0200199 return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
Jonny Svärd7c24c772021-01-14 19:53:17 +0100200}
201
Mikael Olsson7c843dc2023-08-03 12:41:48 +0200202int ethosu_mailbox_version_request(struct ethosu_mailbox *mbox,
203 struct ethosu_mailbox_msg *msg)
Jonny Svärd7c24c772021-01-14 19:53:17 +0100204{
Kristofer Jonssond779a082023-01-04 17:09:47 +0100205 struct ethosu_core_rpmsg rpmsg = {
Mikael Olsson7c843dc2023-08-03 12:41:48 +0200206 .header = {
207 .magic = ETHOSU_CORE_MSG_MAGIC,
208 .type = ETHOSU_CORE_MSG_VERSION_REQ,
209 .msg_id = msg->id
Kristofer Jonssond779a082023-01-04 17:09:47 +0100210 }
211 };
212
Mikael Olsson7c843dc2023-08-03 12:41:48 +0200213 msg->type = rpmsg.header.type;
214
Mikael Olsson6fb238f2023-08-23 11:02:47 +0200215 return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
Jonny Svärd7c24c772021-01-14 19:53:17 +0100216}
217
Davide Grohmann35ce6c82021-06-01 15:03:51 +0200218int ethosu_mailbox_capabilities_request(struct ethosu_mailbox *mbox,
Davide Grohmann32660f92022-04-27 16:49:07 +0200219 struct ethosu_mailbox_msg *msg)
Davide Grohmann35ce6c82021-06-01 15:03:51 +0200220{
Kristofer Jonssond779a082023-01-04 17:09:47 +0100221 struct ethosu_core_rpmsg rpmsg = {
222 .header = {
223 .magic = ETHOSU_CORE_MSG_MAGIC,
224 .type = ETHOSU_CORE_MSG_CAPABILITIES_REQ,
225 .msg_id = msg->id
226 }
Davide Grohmann35ce6c82021-06-01 15:03:51 +0200227 };
228
Mikael Olsson09965b02023-06-13 12:17:04 +0200229 msg->type = rpmsg.header.type;
230
Mikael Olsson6fb238f2023-08-23 11:02:47 +0200231 return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
Davide Grohmann35ce6c82021-06-01 15:03:51 +0200232}
233
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200234int ethosu_mailbox_inference(struct ethosu_mailbox *mbox,
Davide Grohmann32660f92022-04-27 16:49:07 +0200235 struct ethosu_mailbox_msg *msg,
Kristofer Jonssonb74492c2020-09-10 13:26:01 +0200236 uint32_t ifm_count,
237 struct ethosu_buffer **ifm,
238 uint32_t ofm_count,
239 struct ethosu_buffer **ofm,
Per Åstrandf7e407a2020-10-23 21:25:05 +0200240 struct ethosu_buffer *network,
Kristofer Jonsson35de9e62022-03-08 13:25:45 +0100241 uint32_t network_index,
Per Åstrandf7e407a2020-10-23 21:25:05 +0200242 uint8_t *pmu_event_config,
243 uint8_t pmu_event_config_count,
244 uint8_t pmu_cycle_counter_enable)
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200245{
Kristofer Jonssond779a082023-01-04 17:09:47 +0100246 struct ethosu_core_rpmsg rpmsg = {
247 .header = {
248 .magic = ETHOSU_CORE_MSG_MAGIC,
249 .type = ETHOSU_CORE_MSG_INFERENCE_REQ,
250 .msg_id = msg->id
251 }
252 };
253 struct ethosu_core_msg_inference_req *inf_req = &rpmsg.inf_req;
Kristofer Jonssonb74492c2020-09-10 13:26:01 +0200254 uint32_t i;
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200255
Mikael Olsson09965b02023-06-13 12:17:04 +0200256 msg->type = rpmsg.header.type;
257
Per Åstrandf7e407a2020-10-23 21:25:05 +0200258 /* Verify that the uapi and core has the same number of pmus */
259 if (pmu_event_config_count != ETHOSU_CORE_PMU_MAX) {
Kristofer Jonssond779a082023-01-04 17:09:47 +0100260 dev_err(mbox->dev, "PMU count misconfigured.");
Per Åstrandf7e407a2020-10-23 21:25:05 +0200261
262 return -EINVAL;
263 }
264
Kristofer Jonssond779a082023-01-04 17:09:47 +0100265 inf_req->ifm_count = ifm_count;
266 inf_req->ofm_count = ofm_count;
267 inf_req->pmu_cycle_counter_enable = pmu_cycle_counter_enable;
Kristofer Jonssonb74492c2020-09-10 13:26:01 +0200268
269 for (i = 0; i < ifm_count; i++)
Kristofer Jonssond779a082023-01-04 17:09:47 +0100270 ethosu_core_set_size(ifm[i], &inf_req->ifm[i]);
Kristofer Jonssonb74492c2020-09-10 13:26:01 +0200271
272 for (i = 0; i < ofm_count; i++)
Mikael Olsson07545152023-10-17 13:05:38 +0200273 ethosu_core_set_size(ofm[i], &inf_req->ofm[i]);
Kristofer Jonssonb74492c2020-09-10 13:26:01 +0200274
Per Åstrandf7e407a2020-10-23 21:25:05 +0200275 for (i = 0; i < ETHOSU_CORE_PMU_MAX; i++)
Kristofer Jonssond779a082023-01-04 17:09:47 +0100276 inf_req->pmu_event_config[i] = pmu_event_config[i];
Per Åstrandf7e407a2020-10-23 21:25:05 +0200277
Kristofer Jonsson35de9e62022-03-08 13:25:45 +0100278 if (network != NULL) {
Kristofer Jonssond779a082023-01-04 17:09:47 +0100279 inf_req->network.type = ETHOSU_CORE_NETWORK_BUFFER;
280 ethosu_core_set_size(network, &inf_req->network.buffer);
Kristofer Jonsson35de9e62022-03-08 13:25:45 +0100281 } else {
Kristofer Jonssond779a082023-01-04 17:09:47 +0100282 inf_req->network.type = ETHOSU_CORE_NETWORK_INDEX;
283 inf_req->network.index = network_index;
Kristofer Jonsson35de9e62022-03-08 13:25:45 +0100284 }
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200285
Mikael Olsson6fb238f2023-08-23 11:02:47 +0200286 return ethosu_send_locked(mbox, &rpmsg,
287 sizeof(rpmsg.header) + sizeof(rpmsg.inf_req));
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200288}
289
Kristofer Jonsson3c6a2602022-03-10 11:17:29 +0100290int ethosu_mailbox_network_info_request(struct ethosu_mailbox *mbox,
Davide Grohmann32660f92022-04-27 16:49:07 +0200291 struct ethosu_mailbox_msg *msg,
Kristofer Jonsson3c6a2602022-03-10 11:17:29 +0100292 struct ethosu_buffer *network,
293 uint32_t network_index)
294{
Kristofer Jonssond779a082023-01-04 17:09:47 +0100295 struct ethosu_core_rpmsg rpmsg = {
296 .header = {
297 .magic = ETHOSU_CORE_MSG_MAGIC,
298 .type = ETHOSU_CORE_MSG_NETWORK_INFO_REQ,
299 .msg_id = msg->id
300 }
301 };
302 struct ethosu_core_msg_network_info_req *info_req = &rpmsg.net_info_req;
Kristofer Jonsson3c6a2602022-03-10 11:17:29 +0100303
Mikael Olsson09965b02023-06-13 12:17:04 +0200304 msg->type = rpmsg.header.type;
305
Kristofer Jonsson3c6a2602022-03-10 11:17:29 +0100306 if (network != NULL) {
Kristofer Jonssond779a082023-01-04 17:09:47 +0100307 info_req->network.type = ETHOSU_CORE_NETWORK_BUFFER;
308 ethosu_core_set_size(network, &info_req->network.buffer);
Kristofer Jonsson3c6a2602022-03-10 11:17:29 +0100309 } else {
Kristofer Jonssond779a082023-01-04 17:09:47 +0100310 info_req->network.type = ETHOSU_CORE_NETWORK_INDEX;
311 info_req->network.index = network_index;
Kristofer Jonsson3c6a2602022-03-10 11:17:29 +0100312 }
313
Mikael Olsson6fb238f2023-08-23 11:02:47 +0200314 return ethosu_send_locked(mbox, &rpmsg,
315 sizeof(rpmsg.header) +
316 sizeof(rpmsg.net_info_req));
Kristofer Jonsson3c6a2602022-03-10 11:17:29 +0100317}
318
Davide Grohmann7e8f5082022-03-23 12:48:45 +0100319int ethosu_mailbox_cancel_inference(struct ethosu_mailbox *mbox,
Davide Grohmann32660f92022-04-27 16:49:07 +0200320 struct ethosu_mailbox_msg *msg,
321 int inference_handle)
Davide Grohmann7e8f5082022-03-23 12:48:45 +0100322{
Kristofer Jonssond779a082023-01-04 17:09:47 +0100323 struct ethosu_core_rpmsg rpmsg = {
324 .header = {
325 .magic = ETHOSU_CORE_MSG_MAGIC,
326 .type =
327 ETHOSU_CORE_MSG_CANCEL_INFERENCE_REQ,
328 .msg_id = msg->id
329 },
330 .cancel_req = {
331 .inference_handle = inference_handle
332 }
333 };
Davide Grohmann7e8f5082022-03-23 12:48:45 +0100334
Mikael Olsson09965b02023-06-13 12:17:04 +0200335 msg->type = rpmsg.header.type;
336
Mikael Olsson6fb238f2023-08-23 11:02:47 +0200337 return ethosu_send_locked(mbox, &rpmsg,
338 sizeof(rpmsg.header) +
339 sizeof(rpmsg.cancel_req));
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200340}
341
342int ethosu_mailbox_init(struct ethosu_mailbox *mbox,
343 struct device *dev,
Kristofer Jonssond779a082023-01-04 17:09:47 +0100344 struct rpmsg_endpoint *ept)
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200345{
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200346 mbox->dev = dev;
Kristofer Jonssond779a082023-01-04 17:09:47 +0100347 mbox->ept = ept;
Davide Grohmann32660f92022-04-27 16:49:07 +0200348 idr_init(&mbox->msg_idr);
Mikael Olsson6fb238f2023-08-23 11:02:47 +0200349 init_waitqueue_head(&mbox->send_queue);
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200350
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200351 return 0;
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200352}
353
354void ethosu_mailbox_deinit(struct ethosu_mailbox *mbox)
Mikael Olsson6fb238f2023-08-23 11:02:47 +0200355{
356 atomic_set(&mbox->done, 1);
357 wake_up_all(&mbox->send_queue);
358}