/*
 * SPDX-FileCopyrightText: Copyright 2020-2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
 * SPDX-License-Identifier: GPL-2.0-only
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 */

/****************************************************************************
 * Includes
 ****************************************************************************/

#include "ethosu_mailbox.h"

#include "ethosu_buffer.h"
#include "ethosu_core_rpmsg.h"
#include "ethosu_device.h"
#include "ethosu_dma_mem.h"
#include "ethosu_network.h"

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/resource.h>
#include <linux/uio.h>

/****************************************************************************
 * Defines
 ****************************************************************************/

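/*
 * Compatibility fallback for kernels that do not provide the fallthrough
 * pseudo-keyword: use the compiler attribute when available, otherwise a
 * no-op statement.
 */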
#ifndef fallthrough
#if __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough do {} while (0) /* fallthrough */
#endif
#endif

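/* Time to wait for a free rpmsg TX buffer before failing the send */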
#define MAILBOX_SEND_TIMEOUT_MS 15000

/****************************************************************************
 * Functions
 ****************************************************************************/

/**
 * ethosu_send_locked() - Blocking mailbox message sender
 * @mbox:	Mailbox to send on.
 * @data:	Message data.
 * @length:	Length of message data in bytes.
 *
 * Attempts to send a message over the rpmsg endpoint, sleeping in the send
 * queue until a TX buffer is available, the send times out, a signal is
 * received or the mailbox is shut down.
 *
 * Context: Can sleep and must be called with the device mutex locked.
 *
 * Return: 0 on success, else error code.
 */
static int ethosu_send_locked(struct ethosu_mailbox *mbox,
			      void *data,
			      size_t length)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct device *dev = mbox->dev;
	long timeout = msecs_to_jiffies(MAILBOX_SEND_TIMEOUT_MS);
	bool try_send = !wq_has_sleeper(&mbox->send_queue);
	int ret;

	might_sleep();

	/* Exclusive wait to only wake up one task at a time */
	add_wait_queue_exclusive(&mbox->send_queue, &wait);
	for (;;) {
		/* Stop if the mailbox is closing down */
		if (atomic_read(&mbox->done)) {
			ret = -ENODEV;
			break;
		}

		/* Attempt to send if queue is empty or task was woken up */
		if (try_send) {
			ret = rpmsg_trysend(mbox->ept, data, length);
			if (ret != -ENOMEM)
				break;
		} else {
			try_send = true;
		}

		/* Unlock device mutex while waiting to not block other tasks */
		device_unlock(dev);
		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
		device_lock(dev);

		/* Stop if the wait was interrupted */
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (!timeout) {
			ret = -ETIME;
			break;
		}
	}

	remove_wait_queue(&mbox->send_queue, &wait);

	/*
	 * If the message was sent successfully, there may be more TX buffers
	 * available so wake up the next waiting task.
	 */
	if (!ret && wq_has_sleeper(&mbox->send_queue))
		wake_up(&mbox->send_queue);

	return ret;
}

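/**
 * ethosu_core_buffer_dma_mem_set() - Fill in a core buffer descriptor
 * @dma_mem:	DMA allocation to describe.
 * @cbuf:	Core buffer descriptor passed to the firmware.
 */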
static void ethosu_core_buffer_dma_mem_set(struct ethosu_dma_mem *dma_mem,
					   struct ethosu_core_buffer *cbuf)
{
	cbuf->ptr = (uint32_t)dma_mem->dma_addr;
	cbuf->size = (uint32_t)dma_mem->size;
}

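/**
 * ethosu_mailbox_register() - Register a message with the mailbox
 *
 * Allocates a cyclic message id for @msg so that a later response from the
 * firmware can be matched to the request with ethosu_mailbox_find().
 *
 * Context: Must be called with the device mutex locked.
 *
 * Return: 0 on success, else error code.
 */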
int ethosu_mailbox_register(struct ethosu_mailbox *mbox,
			    struct ethosu_mailbox_msg *msg)
{
	WARN_ON_ONCE(!mutex_is_locked(&mbox->dev->mutex));
	msg->id = idr_alloc_cyclic(&mbox->msg_idr, msg, 0, INT_MAX, GFP_KERNEL);
	if (msg->id < 0)
		return msg->id;

	return 0;
}

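/**
 * ethosu_mailbox_deregister() - Deregister a message and release its id
 *
 * Context: Must be called with the device mutex locked.
 */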
void ethosu_mailbox_deregister(struct ethosu_mailbox *mbox,
			       struct ethosu_mailbox_msg *msg)
{
	WARN_ON_ONCE(!mutex_is_locked(&mbox->dev->mutex));
	idr_remove(&mbox->msg_idr, msg->id);
}

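/**
 * ethosu_mailbox_find() - Find a registered message by id and type
 *
 * Context: Must be called with the device mutex locked.
 *
 * Return: Message on success, ERR_PTR(-ENOENT) if the id is unknown and
 * ERR_PTR(-EINVAL) if the message type does not match.
 */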
struct ethosu_mailbox_msg *ethosu_mailbox_find(struct ethosu_mailbox *mbox,
					       int msg_id,
					       uint32_t msg_type)
{
	struct ethosu_mailbox_msg *ptr;

	WARN_ON_ONCE(!mutex_is_locked(&mbox->dev->mutex));
	ptr = (struct ethosu_mailbox_msg *)idr_find(&mbox->msg_idr, msg_id);

	if (ptr == NULL)
		return ERR_PTR(-ENOENT);

	if (ptr->type != msg_type)
		return ERR_PTR(-EINVAL);

	return ptr;
}

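/**
 * ethosu_mailbox_fail() - Fail all registered messages
 *
 * Calls the fail callback of every message still waiting for a response,
 * for example when the remote side has stopped responding.
 *
 * Context: Must be called with the device mutex locked.
 */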
void ethosu_mailbox_fail(struct ethosu_mailbox *mbox)
{
	struct ethosu_mailbox_msg *cur;
	int id;

	WARN_ON_ONCE(!mutex_is_locked(&mbox->dev->mutex));
	idr_for_each_entry(&mbox->msg_idr, cur, id) {
		cur->fail(cur);
	}
}

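/**
 * ethosu_mailbox_ping() - Send a header-only ping message to the firmware
 *
 * Illustrative usage sketch (not taken from a caller in this file); senders
 * are expected to hold the device mutex:
 *
 *	device_lock(mbox->dev);
 *	ret = ethosu_mailbox_ping(mbox);
 *	device_unlock(mbox->dev);
 *
 * Context: Can sleep and must be called with the device mutex locked.
 *
 * Return: 0 on success, else error code.
 */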
int ethosu_mailbox_ping(struct ethosu_mailbox *mbox)
{
	struct ethosu_core_rpmsg rpmsg = {
		.header = {
			.magic = ETHOSU_CORE_MSG_MAGIC,
			.type = ETHOSU_CORE_MSG_PING,
		}
	};

	return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
}

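/**
 * ethosu_mailbox_pong() - Send a pong (ping response) message to the firmware
 *
 * Context: Can sleep and must be called with the device mutex locked.
 *
 * Return: 0 on success, else error code.
 */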
int ethosu_mailbox_pong(struct ethosu_mailbox *mbox)
{
	struct ethosu_core_rpmsg rpmsg = {
		.header = {
			.magic = ETHOSU_CORE_MSG_MAGIC,
			.type = ETHOSU_CORE_MSG_PONG,
		}
	};

	return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
}

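/**
 * ethosu_mailbox_version_request() - Request the firmware version information
 *
 * Records the request type in @msg so the response can later be matched and
 * validated with ethosu_mailbox_find().
 *
 * Context: Can sleep and must be called with the device mutex locked.
 *
 * Return: 0 on success, else error code.
 */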
int ethosu_mailbox_version_request(struct ethosu_mailbox *mbox,
				   struct ethosu_mailbox_msg *msg)
{
	struct ethosu_core_rpmsg rpmsg = {
		.header = {
			.magic = ETHOSU_CORE_MSG_MAGIC,
			.type = ETHOSU_CORE_MSG_VERSION_REQ,
			.msg_id = msg->id
		}
	};

	msg->type = rpmsg.header.type;

	return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
}

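/**
 * ethosu_mailbox_capabilities_request() - Request the firmware capabilities
 *
 * Context: Can sleep and must be called with the device mutex locked.
 *
 * Return: 0 on success, else error code.
 */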
int ethosu_mailbox_capabilities_request(struct ethosu_mailbox *mbox,
					struct ethosu_mailbox_msg *msg)
{
	struct ethosu_core_rpmsg rpmsg = {
		.header = {
			.magic = ETHOSU_CORE_MSG_MAGIC,
			.type = ETHOSU_CORE_MSG_CAPABILITIES_REQ,
			.msg_id = msg->id
		}
	};

	msg->type = rpmsg.header.type;

	return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
}

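/**
 * ethosu_mailbox_inference() - Send an inference request to the firmware
 * @mbox:	Mailbox to send on.
 * @msg:	Registered message used to match the response.
 * @ifm_count:	Number of input feature map buffers.
 * @ifm:	Input feature map buffers.
 * @ofm_count:	Number of output feature map buffers.
 * @ofm:	Output feature map buffers.
 * @network:	Network buffer, or NULL to run a network referenced by index.
 * @network_index:	Index of a network known to the firmware, used when
 *			@network is NULL.
 * @pmu_event_config:	PMU event configuration array.
 * @pmu_event_config_count:	Number of entries in @pmu_event_config, must
 *				equal ETHOSU_CORE_PMU_MAX.
 * @pmu_cycle_counter_enable:	Nonzero to enable the PMU cycle counter.
 *
 * Context: Can sleep and must be called with the device mutex locked.
 *
 * Return: 0 on success, else error code.
 */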
int ethosu_mailbox_inference(struct ethosu_mailbox *mbox,
			     struct ethosu_mailbox_msg *msg,
			     uint32_t ifm_count,
			     struct ethosu_buffer **ifm,
			     uint32_t ofm_count,
			     struct ethosu_buffer **ofm,
			     struct ethosu_buffer *network,
			     uint32_t network_index,
			     uint8_t *pmu_event_config,
			     uint8_t pmu_event_config_count,
			     uint8_t pmu_cycle_counter_enable)
{
	struct ethosu_core_rpmsg rpmsg = {
		.header = {
			.magic = ETHOSU_CORE_MSG_MAGIC,
			.type = ETHOSU_CORE_MSG_INFERENCE_REQ,
			.msg_id = msg->id
		}
	};
	struct ethosu_core_msg_inference_req *inf_req = &rpmsg.inf_req;
	uint32_t i;

	msg->type = rpmsg.header.type;

	/* Verify that the uapi and core have the same number of PMUs */
	if (pmu_event_config_count != ETHOSU_CORE_PMU_MAX) {
		dev_err(mbox->dev, "PMU count misconfigured.\n");

		return -EINVAL;
	}

	inf_req->ifm_count = ifm_count;
	inf_req->ofm_count = ofm_count;
	inf_req->pmu_cycle_counter_enable = pmu_cycle_counter_enable;

	for (i = 0; i < ifm_count; i++)
		ethosu_core_buffer_dma_mem_set(ifm[i]->dma_mem,
					       &inf_req->ifm[i]);

	for (i = 0; i < ofm_count; i++)
		ethosu_core_buffer_dma_mem_set(ofm[i]->dma_mem,
					       &inf_req->ofm[i]);

	for (i = 0; i < ETHOSU_CORE_PMU_MAX; i++)
		inf_req->pmu_event_config[i] = pmu_event_config[i];

	if (network != NULL) {
		inf_req->network.type = ETHOSU_CORE_NETWORK_BUFFER;
		ethosu_core_buffer_dma_mem_set(network->dma_mem,
					       &inf_req->network.buffer);
	} else {
		inf_req->network.type = ETHOSU_CORE_NETWORK_INDEX;
		inf_req->network.index = network_index;
	}

	return ethosu_send_locked(mbox, &rpmsg,
				  sizeof(rpmsg.header) + sizeof(rpmsg.inf_req));
}

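/**
 * ethosu_mailbox_network_info_request() - Request information about a network
 *
 * The network is identified either by a network buffer or, when @network is
 * NULL, by an index known to the firmware.
 *
 * Context: Can sleep and must be called with the device mutex locked.
 *
 * Return: 0 on success, else error code.
 */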
int ethosu_mailbox_network_info_request(struct ethosu_mailbox *mbox,
					struct ethosu_mailbox_msg *msg,
					struct ethosu_buffer *network,
					uint32_t network_index)
{
	struct ethosu_core_rpmsg rpmsg = {
		.header = {
			.magic = ETHOSU_CORE_MSG_MAGIC,
			.type = ETHOSU_CORE_MSG_NETWORK_INFO_REQ,
			.msg_id = msg->id
		}
	};
	struct ethosu_core_msg_network_info_req *info_req = &rpmsg.net_info_req;

	msg->type = rpmsg.header.type;

	if (network != NULL) {
		info_req->network.type = ETHOSU_CORE_NETWORK_BUFFER;
		ethosu_core_buffer_dma_mem_set(network->dma_mem,
					       &info_req->network.buffer);
	} else {
		info_req->network.type = ETHOSU_CORE_NETWORK_INDEX;
		info_req->network.index = network_index;
	}

	return ethosu_send_locked(mbox, &rpmsg,
				  sizeof(rpmsg.header) +
				  sizeof(rpmsg.net_info_req));
}

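/**
 * ethosu_mailbox_cancel_inference() - Request cancellation of an inference
 *
 * Context: Can sleep and must be called with the device mutex locked.
 *
 * Return: 0 on success, else error code.
 */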
int ethosu_mailbox_cancel_inference(struct ethosu_mailbox *mbox,
				    struct ethosu_mailbox_msg *msg,
				    int inference_handle)
{
	struct ethosu_core_rpmsg rpmsg = {
		.header = {
			.magic = ETHOSU_CORE_MSG_MAGIC,
			.type = ETHOSU_CORE_MSG_CANCEL_INFERENCE_REQ,
			.msg_id = msg->id
		},
		.cancel_req = {
			.inference_handle = inference_handle
		}
	};

	msg->type = rpmsg.header.type;

	return ethosu_send_locked(mbox, &rpmsg,
				  sizeof(rpmsg.header) +
				  sizeof(rpmsg.cancel_req));
}

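/**
 * ethosu_mailbox_init() - Initialize a mailbox instance
 *
 * Sets up the message idr and the send queue used to throttle senders when
 * no rpmsg TX buffers are available.
 *
 * Return: 0 on success, else error code.
 */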
int ethosu_mailbox_init(struct ethosu_mailbox *mbox,
			struct device *dev,
			struct rpmsg_endpoint *ept)
{
	mbox->dev = dev;
	mbox->ept = ept;
	idr_init(&mbox->msg_idr);
	init_waitqueue_head(&mbox->send_queue);

	return 0;
}

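/**
 * ethosu_mailbox_deinit() - Shut down the mailbox
 *
 * Marks the mailbox as done and wakes every task sleeping in
 * ethosu_send_locked() so it can return -ENODEV.
 */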
void ethosu_mailbox_deinit(struct ethosu_mailbox *mbox)
{
	atomic_set(&mbox->done, 1);
	wake_up_all(&mbox->send_queue);
}