blob: 014ceb85bf0bbce50033ee131199a79dcb5e0ef6 [file] [log] [blame]
Kristofer Jonsson116a6352020-08-20 17:25:23 +02001/*
Kristofer Jonsson35de9e62022-03-08 13:25:45 +01002 * Copyright (c) 2020-2022 Arm Limited.
Kristofer Jonsson116a6352020-08-20 17:25:23 +02003 *
4 * This program is free software and is provided to you under the terms of the
5 * GNU General Public License version 2 as published by the Free Software
6 * Foundation, and any use by you of this program is subject to the terms
7 * of such GNU licence.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * SPDX-License-Identifier: GPL-2.0-only
19 */
20
21/****************************************************************************
22 * Includes
23 ****************************************************************************/
24
25#include "ethosu_mailbox.h"
26
27#include "ethosu_buffer.h"
28#include "ethosu_core_interface.h"
29#include "ethosu_device.h"
Kristofer Jonsson442fefb2022-03-17 17:15:52 +010030#include "ethosu_watchdog.h"
Kristofer Jonsson116a6352020-08-20 17:25:23 +020031
Kristofer Jonsson442fefb2022-03-17 17:15:52 +010032#include <linux/jiffies.h>
Kristofer Jonsson116a6352020-08-20 17:25:23 +020033#include <linux/resource.h>
34#include <linux/uio.h>
35
36/****************************************************************************
 * Defines
38 ****************************************************************************/
39
/* Provide the 'fallthrough' pseudo keyword for kernels/compilers that do
 * not define it. Uses the compiler attribute when available, otherwise an
 * empty statement so that annotated switch cases still compile.
 */
#ifndef fallthrough
#if __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough do {} while (0) /* fallthrough */
#endif
#endif
47
48/****************************************************************************
Kristofer Jonsson116a6352020-08-20 17:25:23 +020049 * Functions
50 ****************************************************************************/
51
Kristofer Jonssonf5b98c92022-03-14 16:09:12 +010052static void ethosu_wd_inc(struct ethosu_mailbox *mbox,
53 enum ethosu_core_msg_type type)
54{
55 switch (type) {
56 case ETHOSU_CORE_MSG_PING:
Kristofer Jonsson442fefb2022-03-17 17:15:52 +010057 mbox->ping_count++;
58 fallthrough;
Kristofer Jonssonf5b98c92022-03-14 16:09:12 +010059 case ETHOSU_CORE_MSG_INFERENCE_REQ:
Kristofer Jonssonf5b98c92022-03-14 16:09:12 +010060 ethosu_watchdog_inc(mbox->wdog);
61 break;
62 default:
63 break;
64 }
65}
66
67static void ethosu_wd_dec(struct ethosu_mailbox *mbox,
68 enum ethosu_core_msg_type type)
69{
70 switch (type) {
71 case ETHOSU_CORE_MSG_PONG:
Kristofer Jonsson442fefb2022-03-17 17:15:52 +010072 mbox->ping_count--;
73 fallthrough;
Kristofer Jonssonf5b98c92022-03-14 16:09:12 +010074 case ETHOSU_CORE_MSG_INFERENCE_RSP:
Kristofer Jonssonf5b98c92022-03-14 16:09:12 +010075 ethosu_watchdog_dec(mbox->wdog);
76 break;
77 default:
78 break;
79 }
80}
81
Kristofer Jonsson116a6352020-08-20 17:25:23 +020082static void ethosu_core_set_size(struct ethosu_buffer *buf,
83 struct ethosu_core_buffer *cbuf)
84{
85 cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset;
86 cbuf->size = (uint32_t)buf->size;
87}
88
89static void ethosu_core_set_capacity(struct ethosu_buffer *buf,
90 struct ethosu_core_buffer *cbuf)
91{
92 cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset + buf->size;
93 cbuf->size = (uint32_t)buf->capacity - buf->offset - buf->size;
94}
95
96static size_t ethosu_queue_available(struct ethosu_core_queue *queue)
97{
98 size_t size = queue->header.write - queue->header.read;
99
100 if (queue->header.read > queue->header.write)
101 size += queue->header.size;
102
103 return size;
104}
105
/* Return the number of bytes that can be written to the queue without
 * overwriting data the firmware has not yet read.
 */
static size_t ethosu_queue_capacity(struct ethosu_core_queue *queue)
{
	return queue->header.size - ethosu_queue_available(queue);
}
110
/* Copy the scatter-gather list 'vec' ('length' entries) into the
 * firmware's input queue and kick the tx mailbox channel.
 *
 * Returns 0 on success, -EINVAL if the message does not fit in the free
 * part of the queue, or a negative error from mbox_send_message().
 */
static int ethosu_queue_write(struct ethosu_mailbox *mbox,
			      const struct kvec *vec,
			      size_t length)
{
	struct ethosu_core_queue *queue = mbox->in_queue;
	uint8_t *dst = &queue->data[0];
	uint32_t wpos = queue->header.write;
	size_t total_size;
	size_t i;
	int ret;

	/* Total payload size over all iovec entries */
	for (i = 0, total_size = 0; i < length; i++)
		total_size += vec[i].iov_len;

	/* Reject rather than overwrite unread data */
	if (total_size > ethosu_queue_capacity(queue))
		return -EINVAL;

	/* Byte-wise copy with wrap-around at the end of the ring buffer */
	for (i = 0; i < length; i++) {
		const uint8_t *src = vec[i].iov_base;
		const uint8_t *end = src + vec[i].iov_len;

		while (src < end) {
			dst[wpos] = *src++;
			wpos = (wpos + 1) % queue->header.size;
		}
	}

	/* Publish the new write position only after the payload is in
	 * place; the firmware reads this queue concurrently. */
	queue->header.write = wpos;

	ret = mbox_send_message(mbox->tx, queue);
	if (ret < 0)
		return ret;

	return 0;
}
146
147static int ethosu_queue_write_msg(struct ethosu_mailbox *mbox,
148 uint32_t type,
149 void *data,
150 size_t length)
151{
Jonny Svärd7c24c772021-01-14 19:53:17 +0100152 struct ethosu_core_msg msg = {
153 .magic = ETHOSU_CORE_MSG_MAGIC,
154 .type = type, .length= length
155 };
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200156 const struct kvec vec[2] = {
157 { &msg, sizeof(msg) },
158 { data, length }
159 };
Kristofer Jonssonf5b98c92022-03-14 16:09:12 +0100160 int ret;
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200161
Kristofer Jonssonf5b98c92022-03-14 16:09:12 +0100162 ret = ethosu_queue_write(mbox, vec, 2);
163 if (ret)
164 return ret;
165
166 ethosu_wd_inc(mbox, type);
167
168 return 0;
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200169}
170
/* Read 'length' bytes from the firmware's output queue into 'data'.
 *
 * Returns 0 on success (including length == 0), -ENOMSG if the queue is
 * empty, or -EBADMSG if it holds fewer bytes than requested.
 */
static int ethosu_queue_read(struct ethosu_mailbox *mbox,
			     void *data,
			     size_t length)
{
	struct ethosu_core_queue *queue = mbox->out_queue;
	uint8_t *src = &queue->data[0];
	uint8_t *dst = (uint8_t *)data;
	const uint8_t *end = dst + length;
	uint32_t rpos = queue->header.read;
	size_t queue_avail = ethosu_queue_available(queue);

	if (length == 0)
		return 0;
	else if (queue_avail == 0)
		return -ENOMSG;
	else if (length > queue_avail)
		return -EBADMSG;

	/* Byte-wise copy with wrap-around at the end of the ring buffer */
	while (dst < end) {
		*dst++ = src[rpos];
		rpos = (rpos + 1) % queue->header.size;
	}

	/* Publish the new read position only after the data has been
	 * consumed; the firmware writes this queue concurrently. */
	queue->header.read = rpos;

	return 0;
}
198
Jonny Svärd7c24c772021-01-14 19:53:17 +0100199void ethosu_mailbox_reset(struct ethosu_mailbox *mbox)
200{
201 mbox->out_queue->header.read = mbox->out_queue->header.write;
202}
203
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100204void ethosu_mailbox_wait_prepare(struct ethosu_mailbox *mbox)
205{
206 mbox->in_queue->header.size = 0;
207 mbox->in_queue->header.read = 0xffffff;
208 mbox->in_queue->header.write = 0xffffff;
209}
210
211int ethosu_mailbox_wait_firmware(struct ethosu_mailbox *mbox)
212{
213 const unsigned long timeout = 1000;
214 const unsigned long end = jiffies + msecs_to_jiffies(timeout);
215 volatile struct ethosu_core_queue_header *hdr =
216 &mbox->in_queue->header;
217 int ret = -ETIMEDOUT;
218
219 /* Spin wait on mailbox initialization */
220 while ((end - jiffies) < timeout)
221 if (hdr->size != 0 &&
222 hdr->read != 0xffffff &&
223 hdr->write != 0xffffff) {
224 ret = 0;
225 break;
226 }
227
228 dev_info(mbox->dev, "mbox: Wait. ret=%d, size=%u, read=%u, write=%u",
229 ret, hdr->size, hdr->read, hdr->write);
230
231 return ret;
232}
233
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200234int ethosu_mailbox_read(struct ethosu_mailbox *mbox,
235 struct ethosu_core_msg *header,
236 void *data,
237 size_t length)
238{
239 int ret;
240
Jonny Svärd7c24c772021-01-14 19:53:17 +0100241 /* Read message header magic */
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200242 ret = ethosu_queue_read(mbox, header, sizeof(*header));
Jonny Svärd7c24c772021-01-14 19:53:17 +0100243 if (ret) {
244 if (ret != -ENOMSG)
245 dev_warn(mbox->dev,
246 "Msg: Failed to read message header\n");
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200247
Jonny Svärd7c24c772021-01-14 19:53:17 +0100248 return ret;
249 }
250
251 if (header->magic != ETHOSU_CORE_MSG_MAGIC) {
252 dev_warn(mbox->dev,
253 "Msg: Invalid magic. Got: %08X but expected %08X\n",
254 header->magic, ETHOSU_CORE_MSG_MAGIC);
255
256 return -EINVAL;
257 }
258
259 dev_info(mbox->dev,
260 "mbox: Read msg header. magic=%08X, type=%u, length=%u",
261 header->magic, header->type, header->length);
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200262
263 /* Check that payload is not larger than allocated buffer */
Jonny Svärd7c24c772021-01-14 19:53:17 +0100264 if (header->length > length) {
265 dev_warn(mbox->dev,
266 "Msg: Buffer size (%zu) too small for message (%u)\n",
267 sizeof(data), header->length);
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200268
Jonny Svärd7c24c772021-01-14 19:53:17 +0100269 return -ENOMEM;
270 }
271
272 /* Read payload data */
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200273 ret = ethosu_queue_read(mbox, data, header->length);
Jonny Svärd7c24c772021-01-14 19:53:17 +0100274 if (ret) {
275 dev_warn(mbox->dev, "Msg: Failed to read payload data\n");
276
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200277 return -EBADMSG;
Jonny Svärd7c24c772021-01-14 19:53:17 +0100278 }
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200279
Kristofer Jonssonf5b98c92022-03-14 16:09:12 +0100280 ethosu_wd_dec(mbox, header->type);
281
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200282 return 0;
283}
284
Davide Grohmann32660f92022-04-27 16:49:07 +0200285int ethosu_mailbox_register(struct ethosu_mailbox *mbox,
286 struct ethosu_mailbox_msg *msg)
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100287{
Davide Grohmann32660f92022-04-27 16:49:07 +0200288 msg->id = idr_alloc_cyclic(&mbox->msg_idr, msg, 0, INT_MAX, GFP_KERNEL);
289 if (msg->id < 0)
290 return msg->id;
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100291
Davide Grohmann32660f92022-04-27 16:49:07 +0200292 return 0;
293}
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100294
/* Remove 'msg' from the mailbox's pending-message IDR. */
void ethosu_mailbox_deregister(struct ethosu_mailbox *mbox,
			       struct ethosu_mailbox_msg *msg)
{
	idr_remove(&mbox->msg_idr, msg->id);
}
300
301struct ethosu_mailbox_msg *ethosu_mailbox_find(struct ethosu_mailbox *mbox,
302 int msg_id)
303{
304 struct ethosu_mailbox_msg *ptr = (struct ethosu_mailbox_msg *)idr_find(
305 &mbox->msg_idr, msg_id);
306
307 if (ptr == NULL)
308 return ERR_PTR(-EINVAL);
309
310 return ptr;
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100311}
312
313void ethosu_mailbox_fail(struct ethosu_mailbox *mbox)
314{
Davide Grohmann32660f92022-04-27 16:49:07 +0200315 struct ethosu_mailbox_msg *cur;
316 int id;
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100317
Davide Grohmann32660f92022-04-27 16:49:07 +0200318 idr_for_each_entry(&mbox->msg_idr, cur, id) {
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100319 cur->fail(cur);
320 }
321}
322
Davide Grohmann8b1fe552022-04-07 16:58:32 +0200323void ethosu_mailbox_resend(struct ethosu_mailbox *mbox)
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100324{
Davide Grohmann32660f92022-04-27 16:49:07 +0200325 struct ethosu_mailbox_msg *cur;
326 int id;
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100327 int ret;
328
Davide Grohmann32660f92022-04-27 16:49:07 +0200329 idr_for_each_entry(&mbox->msg_idr, cur, id) {
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100330 ret = cur->resend(cur);
331 if (ret) {
Davide Grohmann8b1fe552022-04-07 16:58:32 +0200332 dev_warn(mbox->dev, "Failed to resend msg. ret=%d",
333 ret);
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100334 cur->fail(cur);
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100335 }
336 }
Kristofer Jonsson442fefb2022-03-17 17:15:52 +0100337}
338
/* Send a ping message (no payload) to the firmware. */
int ethosu_mailbox_ping(struct ethosu_mailbox *mbox)
{
	return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_PING, NULL, 0);
}
343
/* Send a pong message (no payload), answering a ping from the firmware. */
int ethosu_mailbox_pong(struct ethosu_mailbox *mbox)
{
	return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_PONG, NULL, 0);
}
348
/* Request the firmware's version information (no payload). */
int ethosu_mailbox_version_request(struct ethosu_mailbox *mbox)
{
	return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_VERSION_REQ, NULL,
				      0);
}
354
Davide Grohmann35ce6c82021-06-01 15:03:51 +0200355int ethosu_mailbox_capabilities_request(struct ethosu_mailbox *mbox,
Davide Grohmann32660f92022-04-27 16:49:07 +0200356 struct ethosu_mailbox_msg *msg)
Davide Grohmann35ce6c82021-06-01 15:03:51 +0200357{
358 struct ethosu_core_capabilities_req req = {
Davide Grohmann32660f92022-04-27 16:49:07 +0200359 .user_arg = msg->id
Davide Grohmann35ce6c82021-06-01 15:03:51 +0200360 };
361
362 return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_CAPABILITIES_REQ,
363 &req,
364 sizeof(req));
365}
366
/* Build and send an inference request to the firmware.
 *
 * 'msg->id' is passed as the user argument so the response can be matched
 * back to 'msg'. Input feature maps are described by their valid data,
 * output feature maps by their remaining capacity. The network is sent
 * either as a buffer or, when 'network' is NULL, as a firmware-side index.
 *
 * Returns 0 on success, -EINVAL if the PMU configuration count does not
 * match the core interface, or an error from ethosu_queue_write_msg().
 */
int ethosu_mailbox_inference(struct ethosu_mailbox *mbox,
			     struct ethosu_mailbox_msg *msg,
			     uint32_t ifm_count,
			     struct ethosu_buffer **ifm,
			     uint32_t ofm_count,
			     struct ethosu_buffer **ofm,
			     struct ethosu_buffer *network,
			     uint32_t network_index,
			     uint8_t *pmu_event_config,
			     uint8_t pmu_event_config_count,
			     uint8_t pmu_cycle_counter_enable)
{
	struct ethosu_core_inference_req inf;
	uint32_t i;

	/* Verify that the uapi and core has the same number of pmus */
	if (pmu_event_config_count != ETHOSU_CORE_PMU_MAX) {
		dev_err(mbox->dev, "PMU count misconfigured.\n");

		return -EINVAL;
	}

	inf.user_arg = msg->id;
	inf.ifm_count = ifm_count;
	inf.ofm_count = ofm_count;
	inf.pmu_cycle_counter_enable = pmu_cycle_counter_enable;

	for (i = 0; i < ifm_count; i++)
		ethosu_core_set_size(ifm[i], &inf.ifm[i]);

	for (i = 0; i < ofm_count; i++)
		ethosu_core_set_capacity(ofm[i], &inf.ofm[i]);

	for (i = 0; i < ETHOSU_CORE_PMU_MAX; i++)
		inf.pmu_event_config[i] = pmu_event_config[i];

	if (network != NULL) {
		inf.network.type = ETHOSU_CORE_NETWORK_BUFFER;
		ethosu_core_set_size(network, &inf.network.buffer);
	} else {
		inf.network.type = ETHOSU_CORE_NETWORK_INDEX;
		inf.network.index = network_index;
	}

	/* NOTE(review): 'inf' is only partially assigned; unused ifm/ofm
	 * slots and padding are sent uninitialized — presumably ignored by
	 * the firmware, but verify against the core interface. */
	return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_INFERENCE_REQ,
				      &inf, sizeof(inf));
}
414
Kristofer Jonsson3c6a2602022-03-10 11:17:29 +0100415int ethosu_mailbox_network_info_request(struct ethosu_mailbox *mbox,
Davide Grohmann32660f92022-04-27 16:49:07 +0200416 struct ethosu_mailbox_msg *msg,
Kristofer Jonsson3c6a2602022-03-10 11:17:29 +0100417 struct ethosu_buffer *network,
418 uint32_t network_index)
419{
420 struct ethosu_core_network_info_req info;
421
Davide Grohmann32660f92022-04-27 16:49:07 +0200422 info.user_arg = msg->id;
Kristofer Jonsson3c6a2602022-03-10 11:17:29 +0100423
424 if (network != NULL) {
425 info.network.type = ETHOSU_CORE_NETWORK_BUFFER;
426 ethosu_core_set_size(network, &info.network.buffer);
427 } else {
428 info.network.type = ETHOSU_CORE_NETWORK_INDEX;
429 info.network.index = network_index;
430 }
431
432 return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_NETWORK_INFO_REQ,
433 &info, sizeof(info));
434}
435
Davide Grohmann7e8f5082022-03-23 12:48:45 +0100436int ethosu_mailbox_cancel_inference(struct ethosu_mailbox *mbox,
Davide Grohmann32660f92022-04-27 16:49:07 +0200437 struct ethosu_mailbox_msg *msg,
438 int inference_handle)
Davide Grohmann7e8f5082022-03-23 12:48:45 +0100439{
440 struct ethosu_core_cancel_inference_req req;
441
Davide Grohmann32660f92022-04-27 16:49:07 +0200442 req.user_arg = msg->id;
443 req.inference_handle = inference_handle;
Davide Grohmann7e8f5082022-03-23 12:48:45 +0100444
445 return ethosu_queue_write_msg(mbox,
446 ETHOSU_CORE_MSG_CANCEL_INFERENCE_REQ,
447 &req, sizeof(req));
448}
449
/* Work item handler: dispatches a received mailbox notification to the
 * user callback in process context.
 */
static void ethosu_mailbox_rx_work(struct work_struct *work)
{
	struct ethosu_mailbox *mbox = container_of(work, typeof(*mbox), work);

	mbox->callback(mbox->user_arg);
}
456
/* Mailbox framework rx callback. Handling is deferred to the work queue
 * (the framework may invoke this from atomic context — hence no blocking
 * work here; TODO confirm against the mailbox controller in use).
 */
static void ethosu_mailbox_rx_callback(struct mbox_client *client,
				       void *message)
{
	struct ethosu_mailbox *mbox =
		container_of(client, typeof(*mbox), client);

	dev_info(mbox->dev, "mbox: Received message.\n");

	queue_work(mbox->wq, &mbox->work);
}
467
468static void ethosu_mailbox_tx_done(struct mbox_client *client,
469 void *message,
470 int r)
471{
472 if (r)
473 dev_warn(client->dev, "mbox: Failed sending message (%d)\n", r);
474 else
475 dev_info(client->dev, "mbox: Message sent\n");
476}
477
/* Initialize the mailbox: map the in/out message queues, configure the
 * mailbox client, create the rx work queue and request the tx/rx channels
 * ("tx" and "rx" by name; a missing "rx" channel means the controller
 * uses one bidirectional channel).
 *
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired so far are released via the goto-cleanup chain.
 *
 * NOTE(review): the error paths do not call idr_destroy() on msg_idr —
 * presumably harmless because nothing has been inserted yet, but verify.
 */
int ethosu_mailbox_init(struct ethosu_mailbox *mbox,
			struct device *dev,
			struct resource *in_queue,
			struct resource *out_queue,
			ethosu_mailbox_cb callback,
			void *user_arg,
			struct ethosu_watchdog *wdog)
{
	int ret;

	mbox->dev = dev;
	mbox->callback = callback;
	mbox->user_arg = user_arg;
	mbox->wdog = wdog;
	mbox->ping_count = 0;
	idr_init(&mbox->msg_idr);

	mbox->client.dev = dev;
	mbox->client.rx_callback = ethosu_mailbox_rx_callback;
	mbox->client.tx_prepare = NULL; /* preparation of data is handled
					 * through the
					 * queue functions */
	mbox->client.tx_done = ethosu_mailbox_tx_done;
	mbox->client.tx_block = true;
	mbox->client.knows_txdone = false;
	mbox->client.tx_tout = 500;

	mbox->in_queue = devm_ioremap_resource(mbox->dev, in_queue);
	if (IS_ERR(mbox->in_queue))
		return PTR_ERR(mbox->in_queue);

	mbox->out_queue = devm_ioremap_resource(mbox->dev, out_queue);
	if (IS_ERR(mbox->out_queue)) {
		ret = PTR_ERR(mbox->out_queue);
		goto unmap_in_queue;
	}

	mbox->wq = create_singlethread_workqueue("ethosu_workqueue");
	if (!mbox->wq) {
		dev_err(mbox->dev, "Failed to create work queue\n");
		ret = -EINVAL;
		goto unmap_out_queue;
	}

	INIT_WORK(&mbox->work, ethosu_mailbox_rx_work);

	mbox->tx = mbox_request_channel_byname(&mbox->client, "tx");
	if (IS_ERR(mbox->tx)) {
		dev_warn(mbox->dev, "mbox: Failed to request tx channel\n");
		ret = PTR_ERR(mbox->tx);
		goto workqueue_destroy;
	}

	mbox->rx = mbox_request_channel_byname(&mbox->client, "rx");
	if (IS_ERR(mbox->rx)) {
		dev_info(dev, "mbox: Using same channel for RX and TX\n");
		mbox->rx = mbox->tx;
	}

	return 0;

workqueue_destroy:
	destroy_workqueue(mbox->wq);

unmap_out_queue:
	devm_iounmap(mbox->dev, mbox->out_queue);

unmap_in_queue:
	devm_iounmap(mbox->dev, mbox->in_queue);

	return ret;
}
550
/* Tear down the mailbox in reverse order of ethosu_mailbox_init():
 * release the channels (rx only if it is distinct from tx), destroy the
 * work queue, then unmap both queues.
 */
void ethosu_mailbox_deinit(struct ethosu_mailbox *mbox)
{
	if (mbox->rx != mbox->tx)
		mbox_free_channel(mbox->rx);

	mbox_free_channel(mbox->tx);
	destroy_workqueue(mbox->wq);
	devm_iounmap(mbox->dev, mbox->out_queue);
	devm_iounmap(mbox->dev, mbox->in_queue);
}