/*
 * Copyright (c) 2020-2022 Arm Limited.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

/****************************************************************************
 * Includes
 ****************************************************************************/

#include "ethosu_mailbox.h"

#include "ethosu_buffer.h"
#include "ethosu_core_interface.h"
#include "ethosu_device.h"
#include "ethosu_watchdog.h"

#include <linux/jiffies.h>
#include <linux/resource.h>
#include <linux/uio.h>

/****************************************************************************
 * Defines
 ****************************************************************************/

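/*
 * The 'fallthrough' pseudo-keyword is not available on older kernels (it
 * was added to the mainline kernel in v5.4). Provide a fallback so the
 * driver builds there as well: the compiler attribute when it is supported,
 * otherwise an empty statement that still documents the intent.
 */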
#ifndef fallthrough
#if __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough do {} while (0) /* fallthrough */
#endif
#endif

/****************************************************************************
 * Functions
 ****************************************************************************/

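/*
 * Messages that expect a response (PING and INFERENCE_REQ) keep the
 * watchdog armed until the matching response has been read. PING
 * additionally bumps ping_count, which ethosu_wd_dec() drops again when
 * the PONG arrives.
 */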
static void ethosu_wd_inc(struct ethosu_mailbox *mbox,
			  enum ethosu_core_msg_type type)
{
	switch (type) {
	case ETHOSU_CORE_MSG_PING:
		mbox->ping_count++;
		fallthrough;
	case ETHOSU_CORE_MSG_INFERENCE_REQ:
		ethosu_watchdog_inc(mbox->wdog);
		break;
	default:
		break;
	}
}

static void ethosu_wd_dec(struct ethosu_mailbox *mbox,
			  enum ethosu_core_msg_type type)
{
	switch (type) {
	case ETHOSU_CORE_MSG_PONG:
		mbox->ping_count--;
		fallthrough;
	case ETHOSU_CORE_MSG_INFERENCE_RSP:
		ethosu_watchdog_dec(mbox->wdog);
		break;
	default:
		break;
	}
}

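/*
 * Translate a host side ethosu_buffer into the 32-bit pointer/size pair the
 * firmware expects. 'size' covers the populated part of the buffer, while
 * 'capacity' covers the writable tail that follows it.
 */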
static void ethosu_core_set_size(struct ethosu_buffer *buf,
				 struct ethosu_core_buffer *cbuf)
{
	cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset;
	cbuf->size = (uint32_t)buf->size;
}

static void ethosu_core_set_capacity(struct ethosu_buffer *buf,
				     struct ethosu_core_buffer *cbuf)
{
	cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset + buf->size;
	cbuf->size = (uint32_t)buf->capacity - buf->offset - buf->size;
}

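/*
 * The message queues are single-producer/single-consumer byte rings. The
 * number of readable bytes is the distance from the read index to the write
 * index, compensating for wrap around.
 */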
static size_t ethosu_queue_available(struct ethosu_core_queue *queue)
{
	size_t size = queue->header.write - queue->header.read;

	if (queue->header.read > queue->header.write)
		size += queue->header.size;

	return size;
}

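/*
 * One byte of the ring is intentionally kept unused; it is the classic way
 * of telling a full ring apart from an empty one (read == write always
 * means empty).
 */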
static size_t ethosu_queue_capacity(struct ethosu_core_queue *queue)
{
	return max_t(ssize_t,
		     queue->header.size - ethosu_queue_available(queue) - 1, 0);
}

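/*
 * Copy a scatter/gather vector into the inbound queue and ring the doorbell
 * through the Linux mailbox framework. The write index is published only
 * after the whole payload has been copied in.
 */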
static int ethosu_queue_write(struct ethosu_mailbox *mbox,
			      const struct kvec *vec,
			      size_t length)
{
	struct ethosu_core_queue *queue = mbox->in_queue;
	uint8_t *dst = &queue->data[0];
	uint32_t wpos = queue->header.write;
	size_t total_size;
	size_t i;
	int ret;

	for (i = 0, total_size = 0; i < length; i++)
		total_size += vec[i].iov_len;

	if (total_size > ethosu_queue_capacity(queue))
		return -EINVAL;

	for (i = 0; i < length; i++) {
		const uint8_t *src = vec[i].iov_base;
		const uint8_t *end = src + vec[i].iov_len;

		while (src < end) {
			dst[wpos] = *src++;
			wpos = (wpos + 1) % queue->header.size;
		}
	}

	queue->header.write = wpos;

	ret = mbox_send_message(mbox->tx, queue);
	if (ret < 0)
		return ret;

	return 0;
}

static int ethosu_queue_write_msg(struct ethosu_mailbox *mbox,
				  uint32_t type,
				  void *data,
				  size_t length)
{
	struct ethosu_core_msg msg = {
		.magic = ETHOSU_CORE_MSG_MAGIC,
		.type = type,
		.length = length
	};
	const struct kvec vec[2] = {
		{ &msg, sizeof(msg) },
		{ data, length }
	};
	int ret;

	ret = ethosu_queue_write(mbox, vec, 2);
	if (ret)
		return ret;

	ethosu_wd_inc(mbox, type);

	return 0;
}

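/*
 * Read 'length' bytes from the outbound queue. -ENOMSG means the queue is
 * empty, -EBADMSG that it holds fewer bytes than requested, letting callers
 * tell "nothing to do" apart from a truncated or corrupt message.
 */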
static int ethosu_queue_read(struct ethosu_mailbox *mbox,
			     void *data,
			     size_t length)
{
	struct ethosu_core_queue *queue = mbox->out_queue;
	uint8_t *src = &queue->data[0];
	uint8_t *dst = (uint8_t *)data;
	const uint8_t *end = dst + length;
	uint32_t rpos = queue->header.read;
	size_t queue_avail = ethosu_queue_available(queue);

	if (length == 0)
		return 0;
	else if (queue_avail == 0)
		return -ENOMSG;
	else if (length > queue_avail)
		return -EBADMSG;

	while (dst < end) {
		*dst++ = src[rpos];
		rpos = (rpos + 1) % queue->header.size;
	}

	queue->header.read = rpos;

	return 0;
}

void ethosu_mailbox_reset(struct ethosu_mailbox *mbox)
{
	mbox->out_queue->header.read = mbox->out_queue->header.write;
}

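/*
 * Fill the inbound queue header with sentinel values before the firmware is
 * (re)booted. The firmware is expected to overwrite them when it brings up
 * its end of the mailbox, which is what ethosu_mailbox_wait_firmware()
 * polls for.
 */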
void ethosu_mailbox_wait_prepare(struct ethosu_mailbox *mbox)
{
	mbox->in_queue->header.size = 0;
	mbox->in_queue->header.read = 0xffffff;
	mbox->in_queue->header.write = 0xffffff;
}

int ethosu_mailbox_wait_firmware(struct ethosu_mailbox *mbox)
{
	const unsigned long timeout = 1000;
	const unsigned long end = jiffies + msecs_to_jiffies(timeout);
	volatile struct ethosu_core_queue_header *hdr =
		&mbox->in_queue->header;
	int ret = -ETIMEDOUT;

	/* Spin wait on mailbox initialization */
	while (time_before(jiffies, end))
		if (hdr->size != 0 &&
		    hdr->read != 0xffffff &&
		    hdr->write != 0xffffff) {
			ret = 0;
			break;
		}

	dev_info(mbox->dev, "mbox: Wait. ret=%d, size=%u, read=%u, write=%u",
		 ret, hdr->size, hdr->read, hdr->write);

	return ret;
}

int ethosu_mailbox_read(struct ethosu_mailbox *mbox,
			struct ethosu_core_msg *header,
			void *data,
			size_t length)
{
	int ret;

	/* Read message header */
	ret = ethosu_queue_read(mbox, header, sizeof(*header));
	if (ret) {
		if (ret != -ENOMSG)
			dev_warn(mbox->dev,
				 "Msg: Failed to read message header\n");

		return ret;
	}

	if (header->magic != ETHOSU_CORE_MSG_MAGIC) {
		dev_warn(mbox->dev,
			 "Msg: Invalid magic. Got: %08X but expected %08X\n",
			 header->magic, ETHOSU_CORE_MSG_MAGIC);

		return -EINVAL;
	}

	dev_info(mbox->dev,
		 "mbox: Read msg header. magic=%08X, type=%u, length=%u",
		 header->magic, header->type, header->length);

	/* Check that payload is not larger than allocated buffer */
	if (header->length > length) {
		dev_warn(mbox->dev,
			 "Msg: Buffer size (%zu) too small for message (%u)\n",
			 length, header->length);

		return -ENOMEM;
	}

	/* Read payload data */
	ret = ethosu_queue_read(mbox, data, header->length);
	if (ret) {
		dev_warn(mbox->dev, "Msg: Failed to read payload data\n");

		return -EBADMSG;
	}

	ethosu_wd_dec(mbox, header->type);

	return 0;
}

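/*
 * In-flight messages are tracked in an IDR so that responses, which echo
 * the id back through their 'user_arg' field, can be matched to the waiting
 * request. Cyclic allocation avoids immediately reusing a recently freed
 * id.
 */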
int ethosu_mailbox_register(struct ethosu_mailbox *mbox,
			    struct ethosu_mailbox_msg *msg)
{
	msg->id = idr_alloc_cyclic(&mbox->msg_idr, msg, 0, INT_MAX, GFP_KERNEL);
	if (msg->id < 0)
		return msg->id;

	return 0;
}

void ethosu_mailbox_deregister(struct ethosu_mailbox *mbox,
			       struct ethosu_mailbox_msg *msg)
{
	idr_remove(&mbox->msg_idr, msg->id);
}

struct ethosu_mailbox_msg *ethosu_mailbox_find(struct ethosu_mailbox *mbox,
					       int msg_id)
{
	struct ethosu_mailbox_msg *ptr = (struct ethosu_mailbox_msg *)idr_find(
		&mbox->msg_idr, msg_id);

	if (ptr == NULL)
		return ERR_PTR(-EINVAL);

	return ptr;
}

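/*
 * On firmware failure or reset, every registered message is failed or
 * resent through the 'fail' and 'resend' callbacks supplied by the message
 * owner.
 */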
void ethosu_mailbox_fail(struct ethosu_mailbox *mbox)
{
	struct ethosu_mailbox_msg *cur;
	int id;

	idr_for_each_entry(&mbox->msg_idr, cur, id) {
		cur->fail(cur);
	}
}

void ethosu_mailbox_resend(struct ethosu_mailbox *mbox)
{
	struct ethosu_mailbox_msg *cur;
	int id;
	int ret;

	idr_for_each_entry(&mbox->msg_idr, cur, id) {
		ret = cur->resend(cur);
		if (ret) {
			dev_warn(mbox->dev, "Failed to resend msg. ret=%d",
				 ret);
			cur->fail(cur);
		}
	}
}

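/*
 * The request helpers below share one pattern: callers that expect a
 * response register an ethosu_mailbox_msg first, so its id can travel in
 * the request's 'user_arg'. A minimal sketch (error handling omitted,
 * 'msg' assumed to have its fail/resend callbacks set up by the caller):
 *
 *	ret = ethosu_mailbox_register(mbox, &msg);
 *	if (!ret)
 *		ret = ethosu_mailbox_capabilities_request(mbox, &msg);
 */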
int ethosu_mailbox_ping(struct ethosu_mailbox *mbox)
{
	return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_PING, NULL, 0);
}

int ethosu_mailbox_pong(struct ethosu_mailbox *mbox)
{
	return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_PONG, NULL, 0);
}

int ethosu_mailbox_version_request(struct ethosu_mailbox *mbox)
{
	return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_VERSION_REQ, NULL,
				      0);
}

int ethosu_mailbox_capabilities_request(struct ethosu_mailbox *mbox,
					struct ethosu_mailbox_msg *msg)
{
	struct ethosu_core_capabilities_req req = {
		.user_arg = msg->id
	};

	return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_CAPABILITIES_REQ,
				      &req,
				      sizeof(req));
}

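/*
 * A network is passed either as a buffer holding the compiled network or as
 * an index into networks built into the firmware; network == NULL selects
 * the index variant.
 */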
int ethosu_mailbox_inference(struct ethosu_mailbox *mbox,
			     struct ethosu_mailbox_msg *msg,
			     uint32_t ifm_count,
			     struct ethosu_buffer **ifm,
			     uint32_t ofm_count,
			     struct ethosu_buffer **ofm,
			     struct ethosu_buffer *network,
			     uint32_t network_index,
			     uint8_t *pmu_event_config,
			     uint8_t pmu_event_config_count,
			     uint8_t pmu_cycle_counter_enable)
{
	struct ethosu_core_inference_req inf;
	uint32_t i;

	/* Verify that the UAPI and core have the same number of PMUs */
	if (pmu_event_config_count != ETHOSU_CORE_PMU_MAX) {
		dev_err(mbox->dev, "PMU count misconfigured.\n");

		return -EINVAL;
	}

	inf.user_arg = msg->id;
	inf.ifm_count = ifm_count;
	inf.ofm_count = ofm_count;
	inf.pmu_cycle_counter_enable = pmu_cycle_counter_enable;

	for (i = 0; i < ifm_count; i++)
		ethosu_core_set_size(ifm[i], &inf.ifm[i]);

	for (i = 0; i < ofm_count; i++)
		ethosu_core_set_capacity(ofm[i], &inf.ofm[i]);

	for (i = 0; i < ETHOSU_CORE_PMU_MAX; i++)
		inf.pmu_event_config[i] = pmu_event_config[i];

	if (network != NULL) {
		inf.network.type = ETHOSU_CORE_NETWORK_BUFFER;
		ethosu_core_set_size(network, &inf.network.buffer);
	} else {
		inf.network.type = ETHOSU_CORE_NETWORK_INDEX;
		inf.network.index = network_index;
	}

	return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_INFERENCE_REQ,
				      &inf, sizeof(inf));
}

int ethosu_mailbox_network_info_request(struct ethosu_mailbox *mbox,
					struct ethosu_mailbox_msg *msg,
					struct ethosu_buffer *network,
					uint32_t network_index)
{
	struct ethosu_core_network_info_req info;

	info.user_arg = msg->id;

	if (network != NULL) {
		info.network.type = ETHOSU_CORE_NETWORK_BUFFER;
		ethosu_core_set_size(network, &info.network.buffer);
	} else {
		info.network.type = ETHOSU_CORE_NETWORK_INDEX;
		info.network.index = network_index;
	}

	return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_NETWORK_INFO_REQ,
				      &info, sizeof(info));
}

int ethosu_mailbox_cancel_inference(struct ethosu_mailbox *mbox,
				    struct ethosu_mailbox_msg *msg,
				    int inference_handle)
{
	struct ethosu_core_cancel_inference_req req;

	req.user_arg = msg->id;
	req.inference_handle = inference_handle;

	return ethosu_queue_write_msg(mbox,
				      ETHOSU_CORE_MSG_CANCEL_INFERENCE_REQ,
				      &req, sizeof(req));
}

static void ethosu_mailbox_rx_work(struct work_struct *work)
{
	struct ethosu_mailbox *mbox = container_of(work, typeof(*mbox), work);

	mbox->callback(mbox->user_arg);
}

static void ethosu_mailbox_rx_callback(struct mbox_client *client,
				       void *message)
{
	struct ethosu_mailbox *mbox =
		container_of(client, typeof(*mbox), client);

	dev_info(mbox->dev, "mbox: Received message.\n");

	queue_work(mbox->wq, &mbox->work);
}

static void ethosu_mailbox_tx_done(struct mbox_client *client,
				   void *message,
				   int r)
{
	if (r)
		dev_warn(client->dev, "mbox: Failed sending message (%d)\n", r);
	else
		dev_info(client->dev, "mbox: Message sent\n");
}

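/*
 * 'in_queue' and 'out_queue' are ioremapped shared-memory regions, and the
 * "tx"/"rx" mailbox channels are looked up by name, both typically
 * described by the device tree. With tx_block set, mbox_send_message()
 * blocks for up to tx_tout (500) ms waiting for the remote side to accept
 * the doorbell.
 */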
int ethosu_mailbox_init(struct ethosu_mailbox *mbox,
			struct device *dev,
			struct resource *in_queue,
			struct resource *out_queue,
			ethosu_mailbox_cb callback,
			void *user_arg,
			struct ethosu_watchdog *wdog)
{
	int ret;

	mbox->dev = dev;
	mbox->callback = callback;
	mbox->user_arg = user_arg;
	mbox->wdog = wdog;
	mbox->ping_count = 0;
	idr_init(&mbox->msg_idr);

	mbox->client.dev = dev;
	mbox->client.rx_callback = ethosu_mailbox_rx_callback;
	mbox->client.tx_prepare = NULL; /* Data is prepared through the
					 * queue functions */
	mbox->client.tx_done = ethosu_mailbox_tx_done;
	mbox->client.tx_block = true;
	mbox->client.knows_txdone = false;
	mbox->client.tx_tout = 500;

	mbox->in_queue = devm_ioremap_resource(mbox->dev, in_queue);
	if (IS_ERR(mbox->in_queue))
		return PTR_ERR(mbox->in_queue);

	mbox->out_queue = devm_ioremap_resource(mbox->dev, out_queue);
	if (IS_ERR(mbox->out_queue)) {
		ret = PTR_ERR(mbox->out_queue);
		goto unmap_in_queue;
	}

	mbox->wq = create_singlethread_workqueue("ethosu_workqueue");
	if (!mbox->wq) {
		dev_err(mbox->dev, "Failed to create work queue\n");
		ret = -EINVAL;
		goto unmap_out_queue;
	}

	INIT_WORK(&mbox->work, ethosu_mailbox_rx_work);

	mbox->tx = mbox_request_channel_byname(&mbox->client, "tx");
	if (IS_ERR(mbox->tx)) {
		dev_warn(mbox->dev, "mbox: Failed to request tx channel\n");
		ret = PTR_ERR(mbox->tx);
		goto workqueue_destroy;
	}

	mbox->rx = mbox_request_channel_byname(&mbox->client, "rx");
	if (IS_ERR(mbox->rx)) {
		dev_info(dev, "mbox: Using same channel for RX and TX\n");
		mbox->rx = mbox->tx;
	}

	return 0;

workqueue_destroy:
	destroy_workqueue(mbox->wq);

unmap_out_queue:
	devm_iounmap(mbox->dev, mbox->out_queue);

unmap_in_queue:
	devm_iounmap(mbox->dev, mbox->in_queue);

	return ret;
}

void ethosu_mailbox_deinit(struct ethosu_mailbox *mbox)
{
	if (mbox->rx != mbox->tx)
		mbox_free_channel(mbox->rx);

	mbox_free_channel(mbox->tx);
	destroy_workqueue(mbox->wq);
	devm_iounmap(mbox->dev, mbox->out_queue);
	devm_iounmap(mbox->dev, mbox->in_queue);
}