Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 1 | /* |
Kristofer Jonsson | 35de9e6 | 2022-03-08 13:25:45 +0100 | [diff] [blame] | 2 | * Copyright (c) 2020-2022 Arm Limited. |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 3 | * |
| 4 | * This program is free software and is provided to you under the terms of the |
| 5 | * GNU General Public License version 2 as published by the Free Software |
| 6 | * Foundation, and any use by you of this program is subject to the terms |
| 7 | * of such GNU licence. |
| 8 | * |
| 9 | * This program is distributed in the hope that it will be useful, |
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 12 | * GNU General Public License for more details. |
| 13 | * |
| 14 | * You should have received a copy of the GNU General Public License |
| 15 | * along with this program; if not, you can access it online at |
| 16 | * http://www.gnu.org/licenses/gpl-2.0.html. |
| 17 | * |
| 18 | * SPDX-License-Identifier: GPL-2.0-only |
| 19 | */ |
| 20 | |
| 21 | /**************************************************************************** |
| 22 | * Includes |
| 23 | ****************************************************************************/ |
| 24 | |
#include "ethosu_mailbox.h"
#include "ethosu_watchdog.h"

#include "ethosu_buffer.h"
#include "ethosu_core_interface.h"
#include "ethosu_device.h"

#include <linux/resource.h>
#include <linux/string.h>
#include <linux/uio.h>
| 34 | |
| 35 | /**************************************************************************** |
| 36 | * Functions |
| 37 | ****************************************************************************/ |
| 38 | |
Kristofer Jonsson | f5b98c9 | 2022-03-14 16:09:12 +0100 | [diff] [blame^] | 39 | static void ethosu_wd_inc(struct ethosu_mailbox *mbox, |
| 40 | enum ethosu_core_msg_type type) |
| 41 | { |
| 42 | switch (type) { |
| 43 | case ETHOSU_CORE_MSG_PING: |
| 44 | case ETHOSU_CORE_MSG_INFERENCE_REQ: |
| 45 | case ETHOSU_CORE_MSG_VERSION_REQ: |
| 46 | case ETHOSU_CORE_MSG_CAPABILITIES_REQ: |
| 47 | case ETHOSU_CORE_MSG_NETWORK_INFO_REQ: |
| 48 | ethosu_watchdog_inc(mbox->wdog); |
| 49 | break; |
| 50 | default: |
| 51 | break; |
| 52 | } |
| 53 | } |
| 54 | |
| 55 | static void ethosu_wd_dec(struct ethosu_mailbox *mbox, |
| 56 | enum ethosu_core_msg_type type) |
| 57 | { |
| 58 | switch (type) { |
| 59 | case ETHOSU_CORE_MSG_PONG: |
| 60 | case ETHOSU_CORE_MSG_INFERENCE_RSP: |
| 61 | case ETHOSU_CORE_MSG_VERSION_RSP: |
| 62 | case ETHOSU_CORE_MSG_CAPABILITIES_RSP: |
| 63 | case ETHOSU_CORE_MSG_NETWORK_INFO_RSP: |
| 64 | ethosu_watchdog_dec(mbox->wdog); |
| 65 | break; |
| 66 | default: |
| 67 | break; |
| 68 | } |
| 69 | } |
| 70 | |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 71 | static void ethosu_core_set_size(struct ethosu_buffer *buf, |
| 72 | struct ethosu_core_buffer *cbuf) |
| 73 | { |
| 74 | cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset; |
| 75 | cbuf->size = (uint32_t)buf->size; |
| 76 | } |
| 77 | |
| 78 | static void ethosu_core_set_capacity(struct ethosu_buffer *buf, |
| 79 | struct ethosu_core_buffer *cbuf) |
| 80 | { |
| 81 | cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset + buf->size; |
| 82 | cbuf->size = (uint32_t)buf->capacity - buf->offset - buf->size; |
| 83 | } |
| 84 | |
| 85 | static size_t ethosu_queue_available(struct ethosu_core_queue *queue) |
| 86 | { |
| 87 | size_t size = queue->header.write - queue->header.read; |
| 88 | |
| 89 | if (queue->header.read > queue->header.write) |
| 90 | size += queue->header.size; |
| 91 | |
| 92 | return size; |
| 93 | } |
| 94 | |
| 95 | static size_t ethosu_queue_capacity(struct ethosu_core_queue *queue) |
| 96 | { |
| 97 | return queue->header.size - ethosu_queue_available(queue); |
| 98 | } |
| 99 | |
| 100 | static int ethosu_queue_write(struct ethosu_mailbox *mbox, |
| 101 | const struct kvec *vec, |
| 102 | size_t length) |
| 103 | { |
| 104 | struct ethosu_core_queue *queue = mbox->in_queue; |
| 105 | uint8_t *dst = &queue->data[0]; |
| 106 | uint32_t wpos = queue->header.write; |
| 107 | size_t total_size; |
| 108 | size_t i; |
| 109 | int ret; |
| 110 | |
| 111 | for (i = 0, total_size = 0; i < length; i++) |
| 112 | total_size += vec[i].iov_len; |
| 113 | |
| 114 | if (total_size > ethosu_queue_capacity(queue)) |
| 115 | return -EINVAL; |
| 116 | |
| 117 | for (i = 0; i < length; i++) { |
| 118 | const uint8_t *src = vec[i].iov_base; |
| 119 | const uint8_t *end = src + vec[i].iov_len; |
| 120 | |
| 121 | while (src < end) { |
| 122 | dst[wpos] = *src++; |
| 123 | wpos = (wpos + 1) % queue->header.size; |
| 124 | } |
| 125 | } |
| 126 | |
| 127 | queue->header.write = wpos; |
| 128 | |
| 129 | ret = mbox_send_message(mbox->tx, queue); |
| 130 | if (ret < 0) |
| 131 | return ret; |
| 132 | |
| 133 | return 0; |
| 134 | } |
| 135 | |
| 136 | static int ethosu_queue_write_msg(struct ethosu_mailbox *mbox, |
| 137 | uint32_t type, |
| 138 | void *data, |
| 139 | size_t length) |
| 140 | { |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 141 | struct ethosu_core_msg msg = { |
| 142 | .magic = ETHOSU_CORE_MSG_MAGIC, |
| 143 | .type = type, .length= length |
| 144 | }; |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 145 | const struct kvec vec[2] = { |
| 146 | { &msg, sizeof(msg) }, |
| 147 | { data, length } |
| 148 | }; |
Kristofer Jonsson | f5b98c9 | 2022-03-14 16:09:12 +0100 | [diff] [blame^] | 149 | int ret; |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 150 | |
Kristofer Jonsson | f5b98c9 | 2022-03-14 16:09:12 +0100 | [diff] [blame^] | 151 | ret = ethosu_queue_write(mbox, vec, 2); |
| 152 | if (ret) |
| 153 | return ret; |
| 154 | |
| 155 | ethosu_wd_inc(mbox, type); |
| 156 | |
| 157 | return 0; |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 158 | } |
| 159 | |
| 160 | static int ethosu_queue_read(struct ethosu_mailbox *mbox, |
| 161 | void *data, |
| 162 | size_t length) |
| 163 | { |
| 164 | struct ethosu_core_queue *queue = mbox->out_queue; |
| 165 | uint8_t *src = &queue->data[0]; |
| 166 | uint8_t *dst = (uint8_t *)data; |
| 167 | const uint8_t *end = dst + length; |
| 168 | uint32_t rpos = queue->header.read; |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 169 | size_t queue_avail = ethosu_queue_available(queue); |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 170 | |
Davide Grohmann | 1c26baa | 2021-06-15 13:21:15 +0200 | [diff] [blame] | 171 | if (length == 0) |
| 172 | return 0; |
| 173 | else if (queue_avail == 0) |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 174 | return -ENOMSG; |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 175 | else if (length > queue_avail) |
| 176 | return -EBADMSG; |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 177 | |
| 178 | while (dst < end) { |
| 179 | *dst++ = src[rpos]; |
| 180 | rpos = (rpos + 1) % queue->header.size; |
| 181 | } |
| 182 | |
| 183 | queue->header.read = rpos; |
| 184 | |
| 185 | return 0; |
| 186 | } |
| 187 | |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 188 | void ethosu_mailbox_reset(struct ethosu_mailbox *mbox) |
| 189 | { |
| 190 | mbox->out_queue->header.read = mbox->out_queue->header.write; |
| 191 | } |
| 192 | |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 193 | int ethosu_mailbox_read(struct ethosu_mailbox *mbox, |
| 194 | struct ethosu_core_msg *header, |
| 195 | void *data, |
| 196 | size_t length) |
| 197 | { |
| 198 | int ret; |
| 199 | |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 200 | /* Read message header magic */ |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 201 | ret = ethosu_queue_read(mbox, header, sizeof(*header)); |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 202 | if (ret) { |
| 203 | if (ret != -ENOMSG) |
| 204 | dev_warn(mbox->dev, |
| 205 | "Msg: Failed to read message header\n"); |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 206 | |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 207 | return ret; |
| 208 | } |
| 209 | |
| 210 | if (header->magic != ETHOSU_CORE_MSG_MAGIC) { |
| 211 | dev_warn(mbox->dev, |
| 212 | "Msg: Invalid magic. Got: %08X but expected %08X\n", |
| 213 | header->magic, ETHOSU_CORE_MSG_MAGIC); |
| 214 | |
| 215 | return -EINVAL; |
| 216 | } |
| 217 | |
| 218 | dev_info(mbox->dev, |
| 219 | "mbox: Read msg header. magic=%08X, type=%u, length=%u", |
| 220 | header->magic, header->type, header->length); |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 221 | |
| 222 | /* Check that payload is not larger than allocated buffer */ |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 223 | if (header->length > length) { |
| 224 | dev_warn(mbox->dev, |
| 225 | "Msg: Buffer size (%zu) too small for message (%u)\n", |
| 226 | sizeof(data), header->length); |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 227 | |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 228 | return -ENOMEM; |
| 229 | } |
| 230 | |
| 231 | /* Read payload data */ |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 232 | ret = ethosu_queue_read(mbox, data, header->length); |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 233 | if (ret) { |
| 234 | dev_warn(mbox->dev, "Msg: Failed to read payload data\n"); |
| 235 | |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 236 | return -EBADMSG; |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 237 | } |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 238 | |
Kristofer Jonsson | f5b98c9 | 2022-03-14 16:09:12 +0100 | [diff] [blame^] | 239 | ethosu_wd_dec(mbox, header->type); |
| 240 | |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 241 | return 0; |
| 242 | } |
| 243 | |
/* Send a ping (heartbeat) message to the firmware; it carries no payload. */
int ethosu_mailbox_ping(struct ethosu_mailbox *mbox)
{
	return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_PING, NULL, 0);
}
| 248 | |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 249 | int ethosu_mailbox_pong(struct ethosu_mailbox *mbox) |
| 250 | { |
| 251 | return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_PONG, NULL, 0); |
| 252 | } |
| 253 | |
/* Request the firmware version; the answer arrives as a VERSION_RSP message. */
int ethosu_mailbox_version_request(struct ethosu_mailbox *mbox)
{
	return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_VERSION_REQ, NULL,
				      0);
}
| 259 | |
Davide Grohmann | 35ce6c8 | 2021-06-01 15:03:51 +0200 | [diff] [blame] | 260 | int ethosu_mailbox_capabilities_request(struct ethosu_mailbox *mbox, |
| 261 | void *user_arg) |
| 262 | { |
| 263 | struct ethosu_core_capabilities_req req = { |
| 264 | .user_arg = (ptrdiff_t)user_arg |
| 265 | }; |
| 266 | |
| 267 | return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_CAPABILITIES_REQ, |
| 268 | &req, |
| 269 | sizeof(req)); |
| 270 | } |
| 271 | |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 272 | int ethosu_mailbox_inference(struct ethosu_mailbox *mbox, |
| 273 | void *user_arg, |
Kristofer Jonsson | b74492c | 2020-09-10 13:26:01 +0200 | [diff] [blame] | 274 | uint32_t ifm_count, |
| 275 | struct ethosu_buffer **ifm, |
| 276 | uint32_t ofm_count, |
| 277 | struct ethosu_buffer **ofm, |
Per Åstrand | f7e407a | 2020-10-23 21:25:05 +0200 | [diff] [blame] | 278 | struct ethosu_buffer *network, |
Kristofer Jonsson | 35de9e6 | 2022-03-08 13:25:45 +0100 | [diff] [blame] | 279 | uint32_t network_index, |
Per Åstrand | f7e407a | 2020-10-23 21:25:05 +0200 | [diff] [blame] | 280 | uint8_t *pmu_event_config, |
| 281 | uint8_t pmu_event_config_count, |
| 282 | uint8_t pmu_cycle_counter_enable) |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 283 | { |
| 284 | struct ethosu_core_inference_req inf; |
Kristofer Jonsson | b74492c | 2020-09-10 13:26:01 +0200 | [diff] [blame] | 285 | uint32_t i; |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 286 | |
Per Åstrand | f7e407a | 2020-10-23 21:25:05 +0200 | [diff] [blame] | 287 | /* Verify that the uapi and core has the same number of pmus */ |
| 288 | if (pmu_event_config_count != ETHOSU_CORE_PMU_MAX) { |
| 289 | dev_err(mbox->dev, "PMU count misconfigured.\n"); |
| 290 | |
| 291 | return -EINVAL; |
| 292 | } |
| 293 | |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 294 | inf.user_arg = (ptrdiff_t)user_arg; |
Kristofer Jonsson | b74492c | 2020-09-10 13:26:01 +0200 | [diff] [blame] | 295 | inf.ifm_count = ifm_count; |
| 296 | inf.ofm_count = ofm_count; |
Per Åstrand | f7e407a | 2020-10-23 21:25:05 +0200 | [diff] [blame] | 297 | inf.pmu_cycle_counter_enable = pmu_cycle_counter_enable; |
Kristofer Jonsson | b74492c | 2020-09-10 13:26:01 +0200 | [diff] [blame] | 298 | |
| 299 | for (i = 0; i < ifm_count; i++) |
| 300 | ethosu_core_set_size(ifm[i], &inf.ifm[i]); |
| 301 | |
| 302 | for (i = 0; i < ofm_count; i++) |
| 303 | ethosu_core_set_capacity(ofm[i], &inf.ofm[i]); |
| 304 | |
Per Åstrand | f7e407a | 2020-10-23 21:25:05 +0200 | [diff] [blame] | 305 | for (i = 0; i < ETHOSU_CORE_PMU_MAX; i++) |
| 306 | inf.pmu_event_config[i] = pmu_event_config[i]; |
| 307 | |
Kristofer Jonsson | 35de9e6 | 2022-03-08 13:25:45 +0100 | [diff] [blame] | 308 | if (network != NULL) { |
| 309 | inf.network.type = ETHOSU_CORE_NETWORK_BUFFER; |
| 310 | ethosu_core_set_size(network, &inf.network.buffer); |
| 311 | } else { |
| 312 | inf.network.type = ETHOSU_CORE_NETWORK_INDEX; |
| 313 | inf.network.index = network_index; |
| 314 | } |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 315 | |
| 316 | return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_INFERENCE_REQ, |
| 317 | &inf, sizeof(inf)); |
| 318 | } |
| 319 | |
Kristofer Jonsson | 3c6a260 | 2022-03-10 11:17:29 +0100 | [diff] [blame] | 320 | int ethosu_mailbox_network_info_request(struct ethosu_mailbox *mbox, |
| 321 | void *user_arg, |
| 322 | struct ethosu_buffer *network, |
| 323 | uint32_t network_index) |
| 324 | { |
| 325 | struct ethosu_core_network_info_req info; |
| 326 | |
| 327 | info.user_arg = (ptrdiff_t)user_arg; |
| 328 | |
| 329 | if (network != NULL) { |
| 330 | info.network.type = ETHOSU_CORE_NETWORK_BUFFER; |
| 331 | ethosu_core_set_size(network, &info.network.buffer); |
| 332 | } else { |
| 333 | info.network.type = ETHOSU_CORE_NETWORK_INDEX; |
| 334 | info.network.index = network_index; |
| 335 | } |
| 336 | |
| 337 | return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_NETWORK_INFO_REQ, |
| 338 | &info, sizeof(info)); |
| 339 | } |
| 340 | |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 341 | static void ethosu_mailbox_rx_work(struct work_struct *work) |
| 342 | { |
| 343 | struct ethosu_mailbox *mbox = container_of(work, typeof(*mbox), work); |
| 344 | |
| 345 | mbox->callback(mbox->user_arg); |
| 346 | } |
| 347 | |
/* Mailbox framework RX callback. May run in interrupt context, so the
 * actual message handling is deferred to the workqueue. */
static void ethosu_mailbox_rx_callback(struct mbox_client *client,
				       void *message)
{
	struct ethosu_mailbox *mbox =
		container_of(client, typeof(*mbox), client);

	dev_info(mbox->dev, "mbox: Received message.\n");

	queue_work(mbox->wq, &mbox->work);
}
| 358 | |
| 359 | static void ethosu_mailbox_tx_done(struct mbox_client *client, |
| 360 | void *message, |
| 361 | int r) |
| 362 | { |
| 363 | if (r) |
| 364 | dev_warn(client->dev, "mbox: Failed sending message (%d)\n", r); |
| 365 | else |
| 366 | dev_info(client->dev, "mbox: Message sent\n"); |
| 367 | } |
| 368 | |
/**
 * ethosu_mailbox_init() - Initialize the mailbox instance.
 * @mbox:	Mailbox instance to initialize.
 * @dev:	Parent device, used for devres and logging.
 * @in_queue:	MMIO resource for the queue written by the driver.
 * @out_queue:	MMIO resource for the queue written by the firmware.
 * @callback:	Called from the workqueue when a message is received.
 * @user_arg:	Opaque argument passed to @callback.
 * @wdog:	Watchdog armed/disarmed as requests/responses flow.
 *
 * On failure all resources acquired so far are released via the goto
 * cleanup chain below.
 *
 * Return: 0 on success, else a negative error.
 */
int ethosu_mailbox_init(struct ethosu_mailbox *mbox,
			struct device *dev,
			struct resource *in_queue,
			struct resource *out_queue,
			ethosu_mailbox_cb callback,
			void *user_arg,
			struct ethosu_watchdog *wdog)
{
	int ret;

	mbox->dev = dev;
	mbox->callback = callback;
	mbox->user_arg = user_arg;
	mbox->wdog = wdog;

	/* Configure the mailbox framework client. tx_block + tx_tout make
	 * sends synchronous with a 500 ms timeout. */
	mbox->client.dev = dev;
	mbox->client.rx_callback = ethosu_mailbox_rx_callback;
	mbox->client.tx_prepare = NULL; /* preparation of data is handled
					 * through the
					 * queue functions */
	mbox->client.tx_done = ethosu_mailbox_tx_done;
	mbox->client.tx_block = true;
	mbox->client.knows_txdone = false;
	mbox->client.tx_tout = 500;

	/* Map the two shared-memory message queues. */
	mbox->in_queue = devm_ioremap_resource(mbox->dev, in_queue);
	if (IS_ERR(mbox->in_queue))
		return PTR_ERR(mbox->in_queue);

	mbox->out_queue = devm_ioremap_resource(mbox->dev, out_queue);
	if (IS_ERR(mbox->out_queue)) {
		ret = PTR_ERR(mbox->out_queue);
		goto unmap_in_queue;
	}

	/* Single-threaded workqueue: RX messages are handled in order. */
	mbox->wq = create_singlethread_workqueue("ethosu_workqueue");
	if (!mbox->wq) {
		dev_err(mbox->dev, "Failed to create work queue\n");
		ret = -EINVAL;
		goto unmap_out_queue;
	}

	INIT_WORK(&mbox->work, ethosu_mailbox_rx_work);

	mbox->tx = mbox_request_channel_byname(&mbox->client, "tx");
	if (IS_ERR(mbox->tx)) {
		dev_warn(mbox->dev, "mbox: Failed to request tx channel\n");
		ret = PTR_ERR(mbox->tx);
		goto workqueue_destroy;
	}

	/* A dedicated RX channel is optional; fall back to the TX channel. */
	mbox->rx = mbox_request_channel_byname(&mbox->client, "rx");
	if (IS_ERR(mbox->rx)) {
		dev_info(dev, "mbox: Using same channel for RX and TX\n");
		mbox->rx = mbox->tx;
	}

	return 0;

workqueue_destroy:
	destroy_workqueue(mbox->wq);

unmap_out_queue:
	devm_iounmap(mbox->dev, mbox->out_queue);

unmap_in_queue:
	devm_iounmap(mbox->dev, mbox->in_queue);

	return ret;
}
| 439 | |
/* Tear down the mailbox: release channels, flush and destroy the
 * workqueue, then unmap the shared queues — the reverse of
 * ethosu_mailbox_init(). */
void ethosu_mailbox_deinit(struct ethosu_mailbox *mbox)
{
	/* RX may alias TX when no dedicated RX channel was found. */
	if (mbox->rx != mbox->tx)
		mbox_free_channel(mbox->rx);

	mbox_free_channel(mbox->tx);
	destroy_workqueue(mbox->wq);
	devm_iounmap(mbox->dev, mbox->out_queue);
	devm_iounmap(mbox->dev, mbox->in_queue);
}