Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 1 | /* |
Kristofer Jonsson | b42bc0b | 2023-01-04 17:09:28 +0100 | [diff] [blame^] | 2 | * Copyright 2020-2023 Arm Limited and/or its affiliates |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 3 | * |
| 4 | * This program is free software and is provided to you under the terms of the |
| 5 | * GNU General Public License version 2 as published by the Free Software |
| 6 | * Foundation, and any use by you of this program is subject to the terms |
| 7 | * of such GNU licence. |
| 8 | * |
| 9 | * This program is distributed in the hope that it will be useful, |
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 12 | * GNU General Public License for more details. |
| 13 | * |
| 14 | * You should have received a copy of the GNU General Public License |
| 15 | * along with this program; if not, you can access it online at |
| 16 | * http://www.gnu.org/licenses/gpl-2.0.html. |
| 17 | * |
| 18 | * SPDX-License-Identifier: GPL-2.0-only |
| 19 | */ |
| 20 | |
| 21 | /**************************************************************************** |
| 22 | * Includes |
| 23 | ****************************************************************************/ |
| 24 | |
#include "ethosu_mailbox.h"

#include "ethosu_buffer.h"
#include "ethosu_core_interface.h"
#include "ethosu_device.h"

#include <linux/jiffies.h>
#include <linux/resource.h>
#include <linux/string.h>
#include <linux/uio.h>
| 34 | |
/****************************************************************************
 * Defines
 ****************************************************************************/
| 38 | |
/* Provide a 'fallthrough' switch-case annotation for older kernels that do
 * not define one themselves (newer kernels define it generically in
 * <linux/compiler_attributes.h>). Falls back to a no-op statement when the
 * compiler lacks the attribute. */
#ifndef fallthrough
#if __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough do {} while (0) /* fallthrough */
#endif
#endif
| 46 | |
| 47 | /**************************************************************************** |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 48 | * Functions |
| 49 | ****************************************************************************/ |
| 50 | |
| 51 | static void ethosu_core_set_size(struct ethosu_buffer *buf, |
| 52 | struct ethosu_core_buffer *cbuf) |
| 53 | { |
| 54 | cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset; |
| 55 | cbuf->size = (uint32_t)buf->size; |
| 56 | } |
| 57 | |
| 58 | static void ethosu_core_set_capacity(struct ethosu_buffer *buf, |
| 59 | struct ethosu_core_buffer *cbuf) |
| 60 | { |
| 61 | cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset + buf->size; |
| 62 | cbuf->size = (uint32_t)buf->capacity - buf->offset - buf->size; |
| 63 | } |
| 64 | |
| 65 | static size_t ethosu_queue_available(struct ethosu_core_queue *queue) |
| 66 | { |
| 67 | size_t size = queue->header.write - queue->header.read; |
| 68 | |
| 69 | if (queue->header.read > queue->header.write) |
| 70 | size += queue->header.size; |
| 71 | |
| 72 | return size; |
| 73 | } |
| 74 | |
| 75 | static size_t ethosu_queue_capacity(struct ethosu_core_queue *queue) |
| 76 | { |
Kristofer Jonsson | e2a5815 | 2022-06-28 14:09:19 +0200 | [diff] [blame] | 77 | return max_t(ssize_t, |
| 78 | queue->header.size - ethosu_queue_available(queue) - 1, 0); |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 79 | } |
| 80 | |
| 81 | static int ethosu_queue_write(struct ethosu_mailbox *mbox, |
| 82 | const struct kvec *vec, |
| 83 | size_t length) |
| 84 | { |
| 85 | struct ethosu_core_queue *queue = mbox->in_queue; |
| 86 | uint8_t *dst = &queue->data[0]; |
| 87 | uint32_t wpos = queue->header.write; |
| 88 | size_t total_size; |
| 89 | size_t i; |
| 90 | int ret; |
| 91 | |
| 92 | for (i = 0, total_size = 0; i < length; i++) |
| 93 | total_size += vec[i].iov_len; |
| 94 | |
| 95 | if (total_size > ethosu_queue_capacity(queue)) |
| 96 | return -EINVAL; |
| 97 | |
| 98 | for (i = 0; i < length; i++) { |
| 99 | const uint8_t *src = vec[i].iov_base; |
| 100 | const uint8_t *end = src + vec[i].iov_len; |
| 101 | |
| 102 | while (src < end) { |
| 103 | dst[wpos] = *src++; |
| 104 | wpos = (wpos + 1) % queue->header.size; |
| 105 | } |
| 106 | } |
| 107 | |
| 108 | queue->header.write = wpos; |
| 109 | |
| 110 | ret = mbox_send_message(mbox->tx, queue); |
| 111 | if (ret < 0) |
| 112 | return ret; |
| 113 | |
| 114 | return 0; |
| 115 | } |
| 116 | |
| 117 | static int ethosu_queue_write_msg(struct ethosu_mailbox *mbox, |
| 118 | uint32_t type, |
| 119 | void *data, |
| 120 | size_t length) |
| 121 | { |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 122 | struct ethosu_core_msg msg = { |
| 123 | .magic = ETHOSU_CORE_MSG_MAGIC, |
| 124 | .type = type, .length= length |
| 125 | }; |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 126 | const struct kvec vec[2] = { |
| 127 | { &msg, sizeof(msg) }, |
| 128 | { data, length } |
| 129 | }; |
Kristofer Jonsson | f5b98c9 | 2022-03-14 16:09:12 +0100 | [diff] [blame] | 130 | int ret; |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 131 | |
Kristofer Jonsson | f5b98c9 | 2022-03-14 16:09:12 +0100 | [diff] [blame] | 132 | ret = ethosu_queue_write(mbox, vec, 2); |
| 133 | if (ret) |
| 134 | return ret; |
| 135 | |
Kristofer Jonsson | f5b98c9 | 2022-03-14 16:09:12 +0100 | [diff] [blame] | 136 | return 0; |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 137 | } |
| 138 | |
| 139 | static int ethosu_queue_read(struct ethosu_mailbox *mbox, |
| 140 | void *data, |
| 141 | size_t length) |
| 142 | { |
| 143 | struct ethosu_core_queue *queue = mbox->out_queue; |
| 144 | uint8_t *src = &queue->data[0]; |
| 145 | uint8_t *dst = (uint8_t *)data; |
| 146 | const uint8_t *end = dst + length; |
| 147 | uint32_t rpos = queue->header.read; |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 148 | size_t queue_avail = ethosu_queue_available(queue); |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 149 | |
Davide Grohmann | 1c26baa | 2021-06-15 13:21:15 +0200 | [diff] [blame] | 150 | if (length == 0) |
| 151 | return 0; |
| 152 | else if (queue_avail == 0) |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 153 | return -ENOMSG; |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 154 | else if (length > queue_avail) |
| 155 | return -EBADMSG; |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 156 | |
| 157 | while (dst < end) { |
| 158 | *dst++ = src[rpos]; |
| 159 | rpos = (rpos + 1) % queue->header.size; |
| 160 | } |
| 161 | |
| 162 | queue->header.read = rpos; |
| 163 | |
| 164 | return 0; |
| 165 | } |
| 166 | |
/* Discard any unread firmware messages by fast-forwarding the out queue
 * read index to the current write index. */
void ethosu_mailbox_reset(struct ethosu_mailbox *mbox)
{
	mbox->out_queue->header.read = mbox->out_queue->header.write;
}
| 171 | |
/* Invalidate the in queue header before (re)starting the firmware.
 *
 * size == 0 and the 0xffffff read/write sentinels form the "not yet
 * initialized" pattern that ethosu_mailbox_wait_firmware() polls on; the
 * firmware is expected to overwrite them once its mailbox is up.
 */
void ethosu_mailbox_wait_prepare(struct ethosu_mailbox *mbox)
{
	mbox->in_queue->header.size = 0;
	mbox->in_queue->header.read = 0xffffff;
	mbox->in_queue->header.write = 0xffffff;
}
| 178 | |
| 179 | int ethosu_mailbox_wait_firmware(struct ethosu_mailbox *mbox) |
| 180 | { |
| 181 | const unsigned long timeout = 1000; |
| 182 | const unsigned long end = jiffies + msecs_to_jiffies(timeout); |
| 183 | volatile struct ethosu_core_queue_header *hdr = |
| 184 | &mbox->in_queue->header; |
| 185 | int ret = -ETIMEDOUT; |
| 186 | |
| 187 | /* Spin wait on mailbox initialization */ |
| 188 | while ((end - jiffies) < timeout) |
| 189 | if (hdr->size != 0 && |
| 190 | hdr->read != 0xffffff && |
| 191 | hdr->write != 0xffffff) { |
| 192 | ret = 0; |
| 193 | break; |
| 194 | } |
| 195 | |
| 196 | dev_info(mbox->dev, "mbox: Wait. ret=%d, size=%u, read=%u, write=%u", |
| 197 | ret, hdr->size, hdr->read, hdr->write); |
| 198 | |
| 199 | return ret; |
| 200 | } |
| 201 | |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 202 | int ethosu_mailbox_read(struct ethosu_mailbox *mbox, |
| 203 | struct ethosu_core_msg *header, |
| 204 | void *data, |
| 205 | size_t length) |
| 206 | { |
| 207 | int ret; |
| 208 | |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 209 | /* Read message header magic */ |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 210 | ret = ethosu_queue_read(mbox, header, sizeof(*header)); |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 211 | if (ret) { |
| 212 | if (ret != -ENOMSG) |
| 213 | dev_warn(mbox->dev, |
| 214 | "Msg: Failed to read message header\n"); |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 215 | |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 216 | return ret; |
| 217 | } |
| 218 | |
| 219 | if (header->magic != ETHOSU_CORE_MSG_MAGIC) { |
| 220 | dev_warn(mbox->dev, |
| 221 | "Msg: Invalid magic. Got: %08X but expected %08X\n", |
| 222 | header->magic, ETHOSU_CORE_MSG_MAGIC); |
| 223 | |
| 224 | return -EINVAL; |
| 225 | } |
| 226 | |
| 227 | dev_info(mbox->dev, |
| 228 | "mbox: Read msg header. magic=%08X, type=%u, length=%u", |
| 229 | header->magic, header->type, header->length); |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 230 | |
| 231 | /* Check that payload is not larger than allocated buffer */ |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 232 | if (header->length > length) { |
| 233 | dev_warn(mbox->dev, |
| 234 | "Msg: Buffer size (%zu) too small for message (%u)\n", |
| 235 | sizeof(data), header->length); |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 236 | |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 237 | return -ENOMEM; |
| 238 | } |
| 239 | |
| 240 | /* Read payload data */ |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 241 | ret = ethosu_queue_read(mbox, data, header->length); |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 242 | if (ret) { |
| 243 | dev_warn(mbox->dev, "Msg: Failed to read payload data\n"); |
| 244 | |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 245 | return -EBADMSG; |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 246 | } |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 247 | |
| 248 | return 0; |
| 249 | } |
| 250 | |
Davide Grohmann | 32660f9 | 2022-04-27 16:49:07 +0200 | [diff] [blame] | 251 | int ethosu_mailbox_register(struct ethosu_mailbox *mbox, |
| 252 | struct ethosu_mailbox_msg *msg) |
Kristofer Jonsson | 442fefb | 2022-03-17 17:15:52 +0100 | [diff] [blame] | 253 | { |
Davide Grohmann | 32660f9 | 2022-04-27 16:49:07 +0200 | [diff] [blame] | 254 | msg->id = idr_alloc_cyclic(&mbox->msg_idr, msg, 0, INT_MAX, GFP_KERNEL); |
| 255 | if (msg->id < 0) |
| 256 | return msg->id; |
Kristofer Jonsson | 442fefb | 2022-03-17 17:15:52 +0100 | [diff] [blame] | 257 | |
Davide Grohmann | 32660f9 | 2022-04-27 16:49:07 +0200 | [diff] [blame] | 258 | return 0; |
| 259 | } |
Kristofer Jonsson | 442fefb | 2022-03-17 17:15:52 +0100 | [diff] [blame] | 260 | |
Davide Grohmann | 32660f9 | 2022-04-27 16:49:07 +0200 | [diff] [blame] | 261 | void ethosu_mailbox_deregister(struct ethosu_mailbox *mbox, |
| 262 | struct ethosu_mailbox_msg *msg) |
| 263 | { |
| 264 | idr_remove(&mbox->msg_idr, msg->id); |
| 265 | } |
| 266 | |
| 267 | struct ethosu_mailbox_msg *ethosu_mailbox_find(struct ethosu_mailbox *mbox, |
| 268 | int msg_id) |
| 269 | { |
| 270 | struct ethosu_mailbox_msg *ptr = (struct ethosu_mailbox_msg *)idr_find( |
| 271 | &mbox->msg_idr, msg_id); |
| 272 | |
| 273 | if (ptr == NULL) |
| 274 | return ERR_PTR(-EINVAL); |
| 275 | |
| 276 | return ptr; |
Kristofer Jonsson | 442fefb | 2022-03-17 17:15:52 +0100 | [diff] [blame] | 277 | } |
| 278 | |
| 279 | void ethosu_mailbox_fail(struct ethosu_mailbox *mbox) |
| 280 | { |
Davide Grohmann | 32660f9 | 2022-04-27 16:49:07 +0200 | [diff] [blame] | 281 | struct ethosu_mailbox_msg *cur; |
| 282 | int id; |
Kristofer Jonsson | 442fefb | 2022-03-17 17:15:52 +0100 | [diff] [blame] | 283 | |
Davide Grohmann | 32660f9 | 2022-04-27 16:49:07 +0200 | [diff] [blame] | 284 | idr_for_each_entry(&mbox->msg_idr, cur, id) { |
Kristofer Jonsson | 442fefb | 2022-03-17 17:15:52 +0100 | [diff] [blame] | 285 | cur->fail(cur); |
| 286 | } |
| 287 | } |
| 288 | |
/* Send an empty PING message to the firmware. */
int ethosu_mailbox_ping(struct ethosu_mailbox *mbox)
{
	return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_PING, NULL, 0);
}
| 293 | |
/* Send an empty PONG message, the reply to a PING from the firmware. */
int ethosu_mailbox_pong(struct ethosu_mailbox *mbox)
{
	return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_PONG, NULL, 0);
}
| 298 | |
/* Request the firmware version; the response arrives asynchronously. */
int ethosu_mailbox_version_request(struct ethosu_mailbox *mbox)
{
	return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_VERSION_REQ, NULL,
				      0);
}
| 304 | |
Davide Grohmann | 35ce6c8 | 2021-06-01 15:03:51 +0200 | [diff] [blame] | 305 | int ethosu_mailbox_capabilities_request(struct ethosu_mailbox *mbox, |
Davide Grohmann | 32660f9 | 2022-04-27 16:49:07 +0200 | [diff] [blame] | 306 | struct ethosu_mailbox_msg *msg) |
Davide Grohmann | 35ce6c8 | 2021-06-01 15:03:51 +0200 | [diff] [blame] | 307 | { |
| 308 | struct ethosu_core_capabilities_req req = { |
Davide Grohmann | 32660f9 | 2022-04-27 16:49:07 +0200 | [diff] [blame] | 309 | .user_arg = msg->id |
Davide Grohmann | 35ce6c8 | 2021-06-01 15:03:51 +0200 | [diff] [blame] | 310 | }; |
| 311 | |
| 312 | return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_CAPABILITIES_REQ, |
| 313 | &req, |
| 314 | sizeof(req)); |
| 315 | } |
| 316 | |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 317 | int ethosu_mailbox_inference(struct ethosu_mailbox *mbox, |
Davide Grohmann | 32660f9 | 2022-04-27 16:49:07 +0200 | [diff] [blame] | 318 | struct ethosu_mailbox_msg *msg, |
Kristofer Jonsson | b74492c | 2020-09-10 13:26:01 +0200 | [diff] [blame] | 319 | uint32_t ifm_count, |
| 320 | struct ethosu_buffer **ifm, |
| 321 | uint32_t ofm_count, |
| 322 | struct ethosu_buffer **ofm, |
Per Åstrand | f7e407a | 2020-10-23 21:25:05 +0200 | [diff] [blame] | 323 | struct ethosu_buffer *network, |
Kristofer Jonsson | 35de9e6 | 2022-03-08 13:25:45 +0100 | [diff] [blame] | 324 | uint32_t network_index, |
Per Åstrand | f7e407a | 2020-10-23 21:25:05 +0200 | [diff] [blame] | 325 | uint8_t *pmu_event_config, |
| 326 | uint8_t pmu_event_config_count, |
| 327 | uint8_t pmu_cycle_counter_enable) |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 328 | { |
| 329 | struct ethosu_core_inference_req inf; |
Kristofer Jonsson | b74492c | 2020-09-10 13:26:01 +0200 | [diff] [blame] | 330 | uint32_t i; |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 331 | |
Per Åstrand | f7e407a | 2020-10-23 21:25:05 +0200 | [diff] [blame] | 332 | /* Verify that the uapi and core has the same number of pmus */ |
| 333 | if (pmu_event_config_count != ETHOSU_CORE_PMU_MAX) { |
| 334 | dev_err(mbox->dev, "PMU count misconfigured.\n"); |
| 335 | |
| 336 | return -EINVAL; |
| 337 | } |
| 338 | |
Davide Grohmann | 32660f9 | 2022-04-27 16:49:07 +0200 | [diff] [blame] | 339 | inf.user_arg = msg->id; |
Kristofer Jonsson | b74492c | 2020-09-10 13:26:01 +0200 | [diff] [blame] | 340 | inf.ifm_count = ifm_count; |
| 341 | inf.ofm_count = ofm_count; |
Per Åstrand | f7e407a | 2020-10-23 21:25:05 +0200 | [diff] [blame] | 342 | inf.pmu_cycle_counter_enable = pmu_cycle_counter_enable; |
Kristofer Jonsson | b74492c | 2020-09-10 13:26:01 +0200 | [diff] [blame] | 343 | |
| 344 | for (i = 0; i < ifm_count; i++) |
| 345 | ethosu_core_set_size(ifm[i], &inf.ifm[i]); |
| 346 | |
| 347 | for (i = 0; i < ofm_count; i++) |
| 348 | ethosu_core_set_capacity(ofm[i], &inf.ofm[i]); |
| 349 | |
Per Åstrand | f7e407a | 2020-10-23 21:25:05 +0200 | [diff] [blame] | 350 | for (i = 0; i < ETHOSU_CORE_PMU_MAX; i++) |
| 351 | inf.pmu_event_config[i] = pmu_event_config[i]; |
| 352 | |
Kristofer Jonsson | 35de9e6 | 2022-03-08 13:25:45 +0100 | [diff] [blame] | 353 | if (network != NULL) { |
| 354 | inf.network.type = ETHOSU_CORE_NETWORK_BUFFER; |
| 355 | ethosu_core_set_size(network, &inf.network.buffer); |
| 356 | } else { |
| 357 | inf.network.type = ETHOSU_CORE_NETWORK_INDEX; |
| 358 | inf.network.index = network_index; |
| 359 | } |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 360 | |
| 361 | return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_INFERENCE_REQ, |
| 362 | &inf, sizeof(inf)); |
| 363 | } |
| 364 | |
Kristofer Jonsson | 3c6a260 | 2022-03-10 11:17:29 +0100 | [diff] [blame] | 365 | int ethosu_mailbox_network_info_request(struct ethosu_mailbox *mbox, |
Davide Grohmann | 32660f9 | 2022-04-27 16:49:07 +0200 | [diff] [blame] | 366 | struct ethosu_mailbox_msg *msg, |
Kristofer Jonsson | 3c6a260 | 2022-03-10 11:17:29 +0100 | [diff] [blame] | 367 | struct ethosu_buffer *network, |
| 368 | uint32_t network_index) |
| 369 | { |
| 370 | struct ethosu_core_network_info_req info; |
| 371 | |
Davide Grohmann | 32660f9 | 2022-04-27 16:49:07 +0200 | [diff] [blame] | 372 | info.user_arg = msg->id; |
Kristofer Jonsson | 3c6a260 | 2022-03-10 11:17:29 +0100 | [diff] [blame] | 373 | |
| 374 | if (network != NULL) { |
| 375 | info.network.type = ETHOSU_CORE_NETWORK_BUFFER; |
| 376 | ethosu_core_set_size(network, &info.network.buffer); |
| 377 | } else { |
| 378 | info.network.type = ETHOSU_CORE_NETWORK_INDEX; |
| 379 | info.network.index = network_index; |
| 380 | } |
| 381 | |
| 382 | return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_NETWORK_INFO_REQ, |
| 383 | &info, sizeof(info)); |
| 384 | } |
| 385 | |
Davide Grohmann | 7e8f508 | 2022-03-23 12:48:45 +0100 | [diff] [blame] | 386 | int ethosu_mailbox_cancel_inference(struct ethosu_mailbox *mbox, |
Davide Grohmann | 32660f9 | 2022-04-27 16:49:07 +0200 | [diff] [blame] | 387 | struct ethosu_mailbox_msg *msg, |
| 388 | int inference_handle) |
Davide Grohmann | 7e8f508 | 2022-03-23 12:48:45 +0100 | [diff] [blame] | 389 | { |
| 390 | struct ethosu_core_cancel_inference_req req; |
| 391 | |
Davide Grohmann | 32660f9 | 2022-04-27 16:49:07 +0200 | [diff] [blame] | 392 | req.user_arg = msg->id; |
| 393 | req.inference_handle = inference_handle; |
Davide Grohmann | 7e8f508 | 2022-03-23 12:48:45 +0100 | [diff] [blame] | 394 | |
| 395 | return ethosu_queue_write_msg(mbox, |
| 396 | ETHOSU_CORE_MSG_CANCEL_INFERENCE_REQ, |
| 397 | &req, sizeof(req)); |
| 398 | } |
| 399 | |
/* Work item handler: runs the user-supplied rx callback in process
 * context, outside the mailbox framework's callback context. */
static void ethosu_mailbox_rx_work(struct work_struct *work)
{
	struct ethosu_mailbox *mbox = container_of(work, typeof(*mbox), work);

	mbox->callback(mbox->user_arg);
}
| 406 | |
| 407 | static void ethosu_mailbox_rx_callback(struct mbox_client *client, |
| 408 | void *message) |
| 409 | { |
| 410 | struct ethosu_mailbox *mbox = |
| 411 | container_of(client, typeof(*mbox), client); |
| 412 | |
| 413 | dev_info(mbox->dev, "mbox: Received message.\n"); |
| 414 | |
| 415 | queue_work(mbox->wq, &mbox->work); |
| 416 | } |
| 417 | |
| 418 | static void ethosu_mailbox_tx_done(struct mbox_client *client, |
| 419 | void *message, |
| 420 | int r) |
| 421 | { |
| 422 | if (r) |
| 423 | dev_warn(client->dev, "mbox: Failed sending message (%d)\n", r); |
| 424 | else |
| 425 | dev_info(client->dev, "mbox: Message sent\n"); |
| 426 | } |
| 427 | |
/* Initialize the mailbox: map the in/out queue resources, create the rx
 * work queue and request the mailbox channels.
 *
 * 'callback' is invoked (with 'user_arg') from process context whenever a
 * message is received. Resources are released in reverse order on failure.
 * Returns 0 on success or a negative error code.
 */
int ethosu_mailbox_init(struct ethosu_mailbox *mbox,
			struct device *dev,
			struct resource *in_queue,
			struct resource *out_queue,
			ethosu_mailbox_cb callback,
			void *user_arg)
{
	int ret;

	mbox->dev = dev;
	mbox->callback = callback;
	mbox->user_arg = user_arg;
	idr_init(&mbox->msg_idr);

	mbox->client.dev = dev;
	mbox->client.rx_callback = ethosu_mailbox_rx_callback;
	mbox->client.tx_prepare = NULL; /* preparation of data is handled
	                                 * through the
	                                 * queue functions */
	mbox->client.tx_done = ethosu_mailbox_tx_done;
	mbox->client.tx_block = true;
	mbox->client.knows_txdone = false;
	mbox->client.tx_tout = 500;

	mbox->in_queue = devm_ioremap_resource(mbox->dev, in_queue);
	if (IS_ERR(mbox->in_queue))
		return PTR_ERR(mbox->in_queue);

	mbox->out_queue = devm_ioremap_resource(mbox->dev, out_queue);
	if (IS_ERR(mbox->out_queue)) {
		ret = PTR_ERR(mbox->out_queue);
		goto unmap_in_queue;
	}

	/* Dedicated single-threaded queue so rx callbacks run serialized. */
	mbox->wq = create_singlethread_workqueue("ethosu_workqueue");
	if (!mbox->wq) {
		dev_err(mbox->dev, "Failed to create work queue\n");
		ret = -EINVAL;
		goto unmap_out_queue;
	}

	INIT_WORK(&mbox->work, ethosu_mailbox_rx_work);

	mbox->tx = mbox_request_channel_byname(&mbox->client, "tx");
	if (IS_ERR(mbox->tx)) {
		dev_warn(mbox->dev, "mbox: Failed to request tx channel\n");
		ret = PTR_ERR(mbox->tx);
		goto workqueue_destroy;
	}

	/* A missing rx channel is not fatal: fall back to the tx channel. */
	mbox->rx = mbox_request_channel_byname(&mbox->client, "rx");
	if (IS_ERR(mbox->rx)) {
		dev_info(dev, "mbox: Using same channel for RX and TX\n");
		mbox->rx = mbox->tx;
	}

	return 0;

workqueue_destroy:
	destroy_workqueue(mbox->wq);

unmap_out_queue:
	devm_iounmap(mbox->dev, mbox->out_queue);

unmap_in_queue:
	devm_iounmap(mbox->dev, mbox->in_queue);

	return ret;
}
| 497 | |
| 498 | void ethosu_mailbox_deinit(struct ethosu_mailbox *mbox) |
| 499 | { |
| 500 | if (mbox->rx != mbox->tx) |
| 501 | mbox_free_channel(mbox->rx); |
| 502 | |
| 503 | mbox_free_channel(mbox->tx); |
| 504 | destroy_workqueue(mbox->wq); |
| 505 | devm_iounmap(mbox->dev, mbox->out_queue); |
| 506 | devm_iounmap(mbox->dev, mbox->in_queue); |
| 507 | } |