Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 1 | /* |
Kristofer Jonsson | 35de9e6 | 2022-03-08 13:25:45 +0100 | [diff] [blame] | 2 | * Copyright (c) 2020-2022 Arm Limited. |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 3 | * |
| 4 | * This program is free software and is provided to you under the terms of the |
| 5 | * GNU General Public License version 2 as published by the Free Software |
| 6 | * Foundation, and any use by you of this program is subject to the terms |
| 7 | * of such GNU licence. |
| 8 | * |
| 9 | * This program is distributed in the hope that it will be useful, |
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 12 | * GNU General Public License for more details. |
| 13 | * |
| 14 | * You should have received a copy of the GNU General Public License |
| 15 | * along with this program; if not, you can access it online at |
| 16 | * http://www.gnu.org/licenses/gpl-2.0.html. |
| 17 | * |
| 18 | * SPDX-License-Identifier: GPL-2.0-only |
| 19 | */ |
| 20 | |
| 21 | /**************************************************************************** |
| 22 | * Includes |
| 23 | ****************************************************************************/ |
| 24 | |
#include "ethosu_mailbox.h"

#include "ethosu_buffer.h"
#include "ethosu_core_interface.h"
#include "ethosu_device.h"
#include "ethosu_watchdog.h"

#include <linux/jiffies.h>
#include <linux/resource.h>
#include <linux/string.h>
#include <linux/uio.h>
| 35 | |
/****************************************************************************
 * Defines
 ****************************************************************************/
| 39 | |
| 40 | #ifndef fallthrough |
| 41 | #if __has_attribute(__fallthrough__) |
| 42 | #define fallthrough __attribute__((__fallthrough__)) |
| 43 | #else |
| 44 | #define fallthrough do {} while (0) /* fallthrough */ |
| 45 | #endif |
| 46 | #endif |
| 47 | |
| 48 | /**************************************************************************** |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 49 | * Functions |
| 50 | ****************************************************************************/ |
| 51 | |
Kristofer Jonsson | f5b98c9 | 2022-03-14 16:09:12 +0100 | [diff] [blame] | 52 | static void ethosu_wd_inc(struct ethosu_mailbox *mbox, |
| 53 | enum ethosu_core_msg_type type) |
| 54 | { |
| 55 | switch (type) { |
| 56 | case ETHOSU_CORE_MSG_PING: |
Kristofer Jonsson | 442fefb | 2022-03-17 17:15:52 +0100 | [diff] [blame] | 57 | mbox->ping_count++; |
| 58 | fallthrough; |
Kristofer Jonsson | f5b98c9 | 2022-03-14 16:09:12 +0100 | [diff] [blame] | 59 | case ETHOSU_CORE_MSG_INFERENCE_REQ: |
Kristofer Jonsson | f5b98c9 | 2022-03-14 16:09:12 +0100 | [diff] [blame] | 60 | ethosu_watchdog_inc(mbox->wdog); |
| 61 | break; |
| 62 | default: |
| 63 | break; |
| 64 | } |
| 65 | } |
| 66 | |
| 67 | static void ethosu_wd_dec(struct ethosu_mailbox *mbox, |
| 68 | enum ethosu_core_msg_type type) |
| 69 | { |
| 70 | switch (type) { |
| 71 | case ETHOSU_CORE_MSG_PONG: |
Kristofer Jonsson | 442fefb | 2022-03-17 17:15:52 +0100 | [diff] [blame] | 72 | mbox->ping_count--; |
| 73 | fallthrough; |
Kristofer Jonsson | f5b98c9 | 2022-03-14 16:09:12 +0100 | [diff] [blame] | 74 | case ETHOSU_CORE_MSG_INFERENCE_RSP: |
Kristofer Jonsson | f5b98c9 | 2022-03-14 16:09:12 +0100 | [diff] [blame] | 75 | ethosu_watchdog_dec(mbox->wdog); |
| 76 | break; |
| 77 | default: |
| 78 | break; |
| 79 | } |
| 80 | } |
| 81 | |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 82 | static void ethosu_core_set_size(struct ethosu_buffer *buf, |
| 83 | struct ethosu_core_buffer *cbuf) |
| 84 | { |
| 85 | cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset; |
| 86 | cbuf->size = (uint32_t)buf->size; |
| 87 | } |
| 88 | |
| 89 | static void ethosu_core_set_capacity(struct ethosu_buffer *buf, |
| 90 | struct ethosu_core_buffer *cbuf) |
| 91 | { |
| 92 | cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset + buf->size; |
| 93 | cbuf->size = (uint32_t)buf->capacity - buf->offset - buf->size; |
| 94 | } |
| 95 | |
| 96 | static size_t ethosu_queue_available(struct ethosu_core_queue *queue) |
| 97 | { |
| 98 | size_t size = queue->header.write - queue->header.read; |
| 99 | |
| 100 | if (queue->header.read > queue->header.write) |
| 101 | size += queue->header.size; |
| 102 | |
| 103 | return size; |
| 104 | } |
| 105 | |
| 106 | static size_t ethosu_queue_capacity(struct ethosu_core_queue *queue) |
| 107 | { |
| 108 | return queue->header.size - ethosu_queue_available(queue); |
| 109 | } |
| 110 | |
/*
 * Copy a scatter-gather list of 'length' kvec entries into the 'in'
 * queue and kick the firmware over the tx mailbox channel.
 *
 * Returns 0 on success, -EINVAL if the data does not fit in the queue,
 * or a negative error code from mbox_send_message().
 *
 * NOTE(review): the queue lives in memory mapped with
 * devm_ioremap_resource(); the copy is done byte by byte on purpose —
 * keep it that way rather than switching to memcpy().
 */
static int ethosu_queue_write(struct ethosu_mailbox *mbox,
			      const struct kvec *vec,
			      size_t length)
{
	struct ethosu_core_queue *queue = mbox->in_queue;
	uint8_t *dst = &queue->data[0];
	uint32_t wpos = queue->header.write;
	size_t total_size;
	size_t i;
	int ret;

	/* Sum up the payload size of all vector entries. */
	for (i = 0, total_size = 0; i < length; i++)
		total_size += vec[i].iov_len;

	/* Reject the message if it does not fit in the free space. */
	if (total_size > ethosu_queue_capacity(queue))
		return -EINVAL;

	/* Byte-wise copy into the circular buffer, wrapping at the end. */
	for (i = 0; i < length; i++) {
		const uint8_t *src = vec[i].iov_base;
		const uint8_t *end = src + vec[i].iov_len;

		while (src < end) {
			dst[wpos] = *src++;
			wpos = (wpos + 1) % queue->header.size;
		}
	}

	/* Publish the new write index after the data is in place. */
	queue->header.write = wpos;

	/* Notify the firmware that new data is available. */
	ret = mbox_send_message(mbox->tx, queue);
	if (ret < 0)
		return ret;

	return 0;
}
| 146 | |
| 147 | static int ethosu_queue_write_msg(struct ethosu_mailbox *mbox, |
| 148 | uint32_t type, |
| 149 | void *data, |
| 150 | size_t length) |
| 151 | { |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 152 | struct ethosu_core_msg msg = { |
| 153 | .magic = ETHOSU_CORE_MSG_MAGIC, |
| 154 | .type = type, .length= length |
| 155 | }; |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 156 | const struct kvec vec[2] = { |
| 157 | { &msg, sizeof(msg) }, |
| 158 | { data, length } |
| 159 | }; |
Kristofer Jonsson | f5b98c9 | 2022-03-14 16:09:12 +0100 | [diff] [blame] | 160 | int ret; |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 161 | |
Kristofer Jonsson | f5b98c9 | 2022-03-14 16:09:12 +0100 | [diff] [blame] | 162 | ret = ethosu_queue_write(mbox, vec, 2); |
| 163 | if (ret) |
| 164 | return ret; |
| 165 | |
| 166 | ethosu_wd_inc(mbox, type); |
| 167 | |
| 168 | return 0; |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 169 | } |
| 170 | |
| 171 | static int ethosu_queue_read(struct ethosu_mailbox *mbox, |
| 172 | void *data, |
| 173 | size_t length) |
| 174 | { |
| 175 | struct ethosu_core_queue *queue = mbox->out_queue; |
| 176 | uint8_t *src = &queue->data[0]; |
| 177 | uint8_t *dst = (uint8_t *)data; |
| 178 | const uint8_t *end = dst + length; |
| 179 | uint32_t rpos = queue->header.read; |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 180 | size_t queue_avail = ethosu_queue_available(queue); |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 181 | |
Davide Grohmann | 1c26baa | 2021-06-15 13:21:15 +0200 | [diff] [blame] | 182 | if (length == 0) |
| 183 | return 0; |
| 184 | else if (queue_avail == 0) |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 185 | return -ENOMSG; |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 186 | else if (length > queue_avail) |
| 187 | return -EBADMSG; |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 188 | |
| 189 | while (dst < end) { |
| 190 | *dst++ = src[rpos]; |
| 191 | rpos = (rpos + 1) % queue->header.size; |
| 192 | } |
| 193 | |
| 194 | queue->header.read = rpos; |
| 195 | |
| 196 | return 0; |
| 197 | } |
| 198 | |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 199 | void ethosu_mailbox_reset(struct ethosu_mailbox *mbox) |
| 200 | { |
| 201 | mbox->out_queue->header.read = mbox->out_queue->header.write; |
| 202 | } |
| 203 | |
/*
 * Invalidate the 'in' queue header before (re)starting the firmware.
 * The 0xffffff sentinels and zero size are what
 * ethosu_mailbox_wait_firmware() polls against: the firmware is
 * considered up once it has overwritten them with real values.
 */
void ethosu_mailbox_wait_prepare(struct ethosu_mailbox *mbox)
{
	mbox->in_queue->header.size = 0;
	mbox->in_queue->header.read = 0xffffff;
	mbox->in_queue->header.write = 0xffffff;
}
| 210 | |
| 211 | int ethosu_mailbox_wait_firmware(struct ethosu_mailbox *mbox) |
| 212 | { |
| 213 | const unsigned long timeout = 1000; |
| 214 | const unsigned long end = jiffies + msecs_to_jiffies(timeout); |
| 215 | volatile struct ethosu_core_queue_header *hdr = |
| 216 | &mbox->in_queue->header; |
| 217 | int ret = -ETIMEDOUT; |
| 218 | |
| 219 | /* Spin wait on mailbox initialization */ |
| 220 | while ((end - jiffies) < timeout) |
| 221 | if (hdr->size != 0 && |
| 222 | hdr->read != 0xffffff && |
| 223 | hdr->write != 0xffffff) { |
| 224 | ret = 0; |
| 225 | break; |
| 226 | } |
| 227 | |
| 228 | dev_info(mbox->dev, "mbox: Wait. ret=%d, size=%u, read=%u, write=%u", |
| 229 | ret, hdr->size, hdr->read, hdr->write); |
| 230 | |
| 231 | return ret; |
| 232 | } |
| 233 | |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 234 | int ethosu_mailbox_read(struct ethosu_mailbox *mbox, |
| 235 | struct ethosu_core_msg *header, |
| 236 | void *data, |
| 237 | size_t length) |
| 238 | { |
| 239 | int ret; |
| 240 | |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 241 | /* Read message header magic */ |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 242 | ret = ethosu_queue_read(mbox, header, sizeof(*header)); |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 243 | if (ret) { |
| 244 | if (ret != -ENOMSG) |
| 245 | dev_warn(mbox->dev, |
| 246 | "Msg: Failed to read message header\n"); |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 247 | |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 248 | return ret; |
| 249 | } |
| 250 | |
| 251 | if (header->magic != ETHOSU_CORE_MSG_MAGIC) { |
| 252 | dev_warn(mbox->dev, |
| 253 | "Msg: Invalid magic. Got: %08X but expected %08X\n", |
| 254 | header->magic, ETHOSU_CORE_MSG_MAGIC); |
| 255 | |
| 256 | return -EINVAL; |
| 257 | } |
| 258 | |
| 259 | dev_info(mbox->dev, |
| 260 | "mbox: Read msg header. magic=%08X, type=%u, length=%u", |
| 261 | header->magic, header->type, header->length); |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 262 | |
| 263 | /* Check that payload is not larger than allocated buffer */ |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 264 | if (header->length > length) { |
| 265 | dev_warn(mbox->dev, |
| 266 | "Msg: Buffer size (%zu) too small for message (%u)\n", |
| 267 | sizeof(data), header->length); |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 268 | |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 269 | return -ENOMEM; |
| 270 | } |
| 271 | |
| 272 | /* Read payload data */ |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 273 | ret = ethosu_queue_read(mbox, data, header->length); |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 274 | if (ret) { |
| 275 | dev_warn(mbox->dev, "Msg: Failed to read payload data\n"); |
| 276 | |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 277 | return -EBADMSG; |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 278 | } |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 279 | |
Kristofer Jonsson | f5b98c9 | 2022-03-14 16:09:12 +0100 | [diff] [blame] | 280 | ethosu_wd_dec(mbox, header->type); |
| 281 | |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 282 | return 0; |
| 283 | } |
| 284 | |
Kristofer Jonsson | 442fefb | 2022-03-17 17:15:52 +0100 | [diff] [blame] | 285 | int ethosu_mailbox_find(struct ethosu_mailbox *mbox, |
| 286 | struct ethosu_mailbox_msg *msg) |
| 287 | { |
| 288 | struct ethosu_mailbox_msg *cur; |
| 289 | |
| 290 | list_for_each_entry(cur, &mbox->pending_list, list) { |
| 291 | if (cur == msg) |
| 292 | return 0; |
| 293 | } |
| 294 | |
| 295 | return -EINVAL; |
| 296 | } |
| 297 | |
| 298 | void ethosu_mailbox_fail(struct ethosu_mailbox *mbox) |
| 299 | { |
| 300 | struct ethosu_mailbox_msg *cur, *cur_tmp; |
| 301 | |
| 302 | list_for_each_entry_safe(cur, cur_tmp, &mbox->pending_list, list) { |
| 303 | cur->fail(cur); |
| 304 | } |
| 305 | } |
| 306 | |
Davide Grohmann | 8b1fe55 | 2022-04-07 16:58:32 +0200 | [diff] [blame] | 307 | void ethosu_mailbox_resend(struct ethosu_mailbox *mbox) |
Kristofer Jonsson | 442fefb | 2022-03-17 17:15:52 +0100 | [diff] [blame] | 308 | { |
| 309 | struct ethosu_mailbox_msg *cur, *cur_tmp; |
| 310 | int ret; |
| 311 | |
| 312 | list_for_each_entry_safe(cur, cur_tmp, &mbox->pending_list, list) { |
| 313 | ret = cur->resend(cur); |
| 314 | if (ret) { |
Davide Grohmann | 8b1fe55 | 2022-04-07 16:58:32 +0200 | [diff] [blame] | 315 | dev_warn(mbox->dev, "Failed to resend msg. ret=%d", |
| 316 | ret); |
Kristofer Jonsson | 442fefb | 2022-03-17 17:15:52 +0100 | [diff] [blame] | 317 | cur->fail(cur); |
Kristofer Jonsson | 442fefb | 2022-03-17 17:15:52 +0100 | [diff] [blame] | 318 | } |
| 319 | } |
Kristofer Jonsson | 442fefb | 2022-03-17 17:15:52 +0100 | [diff] [blame] | 320 | } |
| 321 | |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 322 | int ethosu_mailbox_ping(struct ethosu_mailbox *mbox) |
| 323 | { |
| 324 | return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_PING, NULL, 0); |
| 325 | } |
| 326 | |
Jonny Svärd | 7c24c77 | 2021-01-14 19:53:17 +0100 | [diff] [blame] | 327 | int ethosu_mailbox_pong(struct ethosu_mailbox *mbox) |
| 328 | { |
| 329 | return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_PONG, NULL, 0); |
| 330 | } |
| 331 | |
| 332 | int ethosu_mailbox_version_request(struct ethosu_mailbox *mbox) |
| 333 | { |
| 334 | return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_VERSION_REQ, NULL, |
| 335 | 0); |
| 336 | } |
| 337 | |
Davide Grohmann | 35ce6c8 | 2021-06-01 15:03:51 +0200 | [diff] [blame] | 338 | int ethosu_mailbox_capabilities_request(struct ethosu_mailbox *mbox, |
| 339 | void *user_arg) |
| 340 | { |
| 341 | struct ethosu_core_capabilities_req req = { |
| 342 | .user_arg = (ptrdiff_t)user_arg |
| 343 | }; |
| 344 | |
| 345 | return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_CAPABILITIES_REQ, |
| 346 | &req, |
| 347 | sizeof(req)); |
| 348 | } |
| 349 | |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 350 | int ethosu_mailbox_inference(struct ethosu_mailbox *mbox, |
| 351 | void *user_arg, |
Kristofer Jonsson | b74492c | 2020-09-10 13:26:01 +0200 | [diff] [blame] | 352 | uint32_t ifm_count, |
| 353 | struct ethosu_buffer **ifm, |
| 354 | uint32_t ofm_count, |
| 355 | struct ethosu_buffer **ofm, |
Per Åstrand | f7e407a | 2020-10-23 21:25:05 +0200 | [diff] [blame] | 356 | struct ethosu_buffer *network, |
Kristofer Jonsson | 35de9e6 | 2022-03-08 13:25:45 +0100 | [diff] [blame] | 357 | uint32_t network_index, |
Per Åstrand | f7e407a | 2020-10-23 21:25:05 +0200 | [diff] [blame] | 358 | uint8_t *pmu_event_config, |
| 359 | uint8_t pmu_event_config_count, |
| 360 | uint8_t pmu_cycle_counter_enable) |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 361 | { |
| 362 | struct ethosu_core_inference_req inf; |
Kristofer Jonsson | b74492c | 2020-09-10 13:26:01 +0200 | [diff] [blame] | 363 | uint32_t i; |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 364 | |
Per Åstrand | f7e407a | 2020-10-23 21:25:05 +0200 | [diff] [blame] | 365 | /* Verify that the uapi and core has the same number of pmus */ |
| 366 | if (pmu_event_config_count != ETHOSU_CORE_PMU_MAX) { |
| 367 | dev_err(mbox->dev, "PMU count misconfigured.\n"); |
| 368 | |
| 369 | return -EINVAL; |
| 370 | } |
| 371 | |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 372 | inf.user_arg = (ptrdiff_t)user_arg; |
Kristofer Jonsson | b74492c | 2020-09-10 13:26:01 +0200 | [diff] [blame] | 373 | inf.ifm_count = ifm_count; |
| 374 | inf.ofm_count = ofm_count; |
Per Åstrand | f7e407a | 2020-10-23 21:25:05 +0200 | [diff] [blame] | 375 | inf.pmu_cycle_counter_enable = pmu_cycle_counter_enable; |
Kristofer Jonsson | b74492c | 2020-09-10 13:26:01 +0200 | [diff] [blame] | 376 | |
| 377 | for (i = 0; i < ifm_count; i++) |
| 378 | ethosu_core_set_size(ifm[i], &inf.ifm[i]); |
| 379 | |
| 380 | for (i = 0; i < ofm_count; i++) |
| 381 | ethosu_core_set_capacity(ofm[i], &inf.ofm[i]); |
| 382 | |
Per Åstrand | f7e407a | 2020-10-23 21:25:05 +0200 | [diff] [blame] | 383 | for (i = 0; i < ETHOSU_CORE_PMU_MAX; i++) |
| 384 | inf.pmu_event_config[i] = pmu_event_config[i]; |
| 385 | |
Kristofer Jonsson | 35de9e6 | 2022-03-08 13:25:45 +0100 | [diff] [blame] | 386 | if (network != NULL) { |
| 387 | inf.network.type = ETHOSU_CORE_NETWORK_BUFFER; |
| 388 | ethosu_core_set_size(network, &inf.network.buffer); |
| 389 | } else { |
| 390 | inf.network.type = ETHOSU_CORE_NETWORK_INDEX; |
| 391 | inf.network.index = network_index; |
| 392 | } |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 393 | |
| 394 | return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_INFERENCE_REQ, |
| 395 | &inf, sizeof(inf)); |
| 396 | } |
| 397 | |
Kristofer Jonsson | 3c6a260 | 2022-03-10 11:17:29 +0100 | [diff] [blame] | 398 | int ethosu_mailbox_network_info_request(struct ethosu_mailbox *mbox, |
| 399 | void *user_arg, |
| 400 | struct ethosu_buffer *network, |
| 401 | uint32_t network_index) |
| 402 | { |
| 403 | struct ethosu_core_network_info_req info; |
| 404 | |
| 405 | info.user_arg = (ptrdiff_t)user_arg; |
| 406 | |
| 407 | if (network != NULL) { |
| 408 | info.network.type = ETHOSU_CORE_NETWORK_BUFFER; |
| 409 | ethosu_core_set_size(network, &info.network.buffer); |
| 410 | } else { |
| 411 | info.network.type = ETHOSU_CORE_NETWORK_INDEX; |
| 412 | info.network.index = network_index; |
| 413 | } |
| 414 | |
| 415 | return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_NETWORK_INFO_REQ, |
| 416 | &info, sizeof(info)); |
| 417 | } |
| 418 | |
Davide Grohmann | 7e8f508 | 2022-03-23 12:48:45 +0100 | [diff] [blame] | 419 | int ethosu_mailbox_cancel_inference(struct ethosu_mailbox *mbox, |
| 420 | void *user_arg, |
| 421 | void *inference_handle) |
| 422 | { |
| 423 | struct ethosu_core_cancel_inference_req req; |
| 424 | |
| 425 | req.user_arg = (ptrdiff_t)user_arg; |
| 426 | req.inference_handle = (ptrdiff_t)inference_handle; |
| 427 | |
| 428 | return ethosu_queue_write_msg(mbox, |
| 429 | ETHOSU_CORE_MSG_CANCEL_INFERENCE_REQ, |
| 430 | &req, sizeof(req)); |
| 431 | } |
| 432 | |
Kristofer Jonsson | 116a635 | 2020-08-20 17:25:23 +0200 | [diff] [blame] | 433 | static void ethosu_mailbox_rx_work(struct work_struct *work) |
| 434 | { |
| 435 | struct ethosu_mailbox *mbox = container_of(work, typeof(*mbox), work); |
| 436 | |
| 437 | mbox->callback(mbox->user_arg); |
| 438 | } |
| 439 | |
| 440 | static void ethosu_mailbox_rx_callback(struct mbox_client *client, |
| 441 | void *message) |
| 442 | { |
| 443 | struct ethosu_mailbox *mbox = |
| 444 | container_of(client, typeof(*mbox), client); |
| 445 | |
| 446 | dev_info(mbox->dev, "mbox: Received message.\n"); |
| 447 | |
| 448 | queue_work(mbox->wq, &mbox->work); |
| 449 | } |
| 450 | |
| 451 | static void ethosu_mailbox_tx_done(struct mbox_client *client, |
| 452 | void *message, |
| 453 | int r) |
| 454 | { |
| 455 | if (r) |
| 456 | dev_warn(client->dev, "mbox: Failed sending message (%d)\n", r); |
| 457 | else |
| 458 | dev_info(client->dev, "mbox: Message sent\n"); |
| 459 | } |
| 460 | |
/*
 * Initialize the mailbox: map the message queues, create the rx work
 * queue and request the mailbox channels.
 *
 * 'callback' is invoked (with 'user_arg') from work-queue context
 * whenever a message arrives. Returns 0 on success or a negative error
 * code; on failure all resources acquired so far are released.
 */
int ethosu_mailbox_init(struct ethosu_mailbox *mbox,
			struct device *dev,
			struct resource *in_queue,
			struct resource *out_queue,
			ethosu_mailbox_cb callback,
			void *user_arg,
			struct ethosu_watchdog *wdog)
{
	int ret;

	mbox->dev = dev;
	mbox->callback = callback;
	mbox->user_arg = user_arg;
	mbox->wdog = wdog;
	mbox->ping_count = 0;
	INIT_LIST_HEAD(&mbox->pending_list);

	/* Configure the mailbox framework client. */
	mbox->client.dev = dev;
	mbox->client.rx_callback = ethosu_mailbox_rx_callback;
	mbox->client.tx_prepare = NULL; /* preparation of data is handled
	                                 * through the
	                                 * queue functions */
	mbox->client.tx_done = ethosu_mailbox_tx_done;
	mbox->client.tx_block = true;
	mbox->client.knows_txdone = false;
	mbox->client.tx_tout = 500;

	/* Map the shared memory regions holding the two message queues. */
	mbox->in_queue = devm_ioremap_resource(mbox->dev, in_queue);
	if (IS_ERR(mbox->in_queue))
		return PTR_ERR(mbox->in_queue);

	mbox->out_queue = devm_ioremap_resource(mbox->dev, out_queue);
	if (IS_ERR(mbox->out_queue)) {
		ret = PTR_ERR(mbox->out_queue);
		goto unmap_in_queue;
	}

	/* Work queue used to handle received messages in process context. */
	mbox->wq = create_singlethread_workqueue("ethosu_workqueue");
	if (!mbox->wq) {
		dev_err(mbox->dev, "Failed to create work queue\n");
		ret = -EINVAL;
		goto unmap_out_queue;
	}

	INIT_WORK(&mbox->work, ethosu_mailbox_rx_work);

	mbox->tx = mbox_request_channel_byname(&mbox->client, "tx");
	if (IS_ERR(mbox->tx)) {
		dev_warn(mbox->dev, "mbox: Failed to request tx channel\n");
		ret = PTR_ERR(mbox->tx);
		goto workqueue_destroy;
	}

	/* A dedicated rx channel is optional; fall back to the tx channel. */
	mbox->rx = mbox_request_channel_byname(&mbox->client, "rx");
	if (IS_ERR(mbox->rx)) {
		dev_info(dev, "mbox: Using same channel for RX and TX\n");
		mbox->rx = mbox->tx;
	}

	return 0;

	/* Error cleanup, releasing resources in reverse acquisition order. */
workqueue_destroy:
	destroy_workqueue(mbox->wq);

unmap_out_queue:
	devm_iounmap(mbox->dev, mbox->out_queue);

unmap_in_queue:
	devm_iounmap(mbox->dev, mbox->in_queue);

	return ret;
}
| 533 | |
| 534 | void ethosu_mailbox_deinit(struct ethosu_mailbox *mbox) |
| 535 | { |
| 536 | if (mbox->rx != mbox->tx) |
| 537 | mbox_free_channel(mbox->rx); |
| 538 | |
| 539 | mbox_free_channel(mbox->tx); |
| 540 | destroy_workqueue(mbox->wq); |
| 541 | devm_iounmap(mbox->dev, mbox->out_queue); |
| 542 | devm_iounmap(mbox->dev, mbox->in_queue); |
| 543 | } |