blob: 77b961478c1c95edf13464670f1693bb56058b22 [file] [log] [blame]
Kristofer Jonsson116a6352020-08-20 17:25:23 +02001/*
2 * (C) COPYRIGHT 2020 ARM Limited. All rights reserved.
3 *
4 * This program is free software and is provided to you under the terms of the
5 * GNU General Public License version 2 as published by the Free Software
6 * Foundation, and any use by you of this program is subject to the terms
7 * of such GNU licence.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * SPDX-License-Identifier: GPL-2.0-only
19 */
20
21/****************************************************************************
22 * Includes
23 ****************************************************************************/
24
25#include "ethosu_mailbox.h"
26
27#include "ethosu_buffer.h"
28#include "ethosu_core_interface.h"
29#include "ethosu_device.h"
30
31#include <linux/resource.h>
32#include <linux/uio.h>
33
34/****************************************************************************
35 * Functions
36 ****************************************************************************/
37
38static void ethosu_core_set_size(struct ethosu_buffer *buf,
39 struct ethosu_core_buffer *cbuf)
40{
41 cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset;
42 cbuf->size = (uint32_t)buf->size;
43}
44
45static void ethosu_core_set_capacity(struct ethosu_buffer *buf,
46 struct ethosu_core_buffer *cbuf)
47{
48 cbuf->ptr = (uint32_t)buf->dma_addr + buf->offset + buf->size;
49 cbuf->size = (uint32_t)buf->capacity - buf->offset - buf->size;
50}
51
52static size_t ethosu_queue_available(struct ethosu_core_queue *queue)
53{
54 size_t size = queue->header.write - queue->header.read;
55
56 if (queue->header.read > queue->header.write)
57 size += queue->header.size;
58
59 return size;
60}
61
62static size_t ethosu_queue_capacity(struct ethosu_core_queue *queue)
63{
64 return queue->header.size - ethosu_queue_available(queue);
65}
66
67static int ethosu_queue_write(struct ethosu_mailbox *mbox,
68 const struct kvec *vec,
69 size_t length)
70{
71 struct ethosu_core_queue *queue = mbox->in_queue;
72 uint8_t *dst = &queue->data[0];
73 uint32_t wpos = queue->header.write;
74 size_t total_size;
75 size_t i;
76 int ret;
77
78 for (i = 0, total_size = 0; i < length; i++)
79 total_size += vec[i].iov_len;
80
81 if (total_size > ethosu_queue_capacity(queue))
82 return -EINVAL;
83
84 for (i = 0; i < length; i++) {
85 const uint8_t *src = vec[i].iov_base;
86 const uint8_t *end = src + vec[i].iov_len;
87
88 while (src < end) {
89 dst[wpos] = *src++;
90 wpos = (wpos + 1) % queue->header.size;
91 }
92 }
93
94 queue->header.write = wpos;
95
96 ret = mbox_send_message(mbox->tx, queue);
97 if (ret < 0)
98 return ret;
99
100 return 0;
101}
102
103static int ethosu_queue_write_msg(struct ethosu_mailbox *mbox,
104 uint32_t type,
105 void *data,
106 size_t length)
107{
108 struct ethosu_core_msg msg = { .type = type, .length = length };
109 const struct kvec vec[2] = {
110 { &msg, sizeof(msg) },
111 { data, length }
112 };
113
114 return ethosu_queue_write(mbox, vec, 2);
115}
116
117static int ethosu_queue_read(struct ethosu_mailbox *mbox,
118 void *data,
119 size_t length)
120{
121 struct ethosu_core_queue *queue = mbox->out_queue;
122 uint8_t *src = &queue->data[0];
123 uint8_t *dst = (uint8_t *)data;
124 const uint8_t *end = dst + length;
125 uint32_t rpos = queue->header.read;
126
127 if (length > ethosu_queue_available(queue))
128 return -ENOMSG;
129
130 while (dst < end) {
131 *dst++ = src[rpos];
132 rpos = (rpos + 1) % queue->header.size;
133 }
134
135 queue->header.read = rpos;
136
137 return 0;
138}
139
140int ethosu_mailbox_read(struct ethosu_mailbox *mbox,
141 struct ethosu_core_msg *header,
142 void *data,
143 size_t length)
144{
145 int ret;
146
147 /* Read message header */
148 ret = ethosu_queue_read(mbox, header, sizeof(*header));
149 if (ret)
150 return ret;
151
152 dev_info(mbox->dev, "mbox: Read msg header. type=%u, length=%u",
153 header->type, header->length);
154
155 /* Check that payload is not larger than allocated buffer */
156 if (header->length > length)
157 return -ENOMEM;
158
159 /* Ready payload data */
160 ret = ethosu_queue_read(mbox, data, header->length);
161 if (ret)
162 return -EBADMSG;
163
164 return 0;
165}
166
167int ethosu_mailbox_ping(struct ethosu_mailbox *mbox)
168{
169 return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_PING, NULL, 0);
170}
171
172int ethosu_mailbox_inference(struct ethosu_mailbox *mbox,
173 void *user_arg,
Kristofer Jonssonb74492c2020-09-10 13:26:01 +0200174 uint32_t ifm_count,
175 struct ethosu_buffer **ifm,
176 uint32_t ofm_count,
177 struct ethosu_buffer **ofm,
Per Åstrandf7e407a2020-10-23 21:25:05 +0200178 struct ethosu_buffer *network,
179 uint8_t *pmu_event_config,
180 uint8_t pmu_event_config_count,
181 uint8_t pmu_cycle_counter_enable)
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200182{
183 struct ethosu_core_inference_req inf;
Kristofer Jonssonb74492c2020-09-10 13:26:01 +0200184 uint32_t i;
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200185
Per Åstrandf7e407a2020-10-23 21:25:05 +0200186 /* Verify that the uapi and core has the same number of pmus */
187 if (pmu_event_config_count != ETHOSU_CORE_PMU_MAX) {
188 dev_err(mbox->dev, "PMU count misconfigured.\n");
189
190 return -EINVAL;
191 }
192
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200193 inf.user_arg = (ptrdiff_t)user_arg;
Kristofer Jonssonb74492c2020-09-10 13:26:01 +0200194 inf.ifm_count = ifm_count;
195 inf.ofm_count = ofm_count;
Per Åstrandf7e407a2020-10-23 21:25:05 +0200196 inf.pmu_cycle_counter_enable = pmu_cycle_counter_enable;
Kristofer Jonssonb74492c2020-09-10 13:26:01 +0200197
198 for (i = 0; i < ifm_count; i++)
199 ethosu_core_set_size(ifm[i], &inf.ifm[i]);
200
201 for (i = 0; i < ofm_count; i++)
202 ethosu_core_set_capacity(ofm[i], &inf.ofm[i]);
203
Per Åstrandf7e407a2020-10-23 21:25:05 +0200204 for (i = 0; i < ETHOSU_CORE_PMU_MAX; i++)
205 inf.pmu_event_config[i] = pmu_event_config[i];
206
Kristofer Jonsson116a6352020-08-20 17:25:23 +0200207 ethosu_core_set_size(network, &inf.network);
208
209 return ethosu_queue_write_msg(mbox, ETHOSU_CORE_MSG_INFERENCE_REQ,
210 &inf, sizeof(inf));
211}
212
213static void ethosu_mailbox_rx_work(struct work_struct *work)
214{
215 struct ethosu_mailbox *mbox = container_of(work, typeof(*mbox), work);
216
217 mbox->callback(mbox->user_arg);
218}
219
220static void ethosu_mailbox_rx_callback(struct mbox_client *client,
221 void *message)
222{
223 struct ethosu_mailbox *mbox =
224 container_of(client, typeof(*mbox), client);
225
226 dev_info(mbox->dev, "mbox: Received message.\n");
227
228 queue_work(mbox->wq, &mbox->work);
229}
230
231static void ethosu_mailbox_tx_done(struct mbox_client *client,
232 void *message,
233 int r)
234{
235 if (r)
236 dev_warn(client->dev, "mbox: Failed sending message (%d)\n", r);
237 else
238 dev_info(client->dev, "mbox: Message sent\n");
239}
240
/**
 * ethosu_mailbox_init() - Initialize the mailbox instance.
 * @mbox:      Mailbox instance to initialize (caller-allocated).
 * @dev:       Parent device, used for devm mappings and logging.
 * @in_queue:  MMIO resource for the host-to-firmware queue.
 * @out_queue: MMIO resource for the firmware-to-host queue.
 * @callback:  Called (from workqueue context) when a message arrives.
 * @user_arg:  Opaque argument passed to @callback.
 *
 * Maps both queues, creates the RX workqueue and requests the "tx"/"rx"
 * mailbox channels. If no separate "rx" channel exists, the "tx" channel
 * is reused for both directions. On failure, resources acquired so far
 * are released via the goto cleanup chain.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ethosu_mailbox_init(struct ethosu_mailbox *mbox,
			struct device *dev,
			struct resource *in_queue,
			struct resource *out_queue,
			ethosu_mailbox_cb callback,
			void *user_arg)
{
	int ret;

	mbox->dev = dev;
	mbox->callback = callback;
	mbox->user_arg = user_arg;

	/* Configure the mailbox framework client. tx_block + tx_tout make
	 * sends synchronous with a 500 ms timeout. */
	mbox->client.dev = dev;
	mbox->client.rx_callback = ethosu_mailbox_rx_callback;
	mbox->client.tx_prepare = NULL; /* preparation of data is handled
					 * through the
					 * queue functions */
	mbox->client.tx_done = ethosu_mailbox_tx_done;
	mbox->client.tx_block = true;
	mbox->client.knows_txdone = false;
	mbox->client.tx_tout = 500;

	/* Map the shared memory queues; devm ties the mappings' lifetime
	 * to @dev. */
	mbox->in_queue = devm_ioremap_resource(mbox->dev, in_queue);
	if (IS_ERR(mbox->in_queue))
		return PTR_ERR(mbox->in_queue);

	mbox->out_queue = devm_ioremap_resource(mbox->dev, out_queue);
	if (IS_ERR(mbox->out_queue)) {
		ret = PTR_ERR(mbox->out_queue);
		goto unmap_in_queue;
	}

	/* Single-threaded workqueue: RX messages are handled one at a
	 * time, in order. */
	mbox->wq = create_singlethread_workqueue("ethosu_workqueue");
	if (!mbox->wq) {
		dev_err(mbox->dev, "Failed to create work queue\n");
		ret = -EINVAL;
		goto unmap_out_queue;
	}

	INIT_WORK(&mbox->work, ethosu_mailbox_rx_work);

	mbox->tx = mbox_request_channel_byname(&mbox->client, "tx");
	if (IS_ERR(mbox->tx)) {
		dev_warn(mbox->dev, "mbox: Failed to request tx channel\n");
		ret = PTR_ERR(mbox->tx);
		goto workqueue_destroy;
	}

	/* A missing "rx" channel is not fatal: fall back to a single
	 * bidirectional channel. deinit checks rx != tx before freeing. */
	mbox->rx = mbox_request_channel_byname(&mbox->client, "rx");
	if (IS_ERR(mbox->rx)) {
		dev_info(dev, "mbox: Using same channel for RX and TX\n");
		mbox->rx = mbox->tx;
	}

	return 0;

workqueue_destroy:
	destroy_workqueue(mbox->wq);

unmap_out_queue:
	devm_iounmap(mbox->dev, mbox->out_queue);

unmap_in_queue:
	devm_iounmap(mbox->dev, mbox->in_queue);

	return ret;
}
309
310void ethosu_mailbox_deinit(struct ethosu_mailbox *mbox)
311{
312 if (mbox->rx != mbox->tx)
313 mbox_free_channel(mbox->rx);
314
315 mbox_free_channel(mbox->tx);
316 destroy_workqueue(mbox->wq);
317 devm_iounmap(mbox->dev, mbox->out_queue);
318 devm_iounmap(mbox->dev, mbox->in_queue);
319}