blob: 66ce216821163590d3ffa0e36305ca516f9ea108 [file] [log] [blame]
/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Message Handling Unit version 2 controller driver
 * Copyright (C) 2019-2020 ARM Ltd.
 *
 * Based on drivers/mailbox/arm_mhu.c
 *
 */
10
11#include <linux/interrupt.h>
12#include <linux/mutex.h>
13#include <linux/slab.h>
14#include <linux/err.h>
15#include <linux/io.h>
16#include <linux/module.h>
17#include <linux/amba/bus.h>
18#include <linux/mailbox_controller.h>
19#include <linux/of_device.h>
20#include <linux/of_address.h>
21
/* Register offsets, relative to a channel window / frame base. */
#define MHU_V2_REG_STAT_OFS	0x0	/* channel status word */
#define MHU_V2_REG_CLR_OFS	0x8	/* write 1s to clear status bits */
#define MHU_V2_REG_SET_OFS	0xC	/* write 1s to set status (doorbell) */
#define MHU_V2_REG_MSG_NO_CAP_OFS	0xF80	/* number of channels implemented */
#define MHU_V2_REG_ACC_REQ_OFS	0xF88	/* access request (sender side) */
#define MHU_V2_REG_ACC_RDY_OFS	0xF8C	/* access ready (ack from receiver) */
#define MHU_V2_INT_EN_OFS	0xF98	/* interrupt enable */
#define MHU_V2_AIDR_OFS	0xFCC	/* architecture ID register */

#define MHU_V2_CHCOMB	BIT(2)	/* combined receiver interrupt enable */
#define MHU_V2_AIDR_MINOR(_reg)	((_reg) & 0xF)	/* AIDR minor revision field */

/* Stride between consecutive channel register windows. */
#define MHU_V2_EACH_CHANNEL_SIZE	0x20

/* Map a mbox_controller back to its containing arm_mhuv2. */
#define mbox_to_arm_mhuv2(c) container_of(c, struct arm_mhuv2, mbox)
37
/* Per-channel state: IRQ line and the channel's tx/rx register windows. */
struct mhuv2_link {
	unsigned int irq;	/* rx interrupt; only set on channel 0 in probe */
	void __iomem *tx_reg;	/* this channel's window in the sender frame */
	void __iomem *rx_reg;	/* this channel's window in the receiver frame */
};

/* Controller state for one MHUv2 instance. */
struct arm_mhuv2 {
	void __iomem *base;	/* sender frame base (tx_base); used for ACC_REQ/ACC_RDY */
	struct mhuv2_link *mlink;	/* array of pchans links (devm-managed) */
	struct mbox_chan *chan;	/* array of pchans channels (devm-managed) */
	struct mbox_controller mbox;
};
50
Per Åstrandd7483362020-10-07 13:50:46 +020051static irqreturn_t mhuv2_rx_interrupt(int irq,
52 void *p)
Per Åstrandbe87baf2020-10-07 13:47:24 +020053{
54 struct mbox_chan *chan = p;
55 struct mhuv2_link *mlink = chan->con_priv;
56 u32 val;
57
58 val = readl_relaxed(mlink->rx_reg + MHU_V2_REG_STAT_OFS);
59 if (!val)
60 return IRQ_NONE;
61
62 mbox_chan_received_data(chan, (void *)&val);
63
64 writel_relaxed(val, mlink->rx_reg + MHU_V2_REG_CLR_OFS);
65
66 return IRQ_HANDLED;
67}
68
69static bool mhuv2_last_tx_done(struct mbox_chan *chan)
70{
71 struct mhuv2_link *mlink = chan->con_priv;
72 u32 val = readl_relaxed(mlink->tx_reg + MHU_V2_REG_STAT_OFS);
73
74 return (val == 0);
75}
76
Per Åstrandd7483362020-10-07 13:50:46 +020077static int mhuv2_send_data(struct mbox_chan *chan,
78 void *data)
Per Åstrandbe87baf2020-10-07 13:47:24 +020079{
80 struct mhuv2_link *mlink = chan->con_priv;
Per Åstrand8fac4a42020-10-12 13:37:16 +020081 struct arm_mhuv2 *mhuv2 = mbox_to_arm_mhuv2(chan->mbox);
Per Åstrandbe87baf2020-10-07 13:47:24 +020082 u32 *arg = data;
Per Åstrand8fac4a42020-10-12 13:37:16 +020083 u32 tmo = 100000;
84
85 /* If ACCESS_REQUEST is low, we have to wait for the other side
86 * to relase ACCESS_READY before continuing. */
87 if (!readl_relaxed(mhuv2->base + MHU_V2_REG_ACC_REQ_OFS)) {
88 while (readl_relaxed(mhuv2->base + MHU_V2_REG_ACC_RDY_OFS) &&
89 --tmo != 0)
90 continue;
91
92 if (!tmo)
93 goto err;
94
95 /* Request access and wait for other side to ack */
96 writel_relaxed(0x1, mhuv2->base + MHU_V2_REG_ACC_REQ_OFS);
97 tmo = 100000;
98 while (!readl_relaxed(mhuv2->base + MHU_V2_REG_ACC_RDY_OFS) &&
99 --tmo != 0)
100 continue;
101
102 if (!tmo)
103 goto err;
104 } else {
105 while (!readl_relaxed(mhuv2->base + MHU_V2_REG_ACC_RDY_OFS) &&
106 --tmo != 0)
107 continue;
108
109 if (!tmo)
110 goto err;
111 }
Per Åstrandbe87baf2020-10-07 13:47:24 +0200112
113 writel_relaxed(*arg, mlink->tx_reg + MHU_V2_REG_SET_OFS);
114
115 return 0;
Per Åstrand8fac4a42020-10-12 13:37:16 +0200116
117err:
118 dev_err(chan->mbox->dev, "Failed to acquire access to mhu.\n");
119
120 return 1;
Per Åstrandbe87baf2020-10-07 13:47:24 +0200121}
122
123static int mhuv2_startup(struct mbox_chan *chan)
124{
125 struct mhuv2_link *mlink = chan->con_priv;
126 u32 val;
127 int ret;
128 struct arm_mhuv2 *mhuv2 = mbox_to_arm_mhuv2(chan->mbox);
129
130 writel_relaxed(0x1, mhuv2->base + MHU_V2_REG_ACC_REQ_OFS);
131
132 val = readl_relaxed(mlink->tx_reg + MHU_V2_REG_STAT_OFS);
133 writel_relaxed(val, mlink->tx_reg + MHU_V2_REG_CLR_OFS);
134
135 ret = request_irq(mlink->irq, mhuv2_rx_interrupt,
136 IRQF_SHARED, "mhuv2_link", chan);
137 if (ret) {
138 dev_err(chan->mbox->dev,
139 "unable to acquire IRQ %d\n", mlink->irq);
Per Åstrandd7483362020-10-07 13:50:46 +0200140
Per Åstrandbe87baf2020-10-07 13:47:24 +0200141 return ret;
142 }
143
144 return 0;
145}
146
147static void mhuv2_shutdown(struct mbox_chan *chan)
148{
149 struct mhuv2_link *mlink = chan->con_priv;
150 struct arm_mhuv2 *mhuv2 = mbox_to_arm_mhuv2(chan->mbox);
151
152 writel_relaxed(0x0, mhuv2->base + MHU_V2_REG_ACC_REQ_OFS);
153
154 free_irq(mlink->irq, chan);
155}
156
/* Channel operations; TX completion is detected by polling (see probe). */
static const struct mbox_chan_ops mhuv2_ops = {
	.send_data = mhuv2_send_data,
	.startup = mhuv2_startup,
	.shutdown = mhuv2_shutdown,
	.last_tx_done = mhuv2_last_tx_done,
};
163
164void mhuv2_check_enable_cmbint(struct mhuv2_link *link)
165{
166 const u32 aidr = readl_relaxed(link->rx_reg + MHU_V2_AIDR_OFS);
167
Per Åstrandd7483362020-10-07 13:50:46 +0200168 if (MHU_V2_AIDR_MINOR(aidr) == 1)
169 /* Enable combined receiver interrupt for MHUv2.1 */
Per Åstrandbe87baf2020-10-07 13:47:24 +0200170 writel_relaxed(MHU_V2_CHCOMB, link->rx_reg + MHU_V2_INT_EN_OFS);
Per Åstrandbe87baf2020-10-07 13:47:24 +0200171}
172
Per Åstrandd7483362020-10-07 13:50:46 +0200173static int mhuv2_probe(struct amba_device *adev,
174 const struct amba_id *id)
Per Åstrandbe87baf2020-10-07 13:47:24 +0200175{
176 int i, err;
177 struct arm_mhuv2 *mhuv2;
178 struct device *dev = &adev->dev;
179 void __iomem *rx_base, *tx_base;
180 const struct device_node *np = dev->of_node;
181 unsigned int pchans;
182 struct mhuv2_link *mlink;
183 struct mbox_chan *chan;
184
Per Åstrandbe87baf2020-10-07 13:47:24 +0200185 /* Allocate memory for device */
186 mhuv2 = devm_kzalloc(dev, sizeof(*mhuv2), GFP_KERNEL);
187 if (!mhuv2)
188 return -ENOMEM;
189
190 tx_base = of_iomap((struct device_node *)np, 0);
191 if (!tx_base) {
192 dev_err(dev, "failed to map tx registers\n");
Per Åstrandbe87baf2020-10-07 13:47:24 +0200193 return -ENOMEM;
194 }
195
196 rx_base = of_iomap((struct device_node *)np, 1);
197 if (!rx_base) {
198 dev_err(dev, "failed to map rx registers\n");
Jonny Svärd44c507e2021-02-09 16:37:40 +0100199 iounmap(tx_base);
Per Åstrandbe87baf2020-10-07 13:47:24 +0200200 return -ENOMEM;
201 }
202
203 pchans = readl_relaxed(tx_base + MHU_V2_REG_MSG_NO_CAP_OFS);
204 if (pchans == 0 || pchans % 2) {
205 dev_err(dev, "invalid number of channels %d\n", pchans);
206 iounmap(rx_base);
207 iounmap(tx_base);
Per Åstrandd7483362020-10-07 13:50:46 +0200208
Per Åstrandbe87baf2020-10-07 13:47:24 +0200209 return -EINVAL;
210 }
211
212 mhuv2->mlink = devm_kcalloc(dev, pchans, sizeof(*mlink), GFP_KERNEL);
213 if (!mhuv2->mlink) {
214 iounmap(rx_base);
215 iounmap(tx_base);
Per Åstrandd7483362020-10-07 13:50:46 +0200216
Per Åstrandbe87baf2020-10-07 13:47:24 +0200217 return -ENOMEM;
218 }
219
220 mhuv2->chan = devm_kcalloc(dev, pchans, sizeof(*chan), GFP_KERNEL);
221 if (!mhuv2->chan) {
222 iounmap(rx_base);
223 iounmap(tx_base);
224 kfree(mhuv2->mlink);
Per Åstrandd7483362020-10-07 13:50:46 +0200225
Per Åstrandbe87baf2020-10-07 13:47:24 +0200226 return -ENOMEM;
227 }
228
229 for (i = 0; i < pchans; i++) {
230 mlink = mhuv2->mlink + i;
231 chan = mhuv2->chan + i;
232 chan->con_priv = mlink;
233 mlink->rx_reg = rx_base + (i * MHU_V2_EACH_CHANNEL_SIZE);
234 mlink->tx_reg = tx_base + (i * MHU_V2_EACH_CHANNEL_SIZE);
235 }
236
237 mhuv2->mlink->irq = adev->irq[0];
238 mhuv2_check_enable_cmbint(mhuv2->mlink);
239
240 mhuv2->base = tx_base;
241 mhuv2->mbox.dev = dev;
242 mhuv2->mbox.chans = mhuv2->chan;
243 mhuv2->mbox.num_chans = pchans;
244 mhuv2->mbox.ops = &mhuv2_ops;
245 mhuv2->mbox.txdone_irq = false;
246 mhuv2->mbox.txdone_poll = true;
247 mhuv2->mbox.txpoll_period = 1;
248
249 amba_set_drvdata(adev, mhuv2);
250
251 err = mbox_controller_register(&mhuv2->mbox);
252 if (err) {
253 dev_err(dev, "failed to register mailboxes %d\n", err);
254 iounmap(rx_base);
255 iounmap(tx_base);
256 kfree(mhuv2->mlink);
257 kfree(mhuv2->chan);
Per Åstrandd7483362020-10-07 13:50:46 +0200258
Per Åstrandbe87baf2020-10-07 13:47:24 +0200259 return err;
260 }
261
262 dev_info(dev, "ARM MHUv2 Mailbox driver registered\n");
Per Åstrandd7483362020-10-07 13:50:46 +0200263
Per Åstrandbe87baf2020-10-07 13:47:24 +0200264 return 0;
265}
266
/* Device removal: unregister the mailbox controller. */
static int mhuv2_remove(struct amba_device *adev)
{
	struct arm_mhuv2 *mhuv2 = amba_get_drvdata(adev);

	/*
	 * NOTE(review): the tx/rx mappings created with of_iomap() in probe
	 * are never iounmap()ed here (rx_base is not even stored; tx_base
	 * survives only as mhuv2->base) — verify whether they should be
	 * released on removal.
	 */
	mbox_controller_unregister(&mhuv2->mbox);

	return 0;
}
275
/* AMBA/PrimeCell peripheral IDs this driver binds against. */
static struct amba_id mhuv2_ids[] = {
	{
		.id = 0x4b0d1,
		.mask = 0xfffff,
	},
	{
		.id = 0xbb0d1,
		.mask = 0xfffff,
	},
	{
		.id = 0xbb076,
		.mask = 0xfffff,
	},
	{ 0, 0 },	/* sentinel */
};
MODULE_DEVICE_TABLE(amba, mhuv2_ids);
292
/* AMBA bus glue; module init/exit generated by module_amba_driver(). */
static struct amba_driver arm_mhuv2_driver = {
	.drv = {
		.name = "mhuv2",
	},
	.id_table = mhuv2_ids,
	.probe = mhuv2_probe,
	.remove = mhuv2_remove,
};
module_amba_driver(arm_mhuv2_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ARM MHUv2 Driver");
MODULE_AUTHOR("Samarth Parikh <samarthp@ymail.com>");