blob: 365715b2380d2b56349d5bcdc418148da5b408df [file] [log] [blame]
Per Åstrandd7483362020-10-07 13:50:46 +02001/* SPDX-License-Identifier: GPL-2.0 */
2
Per Åstrandbe87baf2020-10-07 13:47:24 +02003/*
4 * Message Handling Unit version 2 controller driver
Per Åstrand8fac4a42020-10-12 13:37:16 +02005 * Copyright (C) 2019-2020 ARM Ltd.
Per Åstrandbe87baf2020-10-07 13:47:24 +02006 *
7 * Based on drivers/mailbox/arm_mhu.c
8 *
9 */
10
11#include <linux/interrupt.h>
12#include <linux/mutex.h>
13#include <linux/slab.h>
14#include <linux/err.h>
15#include <linux/io.h>
16#include <linux/module.h>
17#include <linux/amba/bus.h>
18#include <linux/mailbox_controller.h>
19#include <linux/of_device.h>
20#include <linux/of_address.h>
21
/*
 * Per-channel window registers (offsets relative to a channel's tx/rx base).
 */
#define MHU_V2_REG_STAT_OFS		0x0	/* channel status bits */
#define MHU_V2_REG_CLR_OFS		0x8	/* write-1-to-clear status */
#define MHU_V2_REG_SET_OFS		0xC	/* write-1-to-set status (doorbell) */
/*
 * Block-level registers (offsets relative to the frame base).
 */
#define MHU_V2_REG_MSG_NO_CAP_OFS	0xF80	/* number of implemented channels */
#define MHU_V2_REG_ACC_REQ_OFS		0xF88	/* sender ACCESS_REQUEST */
#define MHU_V2_REG_ACC_RDY_OFS		0xF8C	/* receiver ACCESS_READY ack */
#define MHU_V2_INT_EN_OFS		0xF98	/* interrupt enable */
#define MHU_V2_AIDR_OFS			0xFCC	/* architecture ID (revision) */

#define MHU_V2_CHCOMB			BIT(2)	/* combined-channel interrupt enable */
#define MHU_V2_AIDR_MINOR(_reg)		((_reg) & 0xF)	/* arch minor revision */

/* Register stride between consecutive channel windows. */
#define MHU_V2_EACH_CHANNEL_SIZE	0x20

/* Map an embedded mbox_controller back to its owning arm_mhuv2. */
#define mbox_to_arm_mhuv2(c) container_of(c, struct arm_mhuv2, mbox)
37
/*
 * struct mhuv2_link - per-channel state
 * @irq:    receive interrupt number (shared across channels, see probe)
 * @tx_reg: base of this channel's transmit register window
 * @rx_reg: base of this channel's receive register window
 */
struct mhuv2_link {
	unsigned int irq;
	void __iomem *tx_reg;
	void __iomem *rx_reg;
};
43
/*
 * struct arm_mhuv2 - controller instance
 * @base:  transmit frame base, used for ACCESS_REQUEST/ACCESS_READY handshake
 * @mlink: array of per-channel link state (one entry per physical channel)
 * @chan:  mailbox channels exposed to the framework, parallel to @mlink
 * @mbox:  mailbox controller registered with the mailbox core
 */
struct arm_mhuv2 {
	void __iomem *base;
	struct mhuv2_link *mlink;
	struct mbox_chan *chan;
	struct mbox_controller mbox;
};
50
Per Åstrandd7483362020-10-07 13:50:46 +020051static irqreturn_t mhuv2_rx_interrupt(int irq,
52 void *p)
Per Åstrandbe87baf2020-10-07 13:47:24 +020053{
54 struct mbox_chan *chan = p;
55 struct mhuv2_link *mlink = chan->con_priv;
56 u32 val;
57
58 val = readl_relaxed(mlink->rx_reg + MHU_V2_REG_STAT_OFS);
59 if (!val)
60 return IRQ_NONE;
61
62 mbox_chan_received_data(chan, (void *)&val);
63
64 writel_relaxed(val, mlink->rx_reg + MHU_V2_REG_CLR_OFS);
65
66 return IRQ_HANDLED;
67}
68
69static bool mhuv2_last_tx_done(struct mbox_chan *chan)
70{
71 struct mhuv2_link *mlink = chan->con_priv;
72 u32 val = readl_relaxed(mlink->tx_reg + MHU_V2_REG_STAT_OFS);
73
74 return (val == 0);
75}
76
Per Åstrandd7483362020-10-07 13:50:46 +020077static int mhuv2_send_data(struct mbox_chan *chan,
78 void *data)
Per Åstrandbe87baf2020-10-07 13:47:24 +020079{
80 struct mhuv2_link *mlink = chan->con_priv;
Per Åstrand8fac4a42020-10-12 13:37:16 +020081 struct arm_mhuv2 *mhuv2 = mbox_to_arm_mhuv2(chan->mbox);
Per Åstrandbe87baf2020-10-07 13:47:24 +020082 u32 *arg = data;
Per Åstrand8fac4a42020-10-12 13:37:16 +020083 u32 tmo = 100000;
84
85 /* If ACCESS_REQUEST is low, we have to wait for the other side
86 * to relase ACCESS_READY before continuing. */
87 if (!readl_relaxed(mhuv2->base + MHU_V2_REG_ACC_REQ_OFS)) {
88 while (readl_relaxed(mhuv2->base + MHU_V2_REG_ACC_RDY_OFS) &&
89 --tmo != 0)
90 continue;
91
92 if (!tmo)
93 goto err;
94
95 /* Request access and wait for other side to ack */
96 writel_relaxed(0x1, mhuv2->base + MHU_V2_REG_ACC_REQ_OFS);
97 tmo = 100000;
98 while (!readl_relaxed(mhuv2->base + MHU_V2_REG_ACC_RDY_OFS) &&
99 --tmo != 0)
100 continue;
101
102 if (!tmo)
103 goto err;
104 } else {
105 while (!readl_relaxed(mhuv2->base + MHU_V2_REG_ACC_RDY_OFS) &&
106 --tmo != 0)
107 continue;
108
109 if (!tmo)
110 goto err;
111 }
Per Åstrandbe87baf2020-10-07 13:47:24 +0200112
113 writel_relaxed(*arg, mlink->tx_reg + MHU_V2_REG_SET_OFS);
114
115 return 0;
Per Åstrand8fac4a42020-10-12 13:37:16 +0200116
117err:
118 dev_err(chan->mbox->dev, "Failed to acquire access to mhu.\n");
119
120 return 1;
Per Åstrandbe87baf2020-10-07 13:47:24 +0200121}
122
123static int mhuv2_startup(struct mbox_chan *chan)
124{
125 struct mhuv2_link *mlink = chan->con_priv;
126 u32 val;
127 int ret;
128 struct arm_mhuv2 *mhuv2 = mbox_to_arm_mhuv2(chan->mbox);
129
130 writel_relaxed(0x1, mhuv2->base + MHU_V2_REG_ACC_REQ_OFS);
131
132 val = readl_relaxed(mlink->tx_reg + MHU_V2_REG_STAT_OFS);
133 writel_relaxed(val, mlink->tx_reg + MHU_V2_REG_CLR_OFS);
134
135 ret = request_irq(mlink->irq, mhuv2_rx_interrupt,
136 IRQF_SHARED, "mhuv2_link", chan);
137 if (ret) {
138 dev_err(chan->mbox->dev,
139 "unable to acquire IRQ %d\n", mlink->irq);
Per Åstrandd7483362020-10-07 13:50:46 +0200140
Per Åstrandbe87baf2020-10-07 13:47:24 +0200141 return ret;
142 }
143
144 return 0;
145}
146
147static void mhuv2_shutdown(struct mbox_chan *chan)
148{
149 struct mhuv2_link *mlink = chan->con_priv;
150 struct arm_mhuv2 *mhuv2 = mbox_to_arm_mhuv2(chan->mbox);
151
152 writel_relaxed(0x0, mhuv2->base + MHU_V2_REG_ACC_REQ_OFS);
153
154 free_irq(mlink->irq, chan);
155}
156
/* Channel operations handed to the mailbox core (TX completion by poll). */
static const struct mbox_chan_ops mhuv2_ops = {
	.send_data = mhuv2_send_data,
	.startup = mhuv2_startup,
	.shutdown = mhuv2_shutdown,
	.last_tx_done = mhuv2_last_tx_done,
};
163
164void mhuv2_check_enable_cmbint(struct mhuv2_link *link)
165{
166 const u32 aidr = readl_relaxed(link->rx_reg + MHU_V2_AIDR_OFS);
167
Per Åstrandd7483362020-10-07 13:50:46 +0200168 if (MHU_V2_AIDR_MINOR(aidr) == 1)
169 /* Enable combined receiver interrupt for MHUv2.1 */
Per Åstrandbe87baf2020-10-07 13:47:24 +0200170 writel_relaxed(MHU_V2_CHCOMB, link->rx_reg + MHU_V2_INT_EN_OFS);
Per Åstrandbe87baf2020-10-07 13:47:24 +0200171}
172
Per Åstrandd7483362020-10-07 13:50:46 +0200173static int mhuv2_probe(struct amba_device *adev,
174 const struct amba_id *id)
Per Åstrandbe87baf2020-10-07 13:47:24 +0200175{
176 int i, err;
177 struct arm_mhuv2 *mhuv2;
178 struct device *dev = &adev->dev;
179 void __iomem *rx_base, *tx_base;
180 const struct device_node *np = dev->of_node;
181 unsigned int pchans;
182 struct mhuv2_link *mlink;
183 struct mbox_chan *chan;
184
Per Åstrandbe87baf2020-10-07 13:47:24 +0200185 /* Allocate memory for device */
186 mhuv2 = devm_kzalloc(dev, sizeof(*mhuv2), GFP_KERNEL);
187 if (!mhuv2)
188 return -ENOMEM;
189
190 tx_base = of_iomap((struct device_node *)np, 0);
191 if (!tx_base) {
192 dev_err(dev, "failed to map tx registers\n");
193 iounmap(rx_base);
Per Åstrandd7483362020-10-07 13:50:46 +0200194
Per Åstrandbe87baf2020-10-07 13:47:24 +0200195 return -ENOMEM;
196 }
197
198 rx_base = of_iomap((struct device_node *)np, 1);
199 if (!rx_base) {
200 dev_err(dev, "failed to map rx registers\n");
Per Åstrandd7483362020-10-07 13:50:46 +0200201
Per Åstrandbe87baf2020-10-07 13:47:24 +0200202 return -ENOMEM;
203 }
204
205 pchans = readl_relaxed(tx_base + MHU_V2_REG_MSG_NO_CAP_OFS);
206 if (pchans == 0 || pchans % 2) {
207 dev_err(dev, "invalid number of channels %d\n", pchans);
208 iounmap(rx_base);
209 iounmap(tx_base);
Per Åstrandd7483362020-10-07 13:50:46 +0200210
Per Åstrandbe87baf2020-10-07 13:47:24 +0200211 return -EINVAL;
212 }
213
214 mhuv2->mlink = devm_kcalloc(dev, pchans, sizeof(*mlink), GFP_KERNEL);
215 if (!mhuv2->mlink) {
216 iounmap(rx_base);
217 iounmap(tx_base);
Per Åstrandd7483362020-10-07 13:50:46 +0200218
Per Åstrandbe87baf2020-10-07 13:47:24 +0200219 return -ENOMEM;
220 }
221
222 mhuv2->chan = devm_kcalloc(dev, pchans, sizeof(*chan), GFP_KERNEL);
223 if (!mhuv2->chan) {
224 iounmap(rx_base);
225 iounmap(tx_base);
226 kfree(mhuv2->mlink);
Per Åstrandd7483362020-10-07 13:50:46 +0200227
Per Åstrandbe87baf2020-10-07 13:47:24 +0200228 return -ENOMEM;
229 }
230
231 for (i = 0; i < pchans; i++) {
232 mlink = mhuv2->mlink + i;
233 chan = mhuv2->chan + i;
234 chan->con_priv = mlink;
235 mlink->rx_reg = rx_base + (i * MHU_V2_EACH_CHANNEL_SIZE);
236 mlink->tx_reg = tx_base + (i * MHU_V2_EACH_CHANNEL_SIZE);
237 }
238
239 mhuv2->mlink->irq = adev->irq[0];
240 mhuv2_check_enable_cmbint(mhuv2->mlink);
241
242 mhuv2->base = tx_base;
243 mhuv2->mbox.dev = dev;
244 mhuv2->mbox.chans = mhuv2->chan;
245 mhuv2->mbox.num_chans = pchans;
246 mhuv2->mbox.ops = &mhuv2_ops;
247 mhuv2->mbox.txdone_irq = false;
248 mhuv2->mbox.txdone_poll = true;
249 mhuv2->mbox.txpoll_period = 1;
250
251 amba_set_drvdata(adev, mhuv2);
252
253 err = mbox_controller_register(&mhuv2->mbox);
254 if (err) {
255 dev_err(dev, "failed to register mailboxes %d\n", err);
256 iounmap(rx_base);
257 iounmap(tx_base);
258 kfree(mhuv2->mlink);
259 kfree(mhuv2->chan);
Per Åstrandd7483362020-10-07 13:50:46 +0200260
Per Åstrandbe87baf2020-10-07 13:47:24 +0200261 return err;
262 }
263
264 dev_info(dev, "ARM MHUv2 Mailbox driver registered\n");
Per Åstrandd7483362020-10-07 13:50:46 +0200265
Per Åstrandbe87baf2020-10-07 13:47:24 +0200266 return 0;
267}
268
/*
 * mhuv2_remove() - tear down on device detach.
 * @adev: AMBA device instance
 *
 * Unregisters the mailbox controller; devm-managed allocations are
 * released automatically by the driver core.
 *
 * NOTE(review): the tx/rx register frames iomapped in probe() are
 * never iounmap()'d here (only the tx base is even retained, as
 * mhuv2->base) — looks like a mapping leak on unbind; verify.
 */
static int mhuv2_remove(struct amba_device *adev)
{
	struct arm_mhuv2 *mhuv2 = amba_get_drvdata(adev);

	mbox_controller_unregister(&mhuv2->mbox);

	return 0;
}
277
/*
 * AMBA peripheral IDs this driver binds to (full 20-bit ID match).
 * The three entries cover the known MHUv2 implementation variants.
 */
static struct amba_id mhuv2_ids[] = {
	{
		.id = 0x4b0d1,
		.mask = 0xfffff,
	},
	{
		.id = 0xbb0d1,
		.mask = 0xfffff,
	},
	{
		.id = 0xbb076,
		.mask = 0xfffff,
	},
	{ 0, 0 },	/* sentinel */
};
MODULE_DEVICE_TABLE(amba, mhuv2_ids);
294
/* AMBA driver glue; module_amba_driver() generates init/exit. */
static struct amba_driver arm_mhuv2_driver = {
	.drv = {
		.name	= "mhuv2",
	},
	.id_table	= mhuv2_ids,
	.probe		= mhuv2_probe,
	.remove		= mhuv2_remove,
};
module_amba_driver(arm_mhuv2_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ARM MHUv2 Driver");
MODULE_AUTHOR("Samarth Parikh <samarthp@ymail.com>");