// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2025 NXP
 */
5
6#include <asm/io.h>
7#include <dm.h>
8#include <dm/device_compat.h>
9#include <mailbox-uclass.h>
10#include <linux/bitfield.h>
11#include <linux/bug.h>
12#include <linux/iopoll.h>
13#include <linux/compat.h>
14
/*
 * This driver exposes only the MU status bits, in keeping with U-Boot's
 * polling (interrupt-free) execution model.
 */
DECLARE_GLOBAL_DATA_PTR;
19
/* Maximum logical channels: 6 channel types x 4 indexes each */
#define IMX_MU_CHANS 24

/* MUv2 parameter register: reports how many TR/RR registers exist */
#define IMX_MU_V2_PAR_OFF 0x4
#define IMX_MU_V2_TR_MASK GENMASK(7, 0)
#define IMX_MU_V2_RR_MASK GENMASK(15, 8)
25
/* Channel classes; the first mailbox DT cell selects one of these. */
enum imx_mu_chan_type {
	IMX_MU_TYPE_TX = 0, /* Tx */
	IMX_MU_TYPE_RX = 1, /* Rx */
	IMX_MU_TYPE_TXDB = 2, /* Tx doorbell */
	IMX_MU_TYPE_RXDB = 3, /* Rx doorbell */
	IMX_MU_TYPE_RST = 4, /* Reset */
	IMX_MU_TYPE_TXDB_V2 = 5, /* Tx doorbell with S/W ACK */
};
34
/*
 * Indexes into dcfg->xCR[].  On MUv1 all five entries point at the one
 * combined CR register (see the 0x24/0x64 tables below); MUv2 has a
 * dedicated register per function.
 */
enum imx_mu_xcr {
	IMX_MU_CR,	/* control */
	IMX_MU_GIER,	/* general purpose interrupt enable */
	IMX_MU_GCR,	/* general purpose control (doorbell request) */
	IMX_MU_TCR,	/* transmit control */
	IMX_MU_RCR,	/* receive control */
	IMX_MU_xCR_MAX,
};
43
/* Indexes into dcfg->xSR[]; MUv1 folds all of these into one SR. */
enum imx_mu_xsr {
	IMX_MU_SR,	/* status */
	IMX_MU_GSR,	/* general purpose interrupt status */
	IMX_MU_TSR,	/* transmit status */
	IMX_MU_RSR,	/* receive status */
	IMX_MU_xSR_MAX,
};
51
/* Per-channel state, allocated on request and released on rfree. */
struct imx_mu_con_priv {
	unsigned int idx;		/* index within the type, 0..3 */
	enum imx_mu_chan_type type;	/* channel class (chan->id / 4) */
	struct mbox_chan *chan;		/* back-pointer to the owning channel */
};
57
/* Hardware generation flags, OR-able into dcfg->type. */
enum imx_mu_type {
	IMX_MU_V1,
	IMX_MU_V2 = BIT(1),
	IMX_MU_V2_S4 = BIT(15),		/* NOTE(review): S4 variant flag — unused in this file */
	IMX_MU_V2_IRQ = BIT(16),	/* NOTE(review): IRQ-capable flag — unused in this file */
};
64
/* Driver platform data, one instance per MU block. */
struct imx_mu {
	void __iomem *base;		/* MMIO base from the DT "reg" property */
	const struct imx_mu_dcfg *dcfg;	/* SoC-specific register layout/ops */
	u32 num_tr;			/* number of transmit registers */
	u32 num_rr;			/* number of receive registers */
	/* use pointers to channel as a way to reserve channels */
	struct mbox_chan *channels[IMX_MU_CHANS];
	struct imx_mu_con_priv con_priv[IMX_MU_CHANS];
};
74
/* Per-SoC configuration: register offsets plus protocol hooks. */
struct imx_mu_dcfg {
	/* transmit one message/doorbell on a requested channel */
	int (*tx)(struct imx_mu *plat, struct imx_mu_con_priv *cp, const void *data);
	/* receive hook (unused by the generic config in this file) */
	int (*rx)(struct imx_mu *plat, struct imx_mu_con_priv *cp);
	/* acknowledge a received doorbell */
	int (*rxdb)(struct imx_mu *plat, struct imx_mu_con_priv *cp);
	/* reset the block to a known state at probe time */
	int (*init)(struct imx_mu *plat);
	/* translate DT mailbox cells into a channel id */
	int (*of_xlate)(struct mbox_chan *chan, struct ofnode_phandle_args *args);
	enum imx_mu_type type;
	u32 xTR; /* Transmit Register0 */
	u32 xRR; /* Receive Register0 */
	u32 xSR[IMX_MU_xSR_MAX]; /* Status Registers */
	u32 xCR[IMX_MU_xCR_MAX]; /* Control Registers */
};
87
/*
 * Status/control bit helpers.  MUv2 gives each function its own register
 * with fields starting at bit 0; MUv1 packs everything into a single
 * SR/CR pair with the four instances of each field laid out MSB-first,
 * hence the (3 - (x)) mirroring.  The 'type' parameter is parenthesized
 * so expression arguments (e.g. flags ORed together) expand correctly.
 */
#define IMX_MU_xSR_GIPn(type, x) (((type) & IMX_MU_V2) ? BIT(x) : BIT(28 + (3 - (x))))
#define IMX_MU_xSR_RFn(type, x) (((type) & IMX_MU_V2) ? BIT(x) : BIT(24 + (3 - (x))))
#define IMX_MU_xSR_TEn(type, x) (((type) & IMX_MU_V2) ? BIT(x) : BIT(20 + (3 - (x))))

/* General Purpose Interrupt Enable */
#define IMX_MU_xCR_GIEn(type, x) (((type) & IMX_MU_V2) ? BIT(x) : BIT(28 + (3 - (x))))
/* Receive Interrupt Enable */
#define IMX_MU_xCR_RIEn(type, x) (((type) & IMX_MU_V2) ? BIT(x) : BIT(24 + (3 - (x))))
/* Transmit Interrupt Enable */
#define IMX_MU_xCR_TIEn(type, x) (((type) & IMX_MU_V2) ? BIT(x) : BIT(20 + (3 - (x))))
/* General Purpose Interrupt Request */
#define IMX_MU_xCR_GIRn(type, x) (((type) & IMX_MU_V2) ? BIT(x) : BIT(16 + (3 - (x))))
/* MU reset */
#define IMX_MU_xCR_RST(type) (((type) & IMX_MU_V2) ? BIT(0) : BIT(5))
#define IMX_MU_xSR_RST(type) (((type) & IMX_MU_V2) ? BIT(0) : BIT(7))
103
/* Write a 32-bit MU register at byte offset @offs. */
static void imx_mu_write(struct imx_mu *plat, u32 val, u32 offs)
{
	iowrite32(val, plat->base + offs);
}
108
/* Read a 32-bit MU register at byte offset @offs. */
static u32 imx_mu_read(struct imx_mu *plat, u32 offs)
{
	return ioread32(plat->base + offs);
}
113
114static u32 imx_mu_xcr_rmw(struct imx_mu *plat, enum imx_mu_xcr type, u32 set, u32 clr)
115{
116 u32 val;
117
118 val = imx_mu_read(plat, plat->dcfg->xCR[type]);
119 val &= ~clr;
120 val |= set;
121 imx_mu_write(plat, val, plat->dcfg->xCR[type]);
122
123 return val;
124}
125
126/* check that the channel is open or owned by caller */
127static int imx_mu_check_channel(struct mbox_chan *chan)
128{
129 struct imx_mu *plat = dev_get_plat(chan->dev);
130
131 if (plat->channels[chan->id]) {
132 /* if reserved check that caller owns */
133 if (plat->channels[chan->id] == chan)
134 return 1; /* caller owns the channel */
135
136 return -EACCES;
137 }
138
139 return 0; /* channel empty */
140}
141
142static int imx_mu_chan_request(struct mbox_chan *chan)
143{
144 struct imx_mu *plat = dev_get_plat(chan->dev);
145 struct imx_mu_con_priv *cp;
146 enum imx_mu_chan_type type;
147 int idx;
148
149 type = chan->id / 4;
150 idx = chan->id % 4;
151
152 if (imx_mu_check_channel(chan) < 0) /* check if channel already in use */
153 return -EPERM;
154
155 plat->channels[chan->id] = chan;
156 chan->con_priv = kcalloc(1, sizeof(struct imx_mu_con_priv), 0);
157 if (!chan->con_priv)
158 return -ENOMEM;
159 cp = chan->con_priv;
160 cp->idx = idx;
161 cp->type = type;
162 cp->chan = chan;
163
164 switch (type) {
165 case IMX_MU_TYPE_RX:
166 imx_mu_xcr_rmw(plat, IMX_MU_RCR, IMX_MU_xCR_RIEn(plat->dcfg->type, idx), 0);
167 break;
168 case IMX_MU_TYPE_TXDB_V2:
169 case IMX_MU_TYPE_TXDB:
170 case IMX_MU_TYPE_RXDB:
171 imx_mu_xcr_rmw(plat, IMX_MU_GIER, IMX_MU_xCR_GIEn(plat->dcfg->type, idx), 0);
172 break;
173 default:
174 break;
175 }
176
177 return 0;
178}
179
180static int imx_mu_chan_free(struct mbox_chan *chan)
181{
182 struct imx_mu *plat = dev_get_plat(chan->dev);
183 struct imx_mu_con_priv *cp = chan->con_priv;
184
185 if (imx_mu_check_channel(chan) <= 0) /* check that the channel is also not empty */
186 return -EINVAL;
187
188 /* if you own channel and channel is NOT empty */
189 plat->channels[chan->id] = NULL;
190 switch (cp->type) {
191 case IMX_MU_TYPE_TX:
192 imx_mu_xcr_rmw(plat, IMX_MU_TCR, 0, IMX_MU_xCR_TIEn(plat->dcfg->type, cp->idx));
193 break;
194 case IMX_MU_TYPE_RX:
195 imx_mu_xcr_rmw(plat, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(plat->dcfg->type, cp->idx));
196 break;
197 case IMX_MU_TYPE_TXDB_V2:
198 case IMX_MU_TYPE_TXDB:
199 case IMX_MU_TYPE_RXDB:
200 imx_mu_xcr_rmw(plat, IMX_MU_GIER, 0, IMX_MU_xCR_GIEn(plat->dcfg->type, cp->idx));
201 break;
202 default:
203 break;
204 }
205
206 kfree(cp);
207
208 return 0;
209}
210
211static int imx_mu_send(struct mbox_chan *chan, const void *data)
212{
213 struct imx_mu *plat = dev_get_plat(chan->dev);
214 struct imx_mu_con_priv *cp = chan->con_priv;
215
216 if (imx_mu_check_channel(chan) < 1) /* return if channel isn't owned */
217 return -EPERM;
218
219 return plat->dcfg->tx(plat, cp, data);
220}
221
222static int imx_mu_recv(struct mbox_chan *chan, void *data)
223{
224 struct imx_mu *plat = dev_get_plat(chan->dev);
225 struct imx_mu_con_priv *cp = chan->con_priv;
226 u32 ctrl, val;
227
228 if (imx_mu_check_channel(chan) < 1) /* return if channel isn't owned */
229 return -EPERM;
230
231 switch (cp->type) {
232 case IMX_MU_TYPE_TXDB_V2:
233 case IMX_MU_TYPE_RXDB:
234 /* check if GSR[GIRn] bit is set */
235 if (readx_poll_timeout(ioread32, plat->base + plat->dcfg->xSR[IMX_MU_GSR],
236 val, val & BIT(cp->idx), 1000000) < 0)
237 return -EBUSY;
238
239 ctrl = imx_mu_read(plat, plat->dcfg->xCR[IMX_MU_GIER]);
240 val = imx_mu_read(plat, plat->dcfg->xSR[IMX_MU_GSR]);
241 val &= IMX_MU_xSR_GIPn(plat->dcfg->type, cp->idx) &
242 (ctrl & IMX_MU_xCR_GIEn(plat->dcfg->type, cp->idx));
243 break;
244 default:
245 dev_warn(chan->dev, "Unhandled channel type %d\n", cp->type);
246 return -EOPNOTSUPP;
247 };
248
249 if (val == IMX_MU_xSR_GIPn(plat->dcfg->type, cp->idx))
250 plat->dcfg->rxdb(plat, cp);
251
252 return 0;
253}
254
255static int imx_mu_of_to_plat(struct udevice *dev)
256{
257 struct imx_mu *plat = dev_get_plat(dev);
258 fdt_addr_t addr;
259
260 addr = dev_read_addr(dev);
261 if (addr == FDT_ADDR_T_NONE)
262 return -ENODEV;
263
264 plat->base = (struct mu_type *)addr;
265
266 return 0;
267}
268
269static int imx_mu_init_generic(struct imx_mu *plat)
270{
271 unsigned int i;
272 unsigned int val;
273
274 if (plat->num_rr > 4 || plat->num_tr > 4) {
275 WARN_ONCE(true, "%s not support TR/RR larger than 4\n", __func__);
276 return -EOPNOTSUPP;
277 }
278
279 /* Set default MU configuration */
280 for (i = 0; i < IMX_MU_xCR_MAX; i++)
281 imx_mu_write(plat, 0, plat->dcfg->xCR[i]);
282
283 /* Clear any pending GIP */
284 val = imx_mu_read(plat, plat->dcfg->xSR[IMX_MU_GSR]);
285 imx_mu_write(plat, val, plat->dcfg->xSR[IMX_MU_GSR]);
286
287 /* Clear any pending RSR */
288 for (i = 0; i < plat->num_rr; i++)
289 imx_mu_read(plat, plat->dcfg->xRR + i * 4);
290
291 return 0;
292}
293
294static int imx_mu_generic_of_xlate(struct mbox_chan *chan, struct ofnode_phandle_args *args)
295{
296 enum imx_mu_chan_type type;
297 int idx, cid;
298
299 if (args->args_count != 2) {
300 dev_err(chan->dev, "Invalid argument count %d\n", args->args_count);
301 return -EINVAL;
302 }
303
304 type = args->args[0]; /* channel type */
305 idx = args->args[1]; /* index */
306
307 cid = type * 4 + idx;
308 if (cid >= IMX_MU_CHANS) {
309 dev_err(chan->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n",
310 cid, type, idx);
311 return -EINVAL;
312 }
313
314 chan->id = cid;
315
316 return 0;
317}
318
319static int imx_mu_generic_tx(struct imx_mu *plat, struct imx_mu_con_priv *cp,
320 const void *data)
321{
322 switch (cp->type) {
323 case IMX_MU_TYPE_TXDB_V2:
324 imx_mu_xcr_rmw(plat, IMX_MU_GCR, IMX_MU_xCR_GIRn(plat->dcfg->type, cp->idx), 0);
325 break;
326 default:
327 dev_warn(cp->chan->dev, "Send data on wrong channel type: %d\n", cp->type);
328 return -EINVAL;
329 }
330
331 return 0;
332}
333
/* Acknowledge a received doorbell: write-1-to-clear the GIP status bit. */
static int imx_mu_generic_rxdb(struct imx_mu *plat, struct imx_mu_con_priv *cp)
{
	imx_mu_write(plat, IMX_MU_xSR_GIPn(plat->dcfg->type, cp->idx),
		     plat->dcfg->xSR[IMX_MU_GSR]);

	return 0;
}
341
/* i.MX6SX MU (v1): one combined SR at 0x20 and CR at 0x24 */
static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
	.tx = imx_mu_generic_tx,
	.rxdb = imx_mu_generic_rxdb,
	.init = imx_mu_init_generic,
	.of_xlate = imx_mu_generic_of_xlate,
	.type = IMX_MU_V1,
	.xTR = 0x0,
	.xRR = 0x10,
	.xSR = {0x20, 0x20, 0x20, 0x20},
	.xCR = {0x24, 0x24, 0x24, 0x24, 0x24},
};
353
/* i.MX7ULP MU (v1): same combined layout as i.MX6SX, shifted base offsets */
static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
	.tx = imx_mu_generic_tx,
	.rxdb = imx_mu_generic_rxdb,
	.init = imx_mu_init_generic,
	.of_xlate = imx_mu_generic_of_xlate,
	.type = IMX_MU_V1,
	.xTR = 0x20,
	.xRR = 0x40,
	.xSR = {0x60, 0x60, 0x60, 0x60},
	.xCR = {0x64, 0x64, 0x64, 0x64, 0x64},
};
365
/* i.MX95 MU (v2): dedicated status/control register per function */
static const struct imx_mu_dcfg imx_mu_cfg_imx95 = {
	.tx = imx_mu_generic_tx,
	.rxdb = imx_mu_generic_rxdb,
	.init = imx_mu_init_generic,
	.of_xlate = imx_mu_generic_of_xlate,
	.type = IMX_MU_V2,
	.xTR = 0x200,
	.xRR = 0x280,
	.xSR = {0xC, 0x118, 0x124, 0x12C},
	.xCR = {0x8, 0x110, 0x114, 0x120, 0x128},
};
377
/* Compatible strings; .data carries the matching register layout. */
static const struct udevice_id ids[] = {
	{ .compatible = "fsl,imx6sx-mu", .data = (ulong)&imx_mu_cfg_imx6sx },
	{ .compatible = "fsl,imx7ulp-mu", .data = (ulong)&imx_mu_cfg_imx7ulp },
	{ .compatible = "fsl,imx95-mu", .data = (ulong)&imx_mu_cfg_imx95 },
	{ }
};
384
/*
 * uclass of_xlate entry: dispatch to the SoC-specific translation hook.
 * NOTE(review): not static — confirm no external caller before narrowing
 * the linkage.
 */
int imx_mu_of_xlate(struct mbox_chan *chan, struct ofnode_phandle_args *args)
{
	struct imx_mu *plat = dev_get_plat(chan->dev);

	return plat->dcfg->of_xlate(chan, args);
}
391
/* Mailbox uclass operations exposed by this driver. */
struct mbox_ops imx_mu_ops = {
	.of_xlate = imx_mu_of_xlate,
	.request = imx_mu_chan_request,
	.rfree = imx_mu_chan_free,
	.send = imx_mu_send,
	.recv = imx_mu_recv,
};
399
400static void imx_mu_get_tr_rr(struct imx_mu *plat)
401{
402 u32 val;
403
404 if (plat->dcfg->type & IMX_MU_V2) {
405 val = imx_mu_read(plat, IMX_MU_V2_PAR_OFF);
406 plat->num_tr = FIELD_GET(IMX_MU_V2_TR_MASK, val);
407 plat->num_rr = FIELD_GET(IMX_MU_V2_RR_MASK, val);
408 } else {
409 plat->num_tr = 4;
410 plat->num_rr = 4;
411 }
412}
413
/* Probe: select the SoC config, size the register file, reset the MU. */
static int imx_mu_probe(struct udevice *dev)
{
	struct imx_mu *plat = dev_get_plat(dev);
	int ret;

	debug("%s(dev=%p)\n", __func__, dev);

	/* register layout chosen by the matched compatible (ids[].data) */
	plat->dcfg = (void *)dev_get_driver_data(dev);

	imx_mu_get_tr_rr(plat);

	ret = plat->dcfg->init(plat);
	if (ret) {
		dev_err(dev, "Failed to init MU\n");
		return ret;
	}

	return 0;
}
433
U_BOOT_DRIVER(imx_mu) = {
	.name = "imx-mu",
	.id = UCLASS_MAILBOX,
	.of_match = ids,
	.of_to_plat = imx_mu_of_to_plat,
	.plat_auto = sizeof(struct imx_mu),
	.probe = imx_mu_probe,
	.ops = &imx_mu_ops,
	/* NOTE(review): pre-reloc binding — presumably for early firmware
	 * communication before relocation; confirm against users. */
	.flags = DM_FLAG_PRE_RELOC,
};