// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
 *
 * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
 */

#include <linux/io.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>

#include "mcu.h"
#include "mbox.h"
#include "internal.h"

/* how long a sender may wait for the mailbox to free up, in microseconds */
#define MBOX_SEND_TIMEOUT		(2000)

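/*
 * Layout of one message register window (offsets relative to msg_reg),
 * as used by mbox_fill_msg() and exec_mbox_handler() below:
 *   +0x00  command ID slot
 *   +0x04  msg1
 *   +0x08  msg2
 *   +0x0C  msg3
 *   +0x10  msg4
 */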
struct mailbox_reg {
	u32 cmd_set_reg;
	u32 cmd_clr_reg;
	u32 msg_reg;
};

struct mailbox_core {
	struct list_head mdev_list;
	u32 registered_cmd;
	spinlock_t lock;
};

struct mailbox_hw {
	struct mailbox_core core[MBOX_ACT_MAX][CORE_MAX];
	struct device *dev;
	void __iomem *base;
};

/* single driver-wide instance; the helpers below all operate on it */
static struct mailbox_hw mbox;

static inline void mbox_write(u32 reg, u32 val)
{
	writel(val, mbox.base + reg);
}

static inline void mbox_set(u32 reg, u32 mask)
{
	setbits(mbox.base + reg, mask);
}

static inline void mbox_clr(u32 reg, u32 mask)
{
	clrbits(mbox.base + reg, mask);
}

static inline void mbox_rmw(u32 reg, u32 mask, u32 val)
{
	clrsetbits(mbox.base + reg, mask, val);
}

static inline u32 mbox_read(u32 reg)
{
	return readl(mbox.base + reg);
}

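/*
 * Write back the return words a receive handler produced; the fall-through
 * labels write msgN..msg1 according to how many words the handler reported.
 */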
static inline void mbox_fill_msg(enum mbox_msg_cnt cnt, struct mailbox_msg *msg,
				 struct mailbox_reg *mbox_reg)
{
	if (cnt == MBOX_RET_MSG4)
		goto send_msg4;
	else if (cnt == MBOX_RET_MSG3)
		goto send_msg3;
	else if (cnt == MBOX_RET_MSG2)
		goto send_msg2;
	else if (cnt == MBOX_RET_MSG1)
		goto send_msg1;
	else
		return;

	/* intentional fall-through: write the highest return word first */
send_msg4:
	mbox_write(mbox_reg->msg_reg + 0x10, msg->msg4);
send_msg3:
	mbox_write(mbox_reg->msg_reg + 0xC, msg->msg3);
send_msg2:
	mbox_write(mbox_reg->msg_reg + 0x8, msg->msg2);
send_msg1:
	mbox_write(mbox_reg->msg_reg + 0x4, msg->msg1);
}

static inline void mbox_clear_msg(enum mbox_msg_cnt cnt,
				  struct mailbox_reg *mbox_reg)
{
	/* clear every message register the return value did not fill */
	if (cnt == MBOX_NO_RET_MSG)
		goto clear_msg1;
	else if (cnt == MBOX_RET_MSG1)
		goto clear_msg2;
	else if (cnt == MBOX_RET_MSG2)
		goto clear_msg3;
	else if (cnt == MBOX_RET_MSG3)
		goto clear_msg4;
	else
		return;

	/* intentional fall-through */
clear_msg1:
	mbox_write(mbox_reg->msg_reg + 0x4, 0);
clear_msg2:
	mbox_write(mbox_reg->msg_reg + 0x8, 0);
clear_msg3:
	mbox_write(mbox_reg->msg_reg + 0xC, 0);
clear_msg4:
	mbox_write(mbox_reg->msg_reg + 0x10, 0);
}

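/*
 * Receive path: look up the mailbox_dev registered for the incoming cmd ID,
 * hand it the four message words, write back whatever return words the
 * handler reports, then clear the message window and ack the cmd.
 */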
static void exec_mbox_handler(enum core_id core, struct mailbox_reg *mbox_reg)
{
	struct mailbox_core *mcore = &mbox.core[MBOX_RECV][core];
	struct mailbox_dev *mdev = NULL;
	struct mailbox_msg msg = {0};
	enum mbox_msg_cnt ret = 0;
	u32 cmd_id = 0;

	cmd_id = mbox_read(mbox_reg->msg_reg);

	list_for_each_entry(mdev, &mcore->mdev_list, list) {
		if (mdev->cmd_id == cmd_id) {
			if (!mdev->mbox_handler)
				goto out;

			/* setup msg for handler */
			msg.msg1 = mbox_read(mbox_reg->msg_reg + 0x4);
			msg.msg2 = mbox_read(mbox_reg->msg_reg + 0x8);
			msg.msg3 = mbox_read(mbox_reg->msg_reg + 0xC);
			msg.msg4 = mbox_read(mbox_reg->msg_reg + 0x10);

			ret = mdev->mbox_handler(mdev, &msg);

			mbox_fill_msg(ret, &msg, mbox_reg);

			break;
		}
	}
out:
	mbox_write(mbox_reg->msg_reg, 0);
	mbox_clear_msg(ret, mbox_reg);

	/* clear cmd */
	mbox_write(mbox_reg->cmd_clr_reg, 0xFFFFFFFF);
}

static irqreturn_t mtk_tops_mbox_handler(int irq, void *dev_id)
{
	struct mailbox_reg mreg = {0};
	u32 cluster_reg = 0;
	u32 top_reg = 0;

	top_reg = mbox_read(TOPS_TOP_AP_SLOT);
	cluster_reg = mbox_read(TOPS_CLUST0_AP_SLOT);

	if (top_reg & MBOX_TOP_MBOX_FROM_CM) {
		mreg.cmd_set_reg = TOPS_TOP_CM_TO_AP_CMD_SET;
		mreg.cmd_clr_reg = TOPS_TOP_CM_TO_AP_CMD_CLR;
		mreg.msg_reg = TOPS_TOP_CM_TO_AP_MSG_N(0);
		exec_mbox_handler(CORE_MGMT, &mreg);
	}
	if (cluster_reg & MBOX_CLUST0_MBOX_FROM_C0) {
		mreg.cmd_set_reg = TOPS_CLUST0_CX_TO_AP_CMD_SET(0);
		mreg.cmd_clr_reg = TOPS_CLUST0_CX_TO_AP_CMD_CLR(0);
		mreg.msg_reg = TOPS_CLUST0_CX_TO_AP_MSG_N(0, 0);
		exec_mbox_handler(CORE_OFFLOAD_0, &mreg);
	}
	if (cluster_reg & MBOX_CLUST0_MBOX_FROM_C1) {
		mreg.cmd_set_reg = TOPS_CLUST0_CX_TO_AP_CMD_SET(1);
		mreg.cmd_clr_reg = TOPS_CLUST0_CX_TO_AP_CMD_CLR(1);
		mreg.msg_reg = TOPS_CLUST0_CX_TO_AP_MSG_N(1, 0);
		exec_mbox_handler(CORE_OFFLOAD_1, &mreg);
	}
	if (cluster_reg & MBOX_CLUST0_MBOX_FROM_C2) {
		mreg.cmd_set_reg = TOPS_CLUST0_CX_TO_AP_CMD_SET(2);
		mreg.cmd_clr_reg = TOPS_CLUST0_CX_TO_AP_CMD_CLR(2);
		mreg.msg_reg = TOPS_CLUST0_CX_TO_AP_MSG_N(2, 0);
		exec_mbox_handler(CORE_OFFLOAD_2, &mreg);
	}
	if (cluster_reg & MBOX_CLUST0_MBOX_FROM_C3) {
		mreg.cmd_set_reg = TOPS_CLUST0_CX_TO_AP_CMD_SET(3);
		mreg.cmd_clr_reg = TOPS_CLUST0_CX_TO_AP_CMD_CLR(3);
		mreg.msg_reg = TOPS_CLUST0_CX_TO_AP_MSG_N(3, 0);
		exec_mbox_handler(CORE_OFFLOAD_3, &mreg);
	}

	return IRQ_HANDLED;
}

static int mbox_get_send_reg(struct mailbox_dev *mdev,
			     struct mailbox_reg *mbox_reg)
{
	if (!mdev) {
		dev_notice(mbox.dev, "no mdev specified!\n");
		return -EINVAL;
	}

	switch (mdev->core) {
	case CORE_MGMT:
		mbox_reg->cmd_set_reg = TOPS_TOP_AP_TO_CM_CMD_SET;
		mbox_reg->cmd_clr_reg = TOPS_TOP_AP_TO_CM_CMD_CLR;
		mbox_reg->msg_reg = TOPS_TOP_AP_TO_CM_MSG_N(0);
		break;
	case CORE_OFFLOAD_0:
		mbox_reg->cmd_set_reg = TOPS_CLUST0_AP_TO_CX_CMD_SET(0);
		mbox_reg->cmd_clr_reg = TOPS_CLUST0_AP_TO_CX_CMD_CLR(0);
		mbox_reg->msg_reg = TOPS_CLUST0_AP_TO_CX_MSG_N(0, 0);
		break;
	case CORE_OFFLOAD_1:
		mbox_reg->cmd_set_reg = TOPS_CLUST0_AP_TO_CX_CMD_SET(1);
		mbox_reg->cmd_clr_reg = TOPS_CLUST0_AP_TO_CX_CMD_CLR(1);
		mbox_reg->msg_reg = TOPS_CLUST0_AP_TO_CX_MSG_N(1, 0);
		break;
	case CORE_OFFLOAD_2:
		mbox_reg->cmd_set_reg = TOPS_CLUST0_AP_TO_CX_CMD_SET(2);
		mbox_reg->cmd_clr_reg = TOPS_CLUST0_AP_TO_CX_CMD_CLR(2);
		mbox_reg->msg_reg = TOPS_CLUST0_AP_TO_CX_MSG_N(2, 0);
		break;
	case CORE_OFFLOAD_3:
		mbox_reg->cmd_set_reg = TOPS_CLUST0_AP_TO_CX_CMD_SET(3);
		mbox_reg->cmd_clr_reg = TOPS_CLUST0_AP_TO_CX_CMD_CLR(3);
		mbox_reg->msg_reg = TOPS_CLUST0_AP_TO_CX_MSG_N(3, 0);
		break;
	default:
		dev_notice(mbox.dev, "invalid mdev->core: %u\n", mdev->core);
		return -EINVAL;
	}

	return 0;
}

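/*
 * Called while still holding the per-core send lock: read back the return
 * words the remote core left in the message window, pass them to the
 * sender's ret_handler (if any), then zero the whole window for the next
 * command.
 */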
static void mbox_post_send(u32 msg_reg, struct mailbox_msg *msg,
			   void *priv,
			   mbox_ret_func_t ret_handler)
{
	if (!ret_handler)
		goto out;

	msg->msg1 = mbox_read(msg_reg + 0x4);
	msg->msg2 = mbox_read(msg_reg + 0x8);
	msg->msg3 = mbox_read(msg_reg + 0xC);
	msg->msg4 = mbox_read(msg_reg + 0x10);

	ret_handler(priv, msg);

out:
	mbox_write(msg_reg, 0);
	mbox_write(msg_reg + 0x4, 0);
	mbox_write(msg_reg + 0x8, 0);
	mbox_write(msg_reg + 0xC, 0);
	mbox_write(msg_reg + 0x10, 0);
}

static inline bool mbox_send_msg_chk_timeout(ktime_t start)
{
	return ktime_to_us(ktime_sub(ktime_get(), start)) > MBOX_SEND_TIMEOUT;
}

static inline int __mbox_send_msg_no_wait_irq(struct mailbox_dev *mdev,
					      struct mailbox_msg *msg,
					      struct mailbox_reg *mbox_reg)
{
	ktime_t start;

	if (!mdev || !msg || !mbox_reg) {
		dev_notice(mbox.dev, "missing some necessary parameters!\n");
		return -EINVAL;
	}

	start = ktime_get();

	/* wait for all previously issued commands to be consumed */
	while (mbox_read(mbox_reg->cmd_set_reg)) {
		if (mbox_send_msg_chk_timeout(start)) {
			dev_notice(mbox.dev, "mbox occupied too long\n");
			dev_notice(mbox.dev, "cmd set reg (0x%x): 0x%x\n",
				   mbox_reg->cmd_set_reg,
				   mbox_read(mbox_reg->cmd_set_reg));
			dev_notice(mbox.dev, "cmd id reg (0x%x): 0x%x\n",
				   mbox_reg->msg_reg,
				   mbox_read(mbox_reg->msg_reg));
			dev_notice(mbox.dev, "msg1 reg (0x%x): 0x%x\n",
				   mbox_reg->msg_reg + 0x4,
				   mbox_read(mbox_reg->msg_reg + 0x4));
			dev_notice(mbox.dev, "msg2 reg (0x%x): 0x%x\n",
				   mbox_reg->msg_reg + 0x8,
				   mbox_read(mbox_reg->msg_reg + 0x8));
			dev_notice(mbox.dev, "msg3 reg (0x%x): 0x%x\n",
				   mbox_reg->msg_reg + 0xC,
				   mbox_read(mbox_reg->msg_reg + 0xC));
			dev_notice(mbox.dev, "msg4 reg (0x%x): 0x%x\n",
				   mbox_reg->msg_reg + 0x10,
				   mbox_read(mbox_reg->msg_reg + 0x10));
			WARN_ON(1);
			return -ETIMEDOUT;
		}
	}

	/* write cmd id + msg payload */
	mbox_write(mbox_reg->msg_reg, mdev->cmd_id);
	mbox_write(mbox_reg->msg_reg + 0x4, msg->msg1);
	mbox_write(mbox_reg->msg_reg + 0x8, msg->msg2);
	mbox_write(mbox_reg->msg_reg + 0xC, msg->msg3);
	mbox_write(mbox_reg->msg_reg + 0x10, msg->msg4);

	/* raise the cmd bit to notify the remote core */
	mbox_write(mbox_reg->cmd_set_reg, BIT(mdev->cmd_id));

	return 0;
}

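/*
 * Three send variants are exported below: _no_wait_irq takes the plain
 * spinlock (for callers that already run with IRQs disabled), _no_wait
 * disables IRQs itself, and mbox_send_msg additionally busy-waits for the
 * remote core to consume the command and runs ret_handler on the returned
 * words.
 */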
int mbox_send_msg_no_wait_irq(struct mailbox_dev *mdev, struct mailbox_msg *msg)
{
	struct mailbox_reg mbox_reg = {0};
	int ret = 0;

	ret = mbox_get_send_reg(mdev, &mbox_reg);
	if (ret)
		return ret;

	spin_lock(&mbox.core[MBOX_SEND][mdev->core].lock);

	/* send cmd + msg */
	ret = __mbox_send_msg_no_wait_irq(mdev, msg, &mbox_reg);

	spin_unlock(&mbox.core[MBOX_SEND][mdev->core].lock);

	return ret;
}
EXPORT_SYMBOL(mbox_send_msg_no_wait_irq);

int mbox_send_msg_no_wait(struct mailbox_dev *mdev, struct mailbox_msg *msg)
{
	struct mailbox_reg mbox_reg = {0};
	unsigned long flag = 0;
	int ret = 0;

	ret = mbox_get_send_reg(mdev, &mbox_reg);
	if (ret)
		return ret;

	spin_lock_irqsave(&mbox.core[MBOX_SEND][mdev->core].lock, flag);

	/* send cmd + msg */
	ret = __mbox_send_msg_no_wait_irq(mdev, msg, &mbox_reg);

	spin_unlock_irqrestore(&mbox.core[MBOX_SEND][mdev->core].lock, flag);

	return ret;
}
EXPORT_SYMBOL(mbox_send_msg_no_wait);

int mbox_send_msg(struct mailbox_dev *mdev, struct mailbox_msg *msg, void *priv,
		  mbox_ret_func_t ret_handler)
{
	struct mailbox_reg mbox_reg = {0};
	unsigned long flag = 0;
	ktime_t start;
	int ret = 0;

	ret = mbox_get_send_reg(mdev, &mbox_reg);
	if (ret)
		return ret;

	spin_lock_irqsave(&mbox.core[MBOX_SEND][mdev->core].lock, flag);

	/* send cmd + msg */
	ret = __mbox_send_msg_no_wait_irq(mdev, msg, &mbox_reg);
	if (ret)
		goto unlock;

	start = ktime_get();

	/* wait for the remote core to clear the cmd, but honor the timeout */
	while (mbox_read(mbox_reg.cmd_set_reg) & BIT(mdev->cmd_id)) {
		if (mbox_send_msg_chk_timeout(start)) {
			dev_notice(mbox.dev, "mbox cmd not consumed in time\n");
			ret = -ETIMEDOUT;
			break;
		}
	}

	/* execute return handler and clear message */
	mbox_post_send(mbox_reg.msg_reg, msg, priv, ret_handler);

unlock:
	spin_unlock_irqrestore(&mbox.core[MBOX_SEND][mdev->core].lock, flag);

	return ret;
}
EXPORT_SYMBOL(mbox_send_msg);

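/*
 * Example sender usage (illustrative sketch only; FOO_CMD and the field
 * values are hypothetical, not part of this driver):
 *
 *	static struct mailbox_dev foo_mdev = {
 *		.core = CORE_MGMT,
 *		.cmd_id = FOO_CMD,
 *	};
 *	struct mailbox_msg msg = { .msg1 = 0x1234 };
 *
 *	register_mbox_dev(MBOX_SEND, &foo_mdev);
 *	mbox_send_msg(&foo_mdev, &msg, NULL, NULL);
 */
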
static inline int mbox_ctrl_sanity_check(enum core_id core, enum mbox_act act)
{
	/* sanity check */
	if (core >= CORE_MAX || act >= MBOX_ACT_MAX)
		return -EINVAL;

	/* an mbox handler must not be registered to the AP core itself */
	if (core == CORE_AP)
		return -EINVAL;

	return 0;
}

static void __register_mbox_dev(struct mailbox_core *mcore,
				struct mailbox_dev *mdev)
{
	struct mailbox_dev *cur = NULL;

	INIT_LIST_HEAD(&mdev->list);

	/* insert the mailbox_dev in ascending cmd_id order */
	list_for_each_entry(cur, &mcore->mdev_list, list)
		if (cur->cmd_id > mdev->cmd_id)
			break;

	/* add before the first larger entry (or at the tail) */
	list_add_tail(&mdev->list, &cur->list);

	mcore->registered_cmd |= (0x1 << mdev->cmd_id);
}

static void __unregister_mbox_dev(struct mailbox_core *mcore,
				  struct mailbox_dev *mdev)
{
	struct mailbox_dev *cur = NULL;
	struct mailbox_dev *next = NULL;

	/* only delete the node if it actually exists in the list */
	list_for_each_entry_safe(cur, next, &mcore->mdev_list, list) {
		if (cur->cmd_id == mdev->cmd_id && cur == mdev) {
			list_del(&mdev->list);
			break;
		}
	}

	mcore->registered_cmd &= (~(0x1 << mdev->cmd_id));
}

int register_mbox_dev(enum mbox_act act, struct mailbox_dev *mdev)
{
	struct mailbox_core *mcore;
	int ret = 0;

	/* sanity check */
	ret = mbox_ctrl_sanity_check(mdev->core, act);
	if (ret)
		return ret;

	mcore = &mbox.core[act][mdev->core];

	/* reject the cmd if it is already occupied */
	if (mcore->registered_cmd & (0x1 << mdev->cmd_id))
		return -EBUSY;

	__register_mbox_dev(mcore, mdev);

	return 0;
}
EXPORT_SYMBOL(register_mbox_dev);

int unregister_mbox_dev(enum mbox_act act, struct mailbox_dev *mdev)
{
	struct mailbox_core *mcore;
	int ret = 0;

	/* sanity check */
	ret = mbox_ctrl_sanity_check(mdev->core, act);
	if (ret)
		return ret;

	mcore = &mbox.core[act][mdev->core];

	/* nothing to do if the cmd was never registered */
	if (!(mcore->registered_cmd & (0x1 << mdev->cmd_id)))
		return 0;

	__unregister_mbox_dev(mcore, mdev);

	return 0;
}
EXPORT_SYMBOL(unregister_mbox_dev);

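/*
 * Example receive-side usage (illustrative sketch only; FOO_CMD and
 * foo_mbox_handler are hypothetical):
 *
 *	static enum mbox_msg_cnt foo_mbox_handler(struct mailbox_dev *mdev,
 *						  struct mailbox_msg *msg)
 *	{
 *		// consume msg->msg1..msg4, then fill the words to return
 *		msg->msg1 = 0x5678;
 *		return MBOX_RET_MSG1;	// one return word was filled
 *	}
 *
 *	static struct mailbox_dev foo_recv_mdev = {
 *		.core = CORE_MGMT,
 *		.cmd_id = FOO_CMD,
 *		.mbox_handler = foo_mbox_handler,
 *	};
 *
 *	register_mbox_dev(MBOX_RECV, &foo_recv_mdev);
 */
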
void mtk_tops_mbox_clear_all_cmd(void)
{
	u32 i, j;

	mbox_write(TOPS_TOP_AP_TO_CM_CMD_CLR, 0xFFFFFFFF);
	mbox_write(TOPS_TOP_CM_TO_AP_CMD_CLR, 0xFFFFFFFF);

	for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
		mbox_write(TOPS_CLUST0_CX_TO_CM_CMD_CLR(i), 0xFFFFFFFF);
		mbox_write(TOPS_CLUST0_CM_TO_CX_CMD_CLR(i), 0xFFFFFFFF);
		mbox_write(TOPS_CLUST0_CX_TO_AP_CMD_CLR(i), 0xFFFFFFFF);
		mbox_write(TOPS_CLUST0_AP_TO_CX_CMD_CLR(i), 0xFFFFFFFF);

		for (j = 0; j < CORE_OFFLOAD_NUM; j++) {
			if (i == j)
				continue;

			mbox_write(TOPS_CLUST0_CX_TO_CY_CMD_CLR(i, j), 0xFFFFFFFF);
		}
	}
}

static int mtk_tops_mbox_probe(struct platform_device *pdev)
{
	struct device_node *tops = NULL;
	struct resource res;
	int irq = platform_get_irq_byname(pdev, "mbox");
	int ret = 0;
	u32 idx = 0;

	mbox.dev = &pdev->dev;

	tops = of_parse_phandle(pdev->dev.of_node, "tops", 0);
	if (!tops) {
		dev_err(mbox.dev, "cannot find tops node\n");
		return -ENODEV;
	}

	ret = of_address_to_resource(tops, 0, &res);
	of_node_put(tops);
	if (ret)
		return -ENXIO;

	mbox.base = devm_ioremap(mbox.dev, res.start, resource_size(&res));
	if (!mbox.base)
		return -ENOMEM;

	if (irq < 0) {
		dev_err(mbox.dev, "get mbox irq failed\n");
		return irq;
	}

	ret = devm_request_irq(&pdev->dev, irq,
			       mtk_tops_mbox_handler,
			       IRQF_ONESHOT,
			       pdev->name, NULL);
	if (ret) {
		dev_err(mbox.dev, "request mbox irq failed\n");
		return ret;
	}

	for (idx = 0; idx < CORE_MAX; idx++) {
		INIT_LIST_HEAD(&mbox.core[MBOX_SEND][idx].mdev_list);
		INIT_LIST_HEAD(&mbox.core[MBOX_RECV][idx].mdev_list);
		spin_lock_init(&mbox.core[MBOX_SEND][idx].lock);
		spin_lock_init(&mbox.core[MBOX_RECV][idx].lock);
	}

	mtk_tops_mbox_clear_all_cmd();

	return ret;
}

static int mtk_tops_mbox_remove(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id mtk_mbox_match[] = {
	{ .compatible = "mediatek,tops-mbox", },
	{ },
};

static struct platform_driver mtk_tops_mbox_driver = {
	.probe = mtk_tops_mbox_probe,
	.remove = mtk_tops_mbox_remove,
	.driver = {
		.name = "mediatek,tops-mbox",
		.owner = THIS_MODULE,
		.of_match_table = mtk_mbox_match,
	},
};

int __init mtk_tops_mbox_init(void)
{
	return platform_driver_register(&mtk_tops_mbox_driver);
}

void __exit mtk_tops_mbox_exit(void)
{
	platform_driver_unregister(&mtk_tops_mbox_driver);
}