// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
 *
 * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
 */

7
8#include <linux/delay.h>
9#include <linux/device.h>
10#include <linux/dma-mapping.h>
11#include <linux/err.h>
12#include <linux/firmware.h>
13#include <linux/io.h>
14#include <linux/kthread.h>
15#include <linux/of.h>
16#include <linux/of_address.h>
17#include <linux/of_device.h>
18#include <linux/of_platform.h>
19#include <linux/platform_device.h>
20#include <linux/pm_domain.h>
21#include <linux/pm_runtime.h>
22
23#include <pce/pce.h>
24
25#include "ctrl.h"
26#include "firmware.h"
27#include "hpdma.h"
28#include "internal.h"
29#include "mbox.h"
30#include "mcu.h"
31#include "netsys.h"
32#include "tdma.h"
33#include "trm.h"
34
#define TDMA_TIMEOUT_MAX_CNT		(3)
#define TDMA_TIMEOUT_DELAY		(100) /* 100ms */

#define MCU_STATE_TRANS_TIMEOUT		(5000) /* 5000ms */
#define MCU_CTRL_DONE_BIT		(31)
#define MCU_CTRL_DONE			(CORE_TOPS_MASK | \
					 BIT(MCU_CTRL_DONE_BIT))

/* TRM dump length */
#define TOP_CORE_BASE_LEN		(0x80)
#define TOP_L2SRAM_LEN			(0x40000)
#define TOP_CORE_M_XTCM_LEN		(0x8000)

#define CLUST_CORE_BASE_LEN		(0x80)
#define CLUST_L2SRAM_LEN		(0x40000)
#define CLUST_CORE_X_XTCM_LEN		(0x8000)

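/*
 * MCU state machine, summarized from the *_trans() handlers below
 * (pending MCU_ACT_* -> next state):
 *
 *	SHUTDOWN: INIT -> INIT
 *	INIT:     FREERUN -> FREERUN, NETSTOP -> NETSTOP
 *	FREERUN:  RESET -> RESET, STALL -> STALL, NETSTOP -> NETSTOP
 *	STALL:    RESET -> RESET, FREERUN -> FREERUN, NETSTOP -> NETSTOP
 *	NETSTOP:  ABNORMAL -> ABNORMAL, RESET -> RESET, SHUTDOWN -> SHUTDOWN
 *	RESET:    FREERUN -> FREERUN, SHUTDOWN -> SHUTDOWN,
 *	          NETSTOP -> ABNORMAL (netstop already ran before reset)
 *	ABNORMAL: SHUTDOWN -> SHUTDOWN
 *
 * Any other action in a given state is rejected with ERR_PTR(-ENODEV).
 */
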
/* MCU State */
#define MCU_STATE_FUNC_DECLARE(name)					\
static int mtk_tops_mcu_state_ ## name ## _enter(struct mcu_state *state); \
static int mtk_tops_mcu_state_ ## name ## _leave(struct mcu_state *state); \
static struct mcu_state *mtk_tops_mcu_state_ ## name ## _trans(		\
	u32 mcu_act,							\
	struct mcu_state *state)

#define MCU_STATE_DATA(name, id)					\
	[id] = {							\
		.state = id,						\
		.state_trans = mtk_tops_mcu_state_ ## name ## _trans,	\
		.enter = mtk_tops_mcu_state_ ## name ## _enter,		\
		.leave = mtk_tops_mcu_state_ ## name ## _leave,		\
	}

static inline void mcu_ctrl_issue_pending_act(u32 mcu_act);
static enum mbox_msg_cnt mtk_tops_ap_recv_mgmt_mbox_msg(struct mailbox_dev *mdev,
							struct mailbox_msg *msg);
static enum mbox_msg_cnt mtk_tops_ap_recv_offload_mbox_msg(struct mailbox_dev *mdev,
							   struct mailbox_msg *msg);
static int mcu_trm_hw_dump(void *dst, u32 start_addr, u32 len);

MCU_STATE_FUNC_DECLARE(shutdown);
MCU_STATE_FUNC_DECLARE(init);
MCU_STATE_FUNC_DECLARE(freerun);
MCU_STATE_FUNC_DECLARE(stall);
MCU_STATE_FUNC_DECLARE(netstop);
MCU_STATE_FUNC_DECLARE(reset);
MCU_STATE_FUNC_DECLARE(abnormal);

struct npu {
	void __iomem *base;

	struct clk *bus_clk;
	struct clk *sram_clk;
	struct clk *xdma_clk;
	struct clk *offload_clk;
	struct clk *mgmt_clk;

	struct device **pd_devices;
	struct device_link **pd_links;
	int pd_num;

	struct task_struct *mcu_ctrl_thread;
	struct timer_list mcu_ctrl_timer;
	struct mcu_state *next_state;
	struct mcu_state *cur_state;
	/* ensure that only 1 user can trigger state transition at a time */
	struct mutex mcu_ctrl_lock;
	spinlock_t pending_act_lock;
	wait_queue_head_t mcu_ctrl_wait_act;
	wait_queue_head_t mcu_state_wait_done;
	bool mcu_bring_up_done;
	bool state_trans_fail;
	u32 pending_act;

	spinlock_t ctrl_done_lock;
	wait_queue_head_t mcu_ctrl_wait_done;
	enum mcu_cmd_type ctrl_done_cmd;
	/* MSB = 1 means MCU control is done; otherwise it is still ongoing */
	u32 ctrl_done;

	struct work_struct recover_work;
	bool in_reset;
	bool in_recover;
	bool netsys_fe_ser;
	bool shutting_down;

	struct mailbox_msg ctrl_msg;
	struct mailbox_dev recv_mgmt_mbox_dev;
	struct mailbox_dev send_mgmt_mbox_dev;

	struct mailbox_dev recv_offload_mbox_dev[CORE_OFFLOAD_NUM];
	struct mailbox_dev send_offload_mbox_dev[CORE_OFFLOAD_NUM];
};

static struct mcu_state mcu_states[__MCU_STATE_TYPE_MAX] = {
	MCU_STATE_DATA(shutdown, MCU_STATE_TYPE_SHUTDOWN),
	MCU_STATE_DATA(init, MCU_STATE_TYPE_INIT),
	MCU_STATE_DATA(freerun, MCU_STATE_TYPE_FREERUN),
	MCU_STATE_DATA(stall, MCU_STATE_TYPE_STALL),
	MCU_STATE_DATA(netstop, MCU_STATE_TYPE_NETSTOP),
	MCU_STATE_DATA(reset, MCU_STATE_TYPE_RESET),
	MCU_STATE_DATA(abnormal, MCU_STATE_TYPE_ABNORMAL),
};

static struct npu npu = {
	.send_mgmt_mbox_dev = MBOX_SEND_MGMT_DEV(CORE_CTRL),
	.send_offload_mbox_dev = {
		[CORE_OFFLOAD_0] = MBOX_SEND_OFFLOAD_DEV(0, CORE_CTRL),
		[CORE_OFFLOAD_1] = MBOX_SEND_OFFLOAD_DEV(1, CORE_CTRL),
		[CORE_OFFLOAD_2] = MBOX_SEND_OFFLOAD_DEV(2, CORE_CTRL),
		[CORE_OFFLOAD_3] = MBOX_SEND_OFFLOAD_DEV(3, CORE_CTRL),
	},
	.recv_mgmt_mbox_dev =
		MBOX_RECV_MGMT_DEV(CORE_CTRL, mtk_tops_ap_recv_mgmt_mbox_msg),
	.recv_offload_mbox_dev = {
		[CORE_OFFLOAD_0] =
			MBOX_RECV_OFFLOAD_DEV(0,
					      CORE_CTRL,
					      mtk_tops_ap_recv_offload_mbox_msg),
		[CORE_OFFLOAD_1] =
			MBOX_RECV_OFFLOAD_DEV(1,
					      CORE_CTRL,
					      mtk_tops_ap_recv_offload_mbox_msg),
		[CORE_OFFLOAD_2] =
			MBOX_RECV_OFFLOAD_DEV(2,
					      CORE_CTRL,
					      mtk_tops_ap_recv_offload_mbox_msg),
		[CORE_OFFLOAD_3] =
			MBOX_RECV_OFFLOAD_DEV(3,
					      CORE_CTRL,
					      mtk_tops_ap_recv_offload_mbox_msg),
	},
};

static struct trm_config mcu_trm_cfgs[] = {
	{
		TRM_CFG_EN("top-core-base",
			   TOP_CORE_BASE, TOP_CORE_BASE_LEN,
			   0x0, TOP_CORE_BASE_LEN,
			   0)
	},
	{
		TRM_CFG_EN("clust-core0-base",
			   CLUST_CORE_BASE(0), CLUST_CORE_BASE_LEN,
			   0x0, CLUST_CORE_BASE_LEN,
			   0)
	},
	{
		TRM_CFG_EN("clust-core1-base",
			   CLUST_CORE_BASE(1), CLUST_CORE_BASE_LEN,
			   0x0, CLUST_CORE_BASE_LEN,
			   0)
	},
	{
		TRM_CFG_EN("clust-core2-base",
			   CLUST_CORE_BASE(2), CLUST_CORE_BASE_LEN,
			   0x0, CLUST_CORE_BASE_LEN,
			   0)
	},
	{
		TRM_CFG_EN("clust-core3-base",
			   CLUST_CORE_BASE(3), CLUST_CORE_BASE_LEN,
			   0x0, CLUST_CORE_BASE_LEN,
			   0)
	},
	{
		TRM_CFG_CORE_DUMP_EN("top-core-m-dtcm",
				     TOP_CORE_M_DTCM, TOP_CORE_M_XTCM_LEN,
				     0x0, TOP_CORE_M_XTCM_LEN,
				     0, CORE_MGMT)
	},
	{
		TRM_CFG_CORE_DUMP_EN("clust-core-0-dtcm",
				     CLUST_CORE_X_DTCM(0), CLUST_CORE_X_XTCM_LEN,
				     0x0, CLUST_CORE_X_XTCM_LEN,
				     0, CORE_OFFLOAD_0)
	},
	{
		TRM_CFG_CORE_DUMP_EN("clust-core-1-dtcm",
				     CLUST_CORE_X_DTCM(1), CLUST_CORE_X_XTCM_LEN,
				     0x0, CLUST_CORE_X_XTCM_LEN,
				     0, CORE_OFFLOAD_1)
	},
	{
		TRM_CFG_CORE_DUMP_EN("clust-core-2-dtcm",
				     CLUST_CORE_X_DTCM(2), CLUST_CORE_X_XTCM_LEN,
				     0x0, CLUST_CORE_X_XTCM_LEN,
				     0, CORE_OFFLOAD_2)
	},
	{
		TRM_CFG_CORE_DUMP_EN("clust-core-3-dtcm",
				     CLUST_CORE_X_DTCM(3), CLUST_CORE_X_XTCM_LEN,
				     0x0, CLUST_CORE_X_XTCM_LEN,
				     0, CORE_OFFLOAD_3)
	},
	{
		TRM_CFG("top-core-m-itcm",
			TOP_CORE_M_ITCM, TOP_CORE_M_XTCM_LEN,
			0x0, TOP_CORE_M_XTCM_LEN,
			0)
	},
	{
		TRM_CFG("clust-core-0-itcm",
			CLUST_CORE_X_ITCM(0), CLUST_CORE_X_XTCM_LEN,
			0x0, CLUST_CORE_X_XTCM_LEN,
			0)
	},
	{
		TRM_CFG("clust-core-1-itcm",
			CLUST_CORE_X_ITCM(1), CLUST_CORE_X_XTCM_LEN,
			0x0, CLUST_CORE_X_XTCM_LEN,
			0)
	},
	{
		TRM_CFG("clust-core-2-itcm",
			CLUST_CORE_X_ITCM(2), CLUST_CORE_X_XTCM_LEN,
			0x0, CLUST_CORE_X_XTCM_LEN,
			0)
	},
	{
		TRM_CFG("clust-core-3-itcm",
			CLUST_CORE_X_ITCM(3), CLUST_CORE_X_XTCM_LEN,
			0x0, CLUST_CORE_X_XTCM_LEN,
			0)
	},
	{
		TRM_CFG("top-l2sram",
			TOP_L2SRAM, TOP_L2SRAM_LEN,
			0x0, TOP_L2SRAM_LEN,
			0)
	},
	{
		TRM_CFG_EN("clust-l2sram",
			   CLUST_L2SRAM, CLUST_L2SRAM_LEN,
			   0x38000, 0x8000,
			   0)
	},
};

static struct trm_hw_config mcu_trm_hw_cfg = {
	.trm_cfgs = mcu_trm_cfgs,
	.cfg_len = ARRAY_SIZE(mcu_trm_cfgs),
	.trm_hw_dump = mcu_trm_hw_dump,
};

static inline void npu_write(u32 reg, u32 val)
{
	writel(val, npu.base + reg);
}

static inline void npu_set(u32 reg, u32 mask)
{
	setbits(npu.base + reg, mask);
}

static inline void npu_clr(u32 reg, u32 mask)
{
	clrbits(npu.base + reg, mask);
}

static inline void npu_rmw(u32 reg, u32 mask, u32 val)
{
	clrsetbits(npu.base + reg, mask, val);
}

static inline u32 npu_read(u32 reg)
{
	return readl(npu.base + reg);
}

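/*
 * TRM dump callback: copy @len bytes of NPU register space, starting at
 * @start_addr, into @dst one 32-bit word at a time. @len is expected to be
 * a multiple of 4.
 */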
static int mcu_trm_hw_dump(void *dst, u32 start_addr, u32 len)
{
	u32 ofs;

	if (unlikely(!dst))
		return -ENODEV;

	/* iterate on ofs so a @len that is not 4-byte aligned cannot underflow */
	for (ofs = 0; ofs < len; ofs += 0x4)
		writel(npu_read(start_addr + ofs), dst + ofs);

	return 0;
}

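/*
 * Bring-up order: bus -> sram -> xdma -> offload -> mgmt clocks, then take
 * a runtime PM reference on the power domain(s). mcu_power_off() releases
 * everything in the reverse order.
 */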
static int mcu_power_on(void)
{
	int ret = 0;

	ret = clk_prepare_enable(npu.bus_clk);
	if (ret) {
		TOPS_ERR("bus clk enable failed: %d\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(npu.sram_clk);
	if (ret) {
		TOPS_ERR("sram clk enable failed: %d\n", ret);
		goto err_disable_bus_clk;
	}

	ret = clk_prepare_enable(npu.xdma_clk);
	if (ret) {
		TOPS_ERR("xdma clk enable failed: %d\n", ret);
		goto err_disable_sram_clk;
	}

	ret = clk_prepare_enable(npu.offload_clk);
	if (ret) {
		TOPS_ERR("offload clk enable failed: %d\n", ret);
		goto err_disable_xdma_clk;
	}

	ret = clk_prepare_enable(npu.mgmt_clk);
	if (ret) {
		TOPS_ERR("mgmt clk enable failed: %d\n", ret);
		goto err_disable_offload_clk;
	}

	ret = pm_runtime_get_sync(tops_dev);
	if (ret < 0) {
		TOPS_ERR("power on failed: %d\n", ret);
		goto err_disable_mgmt_clk;
	}

	return ret;

err_disable_mgmt_clk:
	clk_disable_unprepare(npu.mgmt_clk);

err_disable_offload_clk:
	clk_disable_unprepare(npu.offload_clk);

err_disable_xdma_clk:
	clk_disable_unprepare(npu.xdma_clk);

err_disable_sram_clk:
	clk_disable_unprepare(npu.sram_clk);

err_disable_bus_clk:
	clk_disable_unprepare(npu.bus_clk);

	return ret;
}

static void mcu_power_off(void)
{
	pm_runtime_put_sync(tops_dev);

	clk_disable_unprepare(npu.mgmt_clk);
	clk_disable_unprepare(npu.offload_clk);
	clk_disable_unprepare(npu.xdma_clk);
	clk_disable_unprepare(npu.sram_clk);
	clk_disable_unprepare(npu.bus_clk);
}

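/*
 * npu.ctrl_done tracks per-core completion: a set bit means that core has
 * already acked the current command, so its complement (masked with
 * CORE_TOPS_MASK) selects the cores that still need to receive
 * npu.ctrl_msg over their mailboxes.
 */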
static inline int mcu_state_send_cmd(struct mcu_state *state)
{
	unsigned long flag;
	enum core_id core;
	u32 ctrl_cpu;
	int ret = 0;

	spin_lock_irqsave(&npu.ctrl_done_lock, flag);
	ctrl_cpu = (~npu.ctrl_done) & CORE_TOPS_MASK;
	spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);

	if (ctrl_cpu & BIT(CORE_MGMT)) {
		ret = mbox_send_msg_no_wait(&npu.send_mgmt_mbox_dev,
					    &npu.ctrl_msg);
		if (ret)
			goto out;
	}

	for (core = CORE_OFFLOAD_0; core < CORE_OFFLOAD_NUM; core++) {
		if (ctrl_cpu & BIT(core)) {
			ret = mbox_send_msg_no_wait(&npu.send_offload_mbox_dev[core],
						    &npu.ctrl_msg);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

static inline void mcu_state_trans_start(void)
{
	mod_timer(&npu.mcu_ctrl_timer,
		  jiffies + msecs_to_jiffies(MCU_STATE_TRANS_TIMEOUT));
}

static inline void mcu_state_trans_end(void)
{
	del_timer_sync(&npu.mcu_ctrl_timer);
}

static inline void mcu_state_trans_err(void)
{
	wake_up_interruptible(&npu.mcu_ctrl_wait_done);
}

static inline int mcu_state_wait_complete(void (*state_complete_cb)(void))
{
	unsigned long flag;
	int ret = 0;

	wait_event_interruptible(npu.mcu_state_wait_done,
				 (npu.ctrl_done == CORE_TOPS_MASK) ||
				 (npu.state_trans_fail));

	if (npu.state_trans_fail)
		return -EINVAL;

	npu.ctrl_msg.msg1 = npu.ctrl_done_cmd;

	spin_lock_irqsave(&npu.ctrl_done_lock, flag);
	npu.ctrl_done |= BIT(MCU_CTRL_DONE_BIT);
	spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);

	if (state_complete_cb)
		state_complete_cb();

	wake_up_interruptible(&npu.mcu_ctrl_wait_done);

	return ret;
}

static inline void mcu_state_prepare_wait(enum mcu_cmd_type done_cmd)
{
	unsigned long flag;

	/* if the user does not specify CPUs to control, default to all CPUs */
	spin_lock_irqsave(&npu.ctrl_done_lock, flag);
	if ((npu.ctrl_done & CORE_TOPS_MASK) == CORE_TOPS_MASK)
		npu.ctrl_done = 0;
	spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);

	npu.ctrl_done_cmd = done_cmd;
}

static struct mcu_state *mtk_tops_mcu_state_shutdown_trans(u32 mcu_act,
							   struct mcu_state *state)
{
	if (mcu_act == MCU_ACT_INIT)
		return &mcu_states[MCU_STATE_TYPE_INIT];

	return ERR_PTR(-ENODEV);
}

static int mtk_tops_mcu_state_shutdown_enter(struct mcu_state *state)
{
	mcu_power_off();

	mtk_tops_tdma_record_last_state();

	mtk_tops_fw_clean_up();

	npu.mcu_bring_up_done = false;

	if (npu.shutting_down) {
		npu.shutting_down = false;
		wake_up_interruptible(&npu.mcu_ctrl_wait_done);

		return 0;
	}

	if (npu.in_recover || npu.in_reset)
		mcu_ctrl_issue_pending_act(MCU_ACT_INIT);

	return 0;
}

static int mtk_tops_mcu_state_shutdown_leave(struct mcu_state *state)
{
	return 0;
}

static struct mcu_state *mtk_tops_mcu_state_init_trans(u32 mcu_act,
						       struct mcu_state *state)
{
	if (mcu_act == MCU_ACT_FREERUN)
		return &mcu_states[MCU_STATE_TYPE_FREERUN];
	else if (mcu_act == MCU_ACT_NETSTOP)
		return &mcu_states[MCU_STATE_TYPE_NETSTOP];

	return ERR_PTR(-ENODEV);
}

static void mtk_tops_mcu_state_init_enter_complete_cb(void)
{
	npu.mcu_bring_up_done = true;
	npu.in_reset = false;
	npu.in_recover = false;
	npu.netsys_fe_ser = false;

	mcu_ctrl_issue_pending_act(MCU_ACT_FREERUN);
}

static int mtk_tops_mcu_state_init_enter(struct mcu_state *state)
{
	int ret = 0;

	ret = mcu_power_on();
	if (ret)
		return ret;

	mtk_tops_mbox_clear_all_cmd();

	/* reset TDMA first */
	mtk_tops_tdma_reset();

	npu.ctrl_done = 0;
	mcu_state_prepare_wait(MCU_CMD_TYPE_INIT_DONE);

	ret = mtk_tops_fw_bring_up_default_cores();
	if (ret) {
		TOPS_ERR("bring up TOPS cores failed: %d\n", ret);
		goto out;
	}

	ret = mcu_state_wait_complete(mtk_tops_mcu_state_init_enter_complete_cb);
	if (unlikely(ret))
		TOPS_ERR("init enter failed\n");

out:
	return ret;
}

static int mtk_tops_mcu_state_init_leave(struct mcu_state *state)
{
	int ret;

	mtk_tops_tdma_enable();

	mtk_tops_tnl_offload_recover();

	/* enable cls, dipfilter */
	ret = mtk_pce_enable();
	if (ret) {
		TOPS_ERR("netsys enable failed: %d\n", ret);
		return ret;
	}

	return ret;
}

static struct mcu_state *mtk_tops_mcu_state_freerun_trans(u32 mcu_act,
							  struct mcu_state *state)
{
	if (mcu_act == MCU_ACT_RESET)
		return &mcu_states[MCU_STATE_TYPE_RESET];
	else if (mcu_act == MCU_ACT_STALL)
		return &mcu_states[MCU_STATE_TYPE_STALL];
	else if (mcu_act == MCU_ACT_NETSTOP)
		return &mcu_states[MCU_STATE_TYPE_NETSTOP];

	return ERR_PTR(-ENODEV);
}

static int mtk_tops_mcu_state_freerun_enter(struct mcu_state *state)
{
	/* TODO: switch to HW path */

	return 0;
}

static int mtk_tops_mcu_state_freerun_leave(struct mcu_state *state)
{
	/* TODO: switch to SW path */

	return 0;
}

static struct mcu_state *mtk_tops_mcu_state_stall_trans(u32 mcu_act,
							struct mcu_state *state)
{
	if (mcu_act == MCU_ACT_RESET)
		return &mcu_states[MCU_STATE_TYPE_RESET];
	else if (mcu_act == MCU_ACT_FREERUN)
		return &mcu_states[MCU_STATE_TYPE_FREERUN];
	else if (mcu_act == MCU_ACT_NETSTOP)
		return &mcu_states[MCU_STATE_TYPE_NETSTOP];

	return ERR_PTR(-ENODEV);
}

static int mtk_tops_mcu_state_stall_enter(struct mcu_state *state)
{
	int ret = 0;

	mcu_state_prepare_wait(MCU_CMD_TYPE_STALL_DONE);

	ret = mcu_state_send_cmd(state);
	if (ret)
		return ret;

	ret = mcu_state_wait_complete(NULL);
	if (ret)
		TOPS_ERR("stall enter failed\n");

	return ret;
}

static int mtk_tops_mcu_state_stall_leave(struct mcu_state *state)
{
	int ret = 0;

	/*
	 * if the next state is going to stop the network, do not send the
	 * freerun cmd to the MCU, since netstop will abort the stall anyway
	 */
	if (npu.next_state->state == MCU_STATE_TYPE_NETSTOP)
		return 0;

	mcu_state_prepare_wait(MCU_CMD_TYPE_FREERUN_DONE);

	ret = mcu_state_send_cmd(state);
	if (ret)
		return ret;

	ret = mcu_state_wait_complete(NULL);
	if (ret)
		TOPS_ERR("stall leave failed\n");

	return ret;
}

static struct mcu_state *mtk_tops_mcu_state_netstop_trans(u32 mcu_act,
							  struct mcu_state *state)
{
	if (mcu_act == MCU_ACT_ABNORMAL)
		return &mcu_states[MCU_STATE_TYPE_ABNORMAL];
	else if (mcu_act == MCU_ACT_RESET)
		return &mcu_states[MCU_STATE_TYPE_RESET];
	else if (mcu_act == MCU_ACT_SHUTDOWN)
		return &mcu_states[MCU_STATE_TYPE_SHUTDOWN];

	return ERR_PTR(-ENODEV);
}

static int mtk_tops_mcu_state_netstop_enter(struct mcu_state *state)
{
	mtk_tops_tnl_offload_flush();

	mtk_pce_disable();

	mtk_tops_tdma_disable();

	if (npu.in_recover)
		mcu_ctrl_issue_pending_act(MCU_ACT_ABNORMAL);
	else if (npu.in_reset)
		mcu_ctrl_issue_pending_act(MCU_ACT_RESET);
	else
		mcu_ctrl_issue_pending_act(MCU_ACT_SHUTDOWN);

	return 0;
}

static int mtk_tops_mcu_state_netstop_leave(struct mcu_state *state)
{
	return 0;
}

static struct mcu_state *mtk_tops_mcu_state_reset_trans(u32 mcu_act,
							struct mcu_state *state)
{
	if (mcu_act == MCU_ACT_FREERUN)
		return &mcu_states[MCU_STATE_TYPE_FREERUN];
	else if (mcu_act == MCU_ACT_SHUTDOWN)
		return &mcu_states[MCU_STATE_TYPE_SHUTDOWN];
	else if (mcu_act == MCU_ACT_NETSTOP)
		/*
		 * since netstop is already done before reset,
		 * there is no need to do it again; go to abnormal directly
		 */
		return &mcu_states[MCU_STATE_TYPE_ABNORMAL];

	return ERR_PTR(-ENODEV);
}

static int mtk_tops_mcu_state_reset_enter(struct mcu_state *state)
{
	int ret = 0;

	mcu_state_prepare_wait(MCU_CMD_TYPE_ASSERT_RESET_DONE);

	if (!npu.netsys_fe_ser) {
		ret = mcu_state_send_cmd(state);
		if (ret)
			return ret;
	} else {
		/* skip asserting MCU reset on NETSYS SER */
		npu.ctrl_done = CORE_TOPS_MASK;
	}

	ret = mcu_state_wait_complete(NULL);
	if (ret)
		TOPS_ERR("assert reset failed\n");

	return ret;
}

static int mtk_tops_mcu_state_reset_leave(struct mcu_state *state)
{
	int ret = 0;

	/*
	 * if the next state is going to shut down,
	 * there is no need to let the MCU run the release reset cmd
	 */
	if (npu.next_state->state == MCU_STATE_TYPE_ABNORMAL
	    || npu.next_state->state == MCU_STATE_TYPE_SHUTDOWN)
		return 0;

	mcu_state_prepare_wait(MCU_CMD_TYPE_RELEASE_RESET_DONE);

	ret = mcu_state_send_cmd(state);
	if (ret)
		return ret;

	ret = mcu_state_wait_complete(NULL);
	if (ret)
		TOPS_ERR("release reset failed\n");

	return ret;
}

static struct mcu_state *mtk_tops_mcu_state_abnormal_trans(u32 mcu_act,
							   struct mcu_state *state)
{
	if (mcu_act == MCU_ACT_SHUTDOWN)
		return &mcu_states[MCU_STATE_TYPE_SHUTDOWN];

	return ERR_PTR(-ENODEV);
}

static int mtk_tops_mcu_state_abnormal_enter(struct mcu_state *state)
{
	mcu_ctrl_issue_pending_act(MCU_ACT_SHUTDOWN);

	return 0;
}

static int mtk_tops_mcu_state_abnormal_leave(struct mcu_state *state)
{
	if (npu.mcu_bring_up_done)
		mtk_trm_dump(TRM_RSN_MCU_STATE_ACT_FAIL);

	return 0;
}

static int mtk_tops_mcu_state_transition(u32 mcu_act)
{
	int ret = 0;

	npu.next_state = npu.cur_state->state_trans(mcu_act, npu.cur_state);
	if (IS_ERR(npu.next_state))
		return PTR_ERR(npu.next_state);

	/* skip mcu_state leave if the current MCU_ACT signals a failure */
	if (unlikely(mcu_act == MCU_ACT_ABNORMAL))
		goto skip_state_leave;

	mcu_state_trans_start();
	if (npu.cur_state->leave) {
		ret = npu.cur_state->leave(npu.cur_state);
		if (ret) {
			TOPS_ERR("state%d transition leave failed: %d\n",
				 npu.cur_state->state, ret);
			goto state_trans_end;
		}
	}
	mcu_state_trans_end();

skip_state_leave:
	npu.cur_state = npu.next_state;

	mcu_state_trans_start();
	if (npu.cur_state->enter) {
		ret = npu.cur_state->enter(npu.cur_state);
		if (ret) {
			TOPS_ERR("state%d transition enter failed: %d\n",
				 npu.cur_state->state, ret);
			goto state_trans_end;
		}
	}

state_trans_end:
	mcu_state_trans_end();

	return ret;
}

static void mtk_tops_mcu_state_trans_timeout(struct timer_list *timer)
{
	TOPS_ERR("state%d transition timeout!\n", npu.cur_state->state);
	TOPS_ERR("ctrl_done=0x%x ctrl_msg.msg1: 0x%x\n",
		 npu.ctrl_done, npu.ctrl_msg.msg1);

	npu.state_trans_fail = true;

	wake_up_interruptible(&npu.mcu_state_wait_done);
}

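/*
 * Control message layout on the AP -> MCU mailboxes: msg1 carries the
 * mcu_cmd_type, msg2 the triggering event, msg3/msg4 the two optional
 * arguments. A non-zero core_mask pre-marks the excluded cores as "done",
 * so only the selected cores are commanded and waited for.
 */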
static inline int mcu_ctrl_cmd_prepare(enum mcu_cmd_type cmd,
				       struct mcu_ctrl_cmd *mcmd)
{
	if (!mcmd || cmd == MCU_CMD_TYPE_NULL || cmd >= __MCU_CMD_TYPE_MAX)
		return -EINVAL;

	lockdep_assert_held(&npu.mcu_ctrl_lock);

	npu.ctrl_msg.msg1 = cmd;
	npu.ctrl_msg.msg2 = mcmd->e;
	npu.ctrl_msg.msg3 = mcmd->arg[0];
	npu.ctrl_msg.msg4 = mcmd->arg[1];

	if (mcmd->core_mask) {
		unsigned long flag;

		spin_lock_irqsave(&npu.ctrl_done_lock, flag);
		npu.ctrl_done = ~(CORE_TOPS_MASK & mcmd->core_mask);
		npu.ctrl_done &= CORE_TOPS_MASK;
		spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);
	}

	return 0;
}

static inline void mcu_ctrl_callback(void (*callback)(void *param), void *param)
{
	if (callback)
		callback(param);
}

static inline void mcu_ctrl_issue_pending_act(u32 mcu_act)
{
	unsigned long flag;

	spin_lock_irqsave(&npu.pending_act_lock, flag);

	npu.pending_act |= mcu_act;

	spin_unlock_irqrestore(&npu.pending_act_lock, flag);

	wake_up_interruptible(&npu.mcu_ctrl_wait_act);
}

static inline enum mcu_act mcu_ctrl_pop_pending_act(void)
{
	unsigned long flag;
	enum mcu_act act;

	spin_lock_irqsave(&npu.pending_act_lock, flag);

	act = ffs(npu.pending_act) - 1;
	npu.pending_act &= ~BIT(act);

	spin_unlock_irqrestore(&npu.pending_act_lock, flag);

	return act;
}

static inline bool mcu_ctrl_is_complete(enum mcu_cmd_type done_cmd)
{
	unsigned long flag;
	bool ctrl_done;

	spin_lock_irqsave(&npu.ctrl_done_lock, flag);
	ctrl_done = npu.ctrl_done == MCU_CTRL_DONE && npu.ctrl_msg.msg1 == done_cmd;
	spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);

	return ctrl_done;
}

static inline void mcu_ctrl_done(enum core_id core)
{
	unsigned long flag;

	if (core > CORE_MGMT)
		return;

	spin_lock_irqsave(&npu.ctrl_done_lock, flag);
	npu.ctrl_done |= BIT(core);
	spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);
}

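/*
 * Control-thread event loop: actions are posted as bits in npu.pending_act
 * and served one at a time. ffs() pops the lowest set bit first, so
 * lower-numbered MCU_ACT_* values take priority.
 */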
static int mcu_ctrl_task(void *data)
{
	enum mcu_act act;
	int ret;

	while (1) {
		wait_event_interruptible(npu.mcu_ctrl_wait_act,
					 npu.pending_act || kthread_should_stop());

		if (kthread_should_stop()) {
			TOPS_INFO("tops mcu ctrl task stop\n");
			break;
		}

		act = mcu_ctrl_pop_pending_act();
		if (unlikely(act >= __MCU_ACT_MAX)) {
			TOPS_ERR("invalid MCU act: %u\n", act);
			continue;
		}

		/*
		 * ensure that the act was submitted by either
		 * mtk_tops_mcu_stall, mtk_tops_mcu_reset or mtk_tops_mcu_cold_boot;
		 * if mcu_act is ABNORMAL, it must have been caused by a state
		 * transition triggered by the APIs above.
		 * As a result, mcu_ctrl_lock must be held before mcu_ctrl_task runs
		 */
		lockdep_assert_held(&npu.mcu_ctrl_lock);

		if (unlikely(!npu.cur_state->state_trans)) {
			TOPS_ERR("cur state has no state_trans()\n");
			WARN_ON(1);
		}

		ret = mtk_tops_mcu_state_transition(BIT(act));
		if (ret) {
			npu.state_trans_fail = true;

			mcu_state_trans_err();
		}
	}

	return 0;
}

bool mtk_tops_mcu_alive(void)
{
	return npu.mcu_bring_up_done && !npu.in_reset && !npu.state_trans_fail;
}

bool mtk_tops_mcu_bring_up_done(void)
{
	return npu.mcu_bring_up_done;
}

bool mtk_tops_mcu_netsys_fe_rst(void)
{
	return npu.netsys_fe_ser;
}

static int mtk_tops_mcu_wait_done(enum mcu_cmd_type done_cmd)
{
	int ret = 0;

	wait_event_interruptible(npu.mcu_ctrl_wait_done,
				 mcu_ctrl_is_complete(done_cmd)
				 || npu.state_trans_fail);

	if (npu.state_trans_fail)
		return -EINVAL;

	return ret;
}

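/*
 * Stall the TOPS cores, run @callback while they are halted, then return
 * them to freerun. A minimal caller sketch (event, update_cb and ctx are
 * illustrative placeholders, not definitions from this driver):
 *
 *	struct mcu_ctrl_cmd mcmd = {
 *		.e = event,	// a valid enum mcu_event_type value
 *		.core_mask = 0,	// 0 selects every TOPS core
 *	};
 *
 *	ret = mtk_tops_mcu_stall(&mcmd, update_cb, ctx);
 *
 * update_cb(ctx) runs with the selected cores stalled; the cores are back
 * in FREERUN by the time this function returns 0.
 */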
int mtk_tops_mcu_stall(struct mcu_ctrl_cmd *mcmd,
		       void (*callback)(void *param), void *param)
{
	int ret = 0;

	if (unlikely(!npu.mcu_bring_up_done || npu.state_trans_fail))
		return -EBUSY;

	if (unlikely(!mcmd || mcmd->e >= __MCU_EVENT_TYPE_MAX))
		return -EINVAL;

	mutex_lock(&npu.mcu_ctrl_lock);

	/* go to stall state */
	ret = mcu_ctrl_cmd_prepare(MCU_CMD_TYPE_STALL, mcmd);
	if (ret)
		goto unlock;

	mcu_ctrl_issue_pending_act(MCU_ACT_STALL);

	ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_STALL_DONE);
	if (ret) {
		TOPS_ERR("tops stall failed: %d\n", ret);
		goto recover_mcu;
	}

	mcu_ctrl_callback(callback, param);

	/* go back to freerun state */
	ret = mcu_ctrl_cmd_prepare(MCU_CMD_TYPE_FREERUN, mcmd);
	if (ret)
		goto recover_mcu;

	mcu_ctrl_issue_pending_act(MCU_ACT_FREERUN);

	ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_FREERUN_DONE);
	if (ret) {
		TOPS_ERR("tops freerun failed: %d\n", ret);
		goto recover_mcu;
	}

	/* stall-freerun sequence completed successfully */
	goto unlock;

recover_mcu:
	schedule_work(&npu.recover_work);

unlock:
	mutex_unlock(&npu.mcu_ctrl_lock);

	return ret;
}

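/*
 * Full reset sequence: stop the network path (NETSTOP), assert MCU reset,
 * run @callback, then either shut down and re-init (WDT timeout / FE reset
 * events) or release reset and go back to FREERUN.
 */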
int mtk_tops_mcu_reset(struct mcu_ctrl_cmd *mcmd,
		       void (*callback)(void *param), void *param)
{
	int ret = 0;

	if (unlikely(!npu.mcu_bring_up_done || npu.state_trans_fail))
		return -EBUSY;

	if (unlikely(!mcmd || mcmd->e >= __MCU_EVENT_TYPE_MAX))
		return -EINVAL;

	mutex_lock(&npu.mcu_ctrl_lock);

	npu.in_reset = true;
	if (mcmd->e == MCU_EVENT_TYPE_FE_RESET)
		npu.netsys_fe_ser = true;

	ret = mcu_ctrl_cmd_prepare(MCU_CMD_TYPE_ASSERT_RESET, mcmd);
	if (ret)
		goto unlock;

	mcu_ctrl_issue_pending_act(MCU_ACT_NETSTOP);

	ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_ASSERT_RESET_DONE);
	if (ret) {
		TOPS_ERR("tops assert reset failed: %d\n", ret);
		goto recover_mcu;
	}

	mcu_ctrl_callback(callback, param);

	switch (mcmd->e) {
	case MCU_EVENT_TYPE_WDT_TIMEOUT:
	case MCU_EVENT_TYPE_FE_RESET:
		mcu_ctrl_issue_pending_act(MCU_ACT_SHUTDOWN);

		ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_INIT_DONE);
		if (ret)
			goto recover_mcu;

		break;
	default:
		ret = mcu_ctrl_cmd_prepare(MCU_CMD_TYPE_RELEASE_RESET, mcmd);
		if (ret)
			goto recover_mcu;

		mcu_ctrl_issue_pending_act(MCU_ACT_FREERUN);

		ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_RELEASE_RESET_DONE);
		if (ret)
			goto recover_mcu;

		break;
	}

	goto unlock;

recover_mcu:
	schedule_work(&npu.recover_work);

unlock:
	mutex_unlock(&npu.mcu_ctrl_lock);

	return ret;
}

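/*
 * Recovery worker: drive the state machine back through NETSTOP/INIT until
 * cold boot completes, retrying once per second, unless a shutdown has
 * been requested in the meantime.
 */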
static void mtk_tops_mcu_recover_work(struct work_struct *work)
{
	int ret;

	mutex_lock(&npu.mcu_ctrl_lock);

	if (!npu.mcu_bring_up_done && !npu.in_reset && !npu.state_trans_fail)
		mcu_ctrl_issue_pending_act(MCU_ACT_INIT);
	else if (npu.in_reset || npu.state_trans_fail)
		mcu_ctrl_issue_pending_act(MCU_ACT_NETSTOP);

	npu.state_trans_fail = false;
	npu.in_recover = true;

	while ((ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_INIT_DONE))) {
		if (npu.shutting_down)
			goto unlock;

		npu.mcu_bring_up_done = false;
		npu.state_trans_fail = false;
		TOPS_ERR("bring up failed: %d\n", ret);

		msleep(1000);

		mcu_ctrl_issue_pending_act(MCU_ACT_NETSTOP);
	}

unlock:
	mutex_unlock(&npu.mcu_ctrl_lock);
}

static int mtk_tops_mcu_register_mbox(void)
{
	int ret;
	int i;

	ret = register_mbox_dev(MBOX_SEND, &npu.send_mgmt_mbox_dev);
	if (ret) {
		TOPS_ERR("register mcu_ctrl mgmt mbox send failed: %d\n", ret);
		return ret;
	}

	ret = register_mbox_dev(MBOX_RECV, &npu.recv_mgmt_mbox_dev);
	if (ret) {
		TOPS_ERR("register mcu_ctrl mgmt mbox recv failed: %d\n", ret);
		goto err_unregister_mgmt_mbox_send;
	}

	for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
		ret = register_mbox_dev(MBOX_SEND, &npu.send_offload_mbox_dev[i]);
		if (ret) {
			TOPS_ERR("register mcu_ctrl offload %d mbox send failed: %d\n",
				 i, ret);
			goto err_unregister_offload_mbox;
		}

		ret = register_mbox_dev(MBOX_RECV, &npu.recv_offload_mbox_dev[i]);
		if (ret) {
			TOPS_ERR("register mcu_ctrl offload %d mbox recv failed: %d\n",
				 i, ret);
			unregister_mbox_dev(MBOX_SEND, &npu.send_offload_mbox_dev[i]);
			goto err_unregister_offload_mbox;
		}
	}

	return ret;

err_unregister_offload_mbox:
	for (i -= 1; i >= 0; i--) {
		unregister_mbox_dev(MBOX_RECV, &npu.recv_offload_mbox_dev[i]);
		unregister_mbox_dev(MBOX_SEND, &npu.send_offload_mbox_dev[i]);
	}

	unregister_mbox_dev(MBOX_RECV, &npu.recv_mgmt_mbox_dev);

err_unregister_mgmt_mbox_send:
	unregister_mbox_dev(MBOX_SEND, &npu.send_mgmt_mbox_dev);

	return ret;
}

static void mtk_tops_mcu_unregister_mbox(void)
{
	int i;

	unregister_mbox_dev(MBOX_SEND, &npu.send_mgmt_mbox_dev);
	unregister_mbox_dev(MBOX_RECV, &npu.recv_mgmt_mbox_dev);

	for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
		unregister_mbox_dev(MBOX_SEND, &npu.send_offload_mbox_dev[i]);
		unregister_mbox_dev(MBOX_RECV, &npu.recv_offload_mbox_dev[i]);
	}
}

static void mtk_tops_mcu_shutdown(void)
{
	npu.shutting_down = true;

	mutex_lock(&npu.mcu_ctrl_lock);

	mcu_ctrl_issue_pending_act(MCU_ACT_NETSTOP);

	wait_event_interruptible(npu.mcu_ctrl_wait_done,
				 !npu.mcu_bring_up_done && !npu.shutting_down);

	mutex_unlock(&npu.mcu_ctrl_lock);
}

/* TODO: should be implemented so it does not block other modules' init tasks */
static int mtk_tops_mcu_cold_boot(void)
{
	int ret = 0;

	npu.cur_state = &mcu_states[MCU_STATE_TYPE_SHUTDOWN];

	mutex_lock(&npu.mcu_ctrl_lock);

	mcu_ctrl_issue_pending_act(MCU_ACT_INIT);
	ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_INIT_DONE);

	mutex_unlock(&npu.mcu_ctrl_lock);
	if (!ret)
		return ret;

	TOPS_ERR("cold boot failed: %d\n", ret);

	schedule_work(&npu.recover_work);

	return 0;
}

int mtk_tops_mcu_bring_up(struct platform_device *pdev)
{
	int ret = 0;

	pm_runtime_enable(&pdev->dev);

	ret = mtk_tops_mcu_register_mbox();
	if (ret) {
		TOPS_ERR("register mcu ctrl mbox failed: %d\n", ret);
		goto runtime_disable;
	}

	npu.mcu_ctrl_thread = kthread_run(mcu_ctrl_task, NULL, "tops mcu ctrl task");
	if (IS_ERR(npu.mcu_ctrl_thread)) {
		ret = PTR_ERR(npu.mcu_ctrl_thread);
		TOPS_ERR("mcu ctrl thread create failed: %d\n", ret);
		goto err_unregister_mbox;
	}

	ret = mtk_tops_mcu_cold_boot();
	if (ret) {
		TOPS_ERR("cold boot failed: %d\n", ret);
		goto err_stop_mcu_ctrl_thread;
	}

	return ret;

err_stop_mcu_ctrl_thread:
	kthread_stop(npu.mcu_ctrl_thread);

err_unregister_mbox:
	mtk_tops_mcu_unregister_mbox();

runtime_disable:
	pm_runtime_disable(&pdev->dev);

	return ret;
}

void mtk_tops_mcu_tear_down(struct platform_device *pdev)
{
	mtk_tops_mcu_shutdown();

	kthread_stop(npu.mcu_ctrl_thread);

	/* TODO: stop mcu? */

	mtk_tops_mcu_unregister_mbox();

	pm_runtime_disable(&pdev->dev);
}

static int mtk_tops_mcu_dts_init(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct resource *res = NULL;
	int ret = 0;

	if (!node)
		return -EINVAL;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tops-base");
	if (!res) {
		TOPS_ERR("cannot find tops base\n");
		return -ENXIO;
	}

	npu.base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!npu.base) {
		TOPS_ERR("map tops base failed\n");
		return -ENOMEM;
	}

	npu.bus_clk = devm_clk_get(tops_dev, "bus");
	if (IS_ERR(npu.bus_clk)) {
		TOPS_ERR("get bus clk failed: %ld\n", PTR_ERR(npu.bus_clk));
		return PTR_ERR(npu.bus_clk);
	}

	npu.sram_clk = devm_clk_get(tops_dev, "sram");
	if (IS_ERR(npu.sram_clk)) {
		TOPS_ERR("get sram clk failed: %ld\n", PTR_ERR(npu.sram_clk));
		return PTR_ERR(npu.sram_clk);
	}

	npu.xdma_clk = devm_clk_get(tops_dev, "xdma");
	if (IS_ERR(npu.xdma_clk)) {
		TOPS_ERR("get xdma clk failed: %ld\n", PTR_ERR(npu.xdma_clk));
		return PTR_ERR(npu.xdma_clk);
	}

	npu.offload_clk = devm_clk_get(tops_dev, "offload");
	if (IS_ERR(npu.offload_clk)) {
		TOPS_ERR("get offload clk failed: %ld\n", PTR_ERR(npu.offload_clk));
		return PTR_ERR(npu.offload_clk);
	}

	npu.mgmt_clk = devm_clk_get(tops_dev, "mgmt");
	if (IS_ERR(npu.mgmt_clk)) {
		TOPS_ERR("get mgmt clk failed: %ld\n", PTR_ERR(npu.mgmt_clk));
		return PTR_ERR(npu.mgmt_clk);
	}

	return ret;
}

static void mtk_tops_mcu_pm_domain_detach(void)
{
	int i = npu.pd_num;

	while (--i >= 0) {
		device_link_del(npu.pd_links[i]);
		dev_pm_domain_detach(npu.pd_devices[i], true);
	}
}

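/*
 * With more than one power domain the PM core cannot attach them
 * automatically, so attach each domain by index and tie its lifetime to
 * this device with a stateless, runtime-PM-managed device link.
 */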
static int mtk_tops_mcu_pm_domain_attach(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret = 0;
	int i;

	npu.pd_num = of_count_phandle_with_args(dev->of_node,
						"power-domains",
						"#power-domain-cells");

	/* if only one power domain exists, there is no need to link devices */
	if (npu.pd_num <= 1)
		return 0;

	npu.pd_devices = devm_kmalloc_array(dev, npu.pd_num,
					    sizeof(*npu.pd_devices),
					    GFP_KERNEL);
	if (!npu.pd_devices)
		return -ENOMEM;

	npu.pd_links = devm_kmalloc_array(dev, npu.pd_num,
					  sizeof(*npu.pd_links),
					  GFP_KERNEL);
	if (!npu.pd_links)
		return -ENOMEM;

	for (i = 0; i < npu.pd_num; i++) {
		npu.pd_devices[i] = dev_pm_domain_attach_by_id(dev, i);
		if (IS_ERR(npu.pd_devices[i])) {
			ret = PTR_ERR(npu.pd_devices[i]);
			goto pm_attach_fail;
		}

		npu.pd_links[i] = device_link_add(dev, npu.pd_devices[i],
						  DL_FLAG_STATELESS |
						  DL_FLAG_PM_RUNTIME);
		if (!npu.pd_links[i]) {
			ret = -EINVAL;
			dev_pm_domain_detach(npu.pd_devices[i], false);
			goto pm_attach_fail;
		}
	}

	return 0;

pm_attach_fail:
	TOPS_ERR("attach power domain failed: %d\n", ret);

	while (--i >= 0) {
		device_link_del(npu.pd_links[i]);
		dev_pm_domain_detach(npu.pd_devices[i], false);
	}

	return ret;
}

int mtk_tops_mcu_init(struct platform_device *pdev)
{
	int ret = 0;

	ret = dma_set_mask(tops_dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ret = mtk_tops_mcu_dts_init(pdev);
	if (ret)
		return ret;

	ret = mtk_tops_mcu_pm_domain_attach(pdev);
	if (ret)
		return ret;

	INIT_WORK(&npu.recover_work, mtk_tops_mcu_recover_work);
	init_waitqueue_head(&npu.mcu_ctrl_wait_act);
	init_waitqueue_head(&npu.mcu_ctrl_wait_done);
	init_waitqueue_head(&npu.mcu_state_wait_done);
	spin_lock_init(&npu.pending_act_lock);
	spin_lock_init(&npu.ctrl_done_lock);
	mutex_init(&npu.mcu_ctrl_lock);
	timer_setup(&npu.mcu_ctrl_timer, mtk_tops_mcu_state_trans_timeout, 0);

	ret = mtk_trm_hw_config_register(TRM_TOPS, &mcu_trm_hw_cfg);
	if (ret) {
		TOPS_ERR("TRM register failed: %d\n", ret);
		return ret;
	}

	return ret;
}

void mtk_tops_mcu_deinit(struct platform_device *pdev)
{
	mtk_trm_hw_config_unregister(TRM_TOPS, &mcu_trm_hw_cfg);

	mtk_tops_mcu_pm_domain_detach();
}

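/*
 * AP-side mailbox receive paths: the management-core and offload-core
 * handlers share the same logic. An echo of the expected "done" command
 * marks the sending core as complete; anything else flags the transition
 * as failed. Either way, the state waiter is woken up.
 */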
static enum mbox_msg_cnt mtk_tops_ap_recv_mgmt_mbox_msg(struct mailbox_dev *mdev,
							struct mailbox_msg *msg)
{
	if (msg->msg1 == npu.ctrl_done_cmd)
		/* mcu side state transition success */
		mcu_ctrl_done(mdev->core);
	else
		/* mcu side state transition failed */
		npu.state_trans_fail = true;

	wake_up_interruptible(&npu.mcu_state_wait_done);

	return MBOX_NO_RET_MSG;
}

static enum mbox_msg_cnt mtk_tops_ap_recv_offload_mbox_msg(struct mailbox_dev *mdev,
							   struct mailbox_msg *msg)
{
	if (msg->msg1 == npu.ctrl_done_cmd)
		/* mcu side state transition success */
		mcu_ctrl_done(mdev->core);
	else
		/* mcu side state transition failed */
		npu.state_trans_fail = true;

	wake_up_interruptible(&npu.mcu_state_wait_done);

	return MBOX_NO_RET_MSG;
}