// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
 *
 * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <pce/pce.h>

#include "tops/ctrl.h"
#include "tops/firmware.h"
#include "tops/hpdma.h"
#include "tops/internal.h"
#include "tops/mbox.h"
#include "tops/mcu.h"
#include "tops/misc.h"
#include "tops/netsys.h"
#include "tops/tdma.h"
#include "tops/trm.h"

#define TDMA_TIMEOUT_MAX_CNT		(3)
#define TDMA_TIMEOUT_DELAY		(100)	/* 100ms */

#define MCU_STATE_TRANS_TIMEOUT		(5000)	/* 5000ms */
#define MCU_CTRL_DONE_BIT		(31)
#define MCU_CTRL_DONE			(CORE_TOPS_MASK | \
					 BIT(MCU_CTRL_DONE_BIT))
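
/*
 * ctrl_done bookkeeping: the low bits (CORE_TOPS_MASK) are set as each TOPS
 * core acks the current control command; MCU_CTRL_DONE_BIT is set once all
 * cores have reported back, so ctrl_done == MCU_CTRL_DONE marks a fully
 * completed command.
 */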

/* TRM dump length */
#define TOP_CORE_BASE_LEN		(0x80)
#define TOP_L2SRAM_LEN			(0x40000)
#define TOP_CORE_M_XTCM_LEN		(0x8000)

#define CLUST_CORE_BASE_LEN		(0x80)
#define CLUST_L2SRAM_LEN		(0x40000)
#define CLUST_CORE_X_XTCM_LEN		(0x8000)

/* MCU State */
#define MCU_STATE_FUNC_DECLARE(name)						\
static int mtk_tops_mcu_state_ ## name ## _enter(struct mcu_state *state);	\
static int mtk_tops_mcu_state_ ## name ## _leave(struct mcu_state *state);	\
static struct mcu_state *mtk_tops_mcu_state_ ## name ## _trans(			\
							u32 mcu_act,		\
							struct mcu_state *state)

#define MCU_STATE_DATA(name, id)						\
	[id] = {								\
		.state = id,							\
		.state_trans = mtk_tops_mcu_state_ ## name ## _trans,		\
		.enter = mtk_tops_mcu_state_ ## name ## _enter,			\
		.leave = mtk_tops_mcu_state_ ## name ## _leave,			\
	}

static inline void mcu_ctrl_issue_pending_act(u32 mcu_act);
static enum mbox_msg_cnt mtk_tops_ap_recv_mgmt_mbox_msg(struct mailbox_dev *mdev,
							struct mailbox_msg *msg);
static enum mbox_msg_cnt mtk_tops_ap_recv_offload_mbox_msg(struct mailbox_dev *mdev,
							   struct mailbox_msg *msg);
static int mcu_trm_hw_dump(void *dst, u32 ofs, u32 len);

MCU_STATE_FUNC_DECLARE(shutdown);
MCU_STATE_FUNC_DECLARE(init);
MCU_STATE_FUNC_DECLARE(freerun);
MCU_STATE_FUNC_DECLARE(stall);
MCU_STATE_FUNC_DECLARE(netstop);
MCU_STATE_FUNC_DECLARE(reset);
MCU_STATE_FUNC_DECLARE(abnormal);

struct npu {
	void __iomem *base;

	struct clk *bus_clk;
	struct clk *sram_clk;
	struct clk *xdma_clk;
	struct clk *offload_clk;
	struct clk *mgmt_clk;

	struct device **pd_devices;
	struct device_link **pd_links;
	int pd_num;

	struct task_struct *mcu_ctrl_thread;
	struct timer_list mcu_ctrl_timer;
	struct mcu_state *next_state;
	struct mcu_state *cur_state;
	/* ensure that only one user can trigger a state transition at a time */
	struct mutex mcu_ctrl_lock;
	spinlock_t pending_act_lock;
	wait_queue_head_t mcu_ctrl_wait_act;
	wait_queue_head_t mcu_state_wait_done;
	bool mcu_bring_up_done;
	bool state_trans_fail;
	u32 pending_act;

	spinlock_t ctrl_done_lock;
	wait_queue_head_t mcu_ctrl_wait_done;
	enum mcu_cmd_type ctrl_done_cmd;
	/* MSB set means MCU control is done; otherwise it is still ongoing */
	u32 ctrl_done;

	struct work_struct recover_work;
	bool in_reset;
	bool in_recover;
	bool netsys_fe_ser;
	bool shuting_down;

	struct mailbox_msg ctrl_msg;
	struct mailbox_dev recv_mgmt_mbox_dev;
	struct mailbox_dev send_mgmt_mbox_dev;

	struct mailbox_dev recv_offload_mbox_dev[CORE_OFFLOAD_NUM];
	struct mailbox_dev send_offload_mbox_dev[CORE_OFFLOAD_NUM];
};
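
/*
 * MCU state machine, as encoded by the *_trans handlers below:
 *
 *   SHUTDOWN -> INIT -> FREERUN <-> STALL
 *   INIT/FREERUN/STALL -> NETSTOP -> RESET | ABNORMAL | SHUTDOWN
 *   RESET -> FREERUN | SHUTDOWN | ABNORMAL
 *   ABNORMAL -> SHUTDOWN
 *
 * Transitions are requested as MCU_ACT_* bits via
 * mcu_ctrl_issue_pending_act() and consumed by mcu_ctrl_task().
 */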
static struct mcu_state mcu_states[__MCU_STATE_TYPE_MAX] = {
	MCU_STATE_DATA(shutdown, MCU_STATE_TYPE_SHUTDOWN),
	MCU_STATE_DATA(init, MCU_STATE_TYPE_INIT),
	MCU_STATE_DATA(freerun, MCU_STATE_TYPE_FREERUN),
	MCU_STATE_DATA(stall, MCU_STATE_TYPE_STALL),
	MCU_STATE_DATA(netstop, MCU_STATE_TYPE_NETSTOP),
	MCU_STATE_DATA(reset, MCU_STATE_TYPE_RESET),
	MCU_STATE_DATA(abnormal, MCU_STATE_TYPE_ABNORMAL),
};

static struct npu npu = {
	.send_mgmt_mbox_dev = MBOX_SEND_MGMT_DEV(CORE_CTRL),
	.send_offload_mbox_dev = {
		[CORE_OFFLOAD_0] = MBOX_SEND_OFFLOAD_DEV(0, CORE_CTRL),
		[CORE_OFFLOAD_1] = MBOX_SEND_OFFLOAD_DEV(1, CORE_CTRL),
		[CORE_OFFLOAD_2] = MBOX_SEND_OFFLOAD_DEV(2, CORE_CTRL),
		[CORE_OFFLOAD_3] = MBOX_SEND_OFFLOAD_DEV(3, CORE_CTRL),
	},
	.recv_mgmt_mbox_dev =
		MBOX_RECV_MGMT_DEV(CORE_CTRL, mtk_tops_ap_recv_mgmt_mbox_msg),
	.recv_offload_mbox_dev = {
		[CORE_OFFLOAD_0] =
			MBOX_RECV_OFFLOAD_DEV(0,
					      CORE_CTRL,
					      mtk_tops_ap_recv_offload_mbox_msg),
		[CORE_OFFLOAD_1] =
			MBOX_RECV_OFFLOAD_DEV(1,
					      CORE_CTRL,
					      mtk_tops_ap_recv_offload_mbox_msg),
		[CORE_OFFLOAD_2] =
			MBOX_RECV_OFFLOAD_DEV(2,
					      CORE_CTRL,
					      mtk_tops_ap_recv_offload_mbox_msg),
		[CORE_OFFLOAD_3] =
			MBOX_RECV_OFFLOAD_DEV(3,
					      CORE_CTRL,
					      mtk_tops_ap_recv_offload_mbox_msg),
	},
};

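/*
 * Regions dumped through the TRM interface (tops/trm.h) when TOPS fails,
 * presumably for post-mortem debugging: per-core base registers, the
 * management/offload cores' ITCM/DTCM, and the L2 SRAM windows.
 */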
static struct trm_config mcu_trm_cfgs[] = {
	{
		TRM_CFG_EN("top-core-base",
			   TOP_CORE_BASE, TOP_CORE_BASE_LEN,
			   0x0, TOP_CORE_BASE_LEN,
			   0)
	},
	{
		TRM_CFG_EN("clust-core0-base",
			   CLUST_CORE_BASE(0), CLUST_CORE_BASE_LEN,
			   0x0, CLUST_CORE_BASE_LEN,
			   0)
	},
	{
		TRM_CFG_EN("clust-core1-base",
			   CLUST_CORE_BASE(1), CLUST_CORE_BASE_LEN,
			   0x0, CLUST_CORE_BASE_LEN,
			   0)
	},
	{
		TRM_CFG_EN("clust-core2-base",
			   CLUST_CORE_BASE(2), CLUST_CORE_BASE_LEN,
			   0x0, CLUST_CORE_BASE_LEN,
			   0)
	},
	{
		TRM_CFG_EN("clust-core3-base",
			   CLUST_CORE_BASE(3), CLUST_CORE_BASE_LEN,
			   0x0, CLUST_CORE_BASE_LEN,
			   0)
	},
	{
		TRM_CFG_CORE_DUMP_EN("top-core-m-dtcm",
				     TOP_CORE_M_DTCM, TOP_CORE_M_XTCM_LEN,
				     0x0, TOP_CORE_M_XTCM_LEN,
				     0, CORE_MGMT)
	},
	{
		TRM_CFG_CORE_DUMP_EN("clust-core-0-dtcm",
				     CLUST_CORE_X_DTCM(0), CLUST_CORE_X_XTCM_LEN,
				     0x0, CLUST_CORE_X_XTCM_LEN,
				     0, CORE_OFFLOAD_0)
	},
	{
		TRM_CFG_CORE_DUMP_EN("clust-core-1-dtcm",
				     CLUST_CORE_X_DTCM(1), CLUST_CORE_X_XTCM_LEN,
				     0x0, CLUST_CORE_X_XTCM_LEN,
				     0, CORE_OFFLOAD_1)
	},
	{
		TRM_CFG_CORE_DUMP_EN("clust-core-2-dtcm",
				     CLUST_CORE_X_DTCM(2), CLUST_CORE_X_XTCM_LEN,
				     0x0, CLUST_CORE_X_XTCM_LEN,
				     0, CORE_OFFLOAD_2)
	},
	{
		TRM_CFG_CORE_DUMP_EN("clust-core-3-dtcm",
				     CLUST_CORE_X_DTCM(3), CLUST_CORE_X_XTCM_LEN,
				     0x0, CLUST_CORE_X_XTCM_LEN,
				     0, CORE_OFFLOAD_3)
	},
	{
		TRM_CFG("top-core-m-itcm",
			TOP_CORE_M_ITCM, TOP_CORE_M_XTCM_LEN,
			0x0, TOP_CORE_M_XTCM_LEN,
			0)
	},
	{
		TRM_CFG("clust-core-0-itcm",
			CLUST_CORE_X_ITCM(0), CLUST_CORE_X_XTCM_LEN,
			0x0, CLUST_CORE_X_XTCM_LEN,
			0)
	},
	{
		TRM_CFG("clust-core-1-itcm",
			CLUST_CORE_X_ITCM(1), CLUST_CORE_X_XTCM_LEN,
			0x0, CLUST_CORE_X_XTCM_LEN,
			0)
	},
	{
		TRM_CFG("clust-core-2-itcm",
			CLUST_CORE_X_ITCM(2), CLUST_CORE_X_XTCM_LEN,
			0x0, CLUST_CORE_X_XTCM_LEN,
			0)
	},
	{
		TRM_CFG("clust-core-3-itcm",
			CLUST_CORE_X_ITCM(3), CLUST_CORE_X_XTCM_LEN,
			0x0, CLUST_CORE_X_XTCM_LEN,
			0)
	},
	{
		TRM_CFG("top-l2sram",
			TOP_L2SRAM, TOP_L2SRAM_LEN,
			0x0, TOP_L2SRAM_LEN,
			0)
	},
	{
		TRM_CFG_EN("clust-l2sram",
			   CLUST_L2SRAM, CLUST_L2SRAM_LEN,
			   0x38000, 0x8000,
			   0)
	},
};

static struct trm_hw_config mcu_trm_hw_cfg = {
	.trm_cfgs = mcu_trm_cfgs,
	.cfg_len = ARRAY_SIZE(mcu_trm_cfgs),
	.trm_hw_dump = mcu_trm_hw_dump,
};

static inline void npu_write(u32 reg, u32 val)
{
	writel(val, npu.base + reg);
}

static inline void npu_set(u32 reg, u32 mask)
{
	setbits(npu.base + reg, mask);
}

static inline void npu_clr(u32 reg, u32 mask)
{
	clrbits(npu.base + reg, mask);
}

static inline void npu_rmw(u32 reg, u32 mask, u32 val)
{
	clrsetbits(npu.base + reg, mask, val);
}

static inline u32 npu_read(u32 reg)
{
	return readl(npu.base + reg);
}

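/* copy a word-aligned TOPS register window into the TRM dump destination */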
static int mcu_trm_hw_dump(void *dst, u32 start_addr, u32 len)
{
	u32 ofs;

	if (unlikely(!dst))
		return -ENODEV;

	for (ofs = 0; len > 0; len -= 0x4, ofs += 0x4)
		writel(npu_read(start_addr + ofs), dst + ofs);

	return 0;
}

static int mcu_power_on(void)
{
	int ret = 0;

	ret = clk_prepare_enable(npu.bus_clk);
	if (ret) {
		TOPS_ERR("bus clk enable failed: %d\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(npu.sram_clk);
	if (ret) {
		TOPS_ERR("sram clk enable failed: %d\n", ret);
		goto err_disable_bus_clk;
	}

	ret = clk_prepare_enable(npu.xdma_clk);
	if (ret) {
		TOPS_ERR("xdma clk enable failed: %d\n", ret);
		goto err_disable_sram_clk;
	}

	ret = clk_prepare_enable(npu.offload_clk);
	if (ret) {
		TOPS_ERR("offload clk enable failed: %d\n", ret);
		goto err_disable_xdma_clk;
	}

	ret = clk_prepare_enable(npu.mgmt_clk);
	if (ret) {
		TOPS_ERR("mgmt clk enable failed: %d\n", ret);
		goto err_disable_offload_clk;
	}

	ret = pm_runtime_get_sync(tops_dev);
	if (ret < 0) {
		TOPS_ERR("power on failed: %d\n", ret);
		goto err_disable_mgmt_clk;
	}

	return ret;

err_disable_mgmt_clk:
	clk_disable_unprepare(npu.mgmt_clk);

err_disable_offload_clk:
	clk_disable_unprepare(npu.offload_clk);

err_disable_xdma_clk:
	clk_disable_unprepare(npu.xdma_clk);

err_disable_sram_clk:
	clk_disable_unprepare(npu.sram_clk);

err_disable_bus_clk:
	clk_disable_unprepare(npu.bus_clk);

	return ret;
}

static void mcu_power_off(void)
{
	pm_runtime_put_sync(tops_dev);

	clk_disable_unprepare(npu.mgmt_clk);

	clk_disable_unprepare(npu.offload_clk);

	clk_disable_unprepare(npu.xdma_clk);

	clk_disable_unprepare(npu.sram_clk);

	clk_disable_unprepare(npu.bus_clk);
}

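/*
 * Mailbox handshake for a state transition: ctrl_msg is sent to every TOPS
 * core whose ctrl_done bit is still clear; each core acks through the recv
 * mbox callbacks at the bottom of this file, which set that core's
 * ctrl_done bit and wake mcu_state_wait_done.
 */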
static inline int mcu_state_send_cmd(struct mcu_state *state)
{
	unsigned long flag;
	enum core_id core;
	u32 ctrl_cpu;
	int ret = 0;

	spin_lock_irqsave(&npu.ctrl_done_lock, flag);
	ctrl_cpu = (~npu.ctrl_done) & CORE_TOPS_MASK;
	spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);

	if (ctrl_cpu & BIT(CORE_MGMT)) {
		ret = mbox_send_msg_no_wait(&npu.send_mgmt_mbox_dev,
					    &npu.ctrl_msg);
		if (ret)
			goto out;
	}

	for (core = CORE_OFFLOAD_0; core < CORE_OFFLOAD_NUM; core++) {
		if (ctrl_cpu & BIT(core)) {
			ret = mbox_send_msg_no_wait(&npu.send_offload_mbox_dev[core],
						    &npu.ctrl_msg);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

static inline void mcu_state_trans_start(void)
{
	mod_timer(&npu.mcu_ctrl_timer,
		  jiffies + msecs_to_jiffies(MCU_STATE_TRANS_TIMEOUT));
}

static inline void mcu_state_trans_end(void)
{
	del_timer_sync(&npu.mcu_ctrl_timer);
}

static inline void mcu_state_trans_err(void)
{
	wake_up_interruptible(&npu.mcu_ctrl_wait_done);
}

static inline int mcu_state_wait_complete(void (*state_complete_cb)(void))
{
	unsigned long flag;
	int ret = 0;

	wait_event_interruptible(npu.mcu_state_wait_done,
				 (npu.ctrl_done == CORE_TOPS_MASK) ||
				 (npu.state_trans_fail));

	if (npu.state_trans_fail)
		return -EINVAL;

	npu.ctrl_msg.msg1 = npu.ctrl_done_cmd;

	spin_lock_irqsave(&npu.ctrl_done_lock, flag);
	npu.ctrl_done |= BIT(MCU_CTRL_DONE_BIT);
	spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);

	if (state_complete_cb)
		state_complete_cb();

	wake_up_interruptible(&npu.mcu_ctrl_wait_done);

	return ret;
}

static inline void mcu_state_prepare_wait(enum mcu_cmd_type done_cmd)
{
	unsigned long flag;

	/* if the user does not specify which CPUs to control, control all of them */
	spin_lock_irqsave(&npu.ctrl_done_lock, flag);
	if ((npu.ctrl_done & CORE_TOPS_MASK) == CORE_TOPS_MASK)
		npu.ctrl_done = 0;
	spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);

	npu.ctrl_done_cmd = done_cmd;
}
static struct mcu_state *mtk_tops_mcu_state_shutdown_trans(u32 mcu_act,
							   struct mcu_state *state)
{
	if (mcu_act == MCU_ACT_INIT)
		return &mcu_states[MCU_STATE_TYPE_INIT];

	return ERR_PTR(-ENODEV);
}

static int mtk_tops_mcu_state_shutdown_enter(struct mcu_state *state)
{
	mcu_power_off();

	mtk_tops_tdma_record_last_state();

	mtk_tops_fw_clean_up();

	npu.mcu_bring_up_done = false;

	if (npu.shuting_down) {
		npu.shuting_down = false;
		wake_up_interruptible(&npu.mcu_ctrl_wait_done);

		return 0;
	}

	if (npu.in_recover || npu.in_reset)
		mcu_ctrl_issue_pending_act(MCU_ACT_INIT);

	return 0;
}

static int mtk_tops_mcu_state_shutdown_leave(struct mcu_state *state)
{
	return 0;
}

static struct mcu_state *mtk_tops_mcu_state_init_trans(u32 mcu_act,
						       struct mcu_state *state)
{
	if (mcu_act == MCU_ACT_FREERUN)
		return &mcu_states[MCU_STATE_TYPE_FREERUN];
	else if (mcu_act == MCU_ACT_NETSTOP)
		return &mcu_states[MCU_STATE_TYPE_NETSTOP];

	return ERR_PTR(-ENODEV);
}

static void mtk_tops_mcu_state_init_enter_complete_cb(void)
{
	npu.mcu_bring_up_done = true;
	npu.in_reset = false;
	npu.in_recover = false;
	npu.netsys_fe_ser = false;

	mcu_ctrl_issue_pending_act(MCU_ACT_FREERUN);
}

static int mtk_tops_mcu_state_init_enter(struct mcu_state *state)
{
	int ret = 0;

	ret = mcu_power_on();
	if (ret)
		return ret;

	mtk_tops_mbox_clear_all_cmd();

	/* reset TDMA first */
	mtk_tops_tdma_reset();

	npu.ctrl_done = 0;
	mcu_state_prepare_wait(MCU_CMD_TYPE_INIT_DONE);

	ret = mtk_tops_fw_bring_up_default_cores();
	if (ret) {
		TOPS_ERR("bring up TOPS cores failed: %d\n", ret);
		goto out;
	}

	ret = mcu_state_wait_complete(mtk_tops_mcu_state_init_enter_complete_cb);
	if (unlikely(ret))
		TOPS_ERR("init enter failed\n");

out:
	return ret;
}

static int mtk_tops_mcu_state_init_leave(struct mcu_state *state)
{
	int ret;

	mtk_tops_misc_set_ppe_num();

	mtk_tops_tdma_enable();

	mtk_tops_tnl_offload_recover();

	/* enable cls, dipfilter */
	ret = mtk_pce_enable();
	if (ret) {
		TOPS_ERR("netsys enable failed: %d\n", ret);
		return ret;
	}

	return ret;
}

static struct mcu_state *mtk_tops_mcu_state_freerun_trans(u32 mcu_act,
							  struct mcu_state *state)
{
	if (mcu_act == MCU_ACT_RESET)
		return &mcu_states[MCU_STATE_TYPE_RESET];
	else if (mcu_act == MCU_ACT_STALL)
		return &mcu_states[MCU_STATE_TYPE_STALL];
	else if (mcu_act == MCU_ACT_NETSTOP)
		return &mcu_states[MCU_STATE_TYPE_NETSTOP];

	return ERR_PTR(-ENODEV);
}

static int mtk_tops_mcu_state_freerun_enter(struct mcu_state *state)
{
	/* TODO: switch to HW path */

	return 0;
}

static int mtk_tops_mcu_state_freerun_leave(struct mcu_state *state)
{
	/* TODO: switch to SW path */

	return 0;
}

static struct mcu_state *mtk_tops_mcu_state_stall_trans(u32 mcu_act,
							struct mcu_state *state)
{
	if (mcu_act == MCU_ACT_RESET)
		return &mcu_states[MCU_STATE_TYPE_RESET];
	else if (mcu_act == MCU_ACT_FREERUN)
		return &mcu_states[MCU_STATE_TYPE_FREERUN];
	else if (mcu_act == MCU_ACT_NETSTOP)
		return &mcu_states[MCU_STATE_TYPE_NETSTOP];

	return ERR_PTR(-ENODEV);
}

static int mtk_tops_mcu_state_stall_enter(struct mcu_state *state)
{
	int ret = 0;

	mcu_state_prepare_wait(MCU_CMD_TYPE_STALL_DONE);

	ret = mcu_state_send_cmd(state);
	if (ret)
		return ret;

	ret = mcu_state_wait_complete(NULL);
	if (ret)
		TOPS_ERR("stall enter failed\n");

	return ret;
}

static int mtk_tops_mcu_state_stall_leave(struct mcu_state *state)
{
	int ret = 0;

	/*
	 * if the next state is going to stop the network, don't ask the MCU
	 * to freerun, since netstop is about to abort the stall anyway
	 */
	if (npu.next_state->state == MCU_STATE_TYPE_NETSTOP)
		return 0;

	mcu_state_prepare_wait(MCU_CMD_TYPE_FREERUN_DONE);

	ret = mcu_state_send_cmd(state);
	if (ret)
		return ret;

	ret = mcu_state_wait_complete(NULL);
	if (ret)
		TOPS_ERR("stall leave failed\n");

	return ret;
}

static struct mcu_state *mtk_tops_mcu_state_netstop_trans(u32 mcu_act,
							  struct mcu_state *state)
{
	if (mcu_act == MCU_ACT_ABNORMAL)
		return &mcu_states[MCU_STATE_TYPE_ABNORMAL];
	else if (mcu_act == MCU_ACT_RESET)
		return &mcu_states[MCU_STATE_TYPE_RESET];
	else if (mcu_act == MCU_ACT_SHUTDOWN)
		return &mcu_states[MCU_STATE_TYPE_SHUTDOWN];

	return ERR_PTR(-ENODEV);
}

static int mtk_tops_mcu_state_netstop_enter(struct mcu_state *state)
{
	mtk_tops_tnl_offload_flush();

	mtk_pce_disable();

	mtk_tops_tdma_disable();

	if (npu.in_recover)
		mcu_ctrl_issue_pending_act(MCU_ACT_ABNORMAL);
	else if (npu.in_reset)
		mcu_ctrl_issue_pending_act(MCU_ACT_RESET);
	else
		mcu_ctrl_issue_pending_act(MCU_ACT_SHUTDOWN);

	return 0;
}

static int mtk_tops_mcu_state_netstop_leave(struct mcu_state *state)
{
	return 0;
}

static struct mcu_state *mtk_tops_mcu_state_reset_trans(u32 mcu_act,
							struct mcu_state *state)
{
	if (mcu_act == MCU_ACT_FREERUN)
		return &mcu_states[MCU_STATE_TYPE_FREERUN];
	else if (mcu_act == MCU_ACT_SHUTDOWN)
		return &mcu_states[MCU_STATE_TYPE_SHUTDOWN];
	else if (mcu_act == MCU_ACT_NETSTOP)
		/*
		 * since netstop is already done before reset,
		 * there is no need to do it again; go to abnormal directly
		 */
		return &mcu_states[MCU_STATE_TYPE_ABNORMAL];

	return ERR_PTR(-ENODEV);
}

static int mtk_tops_mcu_state_reset_enter(struct mcu_state *state)
{
	int ret = 0;

	mcu_state_prepare_wait(MCU_CMD_TYPE_ASSERT_RESET_DONE);

	if (!npu.netsys_fe_ser) {
		ret = mcu_state_send_cmd(state);
		if (ret)
			return ret;
	} else {
		/* skip asserting MCU reset on NETSYS SER */
		npu.ctrl_done = CORE_TOPS_MASK;
	}

	ret = mcu_state_wait_complete(NULL);
	if (ret)
		TOPS_ERR("assert reset failed\n");

	return ret;
}

static int mtk_tops_mcu_state_reset_leave(struct mcu_state *state)
{
	int ret = 0;

	/*
	 * if the next state is abnormal or shutdown,
	 * there is no need to let the MCU do the release reset cmd
	 */
	if (npu.next_state->state == MCU_STATE_TYPE_ABNORMAL
	    || npu.next_state->state == MCU_STATE_TYPE_SHUTDOWN)
		return 0;

	mcu_state_prepare_wait(MCU_CMD_TYPE_RELEASE_RESET_DONE);

	ret = mcu_state_send_cmd(state);
	if (ret)
		return ret;

	ret = mcu_state_wait_complete(NULL);
	if (ret)
		TOPS_ERR("release reset failed\n");

	return ret;
}

static struct mcu_state *mtk_tops_mcu_state_abnormal_trans(u32 mcu_act,
							   struct mcu_state *state)
{
	if (mcu_act == MCU_ACT_SHUTDOWN)
		return &mcu_states[MCU_STATE_TYPE_SHUTDOWN];

	return ERR_PTR(-ENODEV);
}

static int mtk_tops_mcu_state_abnormal_enter(struct mcu_state *state)
{
	mcu_ctrl_issue_pending_act(MCU_ACT_SHUTDOWN);

	return 0;
}

static int mtk_tops_mcu_state_abnormal_leave(struct mcu_state *state)
{
	if (npu.mcu_bring_up_done)
		mtk_trm_dump(TRM_RSN_MCU_STATE_ACT_FAIL);

	return 0;
}

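/*
 * Run one transition: pick the next state via the current state's _trans
 * handler, run the current state's leave, then the next state's enter.
 * Each leave/enter leg is guarded by mcu_ctrl_timer, so a stuck mailbox
 * handshake is flagged by mtk_tops_mcu_state_trans_timeout() instead of
 * hanging forever.
 */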
static int mtk_tops_mcu_state_transition(u32 mcu_act)
{
	int ret = 0;

	npu.next_state = npu.cur_state->state_trans(mcu_act, npu.cur_state);
	if (IS_ERR(npu.next_state))
		return PTR_ERR(npu.next_state);

	/* skip mcu_state leave if current MCU_ACT has failure */
	if (unlikely(mcu_act == MCU_ACT_ABNORMAL))
		goto skip_state_leave;

	mcu_state_trans_start();
	if (npu.cur_state->leave) {
		ret = npu.cur_state->leave(npu.cur_state);
		if (ret) {
			TOPS_ERR("state%d transition leave failed: %d\n",
				 npu.cur_state->state, ret);
			goto state_trans_end;
		}
	}
	mcu_state_trans_end();

skip_state_leave:
	npu.cur_state = npu.next_state;

	mcu_state_trans_start();
	if (npu.cur_state->enter) {
		ret = npu.cur_state->enter(npu.cur_state);
		if (ret) {
			TOPS_ERR("state%d transition enter failed: %d\n",
				 npu.cur_state->state, ret);
			goto state_trans_end;
		}
	}

state_trans_end:
	mcu_state_trans_end();

	return ret;
}

static void mtk_tops_mcu_state_trans_timeout(struct timer_list *timer)
{
	TOPS_ERR("state%d transition timeout!\n", npu.cur_state->state);
	TOPS_ERR("ctrl_done=0x%x ctrl_msg.msg1: 0x%x\n",
		 npu.ctrl_done, npu.ctrl_msg.msg1);

	npu.state_trans_fail = true;

	wake_up_interruptible(&npu.mcu_state_wait_done);
}

static inline int mcu_ctrl_cmd_prepare(enum mcu_cmd_type cmd,
				       struct mcu_ctrl_cmd *mcmd)
{
	if (!mcmd || cmd == MCU_CMD_TYPE_NULL || cmd >= __MCU_CMD_TYPE_MAX)
		return -EINVAL;

	lockdep_assert_held(&npu.mcu_ctrl_lock);

	npu.ctrl_msg.msg1 = cmd;
	npu.ctrl_msg.msg2 = mcmd->e;
	npu.ctrl_msg.msg3 = mcmd->arg[0];
	npu.ctrl_msg.msg4 = mcmd->arg[1];

	if (mcmd->core_mask) {
		unsigned long flag;

		spin_lock_irqsave(&npu.ctrl_done_lock, flag);
		npu.ctrl_done = ~(CORE_TOPS_MASK & mcmd->core_mask);
		npu.ctrl_done &= CORE_TOPS_MASK;
		spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);
	}

	return 0;
}

static inline void mcu_ctrl_callback(void (*callback)(void *param), void *param)
{
	if (callback)
		callback(param);
}

static inline void mcu_ctrl_issue_pending_act(u32 mcu_act)
{
	unsigned long flag;

	spin_lock_irqsave(&npu.pending_act_lock, flag);

	npu.pending_act |= mcu_act;

	spin_unlock_irqrestore(&npu.pending_act_lock, flag);

	wake_up_interruptible(&npu.mcu_ctrl_wait_act);
}

static inline enum mcu_act mcu_ctrl_pop_pending_act(void)
{
	unsigned long flag;
	enum mcu_act act;

	spin_lock_irqsave(&npu.pending_act_lock, flag);

	act = ffs(npu.pending_act) - 1;
	npu.pending_act &= ~BIT(act);

	spin_unlock_irqrestore(&npu.pending_act_lock, flag);

	return act;
}

static inline bool mcu_ctrl_is_complete(enum mcu_cmd_type done_cmd)
{
	unsigned long flag;
	bool ctrl_done;

	spin_lock_irqsave(&npu.ctrl_done_lock, flag);
	ctrl_done = npu.ctrl_done == MCU_CTRL_DONE && npu.ctrl_msg.msg1 == done_cmd;
	spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);

	return ctrl_done;
}

static inline void mcu_ctrl_done(enum core_id core)
{
	unsigned long flag;

	if (core > CORE_MGMT)
		return;

	spin_lock_irqsave(&npu.ctrl_done_lock, flag);
	npu.ctrl_done |= BIT(core);
	spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);
}

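/*
 * Control-plane worker thread: sleeps until an MCU_ACT_* bit is queued,
 * then feeds it through the state machine one action at a time.
 */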
static int mcu_ctrl_task(void *data)
{
	enum mcu_act act;
	int ret;

	while (1) {
		wait_event_interruptible(npu.mcu_ctrl_wait_act,
					 npu.pending_act || kthread_should_stop());

		if (kthread_should_stop()) {
			TOPS_INFO("tops mcu ctrl task stop\n");
			break;
		}

		act = mcu_ctrl_pop_pending_act();
		if (unlikely(act >= __MCU_ACT_MAX)) {
			TOPS_ERR("invalid MCU act: %u\n", act);
			continue;
		}

		/*
		 * ensure that the act is submitted by either
		 * mtk_tops_mcu_stall, mtk_tops_mcu_reset or mtk_tops_mcu_cold_boot.
		 * If mcu_act is ABNORMAL, it must be caused by a state transition
		 * triggered by the APIs above.
		 * As a result, mcu_ctrl_lock must be held before mcu_ctrl_task starts.
		 */
		lockdep_assert_held(&npu.mcu_ctrl_lock);

		if (unlikely(!npu.cur_state->state_trans)) {
			TOPS_ERR("cur state has no state_trans()\n");
			WARN_ON(1);
		}

		ret = mtk_tops_mcu_state_transition(BIT(act));
		if (ret) {
			npu.state_trans_fail = true;

			mcu_state_trans_err();
		}
	}
	return 0;
}

bool mtk_tops_mcu_alive(void)
{
	return npu.mcu_bring_up_done && !npu.in_reset && !npu.state_trans_fail;
}

bool mtk_tops_mcu_bring_up_done(void)
{
	return npu.mcu_bring_up_done;
}

bool mtk_tops_mcu_netsys_fe_rst(void)
{
	return npu.netsys_fe_ser;
}

static int mtk_tops_mcu_wait_done(enum mcu_cmd_type done_cmd)
{
	int ret = 0;

	wait_event_interruptible(npu.mcu_ctrl_wait_done,
				 mcu_ctrl_is_complete(done_cmd)
				 || npu.state_trans_fail);

	if (npu.state_trans_fail)
		return -EINVAL;

	return ret;
}

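/*
 * Stall the TOPS cores (all of them, or those in mcmd->core_mask), run the
 * caller's callback while they are stalled, then return them to freerun.
 * Any failure schedules recover_work. A minimal sketch of a caller, with
 * hypothetical names (my_cb/my_param are placeholders):
 *
 *	struct mcu_ctrl_cmd mcmd = { .e = event /\* some MCU_EVENT_TYPE_* *\/ };
 *
 *	ret = mtk_tops_mcu_stall(&mcmd, my_cb, my_param);
 */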
int mtk_tops_mcu_stall(struct mcu_ctrl_cmd *mcmd,
		       void (*callback)(void *param), void *param)
{
	int ret = 0;

	if (unlikely(!npu.mcu_bring_up_done || npu.state_trans_fail))
		return -EBUSY;

	if (unlikely(!mcmd || mcmd->e >= __MCU_EVENT_TYPE_MAX))
		return -EINVAL;

	mutex_lock(&npu.mcu_ctrl_lock);

	/* go to stall state */
	ret = mcu_ctrl_cmd_prepare(MCU_CMD_TYPE_STALL, mcmd);
	if (ret)
		goto unlock;

	mcu_ctrl_issue_pending_act(MCU_ACT_STALL);

	ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_STALL_DONE);
	if (ret) {
		TOPS_ERR("tops stall failed: %d\n", ret);
		goto recover_mcu;
	}

	mcu_ctrl_callback(callback, param);

	/* go back to freerun state */
	ret = mcu_ctrl_cmd_prepare(MCU_CMD_TYPE_FREERUN, mcmd);
	if (ret)
		goto recover_mcu;

	mcu_ctrl_issue_pending_act(MCU_ACT_FREERUN);

	ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_FREERUN_DONE);
	if (ret) {
		TOPS_ERR("tops freerun failed: %d\n", ret);
		goto recover_mcu;
	}

	/* stall/freerun sequence successfully done */
	goto unlock;

recover_mcu:
	schedule_work(&npu.recover_work);

unlock:
	mutex_unlock(&npu.mcu_ctrl_lock);

	return ret;
}

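/*
 * Assert MCU reset around the caller's callback. For WDT timeout and FE
 * reset events the MCU is shut down and rebooted through the INIT path;
 * for any other event the reset is simply released and the cores go back
 * to freerun.
 */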
int mtk_tops_mcu_reset(struct mcu_ctrl_cmd *mcmd,
		       void (*callback)(void *param), void *param)
{
	int ret = 0;

	if (unlikely(!npu.mcu_bring_up_done || npu.state_trans_fail))
		return -EBUSY;

	if (unlikely(!mcmd || mcmd->e >= __MCU_EVENT_TYPE_MAX))
		return -EINVAL;

	mutex_lock(&npu.mcu_ctrl_lock);

	npu.in_reset = true;
	if (mcmd->e == MCU_EVENT_TYPE_FE_RESET)
		npu.netsys_fe_ser = true;

	ret = mcu_ctrl_cmd_prepare(MCU_CMD_TYPE_ASSERT_RESET, mcmd);
	if (ret)
		goto unlock;

	mcu_ctrl_issue_pending_act(MCU_ACT_NETSTOP);

	ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_ASSERT_RESET_DONE);
	if (ret) {
		TOPS_ERR("tops assert reset failed: %d\n", ret);
		goto recover_mcu;
	}

	mcu_ctrl_callback(callback, param);

	switch (mcmd->e) {
	case MCU_EVENT_TYPE_WDT_TIMEOUT:
	case MCU_EVENT_TYPE_FE_RESET:
		mcu_ctrl_issue_pending_act(MCU_ACT_SHUTDOWN);

		ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_INIT_DONE);
		if (ret)
			goto recover_mcu;

		break;
	default:
		ret = mcu_ctrl_cmd_prepare(MCU_CMD_TYPE_RELEASE_RESET, mcmd);
		if (ret)
			goto recover_mcu;

		mcu_ctrl_issue_pending_act(MCU_ACT_FREERUN);

		ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_RELEASE_RESET_DONE);
		if (ret)
			goto recover_mcu;

		break;
	}

	goto unlock;

recover_mcu:
	schedule_work(&npu.recover_work);

unlock:
	mutex_unlock(&npu.mcu_ctrl_lock);

	return ret;
}

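/*
 * Recovery worker: rebuilds the MCU from whatever state a failure left it
 * in, retrying the netstop -> shutdown -> init bring-up once per second
 * until INIT_DONE is observed or a shutdown request aborts the loop.
 */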
static void mtk_tops_mcu_recover_work(struct work_struct *work)
{
	int ret;

	mutex_lock(&npu.mcu_ctrl_lock);

	if (!npu.mcu_bring_up_done && !npu.in_reset && !npu.state_trans_fail)
		mcu_ctrl_issue_pending_act(MCU_ACT_INIT);
	else if (npu.in_reset || npu.state_trans_fail)
		mcu_ctrl_issue_pending_act(MCU_ACT_NETSTOP);

	npu.state_trans_fail = false;
	npu.in_recover = true;

	while ((ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_INIT_DONE))) {
		if (npu.shuting_down)
			goto unlock;

		npu.mcu_bring_up_done = false;
		npu.state_trans_fail = false;
		TOPS_ERR("bring up failed: %d\n", ret);

		msleep(1000);

		mcu_ctrl_issue_pending_act(MCU_ACT_NETSTOP);
	}

unlock:
	mutex_unlock(&npu.mcu_ctrl_lock);
}

static int mtk_tops_mcu_register_mbox(void)
{
	int ret;
	int i;

	ret = register_mbox_dev(MBOX_SEND, &npu.send_mgmt_mbox_dev);
	if (ret) {
		TOPS_ERR("register mcu_ctrl mgmt mbox send failed: %d\n", ret);
		return ret;
	}

	ret = register_mbox_dev(MBOX_RECV, &npu.recv_mgmt_mbox_dev);
	if (ret) {
		TOPS_ERR("register mcu_ctrl mgmt mbox recv failed: %d\n", ret);
		goto err_unregister_mgmt_mbox_send;
	}

	for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
		ret = register_mbox_dev(MBOX_SEND, &npu.send_offload_mbox_dev[i]);
		if (ret) {
			TOPS_ERR("register mcu_ctrl offload %d mbox send failed: %d\n",
				 i, ret);
			goto err_unregister_offload_mbox;
		}

		ret = register_mbox_dev(MBOX_RECV, &npu.recv_offload_mbox_dev[i]);
		if (ret) {
			TOPS_ERR("register mcu_ctrl offload %d mbox recv failed: %d\n",
				 i, ret);
			unregister_mbox_dev(MBOX_SEND, &npu.send_offload_mbox_dev[i]);
			goto err_unregister_offload_mbox;
		}
	}

	return ret;

err_unregister_offload_mbox:
	for (i -= 1; i >= 0; i--) {
		unregister_mbox_dev(MBOX_RECV, &npu.recv_offload_mbox_dev[i]);
		unregister_mbox_dev(MBOX_SEND, &npu.send_offload_mbox_dev[i]);
	}

	unregister_mbox_dev(MBOX_RECV, &npu.recv_mgmt_mbox_dev);

err_unregister_mgmt_mbox_send:
	unregister_mbox_dev(MBOX_SEND, &npu.send_mgmt_mbox_dev);

	return ret;
}

static void mtk_tops_mcu_unregister_mbox(void)
{
	int i;

	unregister_mbox_dev(MBOX_SEND, &npu.send_mgmt_mbox_dev);
	unregister_mbox_dev(MBOX_RECV, &npu.recv_mgmt_mbox_dev);

	for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
		unregister_mbox_dev(MBOX_SEND, &npu.send_offload_mbox_dev[i]);
		unregister_mbox_dev(MBOX_RECV, &npu.recv_offload_mbox_dev[i]);
	}
}

static void mtk_tops_mcu_shutdown(void)
{
	npu.shuting_down = true;

	mutex_lock(&npu.mcu_ctrl_lock);

	mcu_ctrl_issue_pending_act(MCU_ACT_NETSTOP);

	wait_event_interruptible(npu.mcu_ctrl_wait_done,
				 !npu.mcu_bring_up_done && !npu.shuting_down);

	mutex_unlock(&npu.mcu_ctrl_lock);
}

/* TODO: should be implemented to not block other modules' init tasks */
static int mtk_tops_mcu_cold_boot(void)
{
	int ret = 0;

	npu.cur_state = &mcu_states[MCU_STATE_TYPE_SHUTDOWN];

	mutex_lock(&npu.mcu_ctrl_lock);

	mcu_ctrl_issue_pending_act(MCU_ACT_INIT);
	ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_INIT_DONE);

	mutex_unlock(&npu.mcu_ctrl_lock);
	if (!ret)
		return ret;

	TOPS_ERR("cold boot failed: %d\n", ret);

	schedule_work(&npu.recover_work);

	return 0;
}

int mtk_tops_mcu_bring_up(struct platform_device *pdev)
{
	int ret = 0;

	pm_runtime_enable(&pdev->dev);

	ret = mtk_tops_mcu_register_mbox();
	if (ret) {
		TOPS_ERR("register mcu ctrl mbox failed: %d\n", ret);
		goto runtime_disable;
	}

	npu.mcu_ctrl_thread = kthread_run(mcu_ctrl_task, NULL, "tops mcu ctrl task");
	if (IS_ERR(npu.mcu_ctrl_thread)) {
		ret = PTR_ERR(npu.mcu_ctrl_thread);
		TOPS_ERR("mcu ctrl thread create failed: %d\n", ret);
		goto err_unregister_mbox;
	}

	ret = mtk_tops_mcu_cold_boot();
	if (ret) {
		TOPS_ERR("cold boot failed: %d\n", ret);
		goto err_stop_mcu_ctrl_thread;
	}

	return ret;

err_stop_mcu_ctrl_thread:
	kthread_stop(npu.mcu_ctrl_thread);

err_unregister_mbox:
	mtk_tops_mcu_unregister_mbox();

runtime_disable:
	pm_runtime_disable(&pdev->dev);

	return ret;
}

void mtk_tops_mcu_tear_down(struct platform_device *pdev)
{
	mtk_tops_mcu_shutdown();

	kthread_stop(npu.mcu_ctrl_thread);

	/* TODO: stop mcu? */

	mtk_tops_mcu_unregister_mbox();

	pm_runtime_disable(&pdev->dev);
}

static int mtk_tops_mcu_dts_init(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct resource *res = NULL;
	int ret = 0;

	if (!node)
		return -EINVAL;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tops-base");
	if (!res) {
		TOPS_ERR("can not find tops base\n");
		return -ENXIO;
	}

	npu.base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!npu.base) {
		TOPS_ERR("map tops base failed\n");
		return -ENOMEM;
	}

	npu.bus_clk = devm_clk_get(tops_dev, "bus");
	if (IS_ERR(npu.bus_clk)) {
		TOPS_ERR("get bus clk failed: %ld\n", PTR_ERR(npu.bus_clk));
		return PTR_ERR(npu.bus_clk);
	}

	npu.sram_clk = devm_clk_get(tops_dev, "sram");
	if (IS_ERR(npu.sram_clk)) {
		TOPS_ERR("get sram clk failed: %ld\n", PTR_ERR(npu.sram_clk));
		return PTR_ERR(npu.sram_clk);
	}

	npu.xdma_clk = devm_clk_get(tops_dev, "xdma");
	if (IS_ERR(npu.xdma_clk)) {
		TOPS_ERR("get xdma clk failed: %ld\n", PTR_ERR(npu.xdma_clk));
		return PTR_ERR(npu.xdma_clk);
	}

	npu.offload_clk = devm_clk_get(tops_dev, "offload");
	if (IS_ERR(npu.offload_clk)) {
		TOPS_ERR("get offload clk failed: %ld\n", PTR_ERR(npu.offload_clk));
		return PTR_ERR(npu.offload_clk);
	}

	npu.mgmt_clk = devm_clk_get(tops_dev, "mgmt");
	if (IS_ERR(npu.mgmt_clk)) {
		TOPS_ERR("get mgmt clk failed: %ld\n", PTR_ERR(npu.mgmt_clk));
		return PTR_ERR(npu.mgmt_clk);
	}

	return ret;
}

static void mtk_tops_mcu_pm_domain_detach(void)
{
	int i = npu.pd_num;

	while (--i >= 0) {
		device_link_del(npu.pd_links[i]);
		dev_pm_domain_detach(npu.pd_devices[i], true);
	}
}

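/*
 * When the node has more than one power domain, the implicit single-domain
 * attach done by the driver core is not enough, so each domain is attached
 * by index and tied to the device with a PM runtime device link.
 */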
static int mtk_tops_mcu_pm_domain_attach(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret = 0;
	int i;

	npu.pd_num = of_count_phandle_with_args(dev->of_node,
						"power-domains",
						"#power-domain-cells");

	/* if only one power domain exists, there is no need to link devices */
	if (npu.pd_num <= 1)
		return 0;

	npu.pd_devices = devm_kmalloc_array(dev, npu.pd_num,
					    sizeof(*npu.pd_devices),
					    GFP_KERNEL);
	if (!npu.pd_devices)
		return -ENOMEM;

	npu.pd_links = devm_kmalloc_array(dev, npu.pd_num,
					  sizeof(*npu.pd_links),
					  GFP_KERNEL);
	if (!npu.pd_links)
		return -ENOMEM;

	for (i = 0; i < npu.pd_num; i++) {
		npu.pd_devices[i] = dev_pm_domain_attach_by_id(dev, i);
		if (IS_ERR(npu.pd_devices[i])) {
			ret = PTR_ERR(npu.pd_devices[i]);
			goto pm_attach_fail;
		}

		npu.pd_links[i] = device_link_add(dev, npu.pd_devices[i],
						  DL_FLAG_STATELESS |
						  DL_FLAG_PM_RUNTIME);
		if (!npu.pd_links[i]) {
			ret = -EINVAL;
			dev_pm_domain_detach(npu.pd_devices[i], false);
			goto pm_attach_fail;
		}
	}

	return 0;

pm_attach_fail:
	TOPS_ERR("attach power domain failed: %d\n", ret);

	while (--i >= 0) {
		device_link_del(npu.pd_links[i]);
		dev_pm_domain_detach(npu.pd_devices[i], false);
	}

	return ret;
}

int mtk_tops_mcu_init(struct platform_device *pdev)
{
	int ret = 0;

	dma_set_mask(tops_dev, DMA_BIT_MASK(32));

	ret = mtk_tops_mcu_dts_init(pdev);
	if (ret)
		return ret;

	ret = mtk_tops_mcu_pm_domain_attach(pdev);
	if (ret)
		return ret;

	INIT_WORK(&npu.recover_work, mtk_tops_mcu_recover_work);
	init_waitqueue_head(&npu.mcu_ctrl_wait_act);
	init_waitqueue_head(&npu.mcu_ctrl_wait_done);
	init_waitqueue_head(&npu.mcu_state_wait_done);
	spin_lock_init(&npu.pending_act_lock);
	spin_lock_init(&npu.ctrl_done_lock);
	mutex_init(&npu.mcu_ctrl_lock);
	timer_setup(&npu.mcu_ctrl_timer, mtk_tops_mcu_state_trans_timeout, 0);

	ret = mtk_trm_hw_config_register(TRM_TOPS, &mcu_trm_hw_cfg);
	if (ret) {
		TOPS_ERR("TRM register failed: %d\n", ret);
		return ret;
	}

	return ret;
}

void mtk_tops_mcu_deinit(struct platform_device *pdev)
{
	mtk_trm_hw_config_unregister(TRM_TOPS, &mcu_trm_hw_cfg);

	mtk_tops_mcu_pm_domain_detach();
}

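/*
 * Ack path for state-transition commands: each TOPS core answers ctrl_msg
 * on its own mailbox channel. A reply carrying the expected ctrl_done_cmd
 * marks that core done; anything else fails the whole transition.
 */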
static enum mbox_msg_cnt mtk_tops_ap_recv_mgmt_mbox_msg(struct mailbox_dev *mdev,
							struct mailbox_msg *msg)
{
	if (msg->msg1 == npu.ctrl_done_cmd)
		/* mcu side state transition success */
		mcu_ctrl_done(mdev->core);
	else
		/* mcu side state transition failed */
		npu.state_trans_fail = true;

	wake_up_interruptible(&npu.mcu_state_wait_done);

	return MBOX_NO_RET_MSG;
}

static enum mbox_msg_cnt mtk_tops_ap_recv_offload_mbox_msg(struct mailbox_dev *mdev,
							   struct mailbox_msg *msg)
{
	if (msg->msg1 == npu.ctrl_done_cmd)
		/* mcu side state transition success */
		mcu_ctrl_done(mdev->core);
	else
		/* mcu side state transition failed */
		npu.state_trans_fail = true;

	wake_up_interruptible(&npu.mcu_state_wait_done);

	return MBOX_NO_RET_MSG;
}