// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
 *
 * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>

#include "internal.h"
#include "mbox.h"
#include "mcu.h"
#include "tdma.h"
#include "tops.h"
#include "trm.h"

/* TDMA dump length */
#define TDMA_BASE_LEN (0x400)

static int tdma_trm_hw_dump(void *dst, u32 start_addr, u32 len);

struct tdma_hw {
	void __iomem *base;
	u32 start_ring;

	struct mailbox_dev mgmt_mdev;
	struct mailbox_dev offload_mdev[CORE_OFFLOAD_NUM];
};

struct tdma_hw tdma = {
	.mgmt_mdev = MBOX_SEND_MGMT_DEV(NET),
	.offload_mdev = {
		[CORE_OFFLOAD_0] = MBOX_SEND_OFFLOAD_DEV(0, NET),
		[CORE_OFFLOAD_1] = MBOX_SEND_OFFLOAD_DEV(1, NET),
		[CORE_OFFLOAD_2] = MBOX_SEND_OFFLOAD_DEV(2, NET),
		[CORE_OFFLOAD_3] = MBOX_SEND_OFFLOAD_DEV(3, NET),
	},
};

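/*
 * Thin MMIO accessors for the TDMA register window. All offsets are
 * relative to tdma.base, which already points at TDMA_BASE inside the
 * remapped frame engine register space (see mtk_tops_tdma_dts_init()).
 */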
static inline void tdma_write(u32 reg, u32 val)
{
	writel(val, tdma.base + reg);
}

static inline void tdma_set(u32 reg, u32 mask)
{
	setbits(tdma.base + reg, mask);
}

static inline void tdma_clr(u32 reg, u32 mask)
{
	clrbits(tdma.base + reg, mask);
}

static inline void tdma_rmw(u32 reg, u32 mask, u32 val)
{
	clrsetbits(tdma.base + reg, mask, val);
}

static inline u32 tdma_read(u32 reg)
{
	return readl(tdma.base + reg);
}

static struct trm_config tdma_trm_configs[] = {
	{
		TRM_CFG_EN("netsys-tdma",
			   TDMA_BASE, TDMA_BASE_LEN,
			   0x0, TDMA_BASE_LEN,
			   0)
	},
};

static struct trm_hw_config tdma_trm_hw_cfg = {
	.trm_cfgs = tdma_trm_configs,
	.cfg_len = ARRAY_SIZE(tdma_trm_configs),
	.trm_hw_dump = tdma_trm_hw_dump,
};

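/*
 * TRM dump callback: copy @len bytes of TDMA registers, starting at
 * @start_addr, into @dst one 32-bit word at a time. @dst is written with
 * writel(), so it is presumably an ioremap()'d buffer handed in by the
 * TRM core.
 */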
static int tdma_trm_hw_dump(void *dst, u32 start_addr, u32 len)
{
	u32 ofs;

	if (unlikely(!dst))
		return -ENODEV;

	for (ofs = 0; len > 0; len -= 0x4, ofs += 0x4)
		writel(tdma_read(start_addr + ofs), dst + ofs);

	return 0;
}

static inline void tdma_prefetch_enable(bool en)
{
	if (en) {
		tdma_set(TDMA_PREF_TX_CFG, PREF_EN);
		tdma_set(TDMA_PREF_RX_CFG, PREF_EN);
	} else {
		/* wait for prefetch idle */
		while ((tdma_read(TDMA_PREF_TX_CFG) & PREF_BUSY)
		       || (tdma_read(TDMA_PREF_RX_CFG) & PREF_BUSY))
			;

		tdma_write(TDMA_PREF_TX_CFG,
			   tdma_read(TDMA_PREF_TX_CFG) & (~PREF_EN));
		tdma_write(TDMA_PREF_RX_CFG,
			   tdma_read(TDMA_PREF_RX_CFG) & (~PREF_EN));
	}
}

static inline void tdma_writeback_enable(bool en)
{
	if (en) {
		tdma_set(TDMA_WRBK_TX_CFG, WRBK_EN);
		tdma_set(TDMA_WRBK_RX_CFG, WRBK_EN);
	} else {
		/* wait for write back idle */
		while ((tdma_read(TDMA_WRBK_TX_CFG) & WRBK_BUSY)
		       || (tdma_read(TDMA_WRBK_RX_CFG) & WRBK_BUSY))
			;

		tdma_write(TDMA_WRBK_TX_CFG,
			   tdma_read(TDMA_WRBK_TX_CFG) & (~WRBK_EN));
		tdma_write(TDMA_WRBK_RX_CFG,
			   tdma_read(TDMA_WRBK_RX_CFG) & (~WRBK_EN));
	}
}

static inline void tdma_assert_prefetch_reset(bool en)
{
	if (en) {
		tdma_set(TDMA_PREF_TX_FIFO_CFG0, PREF_TX_RING0_CLEAR);
		tdma_set(TDMA_PREF_RX_FIFO_CFG0,
			 PREF_RX_RINGX_CLEAR(0) | PREF_RX_RINGX_CLEAR(1));
		tdma_set(TDMA_PREF_RX_FIFO_CFG1,
			 PREF_RX_RINGX_CLEAR(2) | PREF_RX_RINGX_CLEAR(3));
	} else {
		tdma_clr(TDMA_PREF_TX_FIFO_CFG0, PREF_TX_RING0_CLEAR);
		tdma_clr(TDMA_PREF_RX_FIFO_CFG0,
			 PREF_RX_RINGX_CLEAR(0) | PREF_RX_RINGX_CLEAR(1));
		tdma_clr(TDMA_PREF_RX_FIFO_CFG1,
			 PREF_RX_RINGX_CLEAR(2) | PREF_RX_RINGX_CLEAR(3));
	}
}

static inline void tdma_assert_fifo_reset(bool en)
{
	if (en) {
		tdma_set(TDMA_TX_XDMA_FIFO_CFG0,
			 (PAR_FIFO_CLEAR
			  | CMD_FIFO_CLEAR
			  | DMAD_FIFO_CLEAR
			  | ARR_FIFO_CLEAR));
		tdma_set(TDMA_RX_XDMA_FIFO_CFG0,
			 (PAR_FIFO_CLEAR
			  | CMD_FIFO_CLEAR
			  | DMAD_FIFO_CLEAR
			  | ARR_FIFO_CLEAR
			  | LEN_FIFO_CLEAR
			  | WID_FIFO_CLEAR
			  | BID_FIFO_CLEAR));
	} else {
		tdma_clr(TDMA_TX_XDMA_FIFO_CFG0,
			 (PAR_FIFO_CLEAR
			  | CMD_FIFO_CLEAR
			  | DMAD_FIFO_CLEAR
			  | ARR_FIFO_CLEAR));
		tdma_clr(TDMA_RX_XDMA_FIFO_CFG0,
			 (PAR_FIFO_CLEAR
			  | CMD_FIFO_CLEAR
			  | DMAD_FIFO_CLEAR
			  | ARR_FIFO_CLEAR
			  | LEN_FIFO_CLEAR
			  | WID_FIFO_CLEAR
			  | BID_FIFO_CLEAR));
	}
}

static inline void tdma_assert_writeback_reset(bool en)
{
	if (en) {
		tdma_set(TDMA_WRBK_TX_FIFO_CFG0, WRBK_RING_CLEAR);
		tdma_set(TDMA_WRBK_RX_FIFO_CFGX(0), WRBK_RING_CLEAR);
		tdma_set(TDMA_WRBK_RX_FIFO_CFGX(1), WRBK_RING_CLEAR);
		tdma_set(TDMA_WRBK_RX_FIFO_CFGX(2), WRBK_RING_CLEAR);
		tdma_set(TDMA_WRBK_RX_FIFO_CFGX(3), WRBK_RING_CLEAR);
	} else {
		tdma_clr(TDMA_WRBK_TX_FIFO_CFG0, WRBK_RING_CLEAR);
		tdma_clr(TDMA_WRBK_RX_FIFO_CFGX(0), WRBK_RING_CLEAR);
		tdma_clr(TDMA_WRBK_RX_FIFO_CFGX(1), WRBK_RING_CLEAR);
		tdma_clr(TDMA_WRBK_RX_FIFO_CFGX(2), WRBK_RING_CLEAR);
		tdma_clr(TDMA_WRBK_RX_FIFO_CFGX(3), WRBK_RING_CLEAR);
	}
}

static inline void tdma_assert_prefetch_ring_reset(bool en)
{
	if (en) {
		tdma_set(TDMA_PREF_SIDX_CFG,
			 (TX_RING0_SIDX_CLR
			  | RX_RINGX_SIDX_CLR(0)
			  | RX_RINGX_SIDX_CLR(1)
			  | RX_RINGX_SIDX_CLR(2)
			  | RX_RINGX_SIDX_CLR(3)));
	} else {
		tdma_clr(TDMA_PREF_SIDX_CFG,
			 (TX_RING0_SIDX_CLR
			  | RX_RINGX_SIDX_CLR(0)
			  | RX_RINGX_SIDX_CLR(1)
			  | RX_RINGX_SIDX_CLR(2)
			  | RX_RINGX_SIDX_CLR(3)));
	}
}

static inline void tdma_assert_writeback_ring_reset(bool en)
{
	if (en) {
		tdma_set(TDMA_WRBK_SIDX_CFG,
			 (TX_RING0_SIDX_CLR
			  | RX_RINGX_SIDX_CLR(0)
			  | RX_RINGX_SIDX_CLR(1)
			  | RX_RINGX_SIDX_CLR(2)
			  | RX_RINGX_SIDX_CLR(3)));
	} else {
		tdma_clr(TDMA_WRBK_SIDX_CFG,
			 (TX_RING0_SIDX_CLR
			  | RX_RINGX_SIDX_CLR(0)
			  | RX_RINGX_SIDX_CLR(1)
			  | RX_RINGX_SIDX_CLR(2)
			  | RX_RINGX_SIDX_CLR(3)));
	}
}

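/*
 * tdma.start_ring shadows TDMA_TX_CTX_IDX_0 (the Tx CPU ring index): it is
 * cached here at init time and written back via
 * mtk_tops_tdma_record_last_state(), which is expected to be called by the
 * reset flow after the ring indices have been cleared.
 */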
static void mtk_tops_tdma_retrieve_last_state(void)
{
	tdma.start_ring = tdma_read(TDMA_TX_CTX_IDX_0);
}

void mtk_tops_tdma_record_last_state(void)
{
	tdma_write(TDMA_TX_CTX_IDX_0, tdma.start_ring);
}

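/*
 * Work out which Rx ring TOPS should start from after a TDMA-only reset.
 * The four Rx DMA ring indices are sampled in round-robin order starting
 * from the current start ring; after compensating for index wrap-around,
 * the last position where the index sequence drops is taken as the next
 * ring to serve, and start_ring is advanced to it.
 */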
static void tdma_get_next_rx_ring(void)
{
	u32 pkt_num_per_core = tdma_read(TDMA_RX_MAX_CNT_X(0));
	u32 ring[TDMA_RING_NUM] = {0};
	u32 start = 0;
	u32 tmp_idx;
	u32 i;

	for (i = 0; i < TDMA_RING_NUM; i++) {
		tmp_idx = (tdma.start_ring + i) % TDMA_RING_NUM;
		ring[i] = tdma_read(TDMA_RX_DRX_IDX_X(tmp_idx));
	}

	for (i = 1; i < TDMA_RING_NUM; i++) {
		if (ring[i] >= (pkt_num_per_core - 1) && !ring[i - 1])
			ring[i - 1] += pkt_num_per_core;

		if (!ring[i] && ring[i - 1] >= (pkt_num_per_core - 1))
			ring[i] = pkt_num_per_core;

		if (ring[i] < ring[i - 1])
			start = i;
	}

	tdma.start_ring = (tdma.start_ring + start) & TDMA_RING_NUM_MOD;
}

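/*
 * Reset sequence: choose the next Rx start ring (unless NETSYS FE is also
 * being reset, which restarts the ring indices anyway), then pulse the
 * prefetch, XDMA FIFO and write-back clear bits and reset the DMA ring
 * indices.
 */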
void mtk_tops_tdma_reset(void)
{
	if (!mtk_tops_mcu_netsys_fe_rst())
		/* get next start Rx ring if TDMA reset without NETSYS FE reset */
		tdma_get_next_rx_ring();
	else
		/*
		 * NETSYS FE reset will restart CDM ring index
		 * so we don't need to calculate next ring index
		 */
		tdma.start_ring = 0;

	/* then start the TDMA reset */
	tdma_assert_prefetch_reset(true);
	tdma_assert_prefetch_reset(false);

	tdma_assert_fifo_reset(true);
	tdma_assert_fifo_reset(false);

	tdma_assert_writeback_reset(true);
	tdma_assert_writeback_reset(false);

	/* reset tdma ring */
	tdma_set(TDMA_RST_IDX,
		 (RST_DTX_IDX_0
		  | RST_DRX_IDX_X(0)
		  | RST_DRX_IDX_X(1)
		  | RST_DRX_IDX_X(2)
		  | RST_DRX_IDX_X(3)));

	tdma_assert_prefetch_ring_reset(true);
	tdma_assert_prefetch_ring_reset(false);

	tdma_assert_writeback_ring_reset(true);
	tdma_assert_writeback_ring_reset(false);

	/* TODO: should we reset Tx/Rx CPU ring index? */
}

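/*
 * Bring TDMA back up: re-enable prefetch, Rx/Tx DMA and write-back, then
 * tell the TOPS management core and every offload core to start network
 * processing, passing the Rx start ring along in msg2.
 */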
int mtk_tops_tdma_enable(void)
{
	struct mailbox_msg msg = {
		.msg1 = TOPS_NET_CMD_START,
		.msg2 = tdma.start_ring,
	};
	int ret;
	u32 i;

	tdma_prefetch_enable(true);

	tdma_set(TDMA_GLO_CFG0, RX_DMA_EN | TX_DMA_EN);

	tdma_writeback_enable(true);

	/* notify TOPS to start network processing */
	ret = mbox_send_msg_no_wait(&tdma.mgmt_mdev, &msg);
	if (unlikely(ret))
		return ret;

	for (i = CORE_OFFLOAD_0; i < CORE_OFFLOAD_NUM; i++) {
		ret = mbox_send_msg_no_wait(&tdma.offload_mdev[i], &msg);
		if (unlikely(ret))
			return ret;
	}

	return ret;
}

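/*
 * Quiesce TDMA: ask the TOPS cores to stop network processing (only if the
 * MCU has been brought up), disable prefetch, wait for Rx/Tx DMA to go idle
 * unless NETSYS FE is under reset, then turn off DMA and write-back.
 */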
void mtk_tops_tdma_disable(void)
{
	struct mailbox_msg msg = {
		.msg1 = TOPS_NET_CMD_STOP,
	};
	u32 i;

	if (mtk_tops_mcu_bring_up_done()) {
		/* notify TOPS to stop network processing */
		if (unlikely(mbox_send_msg_no_wait(&tdma.mgmt_mdev, &msg)))
			return;

		for (i = CORE_OFFLOAD_0; i < CORE_OFFLOAD_NUM; i++) {
			if (unlikely(mbox_send_msg_no_wait(&tdma.offload_mdev[i],
							   &msg)))
				return;
		}
	}

	tdma_prefetch_enable(false);

	/* no need to wait for Tx/Rx idle when NETSYS FE is under reset */
	if (!mtk_tops_mcu_netsys_fe_rst())
		while (tdma_read(TDMA_GLO_CFG0) & RX_DMA_BUSY)
			;
	tdma_write(TDMA_GLO_CFG0, tdma_read(TDMA_GLO_CFG0) & (~RX_DMA_EN));

	if (!mtk_tops_mcu_netsys_fe_rst())
		while (tdma_read(TDMA_GLO_CFG0) & TX_DMA_BUSY)
			;
	tdma_write(TDMA_GLO_CFG0, tdma_read(TDMA_GLO_CFG0) & (~TX_DMA_EN));

	tdma_writeback_enable(false);
}

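/*
 * Register the mailbox send channels towards the management core and each
 * offload core, rolling back any channels already registered on failure.
 */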
static int mtk_tops_tdma_register_mbox(void)
{
	int ret;
	int i;

	ret = register_mbox_dev(MBOX_SEND, &tdma.mgmt_mdev);
	if (ret) {
		TOPS_ERR("register tdma mgmt mbox send failed: %d\n", ret);
		return ret;
	}

	for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
		ret = register_mbox_dev(MBOX_SEND, &tdma.offload_mdev[i]);
		if (ret) {
			TOPS_ERR("register tdma offload %d mbox send failed: %d\n",
				 i, ret);
			goto err_unregister_offload_mbox;
		}
	}

	return ret;

err_unregister_offload_mbox:
	for (i -= 1; i >= 0; i--)
		unregister_mbox_dev(MBOX_SEND, &tdma.offload_mdev[i]);

	unregister_mbox_dev(MBOX_SEND, &tdma.mgmt_mdev);

	return ret;
}

static void mtk_tops_tdma_unregister_mbox(void)
{
	int i;

	unregister_mbox_dev(MBOX_SEND, &tdma.mgmt_mdev);

	for (i = 0; i < CORE_OFFLOAD_NUM; i++)
		unregister_mbox_dev(MBOX_SEND, &tdma.offload_mdev[i]);
}

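/*
 * Map the TDMA register window. The "fe_mem" phandle points at the frame
 * engine register resource; the whole range is ioremap()'d and tdma.base is
 * then offset by TDMA_BASE so the accessors can use TDMA-relative offsets.
 */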
static int mtk_tops_tdma_dts_init(struct platform_device *pdev)
{
	struct device_node *fe_mem = NULL;
	struct resource res;
	int ret;

	fe_mem = of_parse_phandle(pdev->dev.of_node, "fe_mem", 0);
	if (!fe_mem) {
		TOPS_ERR("cannot find fe_mem node\n");
		return -ENODEV;
	}

	/* drop the node reference as soon as the resource has been extracted */
	ret = of_address_to_resource(fe_mem, 0, &res);
	of_node_put(fe_mem);
	if (ret)
		return -ENXIO;

	/* map FE address */
	tdma.base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
	if (!tdma.base)
		return -ENOMEM;

	/* shift FE address to TDMA base */
	tdma.base += TDMA_BASE;

	return 0;
}

int mtk_tops_tdma_init(struct platform_device *pdev)
{
	int ret = 0;

	ret = mtk_tops_tdma_register_mbox();
	if (ret)
		return ret;

	ret = mtk_tops_tdma_dts_init(pdev);
	if (ret)
		return ret;

	ret = mtk_trm_hw_config_register(TRM_TDMA, &tdma_trm_hw_cfg);
	if (ret)
		return ret;

	mtk_tops_tdma_retrieve_last_state();

	return ret;
}

void mtk_tops_tdma_deinit(struct platform_device *pdev)
{
	mtk_trm_hw_config_unregister(TRM_TDMA, &tdma_trm_hw_cfg);

	mtk_tops_tdma_unregister_mbox();
}