blob: c6f76bf085a4985c0e0dbd6e97ff849cb6ead21b [file] [log] [blame]
developerfd40db22021-04-29 10:08:25 +08001/*
2 * Copyright (C) 2018 MediaTek Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
14 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
15 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
16 */
17
18#include <linux/trace_seq.h>
19#include <linux/seq_file.h>
20#include <linux/proc_fs.h>
21#include <linux/u64_stats_sync.h>
22#include <linux/dma-mapping.h>
23#include <linux/netdevice.h>
24#include <linux/ctype.h>
25#include <linux/debugfs.h>
26#include <linux/of_mdio.h>
developer089e8852022-09-28 14:43:46 +080027#include <linux/of_address.h>
developerfd40db22021-04-29 10:08:25 +080028
29#include "mtk_eth_soc.h"
30#include "mtk_eth_dbg.h"
developer8051e042022-04-08 13:26:36 +080031#include "mtk_eth_reset.h"
developerfd40db22021-04-29 10:08:25 +080032
developer77d03a72021-06-06 00:06:00 +080033u32 hw_lro_agg_num_cnt[MTK_HW_LRO_RING_NUM][MTK_HW_LRO_MAX_AGG_CNT + 1];
34u32 hw_lro_agg_size_cnt[MTK_HW_LRO_RING_NUM][16];
35u32 hw_lro_tot_agg_cnt[MTK_HW_LRO_RING_NUM];
36u32 hw_lro_tot_flush_cnt[MTK_HW_LRO_RING_NUM];
37u32 hw_lro_agg_flush_cnt[MTK_HW_LRO_RING_NUM];
38u32 hw_lro_age_flush_cnt[MTK_HW_LRO_RING_NUM];
39u32 hw_lro_seq_flush_cnt[MTK_HW_LRO_RING_NUM];
40u32 hw_lro_timestamp_flush_cnt[MTK_HW_LRO_RING_NUM];
41u32 hw_lro_norule_flush_cnt[MTK_HW_LRO_RING_NUM];
42u32 mtk_hwlro_stats_ebl;
43static struct proc_dir_entry *proc_hw_lro_stats, *proc_hw_lro_auto_tlb;
44typedef int (*mtk_lro_dbg_func) (int par);
45
developerfd40db22021-04-29 10:08:25 +080046struct mtk_eth_debug {
developer089e8852022-09-28 14:43:46 +080047 struct dentry *root;
48 void __iomem *base;
49 int direct_access;
developerfd40db22021-04-29 10:08:25 +080050};
51
52struct mtk_eth *g_eth;
53
54struct mtk_eth_debug eth_debug;
55
developer089e8852022-09-28 14:43:46 +080056int mt798x_iomap(void)
57{
58 struct device_node *np = NULL;
59
60 np = of_find_node_by_name(NULL, "switch0");
61 if (np) {
62 eth_debug.base = of_iomap(np, 0);
63 if (!eth_debug.base) {
64 pr_err("of_iomap failed\n");
65 of_node_put(np);
66 return -ENOMEM;
67 }
68
69 of_node_put(np);
70 eth_debug.direct_access = 1;
71 }
72
73 return 0;
74}
75
76int mt798x_iounmap(void)
77{
78 eth_debug.direct_access = 0;
79 if (eth_debug.base)
80 iounmap(eth_debug.base);
81
82 return 0;
83}
84
developer3957a912021-05-13 16:44:31 +080085void mt7530_mdio_w32(struct mtk_eth *eth, u16 reg, u32 val)
developerfd40db22021-04-29 10:08:25 +080086{
87 mutex_lock(&eth->mii_bus->mdio_lock);
88
developer089e8852022-09-28 14:43:46 +080089 if (eth_debug.direct_access)
90 __raw_writel(val, eth_debug.base + reg);
91 else {
92 _mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
93 _mtk_mdio_write(eth, 0x1f, (reg >> 2) & 0xf, val & 0xffff);
94 _mtk_mdio_write(eth, 0x1f, 0x10, val >> 16);
95 }
developerfd40db22021-04-29 10:08:25 +080096
97 mutex_unlock(&eth->mii_bus->mdio_lock);
98}
99
/* Read a 32-bit MT7530 switch register.
 *
 * Mirror of mt7530_mdio_w32(): direct MMIO read when mapped, otherwise
 * the indirect MDIO sequence (write reg[15:6] to register 0x1f, read the
 * low half from reg[5:2], then the high half from register 0x10).
 * Held under the MDIO bus lock for the whole access.
 */
u32 mt7530_mdio_r32(struct mtk_eth *eth, u32 reg)
{
	u16 high, low;
	u32 ret;

	mutex_lock(&eth->mii_bus->mdio_lock);

	if (eth_debug.direct_access) {
		ret = __raw_readl(eth_debug.base + reg);
		mutex_unlock(&eth->mii_bus->mdio_lock);
		return ret;
	}
	_mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
	low = _mtk_mdio_read(eth, 0x1f, (reg >> 2) & 0xf);
	high = _mtk_mdio_read(eth, 0x1f, 0x10);

	mutex_unlock(&eth->mii_bus->mdio_lock);

	return (high << 16) | (low & 0xffff);
}
120
/* Write an embedded-switch register; switch registers sit at a fixed
 * 0x10000 offset inside the ethernet register window.
 */
void mtk_switch_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	mtk_w32(eth, val, reg + 0x10000);
}
EXPORT_SYMBOL(mtk_switch_w32);
126
/* Read an embedded-switch register (same fixed 0x10000 offset as
 * mtk_switch_w32()).
 */
u32 mtk_switch_r32(struct mtk_eth *eth, unsigned reg)
{
	return mtk_r32(eth, reg + 0x10000);
}
EXPORT_SYMBOL(mtk_switch_r32);
132
/* debugfs "phy_regs" show callback.
 *
 * Iterates the MACs that are not fixed-link, but the actual PHY register
 * dump is compiled out (#if 0, marked FIXME), so this currently emits
 * nothing and always returns 0.
 */
static int mtketh_debug_show(struct seq_file *m, void *private)
{
	struct mtk_eth *eth = m->private;
	struct mtk_mac *mac = 0;
	int i = 0;

	for (i = 0 ; i < MTK_MAX_DEVS ; i++) {
		if (!eth->mac[i] ||
		    of_phy_is_fixed_link(eth->mac[i]->of_node))
			continue;
		mac = eth->mac[i];
#if 0 //FIXME
		while (j < 30) {
			d = _mtk_mdio_read(eth, mac->phy_dev->addr, j);

			seq_printf(m, "phy=%d, reg=0x%08x, data=0x%08x\n",
				   mac->phy_dev->addr, j, d);
			j++;
		}
#endif
	}
	return 0;
}
156
/* debugfs open hook: bind mtketh_debug_show() to a seq_file */
static int mtketh_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtketh_debug_show, inode->i_private);
}
161
162static const struct file_operations mtketh_debug_fops = {
developer089e8852022-09-28 14:43:46 +0800163 .owner = THIS_MODULE,
developerfd40db22021-04-29 10:08:25 +0800164 .open = mtketh_debug_open,
165 .read = seq_read,
166 .llseek = seq_lseek,
167 .release = single_release,
168};
169
/* debugfs "mt7530sw_regs" show callback: dump the MT7530 switch register
 * file over a hard-coded list of address ranges (inclusive, 4-byte step).
 *
 * Returns -EOPNOTSUPP when no MT7530 is present; emits "no switch found"
 * when neither MAC 0 nor MAC 1 is a fixed link (i.e. no switch port).
 */
static int mtketh_mt7530sw_debug_show(struct seq_file *m, void *private)
{
	struct mtk_eth *eth = m->private;
	u32 offset, data;
	int i;
	struct mt7530_ranges {
		u32 start;
		u32 end;
	} ranges[] = {
		{0x0, 0xac},
		{0x1000, 0x10e0},
		{0x1100, 0x1140},
		{0x1200, 0x1240},
		{0x1300, 0x1340},
		{0x1400, 0x1440},
		{0x1500, 0x1540},
		{0x1600, 0x1640},
		{0x1800, 0x1848},
		{0x1900, 0x1948},
		{0x1a00, 0x1a48},
		{0x1b00, 0x1b48},
		{0x1c00, 0x1c48},
		{0x1d00, 0x1d48},
		{0x1e00, 0x1e48},
		{0x1f60, 0x1ffc},
		{0x2000, 0x212c},
		{0x2200, 0x222c},
		{0x2300, 0x232c},
		{0x2400, 0x242c},
		{0x2500, 0x252c},
		{0x2600, 0x262c},
		{0x3000, 0x3014},
		{0x30c0, 0x30f8},
		{0x3100, 0x3114},
		{0x3200, 0x3214},
		{0x3300, 0x3314},
		{0x3400, 0x3414},
		{0x3500, 0x3514},
		{0x3600, 0x3614},
		{0x4000, 0x40d4},
		{0x4100, 0x41d4},
		{0x4200, 0x42d4},
		{0x4300, 0x43d4},
		{0x4400, 0x44d4},
		{0x4500, 0x45d4},
		{0x4600, 0x46d4},
		/* NOTE(review): start > end, so this entry dumps nothing —
		 * possibly a typo (0x4f1c?); confirm against the MT7530
		 * register map before changing.
		 */
		{0x4f00, 0x461c},
		{0x7000, 0x7038},
		{0x7120, 0x7124},
		{0x7800, 0x7804},
		{0x7810, 0x7810},
		{0x7830, 0x7830},
		{0x7a00, 0x7a7c},
		{0x7b00, 0x7b04},
		{0x7e00, 0x7e04},
		{0x7ffc, 0x7ffc},
	};

	if (!mt7530_exist(eth))
		return -EOPNOTSUPP;

	if ((!eth->mac[0] || !of_phy_is_fixed_link(eth->mac[0]->of_node)) &&
	    (!eth->mac[1] || !of_phy_is_fixed_link(eth->mac[1]->of_node))) {
		seq_puts(m, "no switch found\n");
		return 0;
	}

	for (i = 0 ; i < ARRAY_SIZE(ranges) ; i++) {
		for (offset = ranges[i].start;
		     offset <= ranges[i].end; offset += 4) {
			data = mt7530_mdio_r32(eth, offset);
			seq_printf(m, "mt7530 switch reg=0x%08x, data=0x%08x\n",
				   offset, data);
		}
	}

	return 0;
}
248
/* debugfs open hook: bind mtketh_mt7530sw_debug_show() to a seq_file */
static int mtketh_debug_mt7530sw_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtketh_mt7530sw_debug_show, inode->i_private);
}
253
254static const struct file_operations mtketh_debug_mt7530sw_fops = {
developer089e8852022-09-28 14:43:46 +0800255 .owner = THIS_MODULE,
developerfd40db22021-04-29 10:08:25 +0800256 .open = mtketh_debug_mt7530sw_open,
257 .read = seq_read,
258 .llseek = seq_lseek,
259 .release = single_release,
260};
261
262static ssize_t mtketh_mt7530sw_debugfs_write(struct file *file,
263 const char __user *ptr,
264 size_t len, loff_t *off)
265{
266 struct mtk_eth *eth = file->private_data;
267 char buf[32], *token, *p = buf;
268 u32 reg, value, phy;
269 int ret;
270
271 if (!mt7530_exist(eth))
272 return -EOPNOTSUPP;
273
274 if (*off != 0)
275 return 0;
276
277 if (len > sizeof(buf) - 1)
278 len = sizeof(buf) - 1;
279
280 ret = strncpy_from_user(buf, ptr, len);
281 if (ret < 0)
282 return ret;
283 buf[len] = '\0';
284
285 token = strsep(&p, " ");
286 if (!token)
287 return -EINVAL;
288 if (kstrtoul(token, 16, (unsigned long *)&phy))
289 return -EINVAL;
290
291 token = strsep(&p, " ");
292 if (!token)
293 return -EINVAL;
294 if (kstrtoul(token, 16, (unsigned long *)&reg))
295 return -EINVAL;
296
297 token = strsep(&p, " ");
298 if (!token)
299 return -EINVAL;
300 if (kstrtoul(token, 16, (unsigned long *)&value))
301 return -EINVAL;
302
303 pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
304 0x1f, reg, value);
305 mt7530_mdio_w32(eth, reg, value);
306 pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
307 0x1f, reg, mt7530_mdio_r32(eth, reg));
308
309 return len;
310}
311
312static ssize_t mtketh_debugfs_write(struct file *file, const char __user *ptr,
313 size_t len, loff_t *off)
314{
315 struct mtk_eth *eth = file->private_data;
316 char buf[32], *token, *p = buf;
317 u32 reg, value, phy;
318 int ret;
319
320 if (*off != 0)
321 return 0;
322
323 if (len > sizeof(buf) - 1)
324 len = sizeof(buf) - 1;
325
326 ret = strncpy_from_user(buf, ptr, len);
327 if (ret < 0)
328 return ret;
329 buf[len] = '\0';
330
331 token = strsep(&p, " ");
332 if (!token)
333 return -EINVAL;
334 if (kstrtoul(token, 16, (unsigned long *)&phy))
335 return -EINVAL;
336
337 token = strsep(&p, " ");
338
339 if (!token)
340 return -EINVAL;
341 if (kstrtoul(token, 16, (unsigned long *)&reg))
342 return -EINVAL;
343
344 token = strsep(&p, " ");
345
346 if (!token)
347 return -EINVAL;
348 if (kstrtoul(token, 16, (unsigned long *)&value))
349 return -EINVAL;
350
351 pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
352 phy, reg, value);
353
354 _mtk_mdio_write(eth, phy, reg, value);
355
356 pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
357 phy, reg, _mtk_mdio_read(eth, phy, reg));
358
359 return len;
360}
361
362static ssize_t mtketh_debugfs_reset(struct file *file, const char __user *ptr,
363 size_t len, loff_t *off)
364{
365 struct mtk_eth *eth = file->private_data;
developerbd42c172022-07-18 17:51:30 +0800366 char buf[8] = "";
367 int count = len;
368 unsigned long dbg_level = 0;
369
370 len = min(count, sizeof(buf) - 1);
371 if (copy_from_user(buf, ptr, len))
372 return -EFAULT;
373
374 buf[len] = '\0';
375 if (kstrtoul(buf, 0, &dbg_level))
376 return -EINVAL;
developerfd40db22021-04-29 10:08:25 +0800377
developerbd42c172022-07-18 17:51:30 +0800378 switch(dbg_level)
379 {
380 case 0:
381 if (atomic_read(&reset_lock) == 0)
382 atomic_inc(&reset_lock);
383 break;
384 case 1:
385 if (atomic_read(&force) == 0)
386 atomic_inc(&force);
387 schedule_work(&eth->pending_work);
388 break;
389 case 2:
390 if (atomic_read(&reset_lock) == 1)
391 atomic_dec(&reset_lock);
392 break;
393 default:
394 pr_info("Usage: echo [level] > /sys/kernel/debug/mtketh/reset\n");
395 pr_info("Commands: [level] \n");
396 pr_info(" 0 disable reset \n");
397 pr_info(" 1 force reset \n");
398 pr_info(" 2 enable reset\n");
399 break;
400 }
401 return count;
developerfd40db22021-04-29 10:08:25 +0800402}
403
/* Write-only debugfs entries: raw MDIO write, reset control, and MT7530
 * switch register write, respectively.
 */
static const struct file_operations fops_reg_w = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = mtketh_debugfs_write,
	.llseek = noop_llseek,
};

static const struct file_operations fops_eth_reset = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = mtketh_debugfs_reset,
	.llseek = noop_llseek,
};

static const struct file_operations fops_mt7530sw_reg_w = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = mtketh_mt7530sw_debugfs_write,
	.llseek = noop_llseek,
};
424
/* Tear down the whole "mtketh" debugfs tree created by
 * mtketh_debugfs_init().
 */
void mtketh_debugfs_exit(struct mtk_eth *eth)
{
	debugfs_remove_recursive(eth_debug.root);
}
429
430int mtketh_debugfs_init(struct mtk_eth *eth)
431{
432 int ret = 0;
433
434 eth_debug.root = debugfs_create_dir("mtketh", NULL);
435 if (!eth_debug.root) {
436 dev_notice(eth->dev, "%s:err at %d\n", __func__, __LINE__);
437 ret = -ENOMEM;
438 }
439
440 debugfs_create_file("phy_regs", S_IRUGO,
441 eth_debug.root, eth, &mtketh_debug_fops);
442 debugfs_create_file("phy_reg_w", S_IFREG | S_IWUSR,
443 eth_debug.root, eth, &fops_reg_w);
444 debugfs_create_file("reset", S_IFREG | S_IWUSR,
445 eth_debug.root, eth, &fops_eth_reset);
446 if (mt7530_exist(eth)) {
447 debugfs_create_file("mt7530sw_regs", S_IRUGO,
448 eth_debug.root, eth,
449 &mtketh_debug_mt7530sw_fops);
450 debugfs_create_file("mt7530sw_reg_w", S_IFREG | S_IWUSR,
451 eth_debug.root, eth,
452 &fops_mt7530sw_reg_w);
453 }
454 return ret;
455}
456
/* MII read that routes phy address 31 to the MT7530 indirect switch
 * access (when a switch is present); everything else goes straight to
 * the MDIO bus.
 */
void mii_mgr_read_combine(struct mtk_eth *eth, u32 phy_addr, u32 phy_register,
			  u32 *read_data)
{
	if (mt7530_exist(eth) && phy_addr == 31)
		*read_data = mt7530_mdio_r32(eth, phy_register);

	else
		*read_data = mdiobus_read(eth->mii_bus, phy_addr, phy_register);
}
466
developer3957a912021-05-13 16:44:31 +0800467void mii_mgr_write_combine(struct mtk_eth *eth, u16 phy_addr, u16 phy_register,
developerfd40db22021-04-29 10:08:25 +0800468 u32 write_data)
469{
470 if (mt7530_exist(eth) && phy_addr == 31)
471 mt7530_mdio_w32(eth, phy_register, write_data);
472
473 else
developer089e8852022-09-28 14:43:46 +0800474 mdiobus_write(eth->mii_bus, phy_addr, phy_register, write_data);
developerfd40db22021-04-29 10:08:25 +0800475}
476
developer3957a912021-05-13 16:44:31 +0800477static void mii_mgr_read_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 *data)
developerfd40db22021-04-29 10:08:25 +0800478{
developer089e8852022-09-28 14:43:46 +0800479 *data = mdiobus_read(eth->mii_bus, port, mdiobus_c45_addr(devad, reg));
developerfd40db22021-04-29 10:08:25 +0800480}
481
developer3957a912021-05-13 16:44:31 +0800482static void mii_mgr_write_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 data)
developerfd40db22021-04-29 10:08:25 +0800483{
developer089e8852022-09-28 14:43:46 +0800484 mdiobus_write(eth->mii_bus, port, mdiobus_c45_addr(devad, reg), data);
developerfd40db22021-04-29 10:08:25 +0800485}
486
/* Private ioctl dispatcher for the ethernet netdev.
 *
 * Supported commands:
 *   MTKETH_MII_READ/WRITE        - clause-22 MDIO access (phy 31 maps
 *                                  to the MT7530 switch, see the
 *                                  mii_mgr_*_combine helpers)
 *   MTKETH_MII_READ/WRITE_CL45   - clause-45 MDIO access; prtad/devad
 *                                  are unpacked from mii.phy_id
 *   MTKETH_ESW_REG_READ/WRITE    - embedded-switch register access,
 *                                  bounded by REG_ESW_MAX
 *
 * Returns 0 on success, -EFAULT on copy_{from,to}_user failure,
 * -EINVAL on out-of-range offsets, -EOPNOTSUPP otherwise.
 */
int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_mii_ioctl_data mii;
	struct mtk_esw_reg reg;
	u16 val;

	switch (cmd) {
	case MTKETH_MII_READ:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		mii_mgr_read_combine(eth, mii.phy_id, mii.reg_num,
				     &mii.val_out);
		if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
			goto err_copy;

		return 0;
	case MTKETH_MII_WRITE:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		mii_mgr_write_combine(eth, mii.phy_id, mii.reg_num,
				      mii.val_in);
		return 0;
	case MTKETH_MII_READ_CL45:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		/* bounce through a u16 since the helper takes u16 * */
		mii_mgr_read_cl45(eth,
				  mdio_phy_id_prtad(mii.phy_id),
				  mdio_phy_id_devad(mii.phy_id),
				  mii.reg_num,
				  &val);
		mii.val_out = val;
		if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
			goto err_copy;

		return 0;
	case MTKETH_MII_WRITE_CL45:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		val = mii.val_in;
		mii_mgr_write_cl45(eth,
				   mdio_phy_id_prtad(mii.phy_id),
				   mdio_phy_id_devad(mii.phy_id),
				   mii.reg_num,
				   val);
		return 0;
	case MTKETH_ESW_REG_READ:
		if (!mt7530_exist(eth))
			return -EOPNOTSUPP;
		if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg)))
			goto err_copy;
		if (reg.off > REG_ESW_MAX)
			return -EINVAL;
		reg.val = mtk_switch_r32(eth, reg.off);

		if (copy_to_user(ifr->ifr_data, &reg, sizeof(reg)))
			goto err_copy;

		return 0;
	case MTKETH_ESW_REG_WRITE:
		if (!mt7530_exist(eth))
			return -EOPNOTSUPP;
		if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg)))
			goto err_copy;
		if (reg.off > REG_ESW_MAX)
			return -EINVAL;
		mtk_switch_w32(eth, reg.val, reg.off);

		return 0;
	default:
		break;
	}

	return -EOPNOTSUPP;
err_copy:
	return -EFAULT;
}
565
developer089e8852022-09-28 14:43:46 +0800566static void gdm_reg_dump_v3(struct mtk_eth *eth, u32 gdm_id, u32 mib_base)
567{
568 pr_info("| GDMA%d_RX_GBCNT : %010u (Rx Good Bytes) |\n",
569 gdm_id, mtk_r32(eth, mib_base));
570 pr_info("| GDMA%d_RX_GPCNT : %010u (Rx Good Pkts) |\n",
571 gdm_id, mtk_r32(eth, mib_base + 0x08));
572 pr_info("| GDMA%d_RX_OERCNT : %010u (overflow error) |\n",
573 gdm_id, mtk_r32(eth, mib_base + 0x10));
574 pr_info("| GDMA%d_RX_FERCNT : %010u (FCS error) |\n",
575 gdm_id, mtk_r32(eth, mib_base + 0x14));
576 pr_info("| GDMA%d_RX_SERCNT : %010u (too short) |\n",
577 gdm_id, mtk_r32(eth, mib_base + 0x18));
578 pr_info("| GDMA%d_RX_LERCNT : %010u (too long) |\n",
579 gdm_id, mtk_r32(eth, mib_base + 0x1C));
580 pr_info("| GDMA%d_RX_CERCNT : %010u (checksum error) |\n",
581 gdm_id, mtk_r32(eth, mib_base + 0x20));
582 pr_info("| GDMA%d_RX_FCCNT : %010u (flow control) |\n",
583 gdm_id, mtk_r32(eth, mib_base + 0x24));
584 pr_info("| GDMA%d_RX_VDPCNT : %010u (VID drop) |\n",
585 gdm_id, mtk_r32(eth, mib_base + 0x28));
586 pr_info("| GDMA%d_RX_PFCCNT : %010u (priority flow control)\n",
587 gdm_id, mtk_r32(eth, mib_base + 0x2C));
588 pr_info("| GDMA%d_TX_GBCNT : %010u (Tx Good Bytes) |\n",
589 gdm_id, mtk_r32(eth, mib_base + 0x40));
590 pr_info("| GDMA%d_TX_GPCNT : %010u (Tx Good Pkts) |\n",
591 gdm_id, mtk_r32(eth, mib_base + 0x48));
592 pr_info("| GDMA%d_TX_SKIPCNT: %010u (abort count) |\n",
593 gdm_id, mtk_r32(eth, mib_base + 0x50));
594 pr_info("| GDMA%d_TX_COLCNT : %010u (collision count)|\n",
595 gdm_id, mtk_r32(eth, mib_base + 0x54));
596 pr_info("| GDMA%d_TX_OERCNT : %010u (overflow error) |\n",
597 gdm_id, mtk_r32(eth, mib_base + 0x58));
598 pr_info("| GDMA%d_TX_FCCNT : %010u (flow control) |\n",
599 gdm_id, mtk_r32(eth, mib_base + 0x60));
600 pr_info("| GDMA%d_TX_PFCCNT : %010u (priority flow control)\n",
601 gdm_id, mtk_r32(eth, mib_base + 0x64));
602 pr_info("| |\n");
603}
604
/* Dump the GDMA MIB counter block for one GDM on pre-v3 hardware
 * (compact layout: TX counters immediately follow RX at +0x28..+0x38).
 */
static void gdm_reg_dump_v2(struct mtk_eth *eth, u32 gdm_id, u32 mib_base)
{
	pr_info("| GDMA%d_RX_GBCNT  : %010u (Rx Good Bytes)	|\n",
		gdm_id, mtk_r32(eth, mib_base));
	pr_info("| GDMA%d_RX_GPCNT  : %010u (Rx Good Pkts)	|\n",
		gdm_id, mtk_r32(eth, mib_base + 0x08));
	pr_info("| GDMA%d_RX_OERCNT : %010u (overflow error)	|\n",
		gdm_id, mtk_r32(eth, mib_base + 0x10));
	pr_info("| GDMA%d_RX_FERCNT : %010u (FCS error)	|\n",
		gdm_id, mtk_r32(eth, mib_base + 0x14));
	pr_info("| GDMA%d_RX_SERCNT : %010u (too short)	|\n",
		gdm_id, mtk_r32(eth, mib_base + 0x18));
	pr_info("| GDMA%d_RX_LERCNT : %010u (too long)	|\n",
		gdm_id, mtk_r32(eth, mib_base + 0x1C));
	pr_info("| GDMA%d_RX_CERCNT : %010u (checksum error)	|\n",
		gdm_id, mtk_r32(eth, mib_base + 0x20));
	pr_info("| GDMA%d_RX_FCCNT  : %010u (flow control)	|\n",
		gdm_id, mtk_r32(eth, mib_base + 0x24));
	pr_info("| GDMA%d_TX_SKIPCNT: %010u (abort count)	|\n",
		gdm_id, mtk_r32(eth, mib_base + 0x28));
	pr_info("| GDMA%d_TX_COLCNT : %010u (collision count)	|\n",
		gdm_id, mtk_r32(eth, mib_base + 0x2C));
	pr_info("| GDMA%d_TX_GBCNT  : %010u (Tx Good Bytes)	|\n",
		gdm_id, mtk_r32(eth, mib_base + 0x30));
	pr_info("| GDMA%d_TX_GPCNT  : %010u (Tx Good Pkts)	|\n",
		gdm_id, mtk_r32(eth, mib_base + 0x38));
	pr_info("|						|\n");
}
633
/* Print the GDMA MIB counters for every MAC to the kernel log, picking
 * the v3 or v2 register layout based on the SoC capabilities.
 */
static void gdm_cnt_read(struct mtk_eth *eth)
{
	u32 i, mib_base;

	pr_info("\n			<<CPU>>\n");
	pr_info("			   |\n");
	pr_info("+-----------------------------------------------+\n");
	pr_info("|		  <<PSE>>		        |\n");
	pr_info("+-----------------------------------------------+\n");
	pr_info("			   |\n");
	pr_info("+-----------------------------------------------+\n");
	pr_info("|		  <<GDMA>>		        |\n");

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		/* each GDM's MIB block is MTK_STAT_OFFSET apart */
		mib_base = MTK_GDM1_TX_GBCNT + MTK_STAT_OFFSET * i;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
			gdm_reg_dump_v3(eth, i + 1, mib_base);
		else
			gdm_reg_dump_v2(eth, i + 1, mib_base);
	}

	pr_info("+-----------------------------------------------+\n");
}
658
developerfd40db22021-04-29 10:08:25 +0800659int esw_cnt_read(struct seq_file *seq, void *v)
660{
661 unsigned int pkt_cnt = 0;
662 int i = 0;
663 struct mtk_eth *eth = g_eth;
developerfd40db22021-04-29 10:08:25 +0800664
developer089e8852022-09-28 14:43:46 +0800665 gdm_cnt_read(eth);
developerfd40db22021-04-29 10:08:25 +0800666
667 if (!mt7530_exist(eth))
668 return 0;
669
developer089e8852022-09-28 14:43:46 +0800670 mt798x_iomap();
671
developerfd40db22021-04-29 10:08:25 +0800672#define DUMP_EACH_PORT(base) \
673 do { \
674 for (i = 0; i < 7; i++) { \
675 pkt_cnt = mt7530_mdio_r32(eth, (base) + (i * 0x100));\
676 seq_printf(seq, "%8u ", pkt_cnt); \
677 } \
678 seq_puts(seq, "\n"); \
679 } while (0)
680
681 seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n",
682 "Port0", "Port1", "Port2", "Port3", "Port4", "Port5",
683 "Port6");
684 seq_puts(seq, "Tx Drop Packet :");
685 DUMP_EACH_PORT(0x4000);
686 seq_puts(seq, "Tx CRC Error :");
687 DUMP_EACH_PORT(0x4004);
688 seq_puts(seq, "Tx Unicast Packet :");
689 DUMP_EACH_PORT(0x4008);
690 seq_puts(seq, "Tx Multicast Packet :");
691 DUMP_EACH_PORT(0x400C);
692 seq_puts(seq, "Tx Broadcast Packet :");
693 DUMP_EACH_PORT(0x4010);
694 seq_puts(seq, "Tx Collision Event :");
695 DUMP_EACH_PORT(0x4014);
696 seq_puts(seq, "Tx Pause Packet :");
697 DUMP_EACH_PORT(0x402C);
698 seq_puts(seq, "Rx Drop Packet :");
699 DUMP_EACH_PORT(0x4060);
700 seq_puts(seq, "Rx Filtering Packet :");
701 DUMP_EACH_PORT(0x4064);
702 seq_puts(seq, "Rx Unicast Packet :");
703 DUMP_EACH_PORT(0x4068);
704 seq_puts(seq, "Rx Multicast Packet :");
705 DUMP_EACH_PORT(0x406C);
706 seq_puts(seq, "Rx Broadcast Packet :");
707 DUMP_EACH_PORT(0x4070);
708 seq_puts(seq, "Rx Alignment Error :");
709 DUMP_EACH_PORT(0x4074);
710 seq_puts(seq, "Rx CRC Error :");
711 DUMP_EACH_PORT(0x4078);
712 seq_puts(seq, "Rx Undersize Error :");
713 DUMP_EACH_PORT(0x407C);
714 seq_puts(seq, "Rx Fragment Error :");
715 DUMP_EACH_PORT(0x4080);
716 seq_puts(seq, "Rx Oversize Error :");
717 DUMP_EACH_PORT(0x4084);
718 seq_puts(seq, "Rx Jabber Error :");
719 DUMP_EACH_PORT(0x4088);
720 seq_puts(seq, "Rx Pause Packet :");
721 DUMP_EACH_PORT(0x408C);
722 mt7530_mdio_w32(eth, 0x4fe0, 0xf0);
723 mt7530_mdio_w32(eth, 0x4fe0, 0x800000f0);
724
725 seq_puts(seq, "\n");
726
developer089e8852022-09-28 14:43:46 +0800727 mt798x_iounmap();
728
developerfd40db22021-04-29 10:08:25 +0800729 return 0;
730}
731
732static int switch_count_open(struct inode *inode, struct file *file)
733{
734 return single_open(file, esw_cnt_read, 0);
735}
736
737static const struct file_operations switch_count_fops = {
738 .owner = THIS_MODULE,
739 .open = switch_count_open,
740 .read = seq_read,
741 .llseek = seq_lseek,
742 .release = single_release
743};
744
developer8051e042022-04-08 13:26:36 +0800745static struct proc_dir_entry *proc_tx_ring, *proc_hwtx_ring, *proc_rx_ring;
developerfd40db22021-04-29 10:08:25 +0800746
/* seq_file show callback: dump every TX descriptor (txd1..txd4, plus
 * txd5..txd8 on NETSYS v2/v3 where descriptors are wider) together
 * with the ring's free-slot bookkeeping. Descriptors are addressed by
 * byte offset because txd_size is per-SoC.
 */
int tx_ring_read(struct seq_file *seq, void *v)
{
	struct mtk_eth *eth = g_eth;
	struct mtk_tx_ring *ring = &g_eth->tx_ring;
	struct mtk_tx_dma_v2 *tx_ring;
	int i = 0;

	seq_printf(seq, "free count = %d\n", (int)atomic_read(&ring->free_count));
	seq_printf(seq, "cpu next free: %d\n", (int)(ring->next_free - ring->dma));
	seq_printf(seq, "cpu last free: %d\n", (int)(ring->last_free - ring->dma));
	for (i = 0; i < MTK_DMA_SIZE; i++) {
		dma_addr_t tmp = ring->phys + i * eth->soc->txrx.txd_size;

		tx_ring = ring->dma + i * eth->soc->txrx.txd_size;

		seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &tmp,
			   tx_ring->txd1, tx_ring->txd2,
			   tx_ring->txd3, tx_ring->txd4);

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			seq_printf(seq, " %08x %08x %08x %08x",
				   tx_ring->txd5, tx_ring->txd6,
				   tx_ring->txd7, tx_ring->txd8);
		}

		seq_printf(seq, "\n");
	}

	return 0;
}
778
/* proc open hook: bind tx_ring_read() to a seq_file */
static int tx_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, tx_ring_read, NULL);
}
783
784static const struct file_operations tx_ring_fops = {
785 .owner = THIS_MODULE,
786 .open = tx_ring_open,
787 .read = seq_read,
788 .llseek = seq_lseek,
789 .release = single_release
790};
791
developer8051e042022-04-08 13:26:36 +0800792int hwtx_ring_read(struct seq_file *seq, void *v)
793{
794 struct mtk_eth *eth = g_eth;
developere9356982022-07-04 09:03:20 +0800795 struct mtk_tx_dma_v2 *hwtx_ring;
developer8051e042022-04-08 13:26:36 +0800796 int i = 0;
797
developer8051e042022-04-08 13:26:36 +0800798 for (i = 0; i < MTK_DMA_SIZE; i++) {
developere9356982022-07-04 09:03:20 +0800799 dma_addr_t addr = eth->phy_scratch_ring + i * eth->soc->txrx.txd_size;
800
801 hwtx_ring = eth->scratch_ring + i * eth->soc->txrx.txd_size;
developer8051e042022-04-08 13:26:36 +0800802
803 seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &addr,
developere9356982022-07-04 09:03:20 +0800804 hwtx_ring->txd1, hwtx_ring->txd2,
805 hwtx_ring->txd3, hwtx_ring->txd4);
806
developer089e8852022-09-28 14:43:46 +0800807 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
808 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developere9356982022-07-04 09:03:20 +0800809 seq_printf(seq, " %08x %08x %08x %08x",
810 hwtx_ring->txd5, hwtx_ring->txd6,
811 hwtx_ring->txd7, hwtx_ring->txd8);
812 }
813
developer8051e042022-04-08 13:26:36 +0800814 seq_printf(seq, "\n");
815 }
816
developer8051e042022-04-08 13:26:36 +0800817 return 0;
818}
819
/* proc open hook: bind hwtx_ring_read() to a seq_file */
static int hwtx_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, hwtx_ring_read, NULL);
}
824
825static const struct file_operations hwtx_ring_fops = {
826 .owner = THIS_MODULE,
827 .open = hwtx_ring_open,
828 .read = seq_read,
829 .llseek = seq_lseek,
830 .release = single_release
831};
832
developerfd40db22021-04-29 10:08:25 +0800833int rx_ring_read(struct seq_file *seq, void *v)
834{
developere9356982022-07-04 09:03:20 +0800835 struct mtk_eth *eth = g_eth;
developerfd40db22021-04-29 10:08:25 +0800836 struct mtk_rx_ring *ring = &g_eth->rx_ring[0];
developere9356982022-07-04 09:03:20 +0800837 struct mtk_rx_dma_v2 *rx_ring;
developerfd40db22021-04-29 10:08:25 +0800838 int i = 0;
839
developerfd40db22021-04-29 10:08:25 +0800840 seq_printf(seq, "next to read: %d\n",
841 NEXT_DESP_IDX(ring->calc_idx, MTK_DMA_SIZE));
842 for (i = 0; i < MTK_DMA_SIZE; i++) {
developere9356982022-07-04 09:03:20 +0800843 rx_ring = ring->dma + i * eth->soc->txrx.rxd_size;
844
developerfd40db22021-04-29 10:08:25 +0800845 seq_printf(seq, "%d: %08x %08x %08x %08x", i,
developere9356982022-07-04 09:03:20 +0800846 rx_ring->rxd1, rx_ring->rxd2,
847 rx_ring->rxd3, rx_ring->rxd4);
848
developer089e8852022-09-28 14:43:46 +0800849 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
850 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developere9356982022-07-04 09:03:20 +0800851 seq_printf(seq, " %08x %08x %08x %08x",
852 rx_ring->rxd5, rx_ring->rxd6,
853 rx_ring->rxd7, rx_ring->rxd8);
854 }
855
developerfd40db22021-04-29 10:08:25 +0800856 seq_printf(seq, "\n");
857 }
858
developerfd40db22021-04-29 10:08:25 +0800859 return 0;
860}
861
/* proc open hook: bind rx_ring_read() to a seq_file */
static int rx_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, rx_ring_read, NULL);
}
866
867static const struct file_operations rx_ring_fops = {
868 .owner = THIS_MODULE,
869 .open = rx_ring_open,
870 .read = seq_read,
871 .llseek = seq_lseek,
872 .release = single_release
873};
874
developer77f3fd42021-10-05 15:16:05 +0800875static inline u32 mtk_dbg_r32(u32 reg)
876{
877 void __iomem *virt_reg;
878 u32 val;
879
880 virt_reg = ioremap(reg, 32);
881 val = __raw_readl(virt_reg);
882 iounmap(virt_reg);
883
884 return val;
885}
886
developerfd40db22021-04-29 10:08:25 +0800887int dbg_regs_read(struct seq_file *seq, void *v)
888{
889 struct mtk_eth *eth = g_eth;
890
developer77f3fd42021-10-05 15:16:05 +0800891 seq_puts(seq, " <<DEBUG REG DUMP>>\n");
892
893 seq_printf(seq, "| FE_INT_STA : %08x |\n",
developer8051e042022-04-08 13:26:36 +0800894 mtk_r32(eth, MTK_FE_INT_STATUS));
developer089e8852022-09-28 14:43:46 +0800895 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
896 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer77f3fd42021-10-05 15:16:05 +0800897 seq_printf(seq, "| FE_INT_STA2 : %08x |\n",
developer8051e042022-04-08 13:26:36 +0800898 mtk_r32(eth, MTK_FE_INT_STATUS2));
developer77f3fd42021-10-05 15:16:05 +0800899
developerfd40db22021-04-29 10:08:25 +0800900 seq_printf(seq, "| PSE_FQFC_CFG : %08x |\n",
901 mtk_r32(eth, MTK_PSE_FQFC_CFG));
902 seq_printf(seq, "| PSE_IQ_STA1 : %08x |\n",
903 mtk_r32(eth, MTK_PSE_IQ_STA(0)));
904 seq_printf(seq, "| PSE_IQ_STA2 : %08x |\n",
905 mtk_r32(eth, MTK_PSE_IQ_STA(1)));
906
developer089e8852022-09-28 14:43:46 +0800907 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
908 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developerfd40db22021-04-29 10:08:25 +0800909 seq_printf(seq, "| PSE_IQ_STA3 : %08x |\n",
910 mtk_r32(eth, MTK_PSE_IQ_STA(2)));
911 seq_printf(seq, "| PSE_IQ_STA4 : %08x |\n",
912 mtk_r32(eth, MTK_PSE_IQ_STA(3)));
developer77f3fd42021-10-05 15:16:05 +0800913 seq_printf(seq, "| PSE_IQ_STA5 : %08x |\n",
914 mtk_r32(eth, MTK_PSE_IQ_STA(4)));
developer089e8852022-09-28 14:43:46 +0800915 seq_printf(seq, "| PSE_IQ_STA6 : %08x |\n",
916 mtk_r32(eth, MTK_PSE_IQ_STA(5)));
917 seq_printf(seq, "| PSE_IQ_STA7 : %08x |\n",
918 mtk_r32(eth, MTK_PSE_IQ_STA(6)));
919 seq_printf(seq, "| PSE_IQ_STA8 : %08x |\n",
920 mtk_r32(eth, MTK_PSE_IQ_STA(7)));
developerfd40db22021-04-29 10:08:25 +0800921 }
922
923 seq_printf(seq, "| PSE_OQ_STA1 : %08x |\n",
924 mtk_r32(eth, MTK_PSE_OQ_STA(0)));
925 seq_printf(seq, "| PSE_OQ_STA2 : %08x |\n",
926 mtk_r32(eth, MTK_PSE_OQ_STA(1)));
927
developer089e8852022-09-28 14:43:46 +0800928 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
929 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developerfd40db22021-04-29 10:08:25 +0800930 seq_printf(seq, "| PSE_OQ_STA3 : %08x |\n",
931 mtk_r32(eth, MTK_PSE_OQ_STA(2)));
932 seq_printf(seq, "| PSE_OQ_STA4 : %08x |\n",
933 mtk_r32(eth, MTK_PSE_OQ_STA(3)));
developer77f3fd42021-10-05 15:16:05 +0800934 seq_printf(seq, "| PSE_OQ_STA5 : %08x |\n",
935 mtk_r32(eth, MTK_PSE_OQ_STA(4)));
developer089e8852022-09-28 14:43:46 +0800936 seq_printf(seq, "| PSE_OQ_STA6 : %08x |\n",
937 mtk_r32(eth, MTK_PSE_OQ_STA(5)));
938 seq_printf(seq, "| PSE_OQ_STA7 : %08x |\n",
939 mtk_r32(eth, MTK_PSE_OQ_STA(6)));
940 seq_printf(seq, "| PSE_OQ_STA8 : %08x |\n",
941 mtk_r32(eth, MTK_PSE_OQ_STA(7)));
developerfd40db22021-04-29 10:08:25 +0800942 }
943
developer77f3fd42021-10-05 15:16:05 +0800944 seq_printf(seq, "| PDMA_CRX_IDX : %08x |\n",
945 mtk_r32(eth, MTK_PRX_CRX_IDX0));
946 seq_printf(seq, "| PDMA_DRX_IDX : %08x |\n",
947 mtk_r32(eth, MTK_PRX_DRX_IDX0));
948 seq_printf(seq, "| QDMA_CTX_IDX : %08x |\n",
949 mtk_r32(eth, MTK_QTX_CTX_PTR));
950 seq_printf(seq, "| QDMA_DTX_IDX : %08x |\n",
951 mtk_r32(eth, MTK_QTX_DTX_PTR));
developerfd40db22021-04-29 10:08:25 +0800952 seq_printf(seq, "| QDMA_FQ_CNT : %08x |\n",
953 mtk_r32(eth, MTK_QDMA_FQ_CNT));
developer089e8852022-09-28 14:43:46 +0800954 seq_printf(seq, "| QDMA_FWD_CNT : %08x |\n",
955 mtk_r32(eth, MTK_QDMA_FWD_CNT));
956 seq_printf(seq, "| QDMA_FSM : %08x |\n",
957 mtk_r32(eth, MTK_QDMA_FSM));
developerfd40db22021-04-29 10:08:25 +0800958 seq_printf(seq, "| FE_PSE_FREE : %08x |\n",
959 mtk_r32(eth, MTK_FE_PSE_FREE));
960 seq_printf(seq, "| FE_DROP_FQ : %08x |\n",
961 mtk_r32(eth, MTK_FE_DROP_FQ));
962 seq_printf(seq, "| FE_DROP_FC : %08x |\n",
963 mtk_r32(eth, MTK_FE_DROP_FC));
964 seq_printf(seq, "| FE_DROP_PPE : %08x |\n",
965 mtk_r32(eth, MTK_FE_DROP_PPE));
966 seq_printf(seq, "| GDM1_IG_CTRL : %08x |\n",
967 mtk_r32(eth, MTK_GDMA_FWD_CFG(0)));
968 seq_printf(seq, "| GDM2_IG_CTRL : %08x |\n",
969 mtk_r32(eth, MTK_GDMA_FWD_CFG(1)));
developer089e8852022-09-28 14:43:46 +0800970 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
971 seq_printf(seq, "| GDM3_IG_CTRL : %08x |\n",
972 mtk_r32(eth, MTK_GDMA_FWD_CFG(2)));
973 }
developerfd40db22021-04-29 10:08:25 +0800974 seq_printf(seq, "| MAC_P1_MCR : %08x |\n",
975 mtk_r32(eth, MTK_MAC_MCR(0)));
976 seq_printf(seq, "| MAC_P2_MCR : %08x |\n",
977 mtk_r32(eth, MTK_MAC_MCR(1)));
developer089e8852022-09-28 14:43:46 +0800978 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
979 seq_printf(seq, "| MAC_P3_MCR : %08x |\n",
980 mtk_r32(eth, MTK_MAC_MCR(2)));
981 }
developer77f3fd42021-10-05 15:16:05 +0800982 seq_printf(seq, "| MAC_P1_FSM : %08x |\n",
983 mtk_r32(eth, MTK_MAC_FSM(0)));
984 seq_printf(seq, "| MAC_P2_FSM : %08x |\n",
985 mtk_r32(eth, MTK_MAC_FSM(1)));
developer089e8852022-09-28 14:43:46 +0800986 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
987 seq_printf(seq, "| MAC_P3_FSM : %08x |\n",
988 mtk_r32(eth, MTK_MAC_FSM(2)));
989 }
developerfd40db22021-04-29 10:08:25 +0800990
developer089e8852022-09-28 14:43:46 +0800991 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
992 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developerfd40db22021-04-29 10:08:25 +0800993 seq_printf(seq, "| FE_CDM1_FSM : %08x |\n",
994 mtk_r32(eth, MTK_FE_CDM1_FSM));
995 seq_printf(seq, "| FE_CDM2_FSM : %08x |\n",
996 mtk_r32(eth, MTK_FE_CDM2_FSM));
developer77f3fd42021-10-05 15:16:05 +0800997 seq_printf(seq, "| FE_CDM3_FSM : %08x |\n",
998 mtk_r32(eth, MTK_FE_CDM3_FSM));
999 seq_printf(seq, "| FE_CDM4_FSM : %08x |\n",
1000 mtk_r32(eth, MTK_FE_CDM4_FSM));
developer089e8852022-09-28 14:43:46 +08001001 seq_printf(seq, "| FE_CDM5_FSM : %08x |\n",
1002 mtk_r32(eth, MTK_FE_CDM5_FSM));
1003 seq_printf(seq, "| FE_CDM6_FSM : %08x |\n",
1004 mtk_r32(eth, MTK_FE_CDM6_FSM));
developerfd40db22021-04-29 10:08:25 +08001005 seq_printf(seq, "| FE_GDM1_FSM : %08x |\n",
1006 mtk_r32(eth, MTK_FE_GDM1_FSM));
1007 seq_printf(seq, "| FE_GDM2_FSM : %08x |\n",
1008 mtk_r32(eth, MTK_FE_GDM2_FSM));
developer77f3fd42021-10-05 15:16:05 +08001009 seq_printf(seq, "| SGMII_EFUSE : %08x |\n",
1010 mtk_dbg_r32(MTK_SGMII_EFUSE));
1011 seq_printf(seq, "| SGMII0_RX_CNT : %08x |\n",
1012 mtk_dbg_r32(MTK_SGMII_FALSE_CARRIER_CNT(0)));
1013 seq_printf(seq, "| SGMII1_RX_CNT : %08x |\n",
1014 mtk_dbg_r32(MTK_SGMII_FALSE_CARRIER_CNT(1)));
1015 seq_printf(seq, "| WED_RTQM_GLO : %08x |\n",
1016 mtk_dbg_r32(MTK_WED_RTQM_GLO_CFG));
developerfd40db22021-04-29 10:08:25 +08001017 }
1018
developer8051e042022-04-08 13:26:36 +08001019 mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS);
developer089e8852022-09-28 14:43:46 +08001020 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1021 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer8051e042022-04-08 13:26:36 +08001022 mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS2);
developer77f3fd42021-10-05 15:16:05 +08001023
developerfd40db22021-04-29 10:08:25 +08001024 return 0;
1025}
1026
1027static int dbg_regs_open(struct inode *inode, struct file *file)
1028{
1029 return single_open(file, dbg_regs_read, 0);
1030}
1031
/* proc file_operations for the read-only FE/PSE/QDMA register dump entry */
static const struct file_operations dbg_regs_fops = {
	.owner = THIS_MODULE,
	.open = dbg_regs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
1039
developere9356982022-07-04 09:03:20 +08001040void hw_lro_stats_update(u32 ring_no, struct mtk_rx_dma_v2 *rxd)
developer77d03a72021-06-06 00:06:00 +08001041{
developere9356982022-07-04 09:03:20 +08001042 struct mtk_eth *eth = g_eth;
developer77d03a72021-06-06 00:06:00 +08001043 u32 idx, agg_cnt, agg_size;
1044
developer089e8852022-09-28 14:43:46 +08001045 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1046 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developere9356982022-07-04 09:03:20 +08001047 idx = ring_no - 4;
1048 agg_cnt = RX_DMA_GET_AGG_CNT_V2(rxd->rxd6);
1049 } else {
1050 idx = ring_no - 1;
1051 agg_cnt = RX_DMA_GET_AGG_CNT(rxd->rxd2);
1052 }
developer77d03a72021-06-06 00:06:00 +08001053
1054 agg_size = RX_DMA_GET_PLEN0(rxd->rxd2);
1055
1056 hw_lro_agg_size_cnt[idx][agg_size / 5000]++;
1057 hw_lro_agg_num_cnt[idx][agg_cnt]++;
1058 hw_lro_tot_flush_cnt[idx]++;
1059 hw_lro_tot_agg_cnt[idx] += agg_cnt;
1060}
1061
developere9356982022-07-04 09:03:20 +08001062void hw_lro_flush_stats_update(u32 ring_no, struct mtk_rx_dma_v2 *rxd)
developer77d03a72021-06-06 00:06:00 +08001063{
developere9356982022-07-04 09:03:20 +08001064 struct mtk_eth *eth = g_eth;
developer77d03a72021-06-06 00:06:00 +08001065 u32 idx, flush_reason;
1066
developer089e8852022-09-28 14:43:46 +08001067 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1068 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developere9356982022-07-04 09:03:20 +08001069 idx = ring_no - 4;
1070 flush_reason = RX_DMA_GET_FLUSH_RSN_V2(rxd->rxd6);
1071 } else {
1072 idx = ring_no - 1;
1073 flush_reason = RX_DMA_GET_REV(rxd->rxd2);
1074 }
developer77d03a72021-06-06 00:06:00 +08001075
1076 if ((flush_reason & 0x7) == MTK_HW_LRO_AGG_FLUSH)
1077 hw_lro_agg_flush_cnt[idx]++;
1078 else if ((flush_reason & 0x7) == MTK_HW_LRO_AGE_FLUSH)
1079 hw_lro_age_flush_cnt[idx]++;
1080 else if ((flush_reason & 0x7) == MTK_HW_LRO_NOT_IN_SEQ_FLUSH)
1081 hw_lro_seq_flush_cnt[idx]++;
1082 else if ((flush_reason & 0x7) == MTK_HW_LRO_TIMESTAMP_FLUSH)
1083 hw_lro_timestamp_flush_cnt[idx]++;
1084 else if ((flush_reason & 0x7) == MTK_HW_LRO_NON_RULE_FLUSH)
1085 hw_lro_norule_flush_cnt[idx]++;
1086}
1087
1088ssize_t hw_lro_stats_write(struct file *file, const char __user *buffer,
1089 size_t count, loff_t *data)
1090{
1091 memset(hw_lro_agg_num_cnt, 0, sizeof(hw_lro_agg_num_cnt));
1092 memset(hw_lro_agg_size_cnt, 0, sizeof(hw_lro_agg_size_cnt));
1093 memset(hw_lro_tot_agg_cnt, 0, sizeof(hw_lro_tot_agg_cnt));
1094 memset(hw_lro_tot_flush_cnt, 0, sizeof(hw_lro_tot_flush_cnt));
1095 memset(hw_lro_agg_flush_cnt, 0, sizeof(hw_lro_agg_flush_cnt));
1096 memset(hw_lro_age_flush_cnt, 0, sizeof(hw_lro_age_flush_cnt));
1097 memset(hw_lro_seq_flush_cnt, 0, sizeof(hw_lro_seq_flush_cnt));
1098 memset(hw_lro_timestamp_flush_cnt, 0,
1099 sizeof(hw_lro_timestamp_flush_cnt));
1100 memset(hw_lro_norule_flush_cnt, 0, sizeof(hw_lro_norule_flush_cnt));
1101
1102 pr_info("clear hw lro cnt table\n");
1103
1104 return count;
1105}
1106
/* Dump the HW LRO statistics for v1 hardware (LRO rings 1-3): aggregate
 * count histogram, totals, averages, flush-length histogram and flush
 * reasons.  Counters are filled by hw_lro_stats_update() /
 * hw_lro_flush_stats_update() and cleared via hw_lro_stats_write().
 * NOTE(review): the length loop prints buckets 0..14 only; bucket [15]
 * of hw_lro_agg_size_cnt (>= 75000 bytes) is accumulated but never
 * shown — confirm whether intentional.
 */
int hw_lro_stats_read_v1(struct seq_file *seq, void *v)
{
	int i;

	seq_puts(seq, "HW LRO statistic dump:\n");

	/* Agg number count */
	seq_puts(seq, "Cnt: RING1 | RING2 | RING3 | Total\n");
	for (i = 0; i <= MTK_HW_LRO_MAX_AGG_CNT; i++) {
		seq_printf(seq, " %d : %d %d %d %d\n",
			   i, hw_lro_agg_num_cnt[0][i],
			   hw_lro_agg_num_cnt[1][i], hw_lro_agg_num_cnt[2][i],
			   hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] +
			   hw_lro_agg_num_cnt[2][i]);
	}

	/* Total agg count */
	seq_puts(seq, "Total agg: RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, " %d %d %d %d\n",
		   hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1],
		   hw_lro_tot_agg_cnt[2],
		   hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		   hw_lro_tot_agg_cnt[2]);

	/* Total flush count */
	seq_puts(seq, "Total flush: RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, " %d %d %d %d\n",
		   hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1],
		   hw_lro_tot_flush_cnt[2],
		   hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		   hw_lro_tot_flush_cnt[2]);

	/* Avg agg count (guard every division against a zero flush count) */
	seq_puts(seq, "Avg agg: RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, " %d %d %d %d\n",
		   (hw_lro_tot_flush_cnt[0]) ?
		   hw_lro_tot_agg_cnt[0] / hw_lro_tot_flush_cnt[0] : 0,
		   (hw_lro_tot_flush_cnt[1]) ?
		   hw_lro_tot_agg_cnt[1] / hw_lro_tot_flush_cnt[1] : 0,
		   (hw_lro_tot_flush_cnt[2]) ?
		   hw_lro_tot_agg_cnt[2] / hw_lro_tot_flush_cnt[2] : 0,
		   (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		    hw_lro_tot_flush_cnt[2]) ?
		   ((hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		     hw_lro_tot_agg_cnt[2]) / (hw_lro_tot_flush_cnt[0] +
		    hw_lro_tot_flush_cnt[1] + hw_lro_tot_flush_cnt[2])) : 0);

	/* Statistics of aggregation size counts (5000-byte buckets) */
	seq_puts(seq, "HW LRO flush pkt len:\n");
	seq_puts(seq, " Length | RING1 | RING2 | RING3 | Total\n");
	for (i = 0; i < 15; i++) {
		seq_printf(seq, "%d~%d: %d %d %d %d\n", i * 5000,
			   (i + 1) * 5000, hw_lro_agg_size_cnt[0][i],
			   hw_lro_agg_size_cnt[1][i], hw_lro_agg_size_cnt[2][i],
			   hw_lro_agg_size_cnt[0][i] +
			   hw_lro_agg_size_cnt[1][i] +
			   hw_lro_agg_size_cnt[2][i]);
	}

	/* Per-reason flush counters, per ring plus total */
	seq_puts(seq, "Flush reason: RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, "AGG timeout: %d %d %d %d\n",
		   hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1],
		   hw_lro_agg_flush_cnt[2],
		   (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] +
		    hw_lro_agg_flush_cnt[2]));

	seq_printf(seq, "AGE timeout: %d %d %d %d\n",
		   hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1],
		   hw_lro_age_flush_cnt[2],
		   (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] +
		    hw_lro_age_flush_cnt[2]));

	seq_printf(seq, "Not in-sequence: %d %d %d %d\n",
		   hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1],
		   hw_lro_seq_flush_cnt[2],
		   (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] +
		    hw_lro_seq_flush_cnt[2]));

	seq_printf(seq, "Timestamp: %d %d %d %d\n",
		   hw_lro_timestamp_flush_cnt[0],
		   hw_lro_timestamp_flush_cnt[1],
		   hw_lro_timestamp_flush_cnt[2],
		   (hw_lro_timestamp_flush_cnt[0] +
		    hw_lro_timestamp_flush_cnt[1] +
		    hw_lro_timestamp_flush_cnt[2]));

	seq_printf(seq, "No LRO rule: %d %d %d %d\n",
		   hw_lro_norule_flush_cnt[0],
		   hw_lro_norule_flush_cnt[1],
		   hw_lro_norule_flush_cnt[2],
		   (hw_lro_norule_flush_cnt[0] +
		    hw_lro_norule_flush_cnt[1] +
		    hw_lro_norule_flush_cnt[2]));

	return 0;
}
1203
/* Dump the HW LRO statistics for v2/v3 hardware (LRO rings 4-7).  Same
 * layout as hw_lro_stats_read_v1 but with a fourth ring column; the
 * counter tables are shared and indexed 0..3 for rings 4..7.
 * NOTE(review): as in v1, size bucket [15] (>= 75000 bytes) is collected
 * but not printed — confirm whether intentional.
 */
int hw_lro_stats_read_v2(struct seq_file *seq, void *v)
{
	int i;

	seq_puts(seq, "HW LRO statistic dump:\n");

	/* Agg number count */
	seq_puts(seq, "Cnt: RING4 | RING5 | RING6 | RING7 Total\n");
	for (i = 0; i <= MTK_HW_LRO_MAX_AGG_CNT; i++) {
		seq_printf(seq,
			   " %d : %d %d %d %d %d\n",
			   i, hw_lro_agg_num_cnt[0][i], hw_lro_agg_num_cnt[1][i],
			   hw_lro_agg_num_cnt[2][i], hw_lro_agg_num_cnt[3][i],
			   hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] +
			   hw_lro_agg_num_cnt[2][i] + hw_lro_agg_num_cnt[3][i]);
	}

	/* Total agg count */
	seq_puts(seq, "Total agg: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, " %d %d %d %d %d\n",
		   hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1],
		   hw_lro_tot_agg_cnt[2], hw_lro_tot_agg_cnt[3],
		   hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		   hw_lro_tot_agg_cnt[2] + hw_lro_tot_agg_cnt[3]);

	/* Total flush count */
	seq_puts(seq, "Total flush: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, " %d %d %d %d %d\n",
		   hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1],
		   hw_lro_tot_flush_cnt[2], hw_lro_tot_flush_cnt[3],
		   hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		   hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3]);

	/* Avg agg count (guard every division against a zero flush count) */
	seq_puts(seq, "Avg agg: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, " %d %d %d %d %d\n",
		   (hw_lro_tot_flush_cnt[0]) ?
		   hw_lro_tot_agg_cnt[0] / hw_lro_tot_flush_cnt[0] : 0,
		   (hw_lro_tot_flush_cnt[1]) ?
		   hw_lro_tot_agg_cnt[1] / hw_lro_tot_flush_cnt[1] : 0,
		   (hw_lro_tot_flush_cnt[2]) ?
		   hw_lro_tot_agg_cnt[2] / hw_lro_tot_flush_cnt[2] : 0,
		   (hw_lro_tot_flush_cnt[3]) ?
		   hw_lro_tot_agg_cnt[3] / hw_lro_tot_flush_cnt[3] : 0,
		   (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		    hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3]) ?
		   ((hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		     hw_lro_tot_agg_cnt[2] + hw_lro_tot_agg_cnt[3]) /
		    (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		     hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3])) : 0);

	/* Statistics of aggregation size counts (5000-byte buckets) */
	seq_puts(seq, "HW LRO flush pkt len:\n");
	seq_puts(seq, " Length | RING4 | RING5 | RING6 | RING7 Total\n");
	for (i = 0; i < 15; i++) {
		seq_printf(seq, "%d~%d: %d %d %d %d %d\n",
			   i * 5000, (i + 1) * 5000,
			   hw_lro_agg_size_cnt[0][i], hw_lro_agg_size_cnt[1][i],
			   hw_lro_agg_size_cnt[2][i], hw_lro_agg_size_cnt[3][i],
			   hw_lro_agg_size_cnt[0][i] +
			   hw_lro_agg_size_cnt[1][i] +
			   hw_lro_agg_size_cnt[2][i] +
			   hw_lro_agg_size_cnt[3][i]);
	}

	/* Per-reason flush counters, per ring plus total */
	seq_puts(seq, "Flush reason: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, "AGG timeout: %d %d %d %d %d\n",
		   hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1],
		   hw_lro_agg_flush_cnt[2], hw_lro_agg_flush_cnt[3],
		   (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] +
		    hw_lro_agg_flush_cnt[2] + hw_lro_agg_flush_cnt[3]));

	seq_printf(seq, "AGE timeout: %d %d %d %d %d\n",
		   hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1],
		   hw_lro_age_flush_cnt[2], hw_lro_age_flush_cnt[3],
		   (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] +
		    hw_lro_age_flush_cnt[2] + hw_lro_age_flush_cnt[3]));

	seq_printf(seq, "Not in-sequence: %d %d %d %d %d\n",
		   hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1],
		   hw_lro_seq_flush_cnt[2], hw_lro_seq_flush_cnt[3],
		   (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] +
		    hw_lro_seq_flush_cnt[2] + hw_lro_seq_flush_cnt[3]));

	seq_printf(seq, "Timestamp: %d %d %d %d %d\n",
		   hw_lro_timestamp_flush_cnt[0],
		   hw_lro_timestamp_flush_cnt[1],
		   hw_lro_timestamp_flush_cnt[2],
		   hw_lro_timestamp_flush_cnt[3],
		   (hw_lro_timestamp_flush_cnt[0] +
		    hw_lro_timestamp_flush_cnt[1] +
		    hw_lro_timestamp_flush_cnt[2] +
		    hw_lro_timestamp_flush_cnt[3]));

	seq_printf(seq, "No LRO rule: %d %d %d %d %d\n",
		   hw_lro_norule_flush_cnt[0],
		   hw_lro_norule_flush_cnt[1],
		   hw_lro_norule_flush_cnt[2],
		   hw_lro_norule_flush_cnt[3],
		   (hw_lro_norule_flush_cnt[0] +
		    hw_lro_norule_flush_cnt[1] +
		    hw_lro_norule_flush_cnt[2] +
		    hw_lro_norule_flush_cnt[3]));

	return 0;
}
1310
1311int hw_lro_stats_read_wrapper(struct seq_file *seq, void *v)
1312{
1313 struct mtk_eth *eth = g_eth;
1314
developer089e8852022-09-28 14:43:46 +08001315 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1316 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer77d03a72021-06-06 00:06:00 +08001317 hw_lro_stats_read_v2(seq, v);
1318 else
1319 hw_lro_stats_read_v1(seq, v);
1320
1321 return 0;
1322}
1323
/* seq_file open hook: bind the version-dispatching stats reader. */
static int hw_lro_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, hw_lro_stats_read_wrapper, NULL);
}
1328
/* proc file_operations for hw_lro_stats: read dumps, any write clears */
static const struct file_operations hw_lro_stats_fops = {
	.owner = THIS_MODULE,
	.open = hw_lro_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = hw_lro_stats_write,
	.release = single_release
};
1337
developer77d03a72021-06-06 00:06:00 +08001338int hwlro_agg_cnt_ctrl(int cnt)
1339{
1340 int i;
1341
1342 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1343 SET_PDMA_RXRING_MAX_AGG_CNT(g_eth, i, cnt);
1344
1345 return 0;
1346}
1347
1348int hwlro_agg_time_ctrl(int time)
1349{
1350 int i;
1351
1352 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1353 SET_PDMA_RXRING_AGG_TIME(g_eth, i, time);
1354
1355 return 0;
1356}
1357
1358int hwlro_age_time_ctrl(int time)
1359{
1360 int i;
1361
1362 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1363 SET_PDMA_RXRING_AGE_TIME(g_eth, i, time);
1364
1365 return 0;
1366}
1367
/* Program the global PDMA LRO bandwidth threshold (single register,
 * not per-ring).
 */
int hwlro_threshold_ctrl(int bandwidth)
{
	SET_PDMA_LRO_BW_THRESHOLD(g_eth, bandwidth);

	return 0;
}
1374
1375int hwlro_ring_enable_ctrl(int enable)
1376{
1377 int i;
1378
1379 pr_info("[%s] %s HW LRO rings\n", __func__, (enable) ? "Enable" : "Disable");
1380
1381 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1382 SET_PDMA_RXRING_VALID(g_eth, i, enable);
1383
1384 return 0;
1385}
1386
1387int hwlro_stats_enable_ctrl(int enable)
1388{
1389 pr_info("[%s] %s HW LRO statistics\n", __func__, (enable) ? "Enable" : "Disable");
1390 mtk_hwlro_stats_ebl = enable;
1391
1392 return 0;
1393}
1394
/* Command dispatch table for hw_lro_auto_tlb_write: the first number
 * written selects the handler index, the second is its argument.
 */
static const mtk_lro_dbg_func lro_dbg_func[] = {
	[0] = hwlro_agg_cnt_ctrl,
	[1] = hwlro_agg_time_ctrl,
	[2] = hwlro_age_time_ctrl,
	[3] = hwlro_threshold_ctrl,
	[4] = hwlro_ring_enable_ctrl,
	[5] = hwlro_stats_enable_ctrl,
};
1403
1404ssize_t hw_lro_auto_tlb_write(struct file *file, const char __user *buffer,
1405 size_t count, loff_t *data)
1406{
1407 char buf[32];
1408 char *p_buf;
1409 char *p_token = NULL;
1410 char *p_delimiter = " \t";
1411 long x = 0, y = 0;
developer4c32b7a2021-11-13 16:46:43 +08001412 u32 len = count;
developer77d03a72021-06-06 00:06:00 +08001413 int ret;
1414
1415 if (len >= sizeof(buf)) {
1416 pr_info("Input handling fail!\n");
developer77d03a72021-06-06 00:06:00 +08001417 return -1;
1418 }
1419
1420 if (copy_from_user(buf, buffer, len))
1421 return -EFAULT;
1422
1423 buf[len] = '\0';
1424
1425 p_buf = buf;
1426 p_token = strsep(&p_buf, p_delimiter);
1427 if (!p_token)
1428 x = 0;
1429 else
1430 ret = kstrtol(p_token, 10, &x);
1431
1432 p_token = strsep(&p_buf, "\t\n ");
1433 if (p_token)
1434 ret = kstrtol(p_token, 10, &y);
1435
1436 if (lro_dbg_func[x] && (ARRAY_SIZE(lro_dbg_func) > x))
1437 (*lro_dbg_func[x]) (y);
1438
1439 return count;
1440}
1441
/* Dump one entry of the v1 HW LRO auto-learn table to the seq_file.
 * Each entry is 9 words, read back through the CF8 (address) / CFC (data)
 * indirect access pair and decoded via struct mtk_lro_alt_v1.
 * NOTE(review): the index > 4 adjustment and the Act/LRU split at
 * index >= 4 mirror the hardware's table layout — confirm against the
 * NETSYS v1 programming guide.
 */
void hw_lro_auto_tlb_dump_v1(struct seq_file *seq, u32 index)
{
	int i;
	struct mtk_lro_alt_v1 alt;
	__be32 addr;
	u32 tlb_info[9];
	u32 dw_len, cnt, priority;
	u32 entry;

	if (index > 4)
		index = index - 1;
	entry = (index * 9) + 1;

	/* read valid entries of the auto-learn table */
	mtk_w32(g_eth, entry, MTK_FE_ALT_CF8);

	for (i = 0; i < 9; i++)
		tlb_info[i] = mtk_r32(g_eth, MTK_FE_ALT_SEQ_CFC);

	memcpy(&alt, tlb_info, sizeof(struct mtk_lro_alt_v1));

	dw_len = alt.alt_info7.dw_len;
	cnt = alt.alt_info6.cnt;

	/* priority metric follows the configured ALT ranking mode */
	if (mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW0) & MTK_LRO_ALT_PKT_CNT_MODE)
		priority = cnt;		/* packet count */
	else
		priority = dw_len;	/* byte count */

	/* dump valid entries of the auto-learn table */
	if (index >= 4)
		seq_printf(seq, "\n===== TABLE Entry: %d (Act) =====\n", index);
	else
		seq_printf(seq, "\n===== TABLE Entry: %d (LRU) =====\n", index);

	if (alt.alt_info8.ipv4) {
		addr = htonl(alt.alt_info1.sip0);
		seq_printf(seq, "SIP = %pI4 (IPv4)\n", &addr);
	} else {
		seq_printf(seq, "SIP = %08X:%08X:%08X:%08X (IPv6)\n",
			   alt.alt_info4.sip3, alt.alt_info3.sip2,
			   alt.alt_info2.sip1, alt.alt_info1.sip0);
	}

	seq_printf(seq, "DIP_ID = %d\n", alt.alt_info8.dip_id);
	seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
		   alt.alt_info0.stp, alt.alt_info0.dtp);
	seq_printf(seq, "VLAN_VID_VLD = %d\n", alt.alt_info6.vlan_vid_vld);
	seq_printf(seq, "VLAN1 = %d | VLAN2 = %d | VLAN3 = %d | VLAN4 =%d\n",
		   (alt.alt_info5.vlan_vid0 & 0xfff),
		   ((alt.alt_info5.vlan_vid0 >> 12) & 0xfff),
		   ((alt.alt_info6.vlan_vid1 << 8) |
		    ((alt.alt_info5.vlan_vid0 >> 24) & 0xfff)),
		   ((alt.alt_info6.vlan_vid1 >> 4) & 0xfff));
	seq_printf(seq, "TPUT = %d | FREQ = %d\n", dw_len, cnt);
	seq_printf(seq, "PRIORITY = %d\n", priority);
}
1499
/* Dump one entry of the v2 HW LRO auto-learn table to the seq_file.
 * The entry is read back word-by-word through the ALT debug registers and
 * decoded via struct mtk_lro_alt_v2; only valid entries produce output.
 * NOTE(review): tlb_info is sized 12 but only 11 words are read
 * (tlb_info[11] stays uninitialized and unused) — confirm the entry
 * width against the NETSYS v2 programming guide.
 */
void hw_lro_auto_tlb_dump_v2(struct seq_file *seq, u32 index)
{
	int i;
	struct mtk_lro_alt_v2 alt;
	u32 score = 0, ipv4 = 0;
	u32 ipv6[4] = { 0 };
	u32 tlb_info[12];

	/* read valid entries of the auto-learn table */
	mtk_w32(g_eth, index << MTK_LRO_ALT_INDEX_OFFSET, MTK_LRO_ALT_DBG);

	for (i = 0; i < 11; i++)
		tlb_info[i] = mtk_r32(g_eth, MTK_LRO_ALT_DBG_DATA);

	memcpy(&alt, tlb_info, sizeof(struct mtk_lro_alt_v2));

	/* score is computed but never printed below; kept for parity with
	 * the register's ranking-mode semantics
	 */
	if (mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW0) & MTK_LRO_ALT_PKT_CNT_MODE)
		score = 1;	/* packet count */
	else
		score = 0;	/* byte count */

	/* dump valid entries of the auto-learn table */
	if (alt.alt_info0.valid) {
		if (index < 5)
			seq_printf(seq,
				   "\n===== TABLE Entry: %d (onging) =====\n",
				   index);
		else
			seq_printf(seq,
				   "\n===== TABLE Entry: %d (candidate) =====\n",
				   index);

		if (alt.alt_info1.v4_valid) {
			ipv4 = (alt.alt_info4.sip0_h << 23) |
				alt.alt_info5.sip0_l;
			seq_printf(seq, "SIP = 0x%x: (IPv4)\n", ipv4);

			ipv4 = (alt.alt_info8.dip0_h << 23) |
				alt.alt_info9.dip0_l;
			seq_printf(seq, "DIP = 0x%x: (IPv4)\n", ipv4);
		} else if (alt.alt_info1.v6_valid) {
			/* each 32-bit word is split across two table fields */
			ipv6[3] = (alt.alt_info1.sip3_h << 23) |
				  (alt.alt_info2.sip3_l << 9);
			ipv6[2] = (alt.alt_info2.sip2_h << 23) |
				  (alt.alt_info3.sip2_l << 9);
			ipv6[1] = (alt.alt_info3.sip1_h << 23) |
				  (alt.alt_info4.sip1_l << 9);
			ipv6[0] = (alt.alt_info4.sip0_h << 23) |
				  (alt.alt_info5.sip0_l << 9);
			seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
				   ipv6[3], ipv6[2], ipv6[1], ipv6[0]);

			ipv6[3] = (alt.alt_info5.dip3_h << 23) |
				  (alt.alt_info6.dip3_l << 9);
			ipv6[2] = (alt.alt_info6.dip2_h << 23) |
				  (alt.alt_info7.dip2_l << 9);
			ipv6[1] = (alt.alt_info7.dip1_h << 23) |
				  (alt.alt_info8.dip1_l << 9);
			ipv6[0] = (alt.alt_info8.dip0_h << 23) |
				  (alt.alt_info9.dip0_l << 9);
			seq_printf(seq, "DIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
				   ipv6[3], ipv6[2], ipv6[1], ipv6[0]);
		}

		seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
			   (alt.alt_info9.sp_h << 7) | (alt.alt_info10.sp_l),
			   alt.alt_info10.dp);
	}
}
1569
/* Show usage help, dump every auto-learn table entry (v2 or v1 format
 * depending on SoC caps), then decode the per-ring agg/age/threshold
 * settings from the LRO control registers.
 */
int hw_lro_auto_tlb_read(struct seq_file *seq, void *v)
{
	int i;
	u32 reg_val;
	u32 reg_op1, reg_op2, reg_op3, reg_op4;
	u32 agg_cnt, agg_time, age_time;

	seq_puts(seq, "Usage of /proc/mtketh/hw_lro_auto_tlb:\n");
	seq_puts(seq, "echo [function] [setting] > /proc/mtketh/hw_lro_auto_tlb\n");
	seq_puts(seq, "Functions:\n");
	seq_puts(seq, "[0] = hwlro_agg_cnt_ctrl\n");
	seq_puts(seq, "[1] = hwlro_agg_time_ctrl\n");
	seq_puts(seq, "[2] = hwlro_age_time_ctrl\n");
	seq_puts(seq, "[3] = hwlro_threshold_ctrl\n");
	seq_puts(seq, "[4] = hwlro_ring_enable_ctrl\n");
	seq_puts(seq, "[5] = hwlro_stats_enable_ctrl\n\n");

	if (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V3)) {
		/* v2/v3: entries 1..8, each dumped via the ALT debug port */
		for (i = 1; i <= 8; i++)
			hw_lro_auto_tlb_dump_v2(seq, i);
	} else {
		/* Read valid entries of the auto-learn table */
		mtk_w32(g_eth, 0, MTK_FE_ALT_CF8);
		reg_val = mtk_r32(g_eth, MTK_FE_ALT_SEQ_CFC);

		seq_printf(seq,
			   "HW LRO Auto-learn Table: (MTK_FE_ALT_SEQ_CFC=0x%x)\n",
			   reg_val);

		/* bit i of reg_val marks entry i as valid */
		for (i = 7; i >= 0; i--) {
			if (reg_val & (1 << i))
				hw_lro_auto_tlb_dump_v1(seq, i);
		}
	}

	/* Read the agg_time/age_time/agg_cnt of LRO rings */
	seq_puts(seq, "\nHW LRO Ring Settings\n");

	for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
		reg_op1 = mtk_r32(g_eth, MTK_LRO_CTRL_DW1_CFG(i));
		reg_op2 = mtk_r32(g_eth, MTK_LRO_CTRL_DW2_CFG(i));
		reg_op3 = mtk_r32(g_eth, MTK_LRO_CTRL_DW3_CFG(i));
		reg_op4 = mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW2);

		/* fields are split across DW1/DW2/DW3; reassemble them */
		agg_cnt =
		    ((reg_op3 & 0x3) << 6) |
		    ((reg_op2 >> MTK_LRO_RING_AGG_CNT_L_OFFSET) & 0x3f);
		agg_time = (reg_op2 >> MTK_LRO_RING_AGG_TIME_OFFSET) & 0xffff;
		age_time =
		    ((reg_op2 & 0x3f) << 10) |
		    ((reg_op1 >> MTK_LRO_RING_AGE_TIME_L_OFFSET) & 0x3ff);
		seq_printf(seq,
			   "Ring[%d]: MAX_AGG_CNT=%d, AGG_TIME=%d, AGE_TIME=%d, Threshold=%d\n",
			   (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V1)) ? i : i+3,
			   agg_cnt, agg_time, age_time, reg_op4);
	}

	seq_puts(seq, "\n");

	return 0;
}
1632
/* seq_file open hook: bind the auto-learn-table reader. */
static int hw_lro_auto_tlb_open(struct inode *inode, struct file *file)
{
	return single_open(file, hw_lro_auto_tlb_read, NULL);
}
1637
/* proc file_operations for hw_lro_auto_tlb: read dumps the table,
 * write dispatches tuning commands (see hw_lro_auto_tlb_write)
 */
static const struct file_operations hw_lro_auto_tlb_fops = {
	.owner = THIS_MODULE,
	.open = hw_lro_auto_tlb_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = hw_lro_auto_tlb_write,
	.release = single_release
};
developerfd40db22021-04-29 10:08:25 +08001646
developer8051e042022-04-08 13:26:36 +08001647int reset_event_read(struct seq_file *seq, void *v)
1648{
1649 struct mtk_eth *eth = g_eth;
1650 struct mtk_reset_event reset_event = eth->reset_event;
1651
1652 seq_printf(seq, "[Event] [Count]\n");
1653 seq_printf(seq, " FQ Empty: %d\n",
1654 reset_event.count[MTK_EVENT_FQ_EMPTY]);
1655 seq_printf(seq, " TSO Fail: %d\n",
1656 reset_event.count[MTK_EVENT_TSO_FAIL]);
1657 seq_printf(seq, " TSO Illegal: %d\n",
1658 reset_event.count[MTK_EVENT_TSO_ILLEGAL]);
1659 seq_printf(seq, " TSO Align: %d\n",
1660 reset_event.count[MTK_EVENT_TSO_ALIGN]);
1661 seq_printf(seq, " RFIFO OV: %d\n",
1662 reset_event.count[MTK_EVENT_RFIFO_OV]);
1663 seq_printf(seq, " RFIFO UF: %d\n",
1664 reset_event.count[MTK_EVENT_RFIFO_UF]);
1665 seq_printf(seq, " Force: %d\n",
1666 reset_event.count[MTK_EVENT_FORCE]);
1667 seq_printf(seq, "----------------------------\n");
1668 seq_printf(seq, " Warm Cnt: %d\n",
1669 reset_event.count[MTK_EVENT_WARM_CNT]);
1670 seq_printf(seq, " Cold Cnt: %d\n",
1671 reset_event.count[MTK_EVENT_COLD_CNT]);
1672 seq_printf(seq, " Total Cnt: %d\n",
1673 reset_event.count[MTK_EVENT_TOTAL_CNT]);
1674
1675 return 0;
1676}
1677
1678static int reset_event_open(struct inode *inode, struct file *file)
1679{
1680 return single_open(file, reset_event_read, 0);
1681}
1682
1683ssize_t reset_event_write(struct file *file, const char __user *buffer,
1684 size_t count, loff_t *data)
1685{
1686 struct mtk_eth *eth = g_eth;
1687 struct mtk_reset_event *reset_event = &eth->reset_event;
1688
1689 memset(reset_event, 0, sizeof(struct mtk_reset_event));
1690 pr_info("MTK reset event counter is cleared !\n");
1691
1692 return count;
1693}
1694
/* proc file_operations for reset_event: read dumps counters, write clears */
static const struct file_operations reset_event_fops = {
	.owner = THIS_MODULE,
	.open = reset_event_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = reset_event_write,
	.release = single_release
};
1703
1704
developerfd40db22021-04-29 10:08:25 +08001705struct proc_dir_entry *proc_reg_dir;
developer8051e042022-04-08 13:26:36 +08001706static struct proc_dir_entry *proc_esw_cnt, *proc_dbg_regs, *proc_reset_event;
developerfd40db22021-04-29 10:08:25 +08001707
/* Create the /proc/mtketh directory and its debug entries.  Stashes the
 * device handle in the file-global g_eth used by all readers/writers.
 * Entry-creation failures are logged but not fatal; always returns 0.
 * NOTE(review): the proc_mkdir() result is not checked — if it fails,
 * the entries below are created with a NULL parent; confirm acceptable.
 */
int debug_proc_init(struct mtk_eth *eth)
{
	g_eth = eth;

	/* create the parent dir once; it survives repeated init calls */
	if (!proc_reg_dir)
		proc_reg_dir = proc_mkdir(PROCREG_DIR, NULL);

	proc_tx_ring =
	    proc_create(PROCREG_TXRING, 0, proc_reg_dir, &tx_ring_fops);
	if (!proc_tx_ring)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_TXRING);

	proc_hwtx_ring =
	    proc_create(PROCREG_HWTXRING, 0, proc_reg_dir, &hwtx_ring_fops);
	if (!proc_hwtx_ring)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_HWTXRING);

	proc_rx_ring =
	    proc_create(PROCREG_RXRING, 0, proc_reg_dir, &rx_ring_fops);
	if (!proc_rx_ring)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RXRING);

	proc_esw_cnt =
	    proc_create(PROCREG_ESW_CNT, 0, proc_reg_dir, &switch_count_fops);
	if (!proc_esw_cnt)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_ESW_CNT);

	proc_dbg_regs =
	    proc_create(PROCREG_DBG_REGS, 0, proc_reg_dir, &dbg_regs_fops);
	if (!proc_dbg_regs)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_DBG_REGS);

	/* LRO entries only exist when the hardware supports/enables HW LRO */
	if (g_eth->hwlro) {
		proc_hw_lro_stats =
		    proc_create(PROCREG_HW_LRO_STATS, 0, proc_reg_dir,
				&hw_lro_stats_fops);
		if (!proc_hw_lro_stats)
			pr_info("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_STATS);

		proc_hw_lro_auto_tlb =
		    proc_create(PROCREG_HW_LRO_AUTO_TLB, 0, proc_reg_dir,
				&hw_lro_auto_tlb_fops);
		if (!proc_hw_lro_auto_tlb)
			pr_info("!! FAIL to create %s PROC !!\n",
				PROCREG_HW_LRO_AUTO_TLB);
	}

	proc_reset_event =
	    proc_create(PROCREG_RESET_EVENT, 0, proc_reg_dir, &reset_event_fops);
	if (!proc_reset_event)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RESET_EVENT);

	return 0;
}
1762
1763void debug_proc_exit(void)
1764{
1765 if (proc_tx_ring)
1766 remove_proc_entry(PROCREG_TXRING, proc_reg_dir);
developer8051e042022-04-08 13:26:36 +08001767 if (proc_hwtx_ring)
1768 remove_proc_entry(PROCREG_HWTXRING, proc_reg_dir);
developerfd40db22021-04-29 10:08:25 +08001769 if (proc_rx_ring)
1770 remove_proc_entry(PROCREG_RXRING, proc_reg_dir);
1771
1772 if (proc_esw_cnt)
1773 remove_proc_entry(PROCREG_ESW_CNT, proc_reg_dir);
1774
1775 if (proc_reg_dir)
1776 remove_proc_entry(PROCREG_DIR, 0);
1777
1778 if (proc_dbg_regs)
1779 remove_proc_entry(PROCREG_DBG_REGS, proc_reg_dir);
developer77d03a72021-06-06 00:06:00 +08001780
1781 if (g_eth->hwlro) {
1782 if (proc_hw_lro_stats)
1783 remove_proc_entry(PROCREG_HW_LRO_STATS, proc_reg_dir);
1784
1785 if (proc_hw_lro_auto_tlb)
1786 remove_proc_entry(PROCREG_HW_LRO_AUTO_TLB, proc_reg_dir);
1787 }
developer8051e042022-04-08 13:26:36 +08001788
1789 if (proc_reset_event)
1790 remove_proc_entry(PROCREG_RESET_EVENT, proc_reg_dir);
developerfd40db22021-04-29 10:08:25 +08001791}
1792