blob: 968f6911e479f475f284bcedb345b2f6dbaf9f57 [file] [log] [blame]
developerfd40db22021-04-29 10:08:25 +08001/*
2 * Copyright (C) 2018 MediaTek Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
14 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
15 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
16 */
17
18#include <linux/trace_seq.h>
19#include <linux/seq_file.h>
20#include <linux/proc_fs.h>
21#include <linux/u64_stats_sync.h>
22#include <linux/dma-mapping.h>
23#include <linux/netdevice.h>
24#include <linux/ctype.h>
25#include <linux/debugfs.h>
26#include <linux/of_mdio.h>
developer089e8852022-09-28 14:43:46 +080027#include <linux/of_address.h>
developerfd40db22021-04-29 10:08:25 +080028
29#include "mtk_eth_soc.h"
30#include "mtk_eth_dbg.h"
developer8051e042022-04-08 13:26:36 +080031#include "mtk_eth_reset.h"
developerfd40db22021-04-29 10:08:25 +080032
/* Per-ring HW LRO statistics, indexed by LRO RX ring.  Filled by the
 * LRO debug hooks and dumped through the proc entries below.
 */
u32 hw_lro_agg_num_cnt[MTK_HW_LRO_RING_NUM][MTK_HW_LRO_MAX_AGG_CNT + 1];
u32 hw_lro_agg_size_cnt[MTK_HW_LRO_RING_NUM][16];
u32 hw_lro_tot_agg_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_tot_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_agg_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_age_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_seq_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_timestamp_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_norule_flush_cnt[MTK_HW_LRO_RING_NUM];
/* Non-zero enables HW LRO statistics collection. */
u32 mtk_hwlro_stats_ebl;
static struct proc_dir_entry *proc_hw_lro_stats, *proc_hw_lro_auto_tlb;
typedef int (*mtk_lro_dbg_func) (int par);

/* Debug state: debugfs root plus an optional MMIO mapping of the
 * embedded switch ("switch0" DT node).  When direct_access is set,
 * switch registers are read/written via MMIO instead of indirect MDIO.
 */
struct mtk_eth_debug {
	struct dentry *root;
	void __iomem *base;
	int direct_access;
};

/* Global ethernet context used by the proc/debugfs show callbacks. */
struct mtk_eth *g_eth;

struct mtk_eth_debug eth_debug;
55
developer089e8852022-09-28 14:43:46 +080056int mt798x_iomap(void)
57{
58 struct device_node *np = NULL;
59
60 np = of_find_node_by_name(NULL, "switch0");
61 if (np) {
62 eth_debug.base = of_iomap(np, 0);
63 if (!eth_debug.base) {
64 pr_err("of_iomap failed\n");
65 of_node_put(np);
66 return -ENOMEM;
67 }
68
69 of_node_put(np);
70 eth_debug.direct_access = 1;
71 }
72
73 return 0;
74}
75
76int mt798x_iounmap(void)
77{
78 eth_debug.direct_access = 0;
79 if (eth_debug.base)
80 iounmap(eth_debug.base);
81
82 return 0;
83}
84
developer3957a912021-05-13 16:44:31 +080085void mt7530_mdio_w32(struct mtk_eth *eth, u16 reg, u32 val)
developerfd40db22021-04-29 10:08:25 +080086{
87 mutex_lock(&eth->mii_bus->mdio_lock);
88
developer089e8852022-09-28 14:43:46 +080089 if (eth_debug.direct_access)
90 __raw_writel(val, eth_debug.base + reg);
91 else {
92 _mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
93 _mtk_mdio_write(eth, 0x1f, (reg >> 2) & 0xf, val & 0xffff);
94 _mtk_mdio_write(eth, 0x1f, 0x10, val >> 16);
95 }
developerfd40db22021-04-29 10:08:25 +080096
97 mutex_unlock(&eth->mii_bus->mdio_lock);
98}
99
100u32 mt7530_mdio_r32(struct mtk_eth *eth, u32 reg)
101{
102 u16 high, low;
developer089e8852022-09-28 14:43:46 +0800103 u32 ret;
developerfd40db22021-04-29 10:08:25 +0800104
105 mutex_lock(&eth->mii_bus->mdio_lock);
106
developer089e8852022-09-28 14:43:46 +0800107 if (eth_debug.direct_access) {
108 ret = __raw_readl(eth_debug.base + reg);
109 mutex_unlock(&eth->mii_bus->mdio_lock);
110 return ret;
111 }
developerfd40db22021-04-29 10:08:25 +0800112 _mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
113 low = _mtk_mdio_read(eth, 0x1f, (reg >> 2) & 0xf);
114 high = _mtk_mdio_read(eth, 0x1f, 0x10);
115
116 mutex_unlock(&eth->mii_bus->mdio_lock);
117
118 return (high << 16) | (low & 0xffff);
119}
120
/* Access in-SoC switch registers through the frame-engine register
 * window.  NOTE(review): the switch register space appears to sit at
 * FE offset 0x10000 — confirm against the SoC register map.
 */
void mtk_switch_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	mtk_w32(eth, val, reg + 0x10000);
}
EXPORT_SYMBOL(mtk_switch_w32);

u32 mtk_switch_r32(struct mtk_eth *eth, unsigned reg)
{
	return mtk_r32(eth, reg + 0x10000);
}
EXPORT_SYMBOL(mtk_switch_r32);
132
/* debugfs "phy_regs" show callback: intended to dump PHY registers for
 * every MAC that is not a fixed link.  The actual register dump is
 * currently compiled out (#if 0, references fields/vars that no longer
 * exist), so this only walks the MAC array.  Always returns 0.
 */
static int mtketh_debug_show(struct seq_file *m, void *private)
{
	struct mtk_eth *eth = m->private;
	struct mtk_mac *mac = 0;
	int i = 0;

	for (i = 0 ; i < MTK_MAX_DEVS ; i++) {
		if (!eth->mac[i] ||
		    of_phy_is_fixed_link(eth->mac[i]->of_node))
			continue;
		mac = eth->mac[i];
#if 0 //FIXME
		while (j < 30) {
			d = _mtk_mdio_read(eth, mac->phy_dev->addr, j);

			seq_printf(m, "phy=%d, reg=0x%08x, data=0x%08x\n",
				   mac->phy_dev->addr, j, d);
			j++;
		}
#endif
	}
	return 0;
}
156
/* open() hook for the "phy_regs" debugfs file: bind the seq_file show
 * callback, passing the mtk_eth pointer stored in i_private.
 */
static int mtketh_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtketh_debug_show, inode->i_private);
}

/* Read-only seq_file ops for "phy_regs". */
static const struct file_operations mtketh_debug_fops = {
	.owner = THIS_MODULE,
	.open = mtketh_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
169
/* debugfs "mt7530sw_regs" show callback: dump every documented MT7530
 * register range over indirect MDIO/MMIO.  Only meaningful when an
 * external MT7530 is present and at least one MAC is a fixed link
 * (i.e. wired to the switch).
 */
static int mtketh_mt7530sw_debug_show(struct seq_file *m, void *private)
{
	struct mtk_eth *eth = m->private;
	u32 offset, data;
	int i;
	/* Register windows of the MT7530 address map; each range is
	 * dumped word by word below.
	 */
	struct mt7530_ranges {
		u32 start;
		u32 end;
	} ranges[] = {
		{0x0, 0xac},
		{0x1000, 0x10e0},
		{0x1100, 0x1140},
		{0x1200, 0x1240},
		{0x1300, 0x1340},
		{0x1400, 0x1440},
		{0x1500, 0x1540},
		{0x1600, 0x1640},
		{0x1800, 0x1848},
		{0x1900, 0x1948},
		{0x1a00, 0x1a48},
		{0x1b00, 0x1b48},
		{0x1c00, 0x1c48},
		{0x1d00, 0x1d48},
		{0x1e00, 0x1e48},
		{0x1f60, 0x1ffc},
		{0x2000, 0x212c},
		{0x2200, 0x222c},
		{0x2300, 0x232c},
		{0x2400, 0x242c},
		{0x2500, 0x252c},
		{0x2600, 0x262c},
		{0x3000, 0x3014},
		{0x30c0, 0x30f8},
		{0x3100, 0x3114},
		{0x3200, 0x3214},
		{0x3300, 0x3314},
		{0x3400, 0x3414},
		{0x3500, 0x3514},
		{0x3600, 0x3614},
		{0x4000, 0x40d4},
		{0x4100, 0x41d4},
		{0x4200, 0x42d4},
		{0x4300, 0x43d4},
		{0x4400, 0x44d4},
		{0x4500, 0x45d4},
		{0x4600, 0x46d4},
		{0x4f00, 0x461c},
		{0x7000, 0x7038},
		{0x7120, 0x7124},
		{0x7800, 0x7804},
		{0x7810, 0x7810},
		{0x7830, 0x7830},
		{0x7a00, 0x7a7c},
		{0x7b00, 0x7b04},
		{0x7e00, 0x7e04},
		{0x7ffc, 0x7ffc},
	};

	if (!mt7530_exist(eth))
		return -EOPNOTSUPP;

	/* A switch implies at least one fixed-link MAC; otherwise bail. */
	if ((!eth->mac[0] || !of_phy_is_fixed_link(eth->mac[0]->of_node)) &&
	    (!eth->mac[1] || !of_phy_is_fixed_link(eth->mac[1]->of_node))) {
		seq_puts(m, "no switch found\n");
		return 0;
	}

	for (i = 0 ; i < ARRAY_SIZE(ranges) ; i++) {
		for (offset = ranges[i].start;
		     offset <= ranges[i].end; offset += 4) {
			data = mt7530_mdio_r32(eth, offset);
			seq_printf(m, "mt7530 switch reg=0x%08x, data=0x%08x\n",
				   offset, data);
		}
	}

	return 0;
}
248
/* open() hook for "mt7530sw_regs": bind the switch register dump. */
static int mtketh_debug_mt7530sw_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtketh_mt7530sw_debug_show, inode->i_private);
}

/* Read-only seq_file ops for "mt7530sw_regs". */
static const struct file_operations mtketh_debug_mt7530sw_fops = {
	.owner = THIS_MODULE,
	.open = mtketh_debug_mt7530sw_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
261
262static ssize_t mtketh_mt7530sw_debugfs_write(struct file *file,
263 const char __user *ptr,
264 size_t len, loff_t *off)
265{
266 struct mtk_eth *eth = file->private_data;
267 char buf[32], *token, *p = buf;
developer8b6f2402022-11-28 13:42:34 +0800268 unsigned long reg, value, phy;
developerfd40db22021-04-29 10:08:25 +0800269 int ret;
270
271 if (!mt7530_exist(eth))
272 return -EOPNOTSUPP;
273
274 if (*off != 0)
275 return 0;
276
277 if (len > sizeof(buf) - 1)
278 len = sizeof(buf) - 1;
279
280 ret = strncpy_from_user(buf, ptr, len);
281 if (ret < 0)
282 return ret;
283 buf[len] = '\0';
284
285 token = strsep(&p, " ");
286 if (!token)
287 return -EINVAL;
288 if (kstrtoul(token, 16, (unsigned long *)&phy))
289 return -EINVAL;
290
291 token = strsep(&p, " ");
292 if (!token)
293 return -EINVAL;
294 if (kstrtoul(token, 16, (unsigned long *)&reg))
295 return -EINVAL;
296
297 token = strsep(&p, " ");
298 if (!token)
299 return -EINVAL;
300 if (kstrtoul(token, 16, (unsigned long *)&value))
301 return -EINVAL;
302
303 pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
304 0x1f, reg, value);
305 mt7530_mdio_w32(eth, reg, value);
306 pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
307 0x1f, reg, mt7530_mdio_r32(eth, reg));
308
309 return len;
310}
311
312static ssize_t mtketh_debugfs_write(struct file *file, const char __user *ptr,
313 size_t len, loff_t *off)
314{
315 struct mtk_eth *eth = file->private_data;
316 char buf[32], *token, *p = buf;
developer8b6f2402022-11-28 13:42:34 +0800317 unsigned long reg, value, phy;
developerfd40db22021-04-29 10:08:25 +0800318 int ret;
319
320 if (*off != 0)
321 return 0;
322
323 if (len > sizeof(buf) - 1)
324 len = sizeof(buf) - 1;
325
326 ret = strncpy_from_user(buf, ptr, len);
327 if (ret < 0)
328 return ret;
329 buf[len] = '\0';
330
331 token = strsep(&p, " ");
332 if (!token)
333 return -EINVAL;
334 if (kstrtoul(token, 16, (unsigned long *)&phy))
335 return -EINVAL;
336
337 token = strsep(&p, " ");
338
339 if (!token)
340 return -EINVAL;
341 if (kstrtoul(token, 16, (unsigned long *)&reg))
342 return -EINVAL;
343
344 token = strsep(&p, " ");
345
346 if (!token)
347 return -EINVAL;
348 if (kstrtoul(token, 16, (unsigned long *)&value))
349 return -EINVAL;
350
351 pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
352 phy, reg, value);
353
354 _mtk_mdio_write(eth, phy, reg, value);
355
356 pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
357 phy, reg, _mtk_mdio_read(eth, phy, reg));
358
359 return len;
360}
361
/* debugfs "reset" write handler: drive the frame-engine reset state
 * machine by level.
 *   0 - take reset_lock (disable further resets)
 *   1 - trigger FE + WDMA reset via the pending_work handler
 *   2 - release reset_lock (re-enable resets)
 *   3 - trigger FE-only reset (MTK_FE_STOP_TRAFFIC)
 * The "force" atomic is a busy flag: a reset is only scheduled when no
 * reset is already in flight.  reset_lock/force/mtk_reset_flag are
 * defined in mtk_eth_reset.
 *
 * Returns the original write length; malformed input yields -EINVAL,
 * a faulting user pointer -EFAULT.
 */
static ssize_t mtketh_debugfs_reset(struct file *file, const char __user *ptr,
				    size_t len, loff_t *off)
{
	struct mtk_eth *eth = file->private_data;
	char buf[8] = "";
	int count = len;
	unsigned long dbg_level = 0;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, ptr, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoul(buf, 0, &dbg_level))
		return -EINVAL;

	switch(dbg_level)
	{
	case 0:
		if (atomic_read(&reset_lock) == 0)
			atomic_inc(&reset_lock);
		break;
	case 1:
		if (atomic_read(&force) == 0) {
			atomic_inc(&force);
			schedule_work(&eth->pending_work);
		} else
			pr_info(" device resetting !!!\n");
		break;
	case 2:
		if (atomic_read(&reset_lock) == 1)
			atomic_dec(&reset_lock);
		break;
	case 3:
		if (atomic_read(&force) == 0) {
			atomic_inc(&force);
			mtk_reset_flag = MTK_FE_STOP_TRAFFIC;
			schedule_work(&eth->pending_work);
		} else
			pr_info(" device resetting !!!\n");
		break;
	default:
		pr_info("Usage: echo [level] > /sys/kernel/debug/mtketh/reset\n");
		pr_info("Commands: [level]\n");
		pr_info(" 0 disable reset\n");
		pr_info(" 1 FE and WDMA reset\n");
		pr_info(" 2 enable reset\n");
		pr_info(" 3 FE reset\n");
		break;
	}
	return count;
}
414
/* Write-only ops for "phy_reg_w" (clause-22 MDIO register writes). */
static const struct file_operations fops_reg_w = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = mtketh_debugfs_write,
	.llseek = noop_llseek,
};

/* Write-only ops for "reset" (FE reset state machine control). */
static const struct file_operations fops_eth_reset = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = mtketh_debugfs_reset,
	.llseek = noop_llseek,
};

/* Write-only ops for "mt7530sw_reg_w" (switch register writes). */
static const struct file_operations fops_mt7530sw_reg_w = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = mtketh_mt7530sw_debugfs_write,
	.llseek = noop_llseek,
};
435
/* Remove the whole "mtketh" debugfs tree created by mtketh_debugfs_init(). */
void mtketh_debugfs_exit(struct mtk_eth *eth)
{
	debugfs_remove_recursive(eth_debug.root);
}
440
441int mtketh_debugfs_init(struct mtk_eth *eth)
442{
443 int ret = 0;
444
445 eth_debug.root = debugfs_create_dir("mtketh", NULL);
446 if (!eth_debug.root) {
447 dev_notice(eth->dev, "%s:err at %d\n", __func__, __LINE__);
448 ret = -ENOMEM;
449 }
450
451 debugfs_create_file("phy_regs", S_IRUGO,
452 eth_debug.root, eth, &mtketh_debug_fops);
453 debugfs_create_file("phy_reg_w", S_IFREG | S_IWUSR,
454 eth_debug.root, eth, &fops_reg_w);
455 debugfs_create_file("reset", S_IFREG | S_IWUSR,
456 eth_debug.root, eth, &fops_eth_reset);
457 if (mt7530_exist(eth)) {
458 debugfs_create_file("mt7530sw_regs", S_IRUGO,
459 eth_debug.root, eth,
460 &mtketh_debug_mt7530sw_fops);
461 debugfs_create_file("mt7530sw_reg_w", S_IFREG | S_IWUSR,
462 eth_debug.root, eth,
463 &fops_mt7530sw_reg_w);
464 }
465 return ret;
466}
467
468void mii_mgr_read_combine(struct mtk_eth *eth, u32 phy_addr, u32 phy_register,
469 u32 *read_data)
470{
471 if (mt7530_exist(eth) && phy_addr == 31)
472 *read_data = mt7530_mdio_r32(eth, phy_register);
473
474 else
developer089e8852022-09-28 14:43:46 +0800475 *read_data = mdiobus_read(eth->mii_bus, phy_addr, phy_register);
developerfd40db22021-04-29 10:08:25 +0800476}
477
developer3957a912021-05-13 16:44:31 +0800478void mii_mgr_write_combine(struct mtk_eth *eth, u16 phy_addr, u16 phy_register,
developerfd40db22021-04-29 10:08:25 +0800479 u32 write_data)
480{
481 if (mt7530_exist(eth) && phy_addr == 31)
482 mt7530_mdio_w32(eth, phy_register, write_data);
483
484 else
developer089e8852022-09-28 14:43:46 +0800485 mdiobus_write(eth->mii_bus, phy_addr, phy_register, write_data);
developerfd40db22021-04-29 10:08:25 +0800486}
487
/* Clause-45 MDIO read: address the (devad, reg) pair via the packed
 * c45 register encoding.
 */
static void mii_mgr_read_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 *data)
{
	*data = mdiobus_read(eth->mii_bus, port, mdiobus_c45_addr(devad, reg));
}

/* Clause-45 MDIO write counterpart of mii_mgr_read_cl45(). */
static void mii_mgr_write_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 data)
{
	mdiobus_write(eth->mii_bus, port, mdiobus_c45_addr(devad, reg), data);
}
497
/* Private ioctl dispatcher for the ethernet netdev: clause-22 and
 * clause-45 MDIO register access plus embedded-switch (ESW) register
 * access through the FE window.
 *
 * Returns 0 on success, -EFAULT on user copy failure, -EINVAL on an
 * out-of-range ESW offset, -EOPNOTSUPP for unknown commands or ESW
 * commands without a switch.
 */
int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_mii_ioctl_data mii;
	struct mtk_esw_reg reg;
	u16 val;

	switch (cmd) {
	case MTKETH_MII_READ:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		mii_mgr_read_combine(eth, mii.phy_id, mii.reg_num,
				     &mii.val_out);
		if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
			goto err_copy;

		return 0;
	case MTKETH_MII_WRITE:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		mii_mgr_write_combine(eth, mii.phy_id, mii.reg_num,
				      mii.val_in);
		return 0;
	case MTKETH_MII_READ_CL45:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		/* phy_id packs prtad/devad for c45 access */
		mii_mgr_read_cl45(eth,
				  mdio_phy_id_prtad(mii.phy_id),
				  mdio_phy_id_devad(mii.phy_id),
				  mii.reg_num,
				  &val);
		mii.val_out = val;
		if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
			goto err_copy;

		return 0;
	case MTKETH_MII_WRITE_CL45:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		val = mii.val_in;
		mii_mgr_write_cl45(eth,
				   mdio_phy_id_prtad(mii.phy_id),
				   mdio_phy_id_devad(mii.phy_id),
				   mii.reg_num,
				   val);
		return 0;
	case MTKETH_ESW_REG_READ:
		if (!mt7530_exist(eth))
			return -EOPNOTSUPP;
		if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg)))
			goto err_copy;
		if (reg.off > REG_ESW_MAX)
			return -EINVAL;
		reg.val = mtk_switch_r32(eth, reg.off);

		if (copy_to_user(ifr->ifr_data, &reg, sizeof(reg)))
			goto err_copy;

		return 0;
	case MTKETH_ESW_REG_WRITE:
		if (!mt7530_exist(eth))
			return -EOPNOTSUPP;
		if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg)))
			goto err_copy;
		if (reg.off > REG_ESW_MAX)
			return -EINVAL;
		mtk_switch_w32(eth, reg.val, reg.off);

		return 0;
	default:
		break;
	}

	return -EOPNOTSUPP;
err_copy:
	return -EFAULT;
}
576
/* Dump the GDM MIB counters for one GDMA port on NETSYS v3 silicon.
 * The v3 MIB block is wider than v2 (separate uc/mc/bc and Tx byte/pkt
 * counters at different offsets).  mib_base is the FE-relative base of
 * this port's counter block.
 */
static void gdm_reg_dump_v3(struct mtk_eth *eth, u32 gdm_id, u32 mib_base)
{
	pr_info("| GDMA%d_RX_GBCNT : %010u (Rx Good Bytes) |\n",
		gdm_id, mtk_r32(eth, mib_base));
	pr_info("| GDMA%d_RX_GPCNT : %010u (Rx Good Pkts) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x08));
	pr_info("| GDMA%d_RX_OERCNT : %010u (overflow error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x10));
	pr_info("| GDMA%d_RX_FERCNT : %010u (FCS error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x14));
	pr_info("| GDMA%d_RX_SERCNT : %010u (too short) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x18));
	pr_info("| GDMA%d_RX_LERCNT : %010u (too long) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x1C));
	pr_info("| GDMA%d_RX_CERCNT : %010u (checksum error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x20));
	pr_info("| GDMA%d_RX_FCCNT : %010u (flow control) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x24));
	pr_info("| GDMA%d_RX_VDPCNT : %010u (VID drop) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x28));
	pr_info("| GDMA%d_RX_PFCCNT : %010u (priority flow control)\n",
		gdm_id, mtk_r32(eth, mib_base + 0x2C));
	pr_info("| GDMA%d_TX_GBCNT : %010u (Tx Good Bytes) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x40));
	pr_info("| GDMA%d_TX_GPCNT : %010u (Tx Good Pkts) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x48));
	pr_info("| GDMA%d_TX_SKIPCNT: %010u (abort count) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x50));
	pr_info("| GDMA%d_TX_COLCNT : %010u (collision count)|\n",
		gdm_id, mtk_r32(eth, mib_base + 0x54));
	pr_info("| GDMA%d_TX_OERCNT : %010u (overflow error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x58));
	pr_info("| GDMA%d_TX_FCCNT : %010u (flow control) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x60));
	pr_info("| GDMA%d_TX_PFCCNT : %010u (priority flow control)\n",
		gdm_id, mtk_r32(eth, mib_base + 0x64));
	pr_info("| |\n");
}
615
/* Dump the GDM MIB counters for one GDMA port on pre-v3 (v1/v2)
 * silicon, where the counter block uses the compact legacy layout.
 */
static void gdm_reg_dump_v2(struct mtk_eth *eth, u32 gdm_id, u32 mib_base)
{
	pr_info("| GDMA%d_RX_GBCNT : %010u (Rx Good Bytes) |\n",
		gdm_id, mtk_r32(eth, mib_base));
	pr_info("| GDMA%d_RX_GPCNT : %010u (Rx Good Pkts) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x08));
	pr_info("| GDMA%d_RX_OERCNT : %010u (overflow error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x10));
	pr_info("| GDMA%d_RX_FERCNT : %010u (FCS error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x14));
	pr_info("| GDMA%d_RX_SERCNT : %010u (too short) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x18));
	pr_info("| GDMA%d_RX_LERCNT : %010u (too long) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x1C));
	pr_info("| GDMA%d_RX_CERCNT : %010u (checksum error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x20));
	pr_info("| GDMA%d_RX_FCCNT : %010u (flow control) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x24));
	pr_info("| GDMA%d_TX_SKIPCNT: %010u (abort count) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x28));
	pr_info("| GDMA%d_TX_COLCNT : %010u (collision count) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x2C));
	pr_info("| GDMA%d_TX_GBCNT : %010u (Tx Good Bytes) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x30));
	pr_info("| GDMA%d_TX_GPCNT : %010u (Tx Good Pkts) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x38));
	pr_info("| |\n");
}
644
/* Print a banner plus the per-port GDM MIB counters for every MAC,
 * picking the v3 or legacy register layout from the SoC capabilities.
 * Output goes to the kernel log (pr_info), not to the seq_file.
 */
static void gdm_cnt_read(struct mtk_eth *eth)
{
	u32 i, mib_base;

	pr_info("\n <<CPU>>\n");
	pr_info(" |\n");
	pr_info("+-----------------------------------------------+\n");
	pr_info("| <<PSE>> |\n");
	pr_info("+-----------------------------------------------+\n");
	pr_info(" |\n");
	pr_info("+-----------------------------------------------+\n");
	pr_info("| <<GDMA>> |\n");

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		/* each GDM's MIB block is MTK_STAT_OFFSET apart */
		mib_base = MTK_GDM1_TX_GBCNT + MTK_STAT_OFFSET * i;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
			gdm_reg_dump_v3(eth, i + 1, mib_base);
		else
			gdm_reg_dump_v2(eth, i + 1, mib_base);
	}

	pr_info("+-----------------------------------------------+\n");
}
669
developer9ccff342022-10-13 18:28:54 +0800670void dump_each_port(struct seq_file *seq, struct mtk_eth *eth, u32 base)
671{
672 u32 pkt_cnt = 0;
673 int i = 0;
674
675 for (i = 0; i < 7; i++) {
676 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
677 if ((base == 0x402C) && (i == 6))
678 base = 0x408C;
679 else if ((base == 0x408C) && (i == 6))
680 base = 0x402C;
681 else
682 ;
683 }
684 pkt_cnt = mt7530_mdio_r32(eth, (base) + (i * 0x100));
685 seq_printf(seq, "%8u ", pkt_cnt);
686 }
687 seq_puts(seq, "\n");
688}
689
developerfd40db22021-04-29 10:08:25 +0800690int esw_cnt_read(struct seq_file *seq, void *v)
691{
692 unsigned int pkt_cnt = 0;
693 int i = 0;
694 struct mtk_eth *eth = g_eth;
developerfd40db22021-04-29 10:08:25 +0800695
developer089e8852022-09-28 14:43:46 +0800696 gdm_cnt_read(eth);
developerfd40db22021-04-29 10:08:25 +0800697
698 if (!mt7530_exist(eth))
699 return 0;
700
developer089e8852022-09-28 14:43:46 +0800701 mt798x_iomap();
702
developerfd40db22021-04-29 10:08:25 +0800703 seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n",
704 "Port0", "Port1", "Port2", "Port3", "Port4", "Port5",
705 "Port6");
706 seq_puts(seq, "Tx Drop Packet :");
developer9ccff342022-10-13 18:28:54 +0800707 dump_each_port(seq, eth, 0x4000);
developerfd40db22021-04-29 10:08:25 +0800708 seq_puts(seq, "Tx CRC Error :");
developer9ccff342022-10-13 18:28:54 +0800709 dump_each_port(seq, eth, 0x4004);
developerfd40db22021-04-29 10:08:25 +0800710 seq_puts(seq, "Tx Unicast Packet :");
developer9ccff342022-10-13 18:28:54 +0800711 dump_each_port(seq, eth, 0x4008);
developerfd40db22021-04-29 10:08:25 +0800712 seq_puts(seq, "Tx Multicast Packet :");
developer9ccff342022-10-13 18:28:54 +0800713 dump_each_port(seq, eth, 0x400C);
developerfd40db22021-04-29 10:08:25 +0800714 seq_puts(seq, "Tx Broadcast Packet :");
developer9ccff342022-10-13 18:28:54 +0800715 dump_each_port(seq, eth, 0x4010);
developerfd40db22021-04-29 10:08:25 +0800716 seq_puts(seq, "Tx Collision Event :");
developer9ccff342022-10-13 18:28:54 +0800717 dump_each_port(seq, eth, 0x4014);
developerfd40db22021-04-29 10:08:25 +0800718 seq_puts(seq, "Tx Pause Packet :");
developer9ccff342022-10-13 18:28:54 +0800719 dump_each_port(seq, eth, 0x402C);
developerfd40db22021-04-29 10:08:25 +0800720 seq_puts(seq, "Rx Drop Packet :");
developer9ccff342022-10-13 18:28:54 +0800721 dump_each_port(seq, eth, 0x4060);
developerfd40db22021-04-29 10:08:25 +0800722 seq_puts(seq, "Rx Filtering Packet :");
developer9ccff342022-10-13 18:28:54 +0800723 dump_each_port(seq, eth, 0x4064);
developerfd40db22021-04-29 10:08:25 +0800724 seq_puts(seq, "Rx Unicast Packet :");
developer9ccff342022-10-13 18:28:54 +0800725 dump_each_port(seq, eth, 0x4068);
developerfd40db22021-04-29 10:08:25 +0800726 seq_puts(seq, "Rx Multicast Packet :");
developer9ccff342022-10-13 18:28:54 +0800727 dump_each_port(seq, eth, 0x406C);
developerfd40db22021-04-29 10:08:25 +0800728 seq_puts(seq, "Rx Broadcast Packet :");
developer9ccff342022-10-13 18:28:54 +0800729 dump_each_port(seq, eth, 0x4070);
developerfd40db22021-04-29 10:08:25 +0800730 seq_puts(seq, "Rx Alignment Error :");
developer9ccff342022-10-13 18:28:54 +0800731 dump_each_port(seq, eth, 0x4074);
developerfd40db22021-04-29 10:08:25 +0800732 seq_puts(seq, "Rx CRC Error :");
developer9ccff342022-10-13 18:28:54 +0800733 dump_each_port(seq, eth, 0x4078);
developerfd40db22021-04-29 10:08:25 +0800734 seq_puts(seq, "Rx Undersize Error :");
developer9ccff342022-10-13 18:28:54 +0800735 dump_each_port(seq, eth, 0x407C);
developerfd40db22021-04-29 10:08:25 +0800736 seq_puts(seq, "Rx Fragment Error :");
developer9ccff342022-10-13 18:28:54 +0800737 dump_each_port(seq, eth, 0x4080);
developerfd40db22021-04-29 10:08:25 +0800738 seq_puts(seq, "Rx Oversize Error :");
developer9ccff342022-10-13 18:28:54 +0800739 dump_each_port(seq, eth, 0x4084);
developerfd40db22021-04-29 10:08:25 +0800740 seq_puts(seq, "Rx Jabber Error :");
developer9ccff342022-10-13 18:28:54 +0800741 dump_each_port(seq, eth, 0x4088);
developerfd40db22021-04-29 10:08:25 +0800742 seq_puts(seq, "Rx Pause Packet :");
developer9ccff342022-10-13 18:28:54 +0800743 dump_each_port(seq, eth, 0x408C);
developerfd40db22021-04-29 10:08:25 +0800744 mt7530_mdio_w32(eth, 0x4fe0, 0xf0);
745 mt7530_mdio_w32(eth, 0x4fe0, 0x800000f0);
746
747 seq_puts(seq, "\n");
748
developer089e8852022-09-28 14:43:46 +0800749 mt798x_iounmap();
750
developerfd40db22021-04-29 10:08:25 +0800751 return 0;
752}
753
/* open() hook for the switch-counter proc entry. */
static int switch_count_open(struct inode *inode, struct file *file)
{
	return single_open(file, esw_cnt_read, 0);
}

/* Read-only seq_file ops for the switch-counter proc entry. */
static const struct file_operations switch_count_fops = {
	.owner = THIS_MODULE,
	.open = switch_count_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
766
/* Dump the XFI MAC MIB counters for one GMAC to the seq_file.  The
 * PRINT_FORMATTED_XFI_MIB* macros implicitly use `seq`, `eth`, and
 * `gdm_id` from this scope; the GENMASK argument selects the valid
 * counter bits (some counters are only 16 bits wide).
 */
void xfi_mib_dump(struct seq_file *seq, u32 gdm_id)
{
	struct mtk_eth *eth = g_eth;

	PRINT_FORMATTED_XFI_MIB(seq, TX_PKT_CNT, GENMASK(31, 0));
	PRINT_FORMATTED_XFI_MIB(seq, TX_ETH_CNT, GENMASK(31, 0));
	PRINT_FORMATTED_XFI_MIB(seq, TX_PAUSE_CNT, GENMASK(15, 0));
	PRINT_FORMATTED_XFI_MIB(seq, TX_BYTE_CNT, GENMASK(31, 0));
	PRINT_FORMATTED_XFI_MIB64(seq, TX_UC_PKT_CNT);
	PRINT_FORMATTED_XFI_MIB64(seq, TX_MC_PKT_CNT);
	PRINT_FORMATTED_XFI_MIB64(seq, TX_BC_PKT_CNT);

	PRINT_FORMATTED_XFI_MIB(seq, RX_PKT_CNT, GENMASK(31, 0));
	PRINT_FORMATTED_XFI_MIB(seq, RX_ETH_CNT, GENMASK(31, 0));
	PRINT_FORMATTED_XFI_MIB(seq, RX_PAUSE_CNT, GENMASK(15, 0));
	PRINT_FORMATTED_XFI_MIB(seq, RX_LEN_ERR_CNT, GENMASK(15, 0));
	PRINT_FORMATTED_XFI_MIB(seq, RX_CRC_ERR_CNT, GENMASK(15, 0));
	PRINT_FORMATTED_XFI_MIB64(seq, RX_UC_PKT_CNT);
	PRINT_FORMATTED_XFI_MIB64(seq, RX_MC_PKT_CNT);
	PRINT_FORMATTED_XFI_MIB64(seq, RX_BC_PKT_CNT);
	PRINT_FORMATTED_XFI_MIB(seq, RX_UC_DROP_CNT, GENMASK(31, 0));
	PRINT_FORMATTED_XFI_MIB(seq, RX_BC_DROP_CNT, GENMASK(31, 0));
	PRINT_FORMATTED_XFI_MIB(seq, RX_MC_DROP_CNT, GENMASK(31, 0));
	PRINT_FORMATTED_XFI_MIB(seq, RX_ALL_DROP_CNT, GENMASK(31, 0));
}
792
/* proc show callback for XFI MAC counters: dump the MIB table of every
 * XFI-capable GMAC (GMAC2 onward), then pulse bit 0 of the per-MAC
 * MIB control register — presumably a counter clear/latch; confirm
 * against the XFI MAC datasheet.
 */
int xfi_cnt_read(struct seq_file *seq, void *v)
{
	struct mtk_eth *eth = g_eth;
	int i;

	seq_puts(seq, "+------------------------------------+\n");
	seq_puts(seq, "| <<XFI MAC>> |\n");

	for (i = MTK_GMAC2_ID; i < MTK_GMAC_ID_MAX; i++) {
		xfi_mib_dump(seq, i);
		mtk_m32(eth, 0x1, 0x1, MTK_XFI_MIB_BASE(i) + MTK_XFI_CNT_CTRL);
		seq_puts(seq, "| |\n");
	}

	seq_puts(seq, "+------------------------------------+\n");

	return 0;
}
811
/* open() hook for the XFI-counter proc entry. */
static int xfi_count_open(struct inode *inode, struct file *file)
{
	return single_open(file, xfi_cnt_read, 0);
}

/* Read-only seq_file ops for the XFI-counter proc entry. */
static const struct file_operations xfi_count_fops = {
	.owner = THIS_MODULE,
	.open = xfi_count_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
824
/* proc entries for the TX / HW-TX (scratch) / RX descriptor ring dumps */
static struct proc_dir_entry *proc_tx_ring, *proc_hwtx_ring, *proc_rx_ring;
developerfd40db22021-04-29 10:08:25 +0800826
/* proc show callback: dump every TX descriptor of the primary TX ring,
 * printing 4 words (8 on NETSYS v2/v3, which use the larger v2
 * descriptor layout).
 * NOTE(review): `ring->dma + i * eth->soc->txrx.txd_size` only strides
 * by bytes if ring->dma is a byte-sized (e.g. void *) pointer — confirm
 * its declared type in mtk_eth_soc.h.
 */
int tx_ring_read(struct seq_file *seq, void *v)
{
	struct mtk_eth *eth = g_eth;
	struct mtk_tx_ring *ring = &g_eth->tx_ring;
	struct mtk_tx_dma_v2 *tx_ring;
	int i = 0;

	seq_printf(seq, "free count = %d\n", (int)atomic_read(&ring->free_count));
	seq_printf(seq, "cpu next free: %d\n", (int)(ring->next_free - ring->dma));
	seq_printf(seq, "cpu last free: %d\n", (int)(ring->last_free - ring->dma));
	for (i = 0; i < MTK_DMA_SIZE; i++) {
		/* DMA-visible address of descriptor i (widened before
		 * multiply to avoid 32-bit overflow)
		 */
		dma_addr_t tmp = ring->phys +
				 i * (dma_addr_t)eth->soc->txrx.txd_size;

		tx_ring = ring->dma + i * eth->soc->txrx.txd_size;

		seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &tmp,
			   tx_ring->txd1, tx_ring->txd2,
			   tx_ring->txd3, tx_ring->txd4);

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			seq_printf(seq, " %08x %08x %08x %08x",
				   tx_ring->txd5, tx_ring->txd6,
				   tx_ring->txd7, tx_ring->txd8);
		}

		seq_printf(seq, "\n");
	}

	return 0;
}
859
/* open() hook for the TX-ring proc entry. */
static int tx_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, tx_ring_read, NULL);
}

/* Read-only seq_file ops for the TX-ring proc entry. */
static const struct file_operations tx_ring_fops = {
	.owner = THIS_MODULE,
	.open = tx_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
872
/* proc show callback: dump the HW (PDMA scratch/free-queue) TX ring
 * descriptors, 4 words each (8 on NETSYS v2/v3).
 * NOTE(review): as with tx_ring_read(), the byte-stride arithmetic on
 * eth->scratch_ring assumes a byte-sized pointer type — confirm.
 */
int hwtx_ring_read(struct seq_file *seq, void *v)
{
	struct mtk_eth *eth = g_eth;
	struct mtk_tx_dma_v2 *hwtx_ring;
	int i = 0;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		/* DMA-visible address of scratch descriptor i */
		dma_addr_t addr = eth->phy_scratch_ring +
				  i * (dma_addr_t)eth->soc->txrx.txd_size;

		hwtx_ring = eth->scratch_ring + i * eth->soc->txrx.txd_size;

		seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &addr,
			   hwtx_ring->txd1, hwtx_ring->txd2,
			   hwtx_ring->txd3, hwtx_ring->txd4);

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			seq_printf(seq, " %08x %08x %08x %08x",
				   hwtx_ring->txd5, hwtx_ring->txd6,
				   hwtx_ring->txd7, hwtx_ring->txd8);
		}

		seq_printf(seq, "\n");
	}

	return 0;
}
901
902static int hwtx_ring_open(struct inode *inode, struct file *file)
903{
904 return single_open(file, hwtx_ring_read, NULL);
905}
906
907static const struct file_operations hwtx_ring_fops = {
908 .owner = THIS_MODULE,
909 .open = hwtx_ring_open,
910 .read = seq_read,
911 .llseek = seq_lseek,
912 .release = single_release
913};
914
developerfd40db22021-04-29 10:08:25 +0800915int rx_ring_read(struct seq_file *seq, void *v)
916{
developere9356982022-07-04 09:03:20 +0800917 struct mtk_eth *eth = g_eth;
developerfd40db22021-04-29 10:08:25 +0800918 struct mtk_rx_ring *ring = &g_eth->rx_ring[0];
developere9356982022-07-04 09:03:20 +0800919 struct mtk_rx_dma_v2 *rx_ring;
developerfd40db22021-04-29 10:08:25 +0800920 int i = 0;
921
developerfd40db22021-04-29 10:08:25 +0800922 seq_printf(seq, "next to read: %d\n",
923 NEXT_DESP_IDX(ring->calc_idx, MTK_DMA_SIZE));
924 for (i = 0; i < MTK_DMA_SIZE; i++) {
developere9356982022-07-04 09:03:20 +0800925 rx_ring = ring->dma + i * eth->soc->txrx.rxd_size;
926
developerfd40db22021-04-29 10:08:25 +0800927 seq_printf(seq, "%d: %08x %08x %08x %08x", i,
developere9356982022-07-04 09:03:20 +0800928 rx_ring->rxd1, rx_ring->rxd2,
929 rx_ring->rxd3, rx_ring->rxd4);
930
developer089e8852022-09-28 14:43:46 +0800931 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
932 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developere9356982022-07-04 09:03:20 +0800933 seq_printf(seq, " %08x %08x %08x %08x",
934 rx_ring->rxd5, rx_ring->rxd6,
935 rx_ring->rxd7, rx_ring->rxd8);
936 }
937
developerfd40db22021-04-29 10:08:25 +0800938 seq_printf(seq, "\n");
939 }
940
developerfd40db22021-04-29 10:08:25 +0800941 return 0;
942}
943
944static int rx_ring_open(struct inode *inode, struct file *file)
945{
946 return single_open(file, rx_ring_read, NULL);
947}
948
949static const struct file_operations rx_ring_fops = {
950 .owner = THIS_MODULE,
951 .open = rx_ring_open,
952 .read = seq_read,
953 .llseek = seq_lseek,
954 .release = single_release
955};
956
developer77f3fd42021-10-05 15:16:05 +0800957static inline u32 mtk_dbg_r32(u32 reg)
958{
959 void __iomem *virt_reg;
960 u32 val;
961
962 virt_reg = ioremap(reg, 32);
963 val = __raw_readl(virt_reg);
964 iounmap(virt_reg);
965
966 return val;
967}
968
developerfd40db22021-04-29 10:08:25 +0800969int dbg_regs_read(struct seq_file *seq, void *v)
970{
971 struct mtk_eth *eth = g_eth;
972
developer77f3fd42021-10-05 15:16:05 +0800973 seq_puts(seq, " <<DEBUG REG DUMP>>\n");
974
975 seq_printf(seq, "| FE_INT_STA : %08x |\n",
developer8051e042022-04-08 13:26:36 +0800976 mtk_r32(eth, MTK_FE_INT_STATUS));
developer089e8852022-09-28 14:43:46 +0800977 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
978 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer77f3fd42021-10-05 15:16:05 +0800979 seq_printf(seq, "| FE_INT_STA2 : %08x |\n",
developer8051e042022-04-08 13:26:36 +0800980 mtk_r32(eth, MTK_FE_INT_STATUS2));
developer77f3fd42021-10-05 15:16:05 +0800981
developerfd40db22021-04-29 10:08:25 +0800982 seq_printf(seq, "| PSE_FQFC_CFG : %08x |\n",
983 mtk_r32(eth, MTK_PSE_FQFC_CFG));
984 seq_printf(seq, "| PSE_IQ_STA1 : %08x |\n",
985 mtk_r32(eth, MTK_PSE_IQ_STA(0)));
986 seq_printf(seq, "| PSE_IQ_STA2 : %08x |\n",
987 mtk_r32(eth, MTK_PSE_IQ_STA(1)));
988
developer089e8852022-09-28 14:43:46 +0800989 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
990 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developerfd40db22021-04-29 10:08:25 +0800991 seq_printf(seq, "| PSE_IQ_STA3 : %08x |\n",
992 mtk_r32(eth, MTK_PSE_IQ_STA(2)));
993 seq_printf(seq, "| PSE_IQ_STA4 : %08x |\n",
994 mtk_r32(eth, MTK_PSE_IQ_STA(3)));
developer77f3fd42021-10-05 15:16:05 +0800995 seq_printf(seq, "| PSE_IQ_STA5 : %08x |\n",
996 mtk_r32(eth, MTK_PSE_IQ_STA(4)));
developer089e8852022-09-28 14:43:46 +0800997 seq_printf(seq, "| PSE_IQ_STA6 : %08x |\n",
998 mtk_r32(eth, MTK_PSE_IQ_STA(5)));
999 seq_printf(seq, "| PSE_IQ_STA7 : %08x |\n",
1000 mtk_r32(eth, MTK_PSE_IQ_STA(6)));
1001 seq_printf(seq, "| PSE_IQ_STA8 : %08x |\n",
1002 mtk_r32(eth, MTK_PSE_IQ_STA(7)));
developerfd40db22021-04-29 10:08:25 +08001003 }
1004
1005 seq_printf(seq, "| PSE_OQ_STA1 : %08x |\n",
1006 mtk_r32(eth, MTK_PSE_OQ_STA(0)));
1007 seq_printf(seq, "| PSE_OQ_STA2 : %08x |\n",
1008 mtk_r32(eth, MTK_PSE_OQ_STA(1)));
1009
developer089e8852022-09-28 14:43:46 +08001010 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1011 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developerfd40db22021-04-29 10:08:25 +08001012 seq_printf(seq, "| PSE_OQ_STA3 : %08x |\n",
1013 mtk_r32(eth, MTK_PSE_OQ_STA(2)));
1014 seq_printf(seq, "| PSE_OQ_STA4 : %08x |\n",
1015 mtk_r32(eth, MTK_PSE_OQ_STA(3)));
developer77f3fd42021-10-05 15:16:05 +08001016 seq_printf(seq, "| PSE_OQ_STA5 : %08x |\n",
1017 mtk_r32(eth, MTK_PSE_OQ_STA(4)));
developer089e8852022-09-28 14:43:46 +08001018 seq_printf(seq, "| PSE_OQ_STA6 : %08x |\n",
1019 mtk_r32(eth, MTK_PSE_OQ_STA(5)));
1020 seq_printf(seq, "| PSE_OQ_STA7 : %08x |\n",
1021 mtk_r32(eth, MTK_PSE_OQ_STA(6)));
1022 seq_printf(seq, "| PSE_OQ_STA8 : %08x |\n",
1023 mtk_r32(eth, MTK_PSE_OQ_STA(7)));
developerfd40db22021-04-29 10:08:25 +08001024 }
1025
developer77f3fd42021-10-05 15:16:05 +08001026 seq_printf(seq, "| PDMA_CRX_IDX : %08x |\n",
1027 mtk_r32(eth, MTK_PRX_CRX_IDX0));
1028 seq_printf(seq, "| PDMA_DRX_IDX : %08x |\n",
1029 mtk_r32(eth, MTK_PRX_DRX_IDX0));
1030 seq_printf(seq, "| QDMA_CTX_IDX : %08x |\n",
1031 mtk_r32(eth, MTK_QTX_CTX_PTR));
1032 seq_printf(seq, "| QDMA_DTX_IDX : %08x |\n",
1033 mtk_r32(eth, MTK_QTX_DTX_PTR));
developerfd40db22021-04-29 10:08:25 +08001034 seq_printf(seq, "| QDMA_FQ_CNT : %08x |\n",
1035 mtk_r32(eth, MTK_QDMA_FQ_CNT));
developer089e8852022-09-28 14:43:46 +08001036 seq_printf(seq, "| QDMA_FWD_CNT : %08x |\n",
1037 mtk_r32(eth, MTK_QDMA_FWD_CNT));
1038 seq_printf(seq, "| QDMA_FSM : %08x |\n",
1039 mtk_r32(eth, MTK_QDMA_FSM));
developerfd40db22021-04-29 10:08:25 +08001040 seq_printf(seq, "| FE_PSE_FREE : %08x |\n",
1041 mtk_r32(eth, MTK_FE_PSE_FREE));
1042 seq_printf(seq, "| FE_DROP_FQ : %08x |\n",
1043 mtk_r32(eth, MTK_FE_DROP_FQ));
1044 seq_printf(seq, "| FE_DROP_FC : %08x |\n",
1045 mtk_r32(eth, MTK_FE_DROP_FC));
1046 seq_printf(seq, "| FE_DROP_PPE : %08x |\n",
1047 mtk_r32(eth, MTK_FE_DROP_PPE));
1048 seq_printf(seq, "| GDM1_IG_CTRL : %08x |\n",
1049 mtk_r32(eth, MTK_GDMA_FWD_CFG(0)));
1050 seq_printf(seq, "| GDM2_IG_CTRL : %08x |\n",
1051 mtk_r32(eth, MTK_GDMA_FWD_CFG(1)));
developer089e8852022-09-28 14:43:46 +08001052 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
1053 seq_printf(seq, "| GDM3_IG_CTRL : %08x |\n",
1054 mtk_r32(eth, MTK_GDMA_FWD_CFG(2)));
1055 }
developerfd40db22021-04-29 10:08:25 +08001056 seq_printf(seq, "| MAC_P1_MCR : %08x |\n",
1057 mtk_r32(eth, MTK_MAC_MCR(0)));
1058 seq_printf(seq, "| MAC_P2_MCR : %08x |\n",
1059 mtk_r32(eth, MTK_MAC_MCR(1)));
developer089e8852022-09-28 14:43:46 +08001060 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
1061 seq_printf(seq, "| MAC_P3_MCR : %08x |\n",
1062 mtk_r32(eth, MTK_MAC_MCR(2)));
1063 }
developer77f3fd42021-10-05 15:16:05 +08001064 seq_printf(seq, "| MAC_P1_FSM : %08x |\n",
1065 mtk_r32(eth, MTK_MAC_FSM(0)));
1066 seq_printf(seq, "| MAC_P2_FSM : %08x |\n",
1067 mtk_r32(eth, MTK_MAC_FSM(1)));
developer089e8852022-09-28 14:43:46 +08001068 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
1069 seq_printf(seq, "| MAC_P3_FSM : %08x |\n",
1070 mtk_r32(eth, MTK_MAC_FSM(2)));
1071 }
developerfd40db22021-04-29 10:08:25 +08001072
developer089e8852022-09-28 14:43:46 +08001073 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1074 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developerfd40db22021-04-29 10:08:25 +08001075 seq_printf(seq, "| FE_CDM1_FSM : %08x |\n",
1076 mtk_r32(eth, MTK_FE_CDM1_FSM));
1077 seq_printf(seq, "| FE_CDM2_FSM : %08x |\n",
1078 mtk_r32(eth, MTK_FE_CDM2_FSM));
developer77f3fd42021-10-05 15:16:05 +08001079 seq_printf(seq, "| FE_CDM3_FSM : %08x |\n",
1080 mtk_r32(eth, MTK_FE_CDM3_FSM));
1081 seq_printf(seq, "| FE_CDM4_FSM : %08x |\n",
1082 mtk_r32(eth, MTK_FE_CDM4_FSM));
developer089e8852022-09-28 14:43:46 +08001083 seq_printf(seq, "| FE_CDM5_FSM : %08x |\n",
1084 mtk_r32(eth, MTK_FE_CDM5_FSM));
1085 seq_printf(seq, "| FE_CDM6_FSM : %08x |\n",
1086 mtk_r32(eth, MTK_FE_CDM6_FSM));
developerfd40db22021-04-29 10:08:25 +08001087 seq_printf(seq, "| FE_GDM1_FSM : %08x |\n",
1088 mtk_r32(eth, MTK_FE_GDM1_FSM));
1089 seq_printf(seq, "| FE_GDM2_FSM : %08x |\n",
1090 mtk_r32(eth, MTK_FE_GDM2_FSM));
developer77f3fd42021-10-05 15:16:05 +08001091 seq_printf(seq, "| SGMII_EFUSE : %08x |\n",
1092 mtk_dbg_r32(MTK_SGMII_EFUSE));
1093 seq_printf(seq, "| SGMII0_RX_CNT : %08x |\n",
1094 mtk_dbg_r32(MTK_SGMII_FALSE_CARRIER_CNT(0)));
1095 seq_printf(seq, "| SGMII1_RX_CNT : %08x |\n",
1096 mtk_dbg_r32(MTK_SGMII_FALSE_CARRIER_CNT(1)));
1097 seq_printf(seq, "| WED_RTQM_GLO : %08x |\n",
1098 mtk_dbg_r32(MTK_WED_RTQM_GLO_CFG));
developerfd40db22021-04-29 10:08:25 +08001099 }
1100
developer8051e042022-04-08 13:26:36 +08001101 mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS);
developer089e8852022-09-28 14:43:46 +08001102 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1103 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer8051e042022-04-08 13:26:36 +08001104 mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS2);
developer77f3fd42021-10-05 15:16:05 +08001105
developerfd40db22021-04-29 10:08:25 +08001106 return 0;
1107}
1108
1109static int dbg_regs_open(struct inode *inode, struct file *file)
1110{
1111 return single_open(file, dbg_regs_read, 0);
1112}
1113
1114static const struct file_operations dbg_regs_fops = {
1115 .owner = THIS_MODULE,
1116 .open = dbg_regs_open,
1117 .read = seq_read,
1118 .llseek = seq_lseek,
developer77d03a72021-06-06 00:06:00 +08001119 .release = single_release
1120};
1121
developere9356982022-07-04 09:03:20 +08001122void hw_lro_stats_update(u32 ring_no, struct mtk_rx_dma_v2 *rxd)
developer77d03a72021-06-06 00:06:00 +08001123{
developere9356982022-07-04 09:03:20 +08001124 struct mtk_eth *eth = g_eth;
developer77d03a72021-06-06 00:06:00 +08001125 u32 idx, agg_cnt, agg_size;
1126
developer089e8852022-09-28 14:43:46 +08001127 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1128 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developere9356982022-07-04 09:03:20 +08001129 idx = ring_no - 4;
1130 agg_cnt = RX_DMA_GET_AGG_CNT_V2(rxd->rxd6);
1131 } else {
1132 idx = ring_no - 1;
1133 agg_cnt = RX_DMA_GET_AGG_CNT(rxd->rxd2);
1134 }
developer77d03a72021-06-06 00:06:00 +08001135
developer8b6f2402022-11-28 13:42:34 +08001136 if (idx >= MTK_HW_LRO_RING_NUM)
1137 return;
1138
developer77d03a72021-06-06 00:06:00 +08001139 agg_size = RX_DMA_GET_PLEN0(rxd->rxd2);
1140
1141 hw_lro_agg_size_cnt[idx][agg_size / 5000]++;
1142 hw_lro_agg_num_cnt[idx][agg_cnt]++;
1143 hw_lro_tot_flush_cnt[idx]++;
1144 hw_lro_tot_agg_cnt[idx] += agg_cnt;
1145}
1146
developere9356982022-07-04 09:03:20 +08001147void hw_lro_flush_stats_update(u32 ring_no, struct mtk_rx_dma_v2 *rxd)
developer77d03a72021-06-06 00:06:00 +08001148{
developere9356982022-07-04 09:03:20 +08001149 struct mtk_eth *eth = g_eth;
developer77d03a72021-06-06 00:06:00 +08001150 u32 idx, flush_reason;
1151
developer089e8852022-09-28 14:43:46 +08001152 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1153 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developere9356982022-07-04 09:03:20 +08001154 idx = ring_no - 4;
1155 flush_reason = RX_DMA_GET_FLUSH_RSN_V2(rxd->rxd6);
1156 } else {
1157 idx = ring_no - 1;
1158 flush_reason = RX_DMA_GET_REV(rxd->rxd2);
1159 }
developer77d03a72021-06-06 00:06:00 +08001160
developer8b6f2402022-11-28 13:42:34 +08001161 if (idx >= MTK_HW_LRO_RING_NUM)
1162 return;
1163
developer77d03a72021-06-06 00:06:00 +08001164 if ((flush_reason & 0x7) == MTK_HW_LRO_AGG_FLUSH)
1165 hw_lro_agg_flush_cnt[idx]++;
1166 else if ((flush_reason & 0x7) == MTK_HW_LRO_AGE_FLUSH)
1167 hw_lro_age_flush_cnt[idx]++;
1168 else if ((flush_reason & 0x7) == MTK_HW_LRO_NOT_IN_SEQ_FLUSH)
1169 hw_lro_seq_flush_cnt[idx]++;
1170 else if ((flush_reason & 0x7) == MTK_HW_LRO_TIMESTAMP_FLUSH)
1171 hw_lro_timestamp_flush_cnt[idx]++;
1172 else if ((flush_reason & 0x7) == MTK_HW_LRO_NON_RULE_FLUSH)
1173 hw_lro_norule_flush_cnt[idx]++;
1174}
1175
1176ssize_t hw_lro_stats_write(struct file *file, const char __user *buffer,
1177 size_t count, loff_t *data)
1178{
1179 memset(hw_lro_agg_num_cnt, 0, sizeof(hw_lro_agg_num_cnt));
1180 memset(hw_lro_agg_size_cnt, 0, sizeof(hw_lro_agg_size_cnt));
1181 memset(hw_lro_tot_agg_cnt, 0, sizeof(hw_lro_tot_agg_cnt));
1182 memset(hw_lro_tot_flush_cnt, 0, sizeof(hw_lro_tot_flush_cnt));
1183 memset(hw_lro_agg_flush_cnt, 0, sizeof(hw_lro_agg_flush_cnt));
1184 memset(hw_lro_age_flush_cnt, 0, sizeof(hw_lro_age_flush_cnt));
1185 memset(hw_lro_seq_flush_cnt, 0, sizeof(hw_lro_seq_flush_cnt));
1186 memset(hw_lro_timestamp_flush_cnt, 0,
1187 sizeof(hw_lro_timestamp_flush_cnt));
1188 memset(hw_lro_norule_flush_cnt, 0, sizeof(hw_lro_norule_flush_cnt));
1189
1190 pr_info("clear hw lro cnt table\n");
1191
1192 return count;
1193}
1194
1195int hw_lro_stats_read_v1(struct seq_file *seq, void *v)
1196{
1197 int i;
1198
1199 seq_puts(seq, "HW LRO statistic dump:\n");
1200
1201 /* Agg number count */
1202 seq_puts(seq, "Cnt: RING1 | RING2 | RING3 | Total\n");
1203 for (i = 0; i <= MTK_HW_LRO_MAX_AGG_CNT; i++) {
1204 seq_printf(seq, " %d : %d %d %d %d\n",
1205 i, hw_lro_agg_num_cnt[0][i],
1206 hw_lro_agg_num_cnt[1][i], hw_lro_agg_num_cnt[2][i],
1207 hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] +
1208 hw_lro_agg_num_cnt[2][i]);
1209 }
1210
1211 /* Total agg count */
1212 seq_puts(seq, "Total agg: RING1 | RING2 | RING3 | Total\n");
1213 seq_printf(seq, " %d %d %d %d\n",
1214 hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1],
1215 hw_lro_tot_agg_cnt[2],
1216 hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
1217 hw_lro_tot_agg_cnt[2]);
1218
1219 /* Total flush count */
1220 seq_puts(seq, "Total flush: RING1 | RING2 | RING3 | Total\n");
1221 seq_printf(seq, " %d %d %d %d\n",
1222 hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1],
1223 hw_lro_tot_flush_cnt[2],
1224 hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
1225 hw_lro_tot_flush_cnt[2]);
1226
1227 /* Avg agg count */
1228 seq_puts(seq, "Avg agg: RING1 | RING2 | RING3 | Total\n");
1229 seq_printf(seq, " %d %d %d %d\n",
1230 (hw_lro_tot_flush_cnt[0]) ?
1231 hw_lro_tot_agg_cnt[0] / hw_lro_tot_flush_cnt[0] : 0,
1232 (hw_lro_tot_flush_cnt[1]) ?
1233 hw_lro_tot_agg_cnt[1] / hw_lro_tot_flush_cnt[1] : 0,
1234 (hw_lro_tot_flush_cnt[2]) ?
1235 hw_lro_tot_agg_cnt[2] / hw_lro_tot_flush_cnt[2] : 0,
1236 (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
1237 hw_lro_tot_flush_cnt[2]) ?
1238 ((hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
1239 hw_lro_tot_agg_cnt[2]) / (hw_lro_tot_flush_cnt[0] +
1240 hw_lro_tot_flush_cnt[1] + hw_lro_tot_flush_cnt[2])) : 0);
1241
1242 /* Statistics of aggregation size counts */
1243 seq_puts(seq, "HW LRO flush pkt len:\n");
1244 seq_puts(seq, " Length | RING1 | RING2 | RING3 | Total\n");
1245 for (i = 0; i < 15; i++) {
1246 seq_printf(seq, "%d~%d: %d %d %d %d\n", i * 5000,
1247 (i + 1) * 5000, hw_lro_agg_size_cnt[0][i],
1248 hw_lro_agg_size_cnt[1][i], hw_lro_agg_size_cnt[2][i],
1249 hw_lro_agg_size_cnt[0][i] +
1250 hw_lro_agg_size_cnt[1][i] +
1251 hw_lro_agg_size_cnt[2][i]);
1252 }
1253
1254 seq_puts(seq, "Flush reason: RING1 | RING2 | RING3 | Total\n");
1255 seq_printf(seq, "AGG timeout: %d %d %d %d\n",
1256 hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1],
1257 hw_lro_agg_flush_cnt[2],
1258 (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] +
1259 hw_lro_agg_flush_cnt[2]));
1260
1261 seq_printf(seq, "AGE timeout: %d %d %d %d\n",
1262 hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1],
1263 hw_lro_age_flush_cnt[2],
1264 (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] +
1265 hw_lro_age_flush_cnt[2]));
1266
1267 seq_printf(seq, "Not in-sequence: %d %d %d %d\n",
1268 hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1],
1269 hw_lro_seq_flush_cnt[2],
1270 (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] +
1271 hw_lro_seq_flush_cnt[2]));
1272
1273 seq_printf(seq, "Timestamp: %d %d %d %d\n",
1274 hw_lro_timestamp_flush_cnt[0],
1275 hw_lro_timestamp_flush_cnt[1],
1276 hw_lro_timestamp_flush_cnt[2],
1277 (hw_lro_timestamp_flush_cnt[0] +
1278 hw_lro_timestamp_flush_cnt[1] +
1279 hw_lro_timestamp_flush_cnt[2]));
1280
1281 seq_printf(seq, "No LRO rule: %d %d %d %d\n",
1282 hw_lro_norule_flush_cnt[0],
1283 hw_lro_norule_flush_cnt[1],
1284 hw_lro_norule_flush_cnt[2],
1285 (hw_lro_norule_flush_cnt[0] +
1286 hw_lro_norule_flush_cnt[1] +
1287 hw_lro_norule_flush_cnt[2]));
1288
1289 return 0;
1290}
1291
1292int hw_lro_stats_read_v2(struct seq_file *seq, void *v)
1293{
1294 int i;
1295
1296 seq_puts(seq, "HW LRO statistic dump:\n");
1297
1298 /* Agg number count */
1299 seq_puts(seq, "Cnt: RING4 | RING5 | RING6 | RING7 Total\n");
1300 for (i = 0; i <= MTK_HW_LRO_MAX_AGG_CNT; i++) {
1301 seq_printf(seq,
1302 " %d : %d %d %d %d %d\n",
1303 i, hw_lro_agg_num_cnt[0][i], hw_lro_agg_num_cnt[1][i],
1304 hw_lro_agg_num_cnt[2][i], hw_lro_agg_num_cnt[3][i],
1305 hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] +
1306 hw_lro_agg_num_cnt[2][i] + hw_lro_agg_num_cnt[3][i]);
1307 }
1308
1309 /* Total agg count */
1310 seq_puts(seq, "Total agg: RING4 | RING5 | RING6 | RING7 Total\n");
1311 seq_printf(seq, " %d %d %d %d %d\n",
1312 hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1],
1313 hw_lro_tot_agg_cnt[2], hw_lro_tot_agg_cnt[3],
1314 hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
1315 hw_lro_tot_agg_cnt[2] + hw_lro_tot_agg_cnt[3]);
1316
1317 /* Total flush count */
1318 seq_puts(seq, "Total flush: RING4 | RING5 | RING6 | RING7 Total\n");
1319 seq_printf(seq, " %d %d %d %d %d\n",
1320 hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1],
1321 hw_lro_tot_flush_cnt[2], hw_lro_tot_flush_cnt[3],
1322 hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
1323 hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3]);
1324
1325 /* Avg agg count */
1326 seq_puts(seq, "Avg agg: RING4 | RING5 | RING6 | RING7 Total\n");
1327 seq_printf(seq, " %d %d %d %d %d\n",
1328 (hw_lro_tot_flush_cnt[0]) ?
1329 hw_lro_tot_agg_cnt[0] / hw_lro_tot_flush_cnt[0] : 0,
1330 (hw_lro_tot_flush_cnt[1]) ?
1331 hw_lro_tot_agg_cnt[1] / hw_lro_tot_flush_cnt[1] : 0,
1332 (hw_lro_tot_flush_cnt[2]) ?
1333 hw_lro_tot_agg_cnt[2] / hw_lro_tot_flush_cnt[2] : 0,
1334 (hw_lro_tot_flush_cnt[3]) ?
1335 hw_lro_tot_agg_cnt[3] / hw_lro_tot_flush_cnt[3] : 0,
1336 (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
1337 hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3]) ?
1338 ((hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
1339 hw_lro_tot_agg_cnt[2] + hw_lro_tot_agg_cnt[3]) /
1340 (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
1341 hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3])) : 0);
1342
1343 /* Statistics of aggregation size counts */
1344 seq_puts(seq, "HW LRO flush pkt len:\n");
1345 seq_puts(seq, " Length | RING4 | RING5 | RING6 | RING7 Total\n");
1346 for (i = 0; i < 15; i++) {
1347 seq_printf(seq, "%d~%d: %d %d %d %d %d\n",
1348 i * 5000, (i + 1) * 5000,
1349 hw_lro_agg_size_cnt[0][i], hw_lro_agg_size_cnt[1][i],
1350 hw_lro_agg_size_cnt[2][i], hw_lro_agg_size_cnt[3][i],
1351 hw_lro_agg_size_cnt[0][i] +
1352 hw_lro_agg_size_cnt[1][i] +
1353 hw_lro_agg_size_cnt[2][i] +
1354 hw_lro_agg_size_cnt[3][i]);
1355 }
1356
1357 seq_puts(seq, "Flush reason: RING4 | RING5 | RING6 | RING7 Total\n");
1358 seq_printf(seq, "AGG timeout: %d %d %d %d %d\n",
1359 hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1],
1360 hw_lro_agg_flush_cnt[2], hw_lro_agg_flush_cnt[3],
1361 (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] +
1362 hw_lro_agg_flush_cnt[2] + hw_lro_agg_flush_cnt[3]));
1363
1364 seq_printf(seq, "AGE timeout: %d %d %d %d %d\n",
1365 hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1],
1366 hw_lro_age_flush_cnt[2], hw_lro_age_flush_cnt[3],
1367 (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] +
1368 hw_lro_age_flush_cnt[2] + hw_lro_age_flush_cnt[3]));
1369
1370 seq_printf(seq, "Not in-sequence: %d %d %d %d %d\n",
1371 hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1],
1372 hw_lro_seq_flush_cnt[2], hw_lro_seq_flush_cnt[3],
1373 (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] +
1374 hw_lro_seq_flush_cnt[2] + hw_lro_seq_flush_cnt[3]));
1375
1376 seq_printf(seq, "Timestamp: %d %d %d %d %d\n",
1377 hw_lro_timestamp_flush_cnt[0],
1378 hw_lro_timestamp_flush_cnt[1],
1379 hw_lro_timestamp_flush_cnt[2],
1380 hw_lro_timestamp_flush_cnt[3],
1381 (hw_lro_timestamp_flush_cnt[0] +
1382 hw_lro_timestamp_flush_cnt[1] +
1383 hw_lro_timestamp_flush_cnt[2] +
1384 hw_lro_timestamp_flush_cnt[3]));
1385
1386 seq_printf(seq, "No LRO rule: %d %d %d %d %d\n",
1387 hw_lro_norule_flush_cnt[0],
1388 hw_lro_norule_flush_cnt[1],
1389 hw_lro_norule_flush_cnt[2],
1390 hw_lro_norule_flush_cnt[3],
1391 (hw_lro_norule_flush_cnt[0] +
1392 hw_lro_norule_flush_cnt[1] +
1393 hw_lro_norule_flush_cnt[2] +
1394 hw_lro_norule_flush_cnt[3]));
1395
1396 return 0;
1397}
1398
1399int hw_lro_stats_read_wrapper(struct seq_file *seq, void *v)
1400{
1401 struct mtk_eth *eth = g_eth;
1402
developer089e8852022-09-28 14:43:46 +08001403 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1404 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer77d03a72021-06-06 00:06:00 +08001405 hw_lro_stats_read_v2(seq, v);
1406 else
1407 hw_lro_stats_read_v1(seq, v);
1408
1409 return 0;
1410}
1411
1412static int hw_lro_stats_open(struct inode *inode, struct file *file)
1413{
1414 return single_open(file, hw_lro_stats_read_wrapper, NULL);
1415}
1416
1417static const struct file_operations hw_lro_stats_fops = {
1418 .owner = THIS_MODULE,
1419 .open = hw_lro_stats_open,
1420 .read = seq_read,
1421 .llseek = seq_lseek,
1422 .write = hw_lro_stats_write,
developerfd40db22021-04-29 10:08:25 +08001423 .release = single_release
1424};
1425
developer77d03a72021-06-06 00:06:00 +08001426int hwlro_agg_cnt_ctrl(int cnt)
1427{
1428 int i;
1429
1430 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1431 SET_PDMA_RXRING_MAX_AGG_CNT(g_eth, i, cnt);
1432
1433 return 0;
1434}
1435
1436int hwlro_agg_time_ctrl(int time)
1437{
1438 int i;
1439
1440 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1441 SET_PDMA_RXRING_AGG_TIME(g_eth, i, time);
1442
1443 return 0;
1444}
1445
1446int hwlro_age_time_ctrl(int time)
1447{
1448 int i;
1449
1450 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1451 SET_PDMA_RXRING_AGE_TIME(g_eth, i, time);
1452
1453 return 0;
1454}
1455
1456int hwlro_threshold_ctrl(int bandwidth)
1457{
1458 SET_PDMA_LRO_BW_THRESHOLD(g_eth, bandwidth);
1459
1460 return 0;
1461}
1462
1463int hwlro_ring_enable_ctrl(int enable)
1464{
1465 int i;
1466
1467 pr_info("[%s] %s HW LRO rings\n", __func__, (enable) ? "Enable" : "Disable");
1468
1469 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1470 SET_PDMA_RXRING_VALID(g_eth, i, enable);
1471
1472 return 0;
1473}
1474
1475int hwlro_stats_enable_ctrl(int enable)
1476{
1477 pr_info("[%s] %s HW LRO statistics\n", __func__, (enable) ? "Enable" : "Disable");
1478 mtk_hwlro_stats_ebl = enable;
1479
1480 return 0;
1481}
1482
1483static const mtk_lro_dbg_func lro_dbg_func[] = {
1484 [0] = hwlro_agg_cnt_ctrl,
1485 [1] = hwlro_agg_time_ctrl,
1486 [2] = hwlro_age_time_ctrl,
1487 [3] = hwlro_threshold_ctrl,
1488 [4] = hwlro_ring_enable_ctrl,
1489 [5] = hwlro_stats_enable_ctrl,
1490};
1491
1492ssize_t hw_lro_auto_tlb_write(struct file *file, const char __user *buffer,
1493 size_t count, loff_t *data)
1494{
1495 char buf[32];
1496 char *p_buf;
1497 char *p_token = NULL;
1498 char *p_delimiter = " \t";
1499 long x = 0, y = 0;
developer4c32b7a2021-11-13 16:46:43 +08001500 u32 len = count;
developer77d03a72021-06-06 00:06:00 +08001501 int ret;
1502
1503 if (len >= sizeof(buf)) {
1504 pr_info("Input handling fail!\n");
developer77d03a72021-06-06 00:06:00 +08001505 return -1;
1506 }
1507
1508 if (copy_from_user(buf, buffer, len))
1509 return -EFAULT;
1510
1511 buf[len] = '\0';
1512
1513 p_buf = buf;
1514 p_token = strsep(&p_buf, p_delimiter);
1515 if (!p_token)
1516 x = 0;
1517 else
1518 ret = kstrtol(p_token, 10, &x);
1519
1520 p_token = strsep(&p_buf, "\t\n ");
1521 if (p_token)
1522 ret = kstrtol(p_token, 10, &y);
1523
1524 if (lro_dbg_func[x] && (ARRAY_SIZE(lro_dbg_func) > x))
1525 (*lro_dbg_func[x]) (y);
1526
1527 return count;
1528}
1529
1530void hw_lro_auto_tlb_dump_v1(struct seq_file *seq, u32 index)
1531{
1532 int i;
1533 struct mtk_lro_alt_v1 alt;
1534 __be32 addr;
1535 u32 tlb_info[9];
1536 u32 dw_len, cnt, priority;
1537 u32 entry;
1538
1539 if (index > 4)
1540 index = index - 1;
1541 entry = (index * 9) + 1;
1542
1543 /* read valid entries of the auto-learn table */
1544 mtk_w32(g_eth, entry, MTK_FE_ALT_CF8);
1545
1546 for (i = 0; i < 9; i++)
1547 tlb_info[i] = mtk_r32(g_eth, MTK_FE_ALT_SEQ_CFC);
1548
1549 memcpy(&alt, tlb_info, sizeof(struct mtk_lro_alt_v1));
1550
1551 dw_len = alt.alt_info7.dw_len;
1552 cnt = alt.alt_info6.cnt;
1553
1554 if (mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW0) & MTK_LRO_ALT_PKT_CNT_MODE)
1555 priority = cnt; /* packet count */
1556 else
1557 priority = dw_len; /* byte count */
1558
1559 /* dump valid entries of the auto-learn table */
1560 if (index >= 4)
1561 seq_printf(seq, "\n===== TABLE Entry: %d (Act) =====\n", index);
1562 else
1563 seq_printf(seq, "\n===== TABLE Entry: %d (LRU) =====\n", index);
1564
1565 if (alt.alt_info8.ipv4) {
1566 addr = htonl(alt.alt_info1.sip0);
1567 seq_printf(seq, "SIP = %pI4 (IPv4)\n", &addr);
1568 } else {
1569 seq_printf(seq, "SIP = %08X:%08X:%08X:%08X (IPv6)\n",
1570 alt.alt_info4.sip3, alt.alt_info3.sip2,
1571 alt.alt_info2.sip1, alt.alt_info1.sip0);
1572 }
1573
1574 seq_printf(seq, "DIP_ID = %d\n", alt.alt_info8.dip_id);
1575 seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
1576 alt.alt_info0.stp, alt.alt_info0.dtp);
1577 seq_printf(seq, "VLAN_VID_VLD = %d\n", alt.alt_info6.vlan_vid_vld);
1578 seq_printf(seq, "VLAN1 = %d | VLAN2 = %d | VLAN3 = %d | VLAN4 =%d\n",
1579 (alt.alt_info5.vlan_vid0 & 0xfff),
1580 ((alt.alt_info5.vlan_vid0 >> 12) & 0xfff),
1581 ((alt.alt_info6.vlan_vid1 << 8) |
1582 ((alt.alt_info5.vlan_vid0 >> 24) & 0xfff)),
1583 ((alt.alt_info6.vlan_vid1 >> 4) & 0xfff));
1584 seq_printf(seq, "TPUT = %d | FREQ = %d\n", dw_len, cnt);
1585 seq_printf(seq, "PRIORITY = %d\n", priority);
1586}
1587
1588void hw_lro_auto_tlb_dump_v2(struct seq_file *seq, u32 index)
1589{
1590 int i;
1591 struct mtk_lro_alt_v2 alt;
1592 u32 score = 0, ipv4 = 0;
1593 u32 ipv6[4] = { 0 };
1594 u32 tlb_info[12];
1595
1596 /* read valid entries of the auto-learn table */
1597 mtk_w32(g_eth, index << MTK_LRO_ALT_INDEX_OFFSET, MTK_LRO_ALT_DBG);
1598
1599 for (i = 0; i < 11; i++)
1600 tlb_info[i] = mtk_r32(g_eth, MTK_LRO_ALT_DBG_DATA);
1601
1602 memcpy(&alt, tlb_info, sizeof(struct mtk_lro_alt_v2));
1603
1604 if (mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW0) & MTK_LRO_ALT_PKT_CNT_MODE)
1605 score = 1; /* packet count */
1606 else
1607 score = 0; /* byte count */
1608
1609 /* dump valid entries of the auto-learn table */
1610 if (alt.alt_info0.valid) {
1611 if (index < 5)
1612 seq_printf(seq,
1613 "\n===== TABLE Entry: %d (onging) =====\n",
1614 index);
1615 else
1616 seq_printf(seq,
1617 "\n===== TABLE Entry: %d (candidate) =====\n",
1618 index);
1619
1620 if (alt.alt_info1.v4_valid) {
1621 ipv4 = (alt.alt_info4.sip0_h << 23) |
1622 alt.alt_info5.sip0_l;
1623 seq_printf(seq, "SIP = 0x%x: (IPv4)\n", ipv4);
1624
1625 ipv4 = (alt.alt_info8.dip0_h << 23) |
1626 alt.alt_info9.dip0_l;
1627 seq_printf(seq, "DIP = 0x%x: (IPv4)\n", ipv4);
1628 } else if (alt.alt_info1.v6_valid) {
1629 ipv6[3] = (alt.alt_info1.sip3_h << 23) |
1630 (alt.alt_info2.sip3_l << 9);
1631 ipv6[2] = (alt.alt_info2.sip2_h << 23) |
1632 (alt.alt_info3.sip2_l << 9);
1633 ipv6[1] = (alt.alt_info3.sip1_h << 23) |
1634 (alt.alt_info4.sip1_l << 9);
1635 ipv6[0] = (alt.alt_info4.sip0_h << 23) |
1636 (alt.alt_info5.sip0_l << 9);
1637 seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
1638 ipv6[3], ipv6[2], ipv6[1], ipv6[0]);
1639
1640 ipv6[3] = (alt.alt_info5.dip3_h << 23) |
1641 (alt.alt_info6.dip3_l << 9);
1642 ipv6[2] = (alt.alt_info6.dip2_h << 23) |
1643 (alt.alt_info7.dip2_l << 9);
1644 ipv6[1] = (alt.alt_info7.dip1_h << 23) |
1645 (alt.alt_info8.dip1_l << 9);
1646 ipv6[0] = (alt.alt_info8.dip0_h << 23) |
1647 (alt.alt_info9.dip0_l << 9);
1648 seq_printf(seq, "DIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
1649 ipv6[3], ipv6[2], ipv6[1], ipv6[0]);
1650 }
1651
1652 seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
1653 (alt.alt_info9.sp_h << 7) | (alt.alt_info10.sp_l),
1654 alt.alt_info10.dp);
1655 }
1656}
1657
1658int hw_lro_auto_tlb_read(struct seq_file *seq, void *v)
1659{
1660 int i;
1661 u32 reg_val;
1662 u32 reg_op1, reg_op2, reg_op3, reg_op4;
1663 u32 agg_cnt, agg_time, age_time;
1664
1665 seq_puts(seq, "Usage of /proc/mtketh/hw_lro_auto_tlb:\n");
1666 seq_puts(seq, "echo [function] [setting] > /proc/mtketh/hw_lro_auto_tlb\n");
1667 seq_puts(seq, "Functions:\n");
1668 seq_puts(seq, "[0] = hwlro_agg_cnt_ctrl\n");
1669 seq_puts(seq, "[1] = hwlro_agg_time_ctrl\n");
1670 seq_puts(seq, "[2] = hwlro_age_time_ctrl\n");
1671 seq_puts(seq, "[3] = hwlro_threshold_ctrl\n");
1672 seq_puts(seq, "[4] = hwlro_ring_enable_ctrl\n");
1673 seq_puts(seq, "[5] = hwlro_stats_enable_ctrl\n\n");
1674
developer089e8852022-09-28 14:43:46 +08001675 if (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V2) ||
1676 MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V3)) {
developer77d03a72021-06-06 00:06:00 +08001677 for (i = 1; i <= 8; i++)
1678 hw_lro_auto_tlb_dump_v2(seq, i);
1679 } else {
1680 /* Read valid entries of the auto-learn table */
1681 mtk_w32(g_eth, 0, MTK_FE_ALT_CF8);
1682 reg_val = mtk_r32(g_eth, MTK_FE_ALT_SEQ_CFC);
1683
1684 seq_printf(seq,
1685 "HW LRO Auto-learn Table: (MTK_FE_ALT_SEQ_CFC=0x%x)\n",
1686 reg_val);
1687
1688 for (i = 7; i >= 0; i--) {
1689 if (reg_val & (1 << i))
1690 hw_lro_auto_tlb_dump_v1(seq, i);
1691 }
1692 }
1693
1694 /* Read the agg_time/age_time/agg_cnt of LRO rings */
1695 seq_puts(seq, "\nHW LRO Ring Settings\n");
1696
1697 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
1698 reg_op1 = mtk_r32(g_eth, MTK_LRO_CTRL_DW1_CFG(i));
1699 reg_op2 = mtk_r32(g_eth, MTK_LRO_CTRL_DW2_CFG(i));
1700 reg_op3 = mtk_r32(g_eth, MTK_LRO_CTRL_DW3_CFG(i));
1701 reg_op4 = mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW2);
1702
1703 agg_cnt =
1704 ((reg_op3 & 0x3) << 6) |
1705 ((reg_op2 >> MTK_LRO_RING_AGG_CNT_L_OFFSET) & 0x3f);
1706 agg_time = (reg_op2 >> MTK_LRO_RING_AGG_TIME_OFFSET) & 0xffff;
1707 age_time =
1708 ((reg_op2 & 0x3f) << 10) |
1709 ((reg_op1 >> MTK_LRO_RING_AGE_TIME_L_OFFSET) & 0x3ff);
1710 seq_printf(seq,
1711 "Ring[%d]: MAX_AGG_CNT=%d, AGG_TIME=%d, AGE_TIME=%d, Threshold=%d\n",
developer089e8852022-09-28 14:43:46 +08001712 (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V1)) ? i : i+3,
developer77d03a72021-06-06 00:06:00 +08001713 agg_cnt, agg_time, age_time, reg_op4);
1714 }
1715
1716 seq_puts(seq, "\n");
1717
1718 return 0;
1719}
1720
1721static int hw_lro_auto_tlb_open(struct inode *inode, struct file *file)
1722{
1723 return single_open(file, hw_lro_auto_tlb_read, NULL);
1724}
1725
1726static const struct file_operations hw_lro_auto_tlb_fops = {
1727 .owner = THIS_MODULE,
1728 .open = hw_lro_auto_tlb_open,
1729 .read = seq_read,
1730 .llseek = seq_lseek,
1731 .write = hw_lro_auto_tlb_write,
1732 .release = single_release
1733};
developerfd40db22021-04-29 10:08:25 +08001734
developer8051e042022-04-08 13:26:36 +08001735int reset_event_read(struct seq_file *seq, void *v)
1736{
1737 struct mtk_eth *eth = g_eth;
1738 struct mtk_reset_event reset_event = eth->reset_event;
1739
1740 seq_printf(seq, "[Event] [Count]\n");
1741 seq_printf(seq, " FQ Empty: %d\n",
1742 reset_event.count[MTK_EVENT_FQ_EMPTY]);
1743 seq_printf(seq, " TSO Fail: %d\n",
1744 reset_event.count[MTK_EVENT_TSO_FAIL]);
1745 seq_printf(seq, " TSO Illegal: %d\n",
1746 reset_event.count[MTK_EVENT_TSO_ILLEGAL]);
1747 seq_printf(seq, " TSO Align: %d\n",
1748 reset_event.count[MTK_EVENT_TSO_ALIGN]);
1749 seq_printf(seq, " RFIFO OV: %d\n",
1750 reset_event.count[MTK_EVENT_RFIFO_OV]);
1751 seq_printf(seq, " RFIFO UF: %d\n",
1752 reset_event.count[MTK_EVENT_RFIFO_UF]);
1753 seq_printf(seq, " Force: %d\n",
1754 reset_event.count[MTK_EVENT_FORCE]);
1755 seq_printf(seq, "----------------------------\n");
1756 seq_printf(seq, " Warm Cnt: %d\n",
1757 reset_event.count[MTK_EVENT_WARM_CNT]);
1758 seq_printf(seq, " Cold Cnt: %d\n",
1759 reset_event.count[MTK_EVENT_COLD_CNT]);
1760 seq_printf(seq, " Total Cnt: %d\n",
1761 reset_event.count[MTK_EVENT_TOTAL_CNT]);
1762
1763 return 0;
1764}
1765
1766static int reset_event_open(struct inode *inode, struct file *file)
1767{
1768 return single_open(file, reset_event_read, 0);
1769}
1770
1771ssize_t reset_event_write(struct file *file, const char __user *buffer,
1772 size_t count, loff_t *data)
1773{
1774 struct mtk_eth *eth = g_eth;
1775 struct mtk_reset_event *reset_event = &eth->reset_event;
1776
1777 memset(reset_event, 0, sizeof(struct mtk_reset_event));
1778 pr_info("MTK reset event counter is cleared !\n");
1779
1780 return count;
1781}
1782
/*
 * proc file_operations for the reset-event entry: reads dump the counters
 * via seq_file, any write clears them (see reset_event_write above).
 */
static const struct file_operations reset_event_fops = {
	.owner = THIS_MODULE,
	.open = reset_event_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = reset_event_write,
	.release = single_release
};
1791
1792
developerfd40db22021-04-29 10:08:25 +08001793struct proc_dir_entry *proc_reg_dir;
developer621ca6b2023-01-11 11:08:46 +08001794static struct proc_dir_entry *proc_esw_cnt, *proc_xfi_cnt,
1795 *proc_dbg_regs, *proc_reset_event;
developerfd40db22021-04-29 10:08:25 +08001796
1797int debug_proc_init(struct mtk_eth *eth)
1798{
1799 g_eth = eth;
1800
1801 if (!proc_reg_dir)
1802 proc_reg_dir = proc_mkdir(PROCREG_DIR, NULL);
1803
1804 proc_tx_ring =
1805 proc_create(PROCREG_TXRING, 0, proc_reg_dir, &tx_ring_fops);
1806 if (!proc_tx_ring)
1807 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_TXRING);
1808
developer8051e042022-04-08 13:26:36 +08001809 proc_hwtx_ring =
1810 proc_create(PROCREG_HWTXRING, 0, proc_reg_dir, &hwtx_ring_fops);
1811 if (!proc_hwtx_ring)
1812 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_HWTXRING);
1813
developerfd40db22021-04-29 10:08:25 +08001814 proc_rx_ring =
1815 proc_create(PROCREG_RXRING, 0, proc_reg_dir, &rx_ring_fops);
1816 if (!proc_rx_ring)
1817 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RXRING);
1818
1819 proc_esw_cnt =
1820 proc_create(PROCREG_ESW_CNT, 0, proc_reg_dir, &switch_count_fops);
1821 if (!proc_esw_cnt)
1822 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_ESW_CNT);
1823
developer621ca6b2023-01-11 11:08:46 +08001824 if (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V3)) {
1825 proc_xfi_cnt =
1826 proc_create(PROCREG_XFI_CNT, 0,
1827 proc_reg_dir, &xfi_count_fops);
1828 if (!proc_xfi_cnt)
1829 pr_notice("!! FAIL to create %s PROC !!\n",
1830 PROCREG_XFI_CNT);
1831 }
1832
developerfd40db22021-04-29 10:08:25 +08001833 proc_dbg_regs =
1834 proc_create(PROCREG_DBG_REGS, 0, proc_reg_dir, &dbg_regs_fops);
1835 if (!proc_dbg_regs)
1836 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_DBG_REGS);
1837
developer77d03a72021-06-06 00:06:00 +08001838 if (g_eth->hwlro) {
1839 proc_hw_lro_stats =
1840 proc_create(PROCREG_HW_LRO_STATS, 0, proc_reg_dir,
1841 &hw_lro_stats_fops);
1842 if (!proc_hw_lro_stats)
1843 pr_info("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_STATS);
1844
1845 proc_hw_lro_auto_tlb =
1846 proc_create(PROCREG_HW_LRO_AUTO_TLB, 0, proc_reg_dir,
1847 &hw_lro_auto_tlb_fops);
1848 if (!proc_hw_lro_auto_tlb)
1849 pr_info("!! FAIL to create %s PROC !!\n",
1850 PROCREG_HW_LRO_AUTO_TLB);
1851 }
1852
developer8051e042022-04-08 13:26:36 +08001853 proc_reset_event =
1854 proc_create(PROCREG_RESET_EVENT, 0, proc_reg_dir, &reset_event_fops);
1855 if (!proc_reset_event)
1856 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RESET_EVENT);
1857
developerfd40db22021-04-29 10:08:25 +08001858 return 0;
1859}
1860
1861void debug_proc_exit(void)
1862{
1863 if (proc_tx_ring)
1864 remove_proc_entry(PROCREG_TXRING, proc_reg_dir);
developer8051e042022-04-08 13:26:36 +08001865 if (proc_hwtx_ring)
1866 remove_proc_entry(PROCREG_HWTXRING, proc_reg_dir);
developerfd40db22021-04-29 10:08:25 +08001867 if (proc_rx_ring)
1868 remove_proc_entry(PROCREG_RXRING, proc_reg_dir);
1869
1870 if (proc_esw_cnt)
1871 remove_proc_entry(PROCREG_ESW_CNT, proc_reg_dir);
1872
developer621ca6b2023-01-11 11:08:46 +08001873 if (proc_xfi_cnt)
1874 remove_proc_entry(PROCREG_XFI_CNT, proc_reg_dir);
1875
developerfd40db22021-04-29 10:08:25 +08001876 if (proc_reg_dir)
1877 remove_proc_entry(PROCREG_DIR, 0);
1878
1879 if (proc_dbg_regs)
1880 remove_proc_entry(PROCREG_DBG_REGS, proc_reg_dir);
developer77d03a72021-06-06 00:06:00 +08001881
1882 if (g_eth->hwlro) {
1883 if (proc_hw_lro_stats)
1884 remove_proc_entry(PROCREG_HW_LRO_STATS, proc_reg_dir);
1885
1886 if (proc_hw_lro_auto_tlb)
1887 remove_proc_entry(PROCREG_HW_LRO_AUTO_TLB, proc_reg_dir);
1888 }
developer8051e042022-04-08 13:26:36 +08001889
1890 if (proc_reset_event)
1891 remove_proc_entry(PROCREG_RESET_EVENT, proc_reg_dir);
developerfd40db22021-04-29 10:08:25 +08001892}
1893