blob: 574440dbe93964dab3e664fb135e0538a2df54c9 [file] [log] [blame]
developerfd40db22021-04-29 10:08:25 +08001/*
2 * Copyright (C) 2018 MediaTek Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
14 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
15 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
16 */
17
18#include <linux/trace_seq.h>
19#include <linux/seq_file.h>
20#include <linux/proc_fs.h>
21#include <linux/u64_stats_sync.h>
22#include <linux/dma-mapping.h>
23#include <linux/netdevice.h>
24#include <linux/ctype.h>
25#include <linux/debugfs.h>
26#include <linux/of_mdio.h>
27
28#include "mtk_eth_soc.h"
29#include "mtk_eth_dbg.h"
30
developer77d03a72021-06-06 00:06:00 +080031u32 hw_lro_agg_num_cnt[MTK_HW_LRO_RING_NUM][MTK_HW_LRO_MAX_AGG_CNT + 1];
32u32 hw_lro_agg_size_cnt[MTK_HW_LRO_RING_NUM][16];
33u32 hw_lro_tot_agg_cnt[MTK_HW_LRO_RING_NUM];
34u32 hw_lro_tot_flush_cnt[MTK_HW_LRO_RING_NUM];
35u32 hw_lro_agg_flush_cnt[MTK_HW_LRO_RING_NUM];
36u32 hw_lro_age_flush_cnt[MTK_HW_LRO_RING_NUM];
37u32 hw_lro_seq_flush_cnt[MTK_HW_LRO_RING_NUM];
38u32 hw_lro_timestamp_flush_cnt[MTK_HW_LRO_RING_NUM];
39u32 hw_lro_norule_flush_cnt[MTK_HW_LRO_RING_NUM];
40u32 mtk_hwlro_stats_ebl;
41static struct proc_dir_entry *proc_hw_lro_stats, *proc_hw_lro_auto_tlb;
42typedef int (*mtk_lro_dbg_func) (int par);
43
developerfd40db22021-04-29 10:08:25 +080044struct mtk_eth_debug {
45 struct dentry *root;
46};
47
48struct mtk_eth *g_eth;
49
50struct mtk_eth_debug eth_debug;
51
developer3957a912021-05-13 16:44:31 +080052void mt7530_mdio_w32(struct mtk_eth *eth, u16 reg, u32 val)
developerfd40db22021-04-29 10:08:25 +080053{
54 mutex_lock(&eth->mii_bus->mdio_lock);
55
56 _mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
57 _mtk_mdio_write(eth, 0x1f, (reg >> 2) & 0xf, val & 0xffff);
58 _mtk_mdio_write(eth, 0x1f, 0x10, val >> 16);
59
60 mutex_unlock(&eth->mii_bus->mdio_lock);
61}
62
63u32 mt7530_mdio_r32(struct mtk_eth *eth, u32 reg)
64{
65 u16 high, low;
66
67 mutex_lock(&eth->mii_bus->mdio_lock);
68
69 _mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
70 low = _mtk_mdio_read(eth, 0x1f, (reg >> 2) & 0xf);
71 high = _mtk_mdio_read(eth, 0x1f, 0x10);
72
73 mutex_unlock(&eth->mii_bus->mdio_lock);
74
75 return (high << 16) | (low & 0xffff);
76}
77
/* The embedded switch's register window sits at offset 0x10000 of the
 * frame engine register space; these accessors just apply that offset
 * on top of the generic mtk_w32()/mtk_r32() helpers.
 */
void mtk_switch_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	mtk_w32(eth, val, reg + 0x10000);
}
EXPORT_SYMBOL(mtk_switch_w32);

u32 mtk_switch_r32(struct mtk_eth *eth, unsigned reg)
{
	return mtk_r32(eth, reg + 0x10000);
}
EXPORT_SYMBOL(mtk_switch_r32);
89
/* debugfs "phy_regs" show routine.
 *
 * Intended to dump the first 30 C22 registers of every non-fixed-link
 * PHY, but the dump proper is still disabled (#if 0 / FIXME below), so
 * at present this only iterates the MACs and emits nothing.
 */
static int mtketh_debug_show(struct seq_file *m, void *private)
{
	struct mtk_eth *eth = m->private;
	struct mtk_mac *mac = 0;
	int i = 0;

	for (i = 0 ; i < MTK_MAX_DEVS ; i++) {
		/* Skip absent MACs and fixed links (no PHY to read). */
		if (!eth->mac[i] ||
		    of_phy_is_fixed_link(eth->mac[i]->of_node))
			continue;
		mac = eth->mac[i];
#if 0 //FIXME
		while (j < 30) {
			d = _mtk_mdio_read(eth, mac->phy_dev->addr, j);

			seq_printf(m, "phy=%d, reg=0x%08x, data=0x%08x\n",
				   mac->phy_dev->addr, j, d);
			j++;
		}
#endif
	}
	return 0;
}

static int mtketh_debug_open(struct inode *inode, struct file *file)
{
	/* i_private carries the mtk_eth pointer stashed at create time. */
	return single_open(file, mtketh_debug_show, inode->i_private);
}

static const struct file_operations mtketh_debug_fops = {
	.open = mtketh_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
125
/* debugfs "mt7530sw_regs" show routine: dumps every documented MT7530
 * switch register, 4 bytes at a time, over the indirect MDIO bridge.
 * The ranges table below lists the [start, end] offsets (inclusive) of
 * each register group to walk.
 */
static int mtketh_mt7530sw_debug_show(struct seq_file *m, void *private)
{
	struct mtk_eth *eth = m->private;
	u32 offset, data;
	int i;
	struct mt7530_ranges {
		u32 start;
		u32 end;
	} ranges[] = {
		{0x0, 0xac},
		{0x1000, 0x10e0},
		{0x1100, 0x1140},
		{0x1200, 0x1240},
		{0x1300, 0x1340},
		{0x1400, 0x1440},
		{0x1500, 0x1540},
		{0x1600, 0x1640},
		{0x1800, 0x1848},
		{0x1900, 0x1948},
		{0x1a00, 0x1a48},
		{0x1b00, 0x1b48},
		{0x1c00, 0x1c48},
		{0x1d00, 0x1d48},
		{0x1e00, 0x1e48},
		{0x1f60, 0x1ffc},
		{0x2000, 0x212c},
		{0x2200, 0x222c},
		{0x2300, 0x232c},
		{0x2400, 0x242c},
		{0x2500, 0x252c},
		{0x2600, 0x262c},
		{0x3000, 0x3014},
		{0x30c0, 0x30f8},
		{0x3100, 0x3114},
		{0x3200, 0x3214},
		{0x3300, 0x3314},
		{0x3400, 0x3414},
		{0x3500, 0x3514},
		{0x3600, 0x3614},
		{0x4000, 0x40d4},
		{0x4100, 0x41d4},
		{0x4200, 0x42d4},
		{0x4300, 0x43d4},
		{0x4400, 0x44d4},
		{0x4500, 0x45d4},
		{0x4600, 0x46d4},
		{0x4f00, 0x461c},
		{0x7000, 0x7038},
		{0x7120, 0x7124},
		{0x7800, 0x7804},
		{0x7810, 0x7810},
		{0x7830, 0x7830},
		{0x7a00, 0x7a7c},
		{0x7b00, 0x7b04},
		{0x7e00, 0x7e04},
		{0x7ffc, 0x7ffc},
	};

	if (!mt7530_exist(eth))
		return -EOPNOTSUPP;

	/* The MT7530 hangs off a fixed link; if neither MAC is a fixed
	 * link there is no switch behind this interface.
	 */
	if ((!eth->mac[0] || !of_phy_is_fixed_link(eth->mac[0]->of_node)) &&
	    (!eth->mac[1] || !of_phy_is_fixed_link(eth->mac[1]->of_node))) {
		seq_puts(m, "no switch found\n");
		return 0;
	}

	for (i = 0 ; i < ARRAY_SIZE(ranges) ; i++) {
		for (offset = ranges[i].start;
		     offset <= ranges[i].end; offset += 4) {
			data = mt7530_mdio_r32(eth, offset);
			seq_printf(m, "mt7530 switch reg=0x%08x, data=0x%08x\n",
				   offset, data);
		}
	}

	return 0;
}
204
/* debugfs open hook for "mt7530sw_regs" (read-only seq_file). */
static int mtketh_debug_mt7530sw_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtketh_mt7530sw_debug_show, inode->i_private);
}

static const struct file_operations mtketh_debug_mt7530sw_fops = {
	.open = mtketh_debug_mt7530sw_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
216
217static ssize_t mtketh_mt7530sw_debugfs_write(struct file *file,
218 const char __user *ptr,
219 size_t len, loff_t *off)
220{
221 struct mtk_eth *eth = file->private_data;
222 char buf[32], *token, *p = buf;
223 u32 reg, value, phy;
224 int ret;
225
226 if (!mt7530_exist(eth))
227 return -EOPNOTSUPP;
228
229 if (*off != 0)
230 return 0;
231
232 if (len > sizeof(buf) - 1)
233 len = sizeof(buf) - 1;
234
235 ret = strncpy_from_user(buf, ptr, len);
236 if (ret < 0)
237 return ret;
238 buf[len] = '\0';
239
240 token = strsep(&p, " ");
241 if (!token)
242 return -EINVAL;
243 if (kstrtoul(token, 16, (unsigned long *)&phy))
244 return -EINVAL;
245
246 token = strsep(&p, " ");
247 if (!token)
248 return -EINVAL;
249 if (kstrtoul(token, 16, (unsigned long *)&reg))
250 return -EINVAL;
251
252 token = strsep(&p, " ");
253 if (!token)
254 return -EINVAL;
255 if (kstrtoul(token, 16, (unsigned long *)&value))
256 return -EINVAL;
257
258 pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
259 0x1f, reg, value);
260 mt7530_mdio_w32(eth, reg, value);
261 pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
262 0x1f, reg, mt7530_mdio_r32(eth, reg));
263
264 return len;
265}
266
267static ssize_t mtketh_debugfs_write(struct file *file, const char __user *ptr,
268 size_t len, loff_t *off)
269{
270 struct mtk_eth *eth = file->private_data;
271 char buf[32], *token, *p = buf;
272 u32 reg, value, phy;
273 int ret;
274
275 if (*off != 0)
276 return 0;
277
278 if (len > sizeof(buf) - 1)
279 len = sizeof(buf) - 1;
280
281 ret = strncpy_from_user(buf, ptr, len);
282 if (ret < 0)
283 return ret;
284 buf[len] = '\0';
285
286 token = strsep(&p, " ");
287 if (!token)
288 return -EINVAL;
289 if (kstrtoul(token, 16, (unsigned long *)&phy))
290 return -EINVAL;
291
292 token = strsep(&p, " ");
293
294 if (!token)
295 return -EINVAL;
296 if (kstrtoul(token, 16, (unsigned long *)&reg))
297 return -EINVAL;
298
299 token = strsep(&p, " ");
300
301 if (!token)
302 return -EINVAL;
303 if (kstrtoul(token, 16, (unsigned long *)&value))
304 return -EINVAL;
305
306 pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
307 phy, reg, value);
308
309 _mtk_mdio_write(eth, phy, reg, value);
310
311 pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
312 phy, reg, _mtk_mdio_read(eth, phy, reg));
313
314 return len;
315}
316
/* debugfs "reset" write handler: any write schedules the driver's
 * pending work, which performs the ethernet reset asynchronously.
 * Always reports the full @len as consumed.
 */
static ssize_t mtketh_debugfs_reset(struct file *file, const char __user *ptr,
				    size_t len, loff_t *off)
{
	struct mtk_eth *eth = file->private_data;

	schedule_work(&eth->pending_work);
	return len;
}

/* Write-only debugfs hooks: raw PHY register write, ethernet reset and
 * MT7530 switch register write.  simple_open() stashes i_private into
 * file->private_data for the write handlers above.
 */
static const struct file_operations fops_reg_w = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = mtketh_debugfs_write,
	.llseek = noop_llseek,
};

static const struct file_operations fops_eth_reset = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = mtketh_debugfs_reset,
	.llseek = noop_llseek,
};

static const struct file_operations fops_mt7530sw_reg_w = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = mtketh_mt7530sw_debugfs_write,
	.llseek = noop_llseek,
};
346
/* Tear down the whole "mtketh" debugfs directory and its files. */
void mtketh_debugfs_exit(struct mtk_eth *eth)
{
	debugfs_remove_recursive(eth_debug.root);
}

/* Create the "mtketh" debugfs directory and its entries.  The switch
 * register files are only created when an MT7530 is present.
 *
 * NOTE(review): on recent kernels debugfs_create_dir() returns an
 * ERR_PTR rather than NULL on failure, so the !root check may never
 * trigger there — verify against the target kernel version.
 */
int mtketh_debugfs_init(struct mtk_eth *eth)
{
	int ret = 0;

	eth_debug.root = debugfs_create_dir("mtketh", NULL);
	if (!eth_debug.root) {
		dev_notice(eth->dev, "%s:err at %d\n", __func__, __LINE__);
		ret = -ENOMEM;
	}

	debugfs_create_file("phy_regs", S_IRUGO,
			    eth_debug.root, eth, &mtketh_debug_fops);
	debugfs_create_file("phy_reg_w", S_IFREG | S_IWUSR,
			    eth_debug.root, eth, &fops_reg_w);
	debugfs_create_file("reset", S_IFREG | S_IWUSR,
			    eth_debug.root, eth, &fops_eth_reset);
	if (mt7530_exist(eth)) {
		debugfs_create_file("mt7530sw_regs", S_IRUGO,
				    eth_debug.root, eth,
				    &mtketh_debug_mt7530sw_fops);
		debugfs_create_file("mt7530sw_reg_w", S_IFREG | S_IWUSR,
				    eth_debug.root, eth,
				    &fops_mt7530sw_reg_w);
	}
	return ret;
}
378
379void mii_mgr_read_combine(struct mtk_eth *eth, u32 phy_addr, u32 phy_register,
380 u32 *read_data)
381{
382 if (mt7530_exist(eth) && phy_addr == 31)
383 *read_data = mt7530_mdio_r32(eth, phy_register);
384
385 else
386 *read_data = _mtk_mdio_read(eth, phy_addr, phy_register);
387}
388
developer3957a912021-05-13 16:44:31 +0800389void mii_mgr_write_combine(struct mtk_eth *eth, u16 phy_addr, u16 phy_register,
developerfd40db22021-04-29 10:08:25 +0800390 u32 write_data)
391{
392 if (mt7530_exist(eth) && phy_addr == 31)
393 mt7530_mdio_w32(eth, phy_register, write_data);
394
395 else
396 _mtk_mdio_write(eth, phy_addr, phy_register, write_data);
397}
398
developer3957a912021-05-13 16:44:31 +0800399static void mii_mgr_read_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 *data)
developerfd40db22021-04-29 10:08:25 +0800400{
401 mtk_cl45_ind_read(eth, port, devad, reg, data);
402}
403
developer3957a912021-05-13 16:44:31 +0800404static void mii_mgr_write_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 data)
developerfd40db22021-04-29 10:08:25 +0800405{
406 mtk_cl45_ind_write(eth, port, devad, reg, data);
407}
408
/* Private ioctl entry point exposing raw MII (C22 and C45) and
 * internal-switch register access to userspace.
 *
 * Returns 0 on success, -EFAULT when copying the request to/from
 * userspace fails, -EINVAL for out-of-range switch offsets, and
 * -EOPNOTSUPP for unknown commands or switch access without an MT7530.
 */
int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_mii_ioctl_data mii;
	struct mtk_esw_reg reg;
	u16 val;

	switch (cmd) {
	case MTKETH_MII_READ:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		mii_mgr_read_combine(eth, mii.phy_id, mii.reg_num,
				     &mii.val_out);
		if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
			goto err_copy;

		return 0;
	case MTKETH_MII_WRITE:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		mii_mgr_write_combine(eth, mii.phy_id, mii.reg_num,
				      mii.val_in);
		return 0;
	case MTKETH_MII_READ_CL45:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		/* C45 data is 16 bits wide: bounce through a u16 so the
		 * helper cannot write past the low half of val_out.
		 */
		mii_mgr_read_cl45(eth,
				  mdio_phy_id_prtad(mii.phy_id),
				  mdio_phy_id_devad(mii.phy_id),
				  mii.reg_num,
				  &val);
		mii.val_out = val;
		if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
			goto err_copy;

		return 0;
	case MTKETH_MII_WRITE_CL45:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		val = mii.val_in;
		mii_mgr_write_cl45(eth,
				   mdio_phy_id_prtad(mii.phy_id),
				   mdio_phy_id_devad(mii.phy_id),
				   mii.reg_num,
				   val);
		return 0;
	case MTKETH_ESW_REG_READ:
		if (!mt7530_exist(eth))
			return -EOPNOTSUPP;
		if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg)))
			goto err_copy;
		if (reg.off > REG_ESW_MAX)
			return -EINVAL;
		reg.val = mtk_switch_r32(eth, reg.off);

		if (copy_to_user(ifr->ifr_data, &reg, sizeof(reg)))
			goto err_copy;

		return 0;
	case MTKETH_ESW_REG_WRITE:
		if (!mt7530_exist(eth))
			return -EOPNOTSUPP;
		if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg)))
			goto err_copy;
		if (reg.off > REG_ESW_MAX)
			return -EINVAL;
		mtk_switch_w32(eth, reg.val, reg.off);

		return 0;
	default:
		break;
	}

	return -EOPNOTSUPP;
err_copy:
	return -EFAULT;
}
487
/* procfs dump of the frame engine GDMA MIB counters plus, when an
 * MT7530 is present, the per-port switch MIB counters.
 *
 * NOTE: the trailing writes to switch register 0x4fe0 appear to reset
 * the switch MIB counters, so reading this file clears them — verify
 * against the MT7530 datasheet.
 */
int esw_cnt_read(struct seq_file *seq, void *v)
{
	unsigned int pkt_cnt = 0;
	int i = 0;
	struct mtk_eth *eth = g_eth;
	unsigned int mib_base = MTK_GDM1_TX_GBCNT;

	seq_puts(seq, "\n		  <<CPU>>\n");
	seq_puts(seq, "		    |\n");
	seq_puts(seq, "+-----------------------------------------------+\n");
	seq_puts(seq, "|		  <<PSE>>		        |\n");
	seq_puts(seq, "+-----------------------------------------------+\n");
	seq_puts(seq, "		   |\n");
	seq_puts(seq, "+-----------------------------------------------+\n");
	seq_puts(seq, "|		  <<GDMA>>		        |\n");
	seq_printf(seq, "| GDMA1_RX_GBCNT  : %010u (Rx Good Bytes)	|\n",
		   mtk_r32(eth, mib_base));
	seq_printf(seq, "| GDMA1_RX_GPCNT  : %010u (Rx Good Pkts)	|\n",
		   mtk_r32(eth, mib_base+0x08));
	seq_printf(seq, "| GDMA1_RX_OERCNT : %010u (overflow error)	|\n",
		   mtk_r32(eth, mib_base+0x10));
	seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error)	|\n",
		   mtk_r32(eth, mib_base+0x14));
	seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short)	|\n",
		   mtk_r32(eth, mib_base+0x18));
	seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long)	|\n",
		   mtk_r32(eth, mib_base+0x1C));
	seq_printf(seq, "| GDMA1_RX_CERCNT : %010u (checksum error)	|\n",
		   mtk_r32(eth, mib_base+0x20));
	seq_printf(seq, "| GDMA1_RX_FCCNT  : %010u (flow control)	|\n",
		   mtk_r32(eth, mib_base+0x24));
	seq_printf(seq, "| GDMA1_TX_SKIPCNT: %010u (about count)	|\n",
		   mtk_r32(eth, mib_base+0x28));
	seq_printf(seq, "| GDMA1_TX_COLCNT : %010u (collision count)	|\n",
		   mtk_r32(eth, mib_base+0x2C));
	seq_printf(seq, "| GDMA1_TX_GBCNT  : %010u (Tx Good Bytes)	|\n",
		   mtk_r32(eth, mib_base+0x30));
	seq_printf(seq, "| GDMA1_TX_GPCNT  : %010u (Tx Good Pkts)	|\n",
		   mtk_r32(eth, mib_base+0x38));
	seq_puts(seq, "|						|\n");
	seq_printf(seq, "| GDMA2_RX_GBCNT  : %010u (Rx Good Bytes)	|\n",
		   mtk_r32(eth, mib_base+0x40));
	seq_printf(seq, "| GDMA2_RX_GPCNT  : %010u (Rx Good Pkts)	|\n",
		   mtk_r32(eth, mib_base+0x48));
	seq_printf(seq, "| GDMA2_RX_OERCNT : %010u (overflow error)	|\n",
		   mtk_r32(eth, mib_base+0x50));
	seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error)	|\n",
		   mtk_r32(eth, mib_base+0x54));
	seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short)	|\n",
		   mtk_r32(eth, mib_base+0x58));
	seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long)	|\n",
		   mtk_r32(eth, mib_base+0x5C));
	seq_printf(seq, "| GDMA2_RX_CERCNT : %010u (checksum error)	|\n",
		   mtk_r32(eth, mib_base+0x60));
	seq_printf(seq, "| GDMA2_RX_FCCNT  : %010u (flow control)	|\n",
		   mtk_r32(eth, mib_base+0x64));
	seq_printf(seq, "| GDMA2_TX_SKIPCNT: %010u (skip)		|\n",
		   mtk_r32(eth, mib_base+0x68));
	seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision)	|\n",
		   mtk_r32(eth, mib_base+0x6C));
	seq_printf(seq, "| GDMA2_TX_GBCNT  : %010u (Tx Good Bytes)	|\n",
		   mtk_r32(eth, mib_base+0x70));
	seq_printf(seq, "| GDMA2_TX_GPCNT  : %010u (Tx Good Pkts)	|\n",
		   mtk_r32(eth, mib_base+0x78));
	seq_puts(seq, "+-----------------------------------------------+\n");

	if (!mt7530_exist(eth))
		return 0;

	/* Print one MIB counter for each of the 7 switch ports; the
	 * per-port register stride is 0x100.
	 */
#define DUMP_EACH_PORT(base)					\
	do { \
		for (i = 0; i < 7; i++) {			\
			pkt_cnt = mt7530_mdio_r32(eth, (base) + (i * 0x100));\
			seq_printf(seq, "%8u ", pkt_cnt);	\
		}						\
		seq_puts(seq, "\n"); \
	} while (0)

	seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n",
		   "Port0", "Port1", "Port2", "Port3", "Port4", "Port5",
		   "Port6");
	seq_puts(seq, "Tx Drop Packet      :");
	DUMP_EACH_PORT(0x4000);
	seq_puts(seq, "Tx CRC Error        :");
	DUMP_EACH_PORT(0x4004);
	seq_puts(seq, "Tx Unicast Packet   :");
	DUMP_EACH_PORT(0x4008);
	seq_puts(seq, "Tx Multicast Packet :");
	DUMP_EACH_PORT(0x400C);
	seq_puts(seq, "Tx Broadcast Packet :");
	DUMP_EACH_PORT(0x4010);
	seq_puts(seq, "Tx Collision Event  :");
	DUMP_EACH_PORT(0x4014);
	seq_puts(seq, "Tx Pause Packet     :");
	DUMP_EACH_PORT(0x402C);
	seq_puts(seq, "Rx Drop Packet      :");
	DUMP_EACH_PORT(0x4060);
	seq_puts(seq, "Rx Filtering Packet :");
	DUMP_EACH_PORT(0x4064);
	seq_puts(seq, "Rx Unicast Packet   :");
	DUMP_EACH_PORT(0x4068);
	seq_puts(seq, "Rx Multicast Packet :");
	DUMP_EACH_PORT(0x406C);
	seq_puts(seq, "Rx Broadcast Packet :");
	DUMP_EACH_PORT(0x4070);
	seq_puts(seq, "Rx Alignment Error  :");
	DUMP_EACH_PORT(0x4074);
	seq_puts(seq, "Rx CRC Error	    :");
	DUMP_EACH_PORT(0x4078);
	seq_puts(seq, "Rx Undersize Error  :");
	DUMP_EACH_PORT(0x407C);
	seq_puts(seq, "Rx Fragment Error   :");
	DUMP_EACH_PORT(0x4080);
	seq_puts(seq, "Rx Oversize Error   :");
	DUMP_EACH_PORT(0x4084);
	seq_puts(seq, "Rx Jabber Error     :");
	DUMP_EACH_PORT(0x4088);
	seq_puts(seq, "Rx Pause Packet     :");
	DUMP_EACH_PORT(0x408C);
	mt7530_mdio_w32(eth, 0x4fe0, 0xf0);
	mt7530_mdio_w32(eth, 0x4fe0, 0x800000f0);

	seq_puts(seq, "\n");

	return 0;
}

static int switch_count_open(struct inode *inode, struct file *file)
{
	return single_open(file, esw_cnt_read, 0);
}

static const struct file_operations switch_count_fops = {
	.owner = THIS_MODULE,
	.open = switch_count_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
627
/* procfs entries for the TX/RX descriptor ring dumps below. */
static struct proc_dir_entry *proc_tx_ring, *proc_rx_ring;
629
630int tx_ring_read(struct seq_file *seq, void *v)
631{
632 struct mtk_tx_ring *ring = &g_eth->tx_ring;
633 struct mtk_tx_dma *tx_ring;
634 int i = 0;
635
636 tx_ring =
637 kmalloc(sizeof(struct mtk_tx_dma) * MTK_DMA_SIZE, GFP_KERNEL);
638 if (!tx_ring) {
639 seq_puts(seq, " allocate temp tx_ring fail.\n");
640 return 0;
641 }
642
643 for (i = 0; i < MTK_DMA_SIZE; i++)
644 tx_ring[i] = ring->dma[i];
645
646 seq_printf(seq, "free count = %d\n", (int)atomic_read(&ring->free_count));
647 seq_printf(seq, "cpu next free: %d\n", (int)(ring->next_free - ring->dma));
648 seq_printf(seq, "cpu last free: %d\n", (int)(ring->last_free - ring->dma));
649 for (i = 0; i < MTK_DMA_SIZE; i++) {
650 dma_addr_t tmp = ring->phys + i * sizeof(*tx_ring);
651
652 seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &tmp,
653 *(int *)&tx_ring[i].txd1, *(int *)&tx_ring[i].txd2,
654 *(int *)&tx_ring[i].txd3, *(int *)&tx_ring[i].txd4);
655#if defined(CONFIG_MEDIATEK_NETSYS_V2)
656 seq_printf(seq, " %08x %08x %08x %08x",
657 *(int *)&tx_ring[i].txd5, *(int *)&tx_ring[i].txd6,
658 *(int *)&tx_ring[i].txd7, *(int *)&tx_ring[i].txd8);
659#endif
660 seq_printf(seq, "\n");
661 }
662
663 kfree(tx_ring);
664 return 0;
665}
666
/* procfs open hook for the TX ring dump (read-only seq_file). */
static int tx_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, tx_ring_read, NULL);
}

static const struct file_operations tx_ring_fops = {
	.owner = THIS_MODULE,
	.open = tx_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
679
680int rx_ring_read(struct seq_file *seq, void *v)
681{
682 struct mtk_rx_ring *ring = &g_eth->rx_ring[0];
683 struct mtk_rx_dma *rx_ring;
684
685 int i = 0;
686
687 rx_ring =
688 kmalloc(sizeof(struct mtk_rx_dma) * MTK_DMA_SIZE, GFP_KERNEL);
689 if (!rx_ring) {
690 seq_puts(seq, " allocate temp rx_ring fail.\n");
691 return 0;
692 }
693
694 for (i = 0; i < MTK_DMA_SIZE; i++)
695 rx_ring[i] = ring->dma[i];
696
697 seq_printf(seq, "next to read: %d\n",
698 NEXT_DESP_IDX(ring->calc_idx, MTK_DMA_SIZE));
699 for (i = 0; i < MTK_DMA_SIZE; i++) {
700 seq_printf(seq, "%d: %08x %08x %08x %08x", i,
701 *(int *)&rx_ring[i].rxd1, *(int *)&rx_ring[i].rxd2,
702 *(int *)&rx_ring[i].rxd3, *(int *)&rx_ring[i].rxd4);
developera2bdbd52021-05-31 19:10:17 +0800703#if defined(CONFIG_MEDIATEK_NETSYS_V2)
developerfd40db22021-04-29 10:08:25 +0800704 seq_printf(seq, " %08x %08x %08x %08x",
705 *(int *)&rx_ring[i].rxd5, *(int *)&rx_ring[i].rxd6,
706 *(int *)&rx_ring[i].rxd7, *(int *)&rx_ring[i].rxd8);
707#endif
708 seq_printf(seq, "\n");
709 }
710
711 kfree(rx_ring);
712 return 0;
713}
714
/* procfs open hook for the RX ring dump (read-only seq_file). */
static int rx_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, rx_ring_read, NULL);
}

static const struct file_operations rx_ring_fops = {
	.owner = THIS_MODULE,
	.open = rx_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
727
developer77f3fd42021-10-05 15:16:05 +0800728static inline u32 mtk_dbg_r32(u32 reg)
729{
730 void __iomem *virt_reg;
731 u32 val;
732
733 virt_reg = ioremap(reg, 32);
734 val = __raw_readl(virt_reg);
735 iounmap(virt_reg);
736
737 return val;
738}
739
developerfd40db22021-04-29 10:08:25 +0800740int dbg_regs_read(struct seq_file *seq, void *v)
741{
742 struct mtk_eth *eth = g_eth;
743
developer77f3fd42021-10-05 15:16:05 +0800744 seq_puts(seq, " <<DEBUG REG DUMP>>\n");
745
746 seq_printf(seq, "| FE_INT_STA : %08x |\n",
747 mtk_r32(eth, MTK_INT_STATUS));
748 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
749 seq_printf(seq, "| FE_INT_STA2 : %08x |\n",
750 mtk_r32(eth, MTK_INT_STATUS2));
751
developerfd40db22021-04-29 10:08:25 +0800752 seq_printf(seq, "| PSE_FQFC_CFG : %08x |\n",
753 mtk_r32(eth, MTK_PSE_FQFC_CFG));
754 seq_printf(seq, "| PSE_IQ_STA1 : %08x |\n",
755 mtk_r32(eth, MTK_PSE_IQ_STA(0)));
756 seq_printf(seq, "| PSE_IQ_STA2 : %08x |\n",
757 mtk_r32(eth, MTK_PSE_IQ_STA(1)));
758
developera2bdbd52021-05-31 19:10:17 +0800759 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfd40db22021-04-29 10:08:25 +0800760 seq_printf(seq, "| PSE_IQ_STA3 : %08x |\n",
761 mtk_r32(eth, MTK_PSE_IQ_STA(2)));
762 seq_printf(seq, "| PSE_IQ_STA4 : %08x |\n",
763 mtk_r32(eth, MTK_PSE_IQ_STA(3)));
developer77f3fd42021-10-05 15:16:05 +0800764 seq_printf(seq, "| PSE_IQ_STA5 : %08x |\n",
765 mtk_r32(eth, MTK_PSE_IQ_STA(4)));
developerfd40db22021-04-29 10:08:25 +0800766 }
767
768 seq_printf(seq, "| PSE_OQ_STA1 : %08x |\n",
769 mtk_r32(eth, MTK_PSE_OQ_STA(0)));
770 seq_printf(seq, "| PSE_OQ_STA2 : %08x |\n",
771 mtk_r32(eth, MTK_PSE_OQ_STA(1)));
772
developera2bdbd52021-05-31 19:10:17 +0800773 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfd40db22021-04-29 10:08:25 +0800774 seq_printf(seq, "| PSE_OQ_STA3 : %08x |\n",
775 mtk_r32(eth, MTK_PSE_OQ_STA(2)));
776 seq_printf(seq, "| PSE_OQ_STA4 : %08x |\n",
777 mtk_r32(eth, MTK_PSE_OQ_STA(3)));
developer77f3fd42021-10-05 15:16:05 +0800778 seq_printf(seq, "| PSE_OQ_STA5 : %08x |\n",
779 mtk_r32(eth, MTK_PSE_OQ_STA(4)));
developerfd40db22021-04-29 10:08:25 +0800780 }
781
developer77f3fd42021-10-05 15:16:05 +0800782 seq_printf(seq, "| PDMA_CRX_IDX : %08x |\n",
783 mtk_r32(eth, MTK_PRX_CRX_IDX0));
784 seq_printf(seq, "| PDMA_DRX_IDX : %08x |\n",
785 mtk_r32(eth, MTK_PRX_DRX_IDX0));
786 seq_printf(seq, "| QDMA_CTX_IDX : %08x |\n",
787 mtk_r32(eth, MTK_QTX_CTX_PTR));
788 seq_printf(seq, "| QDMA_DTX_IDX : %08x |\n",
789 mtk_r32(eth, MTK_QTX_DTX_PTR));
developerfd40db22021-04-29 10:08:25 +0800790 seq_printf(seq, "| QDMA_FQ_CNT : %08x |\n",
791 mtk_r32(eth, MTK_QDMA_FQ_CNT));
792 seq_printf(seq, "| FE_PSE_FREE : %08x |\n",
793 mtk_r32(eth, MTK_FE_PSE_FREE));
794 seq_printf(seq, "| FE_DROP_FQ : %08x |\n",
795 mtk_r32(eth, MTK_FE_DROP_FQ));
796 seq_printf(seq, "| FE_DROP_FC : %08x |\n",
797 mtk_r32(eth, MTK_FE_DROP_FC));
798 seq_printf(seq, "| FE_DROP_PPE : %08x |\n",
799 mtk_r32(eth, MTK_FE_DROP_PPE));
800 seq_printf(seq, "| GDM1_IG_CTRL : %08x |\n",
801 mtk_r32(eth, MTK_GDMA_FWD_CFG(0)));
802 seq_printf(seq, "| GDM2_IG_CTRL : %08x |\n",
803 mtk_r32(eth, MTK_GDMA_FWD_CFG(1)));
804 seq_printf(seq, "| MAC_P1_MCR : %08x |\n",
805 mtk_r32(eth, MTK_MAC_MCR(0)));
806 seq_printf(seq, "| MAC_P2_MCR : %08x |\n",
807 mtk_r32(eth, MTK_MAC_MCR(1)));
developer77f3fd42021-10-05 15:16:05 +0800808 seq_printf(seq, "| MAC_P1_FSM : %08x |\n",
809 mtk_r32(eth, MTK_MAC_FSM(0)));
810 seq_printf(seq, "| MAC_P2_FSM : %08x |\n",
811 mtk_r32(eth, MTK_MAC_FSM(1)));
developerfd40db22021-04-29 10:08:25 +0800812
developera2bdbd52021-05-31 19:10:17 +0800813 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfd40db22021-04-29 10:08:25 +0800814 seq_printf(seq, "| FE_CDM1_FSM : %08x |\n",
815 mtk_r32(eth, MTK_FE_CDM1_FSM));
816 seq_printf(seq, "| FE_CDM2_FSM : %08x |\n",
817 mtk_r32(eth, MTK_FE_CDM2_FSM));
developer77f3fd42021-10-05 15:16:05 +0800818 seq_printf(seq, "| FE_CDM3_FSM : %08x |\n",
819 mtk_r32(eth, MTK_FE_CDM3_FSM));
820 seq_printf(seq, "| FE_CDM4_FSM : %08x |\n",
821 mtk_r32(eth, MTK_FE_CDM4_FSM));
developerfd40db22021-04-29 10:08:25 +0800822 seq_printf(seq, "| FE_GDM1_FSM : %08x |\n",
823 mtk_r32(eth, MTK_FE_GDM1_FSM));
824 seq_printf(seq, "| FE_GDM2_FSM : %08x |\n",
825 mtk_r32(eth, MTK_FE_GDM2_FSM));
developer77f3fd42021-10-05 15:16:05 +0800826 seq_printf(seq, "| SGMII_EFUSE : %08x |\n",
827 mtk_dbg_r32(MTK_SGMII_EFUSE));
828 seq_printf(seq, "| SGMII0_RX_CNT : %08x |\n",
829 mtk_dbg_r32(MTK_SGMII_FALSE_CARRIER_CNT(0)));
830 seq_printf(seq, "| SGMII1_RX_CNT : %08x |\n",
831 mtk_dbg_r32(MTK_SGMII_FALSE_CARRIER_CNT(1)));
832 seq_printf(seq, "| WED_RTQM_GLO : %08x |\n",
833 mtk_dbg_r32(MTK_WED_RTQM_GLO_CFG));
developerfd40db22021-04-29 10:08:25 +0800834 }
835
developer77f3fd42021-10-05 15:16:05 +0800836 mtk_w32(eth, 0xffffffff, MTK_INT_STATUS);
837 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
838 mtk_w32(eth, 0xffffffff, MTK_INT_STATUS2);
839
developerfd40db22021-04-29 10:08:25 +0800840 return 0;
841}
842
843static int dbg_regs_open(struct inode *inode, struct file *file)
844{
845 return single_open(file, dbg_regs_read, 0);
846}
847
848static const struct file_operations dbg_regs_fops = {
849 .owner = THIS_MODULE,
850 .open = dbg_regs_open,
851 .read = seq_read,
852 .llseek = seq_lseek,
developer77d03a72021-06-06 00:06:00 +0800853 .release = single_release
854};
855
856void hw_lro_stats_update(u32 ring_no, struct mtk_rx_dma *rxd)
857{
858 u32 idx, agg_cnt, agg_size;
859
860#if defined(CONFIG_MEDIATEK_NETSYS_V2)
861 idx = ring_no - 4;
862 agg_cnt = RX_DMA_GET_AGG_CNT_V2(rxd->rxd6);
863#else
864 idx = ring_no - 1;
865 agg_cnt = RX_DMA_GET_AGG_CNT(rxd->rxd2);
866#endif
867
868 agg_size = RX_DMA_GET_PLEN0(rxd->rxd2);
869
870 hw_lro_agg_size_cnt[idx][agg_size / 5000]++;
871 hw_lro_agg_num_cnt[idx][agg_cnt]++;
872 hw_lro_tot_flush_cnt[idx]++;
873 hw_lro_tot_agg_cnt[idx] += agg_cnt;
874}
875
876void hw_lro_flush_stats_update(u32 ring_no, struct mtk_rx_dma *rxd)
877{
878 u32 idx, flush_reason;
879
880#if defined(CONFIG_MEDIATEK_NETSYS_V2)
881 idx = ring_no - 4;
882 flush_reason = RX_DMA_GET_FLUSH_RSN_V2(rxd->rxd6);
883#else
884 idx = ring_no - 1;
885 flush_reason = RX_DMA_GET_REV(rxd->rxd2);
886#endif
887
888 if ((flush_reason & 0x7) == MTK_HW_LRO_AGG_FLUSH)
889 hw_lro_agg_flush_cnt[idx]++;
890 else if ((flush_reason & 0x7) == MTK_HW_LRO_AGE_FLUSH)
891 hw_lro_age_flush_cnt[idx]++;
892 else if ((flush_reason & 0x7) == MTK_HW_LRO_NOT_IN_SEQ_FLUSH)
893 hw_lro_seq_flush_cnt[idx]++;
894 else if ((flush_reason & 0x7) == MTK_HW_LRO_TIMESTAMP_FLUSH)
895 hw_lro_timestamp_flush_cnt[idx]++;
896 else if ((flush_reason & 0x7) == MTK_HW_LRO_NON_RULE_FLUSH)
897 hw_lro_norule_flush_cnt[idx]++;
898}
899
/* procfs write handler: any write to the hw_lro_stats entry clears all
 * HW LRO statistic tables.  Always consumes the whole write.
 */
ssize_t hw_lro_stats_write(struct file *file, const char __user *buffer,
			   size_t count, loff_t *data)
{
	memset(hw_lro_agg_num_cnt, 0, sizeof(hw_lro_agg_num_cnt));
	memset(hw_lro_agg_size_cnt, 0, sizeof(hw_lro_agg_size_cnt));
	memset(hw_lro_tot_agg_cnt, 0, sizeof(hw_lro_tot_agg_cnt));
	memset(hw_lro_tot_flush_cnt, 0, sizeof(hw_lro_tot_flush_cnt));
	memset(hw_lro_agg_flush_cnt, 0, sizeof(hw_lro_agg_flush_cnt));
	memset(hw_lro_age_flush_cnt, 0, sizeof(hw_lro_age_flush_cnt));
	memset(hw_lro_seq_flush_cnt, 0, sizeof(hw_lro_seq_flush_cnt));
	memset(hw_lro_timestamp_flush_cnt, 0,
	       sizeof(hw_lro_timestamp_flush_cnt));
	memset(hw_lro_norule_flush_cnt, 0, sizeof(hw_lro_norule_flush_cnt));

	pr_info("clear hw lro cnt table\n");

	return count;
}
918
/* procfs dump of the HW LRO statistics for NETSYS v1, which has three
 * LRO rings (RING1..RING3, stats indices 0..2): per-aggregation-count
 * histogram, totals, derived averages, 5000-byte size histogram, and
 * the flush-reason breakdown.
 */
int hw_lro_stats_read_v1(struct seq_file *seq, void *v)
{
	int i;

	seq_puts(seq, "HW LRO statistic dump:\n");

	/* Agg number count */
	seq_puts(seq, "Cnt:   RING1 | RING2 | RING3 | Total\n");
	for (i = 0; i <= MTK_HW_LRO_MAX_AGG_CNT; i++) {
		seq_printf(seq, " %d :      %d        %d        %d        %d\n",
			   i, hw_lro_agg_num_cnt[0][i],
			   hw_lro_agg_num_cnt[1][i], hw_lro_agg_num_cnt[2][i],
			   hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] +
			   hw_lro_agg_num_cnt[2][i]);
	}

	/* Total agg count */
	seq_puts(seq, "Total agg:   RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, "                %d      %d      %d      %d\n",
		   hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1],
		   hw_lro_tot_agg_cnt[2],
		   hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		   hw_lro_tot_agg_cnt[2]);

	/* Total flush count */
	seq_puts(seq, "Total flush:   RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, "                %d      %d      %d      %d\n",
		   hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1],
		   hw_lro_tot_flush_cnt[2],
		   hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		   hw_lro_tot_flush_cnt[2]);

	/* Avg agg count: guard every division against a zero flush count. */
	seq_puts(seq, "Avg agg:   RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, "                %d      %d      %d      %d\n",
		   (hw_lro_tot_flush_cnt[0]) ?
		    hw_lro_tot_agg_cnt[0] / hw_lro_tot_flush_cnt[0] : 0,
		   (hw_lro_tot_flush_cnt[1]) ?
		    hw_lro_tot_agg_cnt[1] / hw_lro_tot_flush_cnt[1] : 0,
		   (hw_lro_tot_flush_cnt[2]) ?
		    hw_lro_tot_agg_cnt[2] / hw_lro_tot_flush_cnt[2] : 0,
		   (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		    hw_lro_tot_flush_cnt[2]) ?
		    ((hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		      hw_lro_tot_agg_cnt[2]) / (hw_lro_tot_flush_cnt[0] +
		    hw_lro_tot_flush_cnt[1] + hw_lro_tot_flush_cnt[2])) : 0);

	/*  Statistics of aggregation size counts */
	seq_puts(seq, "HW LRO flush pkt len:\n");
	seq_puts(seq, " Length  | RING1  | RING2  | RING3  | Total\n");
	for (i = 0; i < 15; i++) {
		seq_printf(seq, "%d~%d: %d      %d      %d      %d\n", i * 5000,
			   (i + 1) * 5000, hw_lro_agg_size_cnt[0][i],
			   hw_lro_agg_size_cnt[1][i], hw_lro_agg_size_cnt[2][i],
			   hw_lro_agg_size_cnt[0][i] +
			   hw_lro_agg_size_cnt[1][i] +
			   hw_lro_agg_size_cnt[2][i]);
	}

	seq_puts(seq, "Flush reason:   RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, "AGG timeout:      %d      %d      %d      %d\n",
		   hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1],
		   hw_lro_agg_flush_cnt[2],
		   (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] +
		    hw_lro_agg_flush_cnt[2]));

	seq_printf(seq, "AGE timeout:      %d      %d      %d      %d\n",
		   hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1],
		   hw_lro_age_flush_cnt[2],
		   (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] +
		    hw_lro_age_flush_cnt[2]));

	seq_printf(seq, "Not in-sequence:  %d      %d      %d      %d\n",
		   hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1],
		   hw_lro_seq_flush_cnt[2],
		   (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] +
		    hw_lro_seq_flush_cnt[2]));

	seq_printf(seq, "Timestamp:        %d      %d      %d      %d\n",
		   hw_lro_timestamp_flush_cnt[0],
		   hw_lro_timestamp_flush_cnt[1],
		   hw_lro_timestamp_flush_cnt[2],
		   (hw_lro_timestamp_flush_cnt[0] +
		    hw_lro_timestamp_flush_cnt[1] +
		    hw_lro_timestamp_flush_cnt[2]));

	seq_printf(seq, "No LRO rule:      %d      %d      %d      %d\n",
		   hw_lro_norule_flush_cnt[0],
		   hw_lro_norule_flush_cnt[1],
		   hw_lro_norule_flush_cnt[2],
		   (hw_lro_norule_flush_cnt[0] +
		    hw_lro_norule_flush_cnt[1] +
		    hw_lro_norule_flush_cnt[2]));

	return 0;
}
1015
/*
 * hw_lro_stats_read_v2 - dump the HW LRO software counters (NETSYS v2)
 * @seq: seq_file provided by the /proc read path
 * @v:   seq_file iterator cookie (unused)
 *
 * NETSYS v2 SoCs use four LRO rings labelled RING4-RING7; the per-ring
 * counter arrays are indexed 0-3 here.  Counters are u32 but printed
 * with %d, so values above 2^31 would display as negative.
 * Always returns 0.
 */
int hw_lro_stats_read_v2(struct seq_file *seq, void *v)
{
	int i;

	seq_puts(seq, "HW LRO statistic dump:\n");

	/* Agg number count: histogram of packets aggregated per flush */
	seq_puts(seq, "Cnt: RING4 | RING5 | RING6 | RING7 Total\n");
	for (i = 0; i <= MTK_HW_LRO_MAX_AGG_CNT; i++) {
		seq_printf(seq,
			   " %d : %d %d %d %d %d\n",
			   i, hw_lro_agg_num_cnt[0][i], hw_lro_agg_num_cnt[1][i],
			   hw_lro_agg_num_cnt[2][i], hw_lro_agg_num_cnt[3][i],
			   hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] +
			   hw_lro_agg_num_cnt[2][i] + hw_lro_agg_num_cnt[3][i]);
	}

	/* Total agg count */
	seq_puts(seq, "Total agg: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, " %d %d %d %d %d\n",
		   hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1],
		   hw_lro_tot_agg_cnt[2], hw_lro_tot_agg_cnt[3],
		   hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		   hw_lro_tot_agg_cnt[2] + hw_lro_tot_agg_cnt[3]);

	/* Total flush count */
	seq_puts(seq, "Total flush: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, " %d %d %d %d %d\n",
		   hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1],
		   hw_lro_tot_flush_cnt[2], hw_lro_tot_flush_cnt[3],
		   hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		   hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3]);

	/* Avg agg count: integer division, guarded against divide-by-zero */
	seq_puts(seq, "Avg agg: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, " %d %d %d %d %d\n",
		   (hw_lro_tot_flush_cnt[0]) ?
		   hw_lro_tot_agg_cnt[0] / hw_lro_tot_flush_cnt[0] : 0,
		   (hw_lro_tot_flush_cnt[1]) ?
		   hw_lro_tot_agg_cnt[1] / hw_lro_tot_flush_cnt[1] : 0,
		   (hw_lro_tot_flush_cnt[2]) ?
		   hw_lro_tot_agg_cnt[2] / hw_lro_tot_flush_cnt[2] : 0,
		   (hw_lro_tot_flush_cnt[3]) ?
		   hw_lro_tot_agg_cnt[3] / hw_lro_tot_flush_cnt[3] : 0,
		   (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		    hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3]) ?
		   ((hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		     hw_lro_tot_agg_cnt[2] + hw_lro_tot_agg_cnt[3]) /
		    (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		     hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3])) : 0);

	/* Statistics of aggregation size counts, in 5000-byte buckets */
	seq_puts(seq, "HW LRO flush pkt len:\n");
	seq_puts(seq, " Length | RING4 | RING5 | RING6 | RING7 Total\n");
	for (i = 0; i < 15; i++) {
		seq_printf(seq, "%d~%d: %d %d %d %d %d\n",
			   i * 5000, (i + 1) * 5000,
			   hw_lro_agg_size_cnt[0][i], hw_lro_agg_size_cnt[1][i],
			   hw_lro_agg_size_cnt[2][i], hw_lro_agg_size_cnt[3][i],
			   hw_lro_agg_size_cnt[0][i] +
			   hw_lro_agg_size_cnt[1][i] +
			   hw_lro_agg_size_cnt[2][i] +
			   hw_lro_agg_size_cnt[3][i]);
	}

	/* Per-reason flush breakdown */
	seq_puts(seq, "Flush reason: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, "AGG timeout: %d %d %d %d %d\n",
		   hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1],
		   hw_lro_agg_flush_cnt[2], hw_lro_agg_flush_cnt[3],
		   (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] +
		    hw_lro_agg_flush_cnt[2] + hw_lro_agg_flush_cnt[3]));

	seq_printf(seq, "AGE timeout: %d %d %d %d %d\n",
		   hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1],
		   hw_lro_age_flush_cnt[2], hw_lro_age_flush_cnt[3],
		   (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] +
		    hw_lro_age_flush_cnt[2] + hw_lro_age_flush_cnt[3]));

	seq_printf(seq, "Not in-sequence: %d %d %d %d %d\n",
		   hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1],
		   hw_lro_seq_flush_cnt[2], hw_lro_seq_flush_cnt[3],
		   (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] +
		    hw_lro_seq_flush_cnt[2] + hw_lro_seq_flush_cnt[3]));

	seq_printf(seq, "Timestamp: %d %d %d %d %d\n",
		   hw_lro_timestamp_flush_cnt[0],
		   hw_lro_timestamp_flush_cnt[1],
		   hw_lro_timestamp_flush_cnt[2],
		   hw_lro_timestamp_flush_cnt[3],
		   (hw_lro_timestamp_flush_cnt[0] +
		    hw_lro_timestamp_flush_cnt[1] +
		    hw_lro_timestamp_flush_cnt[2] +
		    hw_lro_timestamp_flush_cnt[3]));

	seq_printf(seq, "No LRO rule: %d %d %d %d %d\n",
		   hw_lro_norule_flush_cnt[0],
		   hw_lro_norule_flush_cnt[1],
		   hw_lro_norule_flush_cnt[2],
		   hw_lro_norule_flush_cnt[3],
		   (hw_lro_norule_flush_cnt[0] +
		    hw_lro_norule_flush_cnt[1] +
		    hw_lro_norule_flush_cnt[2] +
		    hw_lro_norule_flush_cnt[3]));

	return 0;
}
1122
1123int hw_lro_stats_read_wrapper(struct seq_file *seq, void *v)
1124{
1125 struct mtk_eth *eth = g_eth;
1126
1127 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1128 hw_lro_stats_read_v2(seq, v);
1129 else
1130 hw_lro_stats_read_v1(seq, v);
1131
1132 return 0;
1133}
1134
1135static int hw_lro_stats_open(struct inode *inode, struct file *file)
1136{
1137 return single_open(file, hw_lro_stats_read_wrapper, NULL);
1138}
1139
/*
 * File operations for /proc/mtketh/hw_lro_stats: reads go through the
 * seq_file wrapper above; writes are handled by hw_lro_stats_write
 * (defined earlier in this file).
 */
static const struct file_operations hw_lro_stats_fops = {
	.owner = THIS_MODULE,
	.open = hw_lro_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = hw_lro_stats_write,
	.release = single_release
};
1148
developer77d03a72021-06-06 00:06:00 +08001149int hwlro_agg_cnt_ctrl(int cnt)
1150{
1151 int i;
1152
1153 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1154 SET_PDMA_RXRING_MAX_AGG_CNT(g_eth, i, cnt);
1155
1156 return 0;
1157}
1158
1159int hwlro_agg_time_ctrl(int time)
1160{
1161 int i;
1162
1163 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1164 SET_PDMA_RXRING_AGG_TIME(g_eth, i, time);
1165
1166 return 0;
1167}
1168
1169int hwlro_age_time_ctrl(int time)
1170{
1171 int i;
1172
1173 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1174 SET_PDMA_RXRING_AGE_TIME(g_eth, i, time);
1175
1176 return 0;
1177}
1178
1179int hwlro_threshold_ctrl(int bandwidth)
1180{
1181 SET_PDMA_LRO_BW_THRESHOLD(g_eth, bandwidth);
1182
1183 return 0;
1184}
1185
1186int hwlro_ring_enable_ctrl(int enable)
1187{
1188 int i;
1189
1190 pr_info("[%s] %s HW LRO rings\n", __func__, (enable) ? "Enable" : "Disable");
1191
1192 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1193 SET_PDMA_RXRING_VALID(g_eth, i, enable);
1194
1195 return 0;
1196}
1197
1198int hwlro_stats_enable_ctrl(int enable)
1199{
1200 pr_info("[%s] %s HW LRO statistics\n", __func__, (enable) ? "Enable" : "Disable");
1201 mtk_hwlro_stats_ebl = enable;
1202
1203 return 0;
1204}
1205
/*
 * Dispatch table for writes to /proc/mtketh/hw_lro_auto_tlb: the first
 * token of the written string selects an index here, the second token is
 * passed as the function's argument.  The indexes match the usage text
 * printed by hw_lro_auto_tlb_read().
 */
static const mtk_lro_dbg_func lro_dbg_func[] = {
	[0] = hwlro_agg_cnt_ctrl,
	[1] = hwlro_agg_time_ctrl,
	[2] = hwlro_age_time_ctrl,
	[3] = hwlro_threshold_ctrl,
	[4] = hwlro_ring_enable_ctrl,
	[5] = hwlro_stats_enable_ctrl,
};
1214
1215ssize_t hw_lro_auto_tlb_write(struct file *file, const char __user *buffer,
1216 size_t count, loff_t *data)
1217{
1218 char buf[32];
1219 char *p_buf;
1220 char *p_token = NULL;
1221 char *p_delimiter = " \t";
1222 long x = 0, y = 0;
developer4c32b7a2021-11-13 16:46:43 +08001223 u32 len = count;
developer77d03a72021-06-06 00:06:00 +08001224 int ret;
1225
1226 if (len >= sizeof(buf)) {
1227 pr_info("Input handling fail!\n");
developer77d03a72021-06-06 00:06:00 +08001228 return -1;
1229 }
1230
1231 if (copy_from_user(buf, buffer, len))
1232 return -EFAULT;
1233
1234 buf[len] = '\0';
1235
1236 p_buf = buf;
1237 p_token = strsep(&p_buf, p_delimiter);
1238 if (!p_token)
1239 x = 0;
1240 else
1241 ret = kstrtol(p_token, 10, &x);
1242
1243 p_token = strsep(&p_buf, "\t\n ");
1244 if (p_token)
1245 ret = kstrtol(p_token, 10, &y);
1246
1247 if (lro_dbg_func[x] && (ARRAY_SIZE(lro_dbg_func) > x))
1248 (*lro_dbg_func[x]) (y);
1249
1250 return count;
1251}
1252
/*
 * hw_lro_auto_tlb_dump_v1 - print one auto-learn table entry (NETSYS v1)
 * @seq:   seq_file to print into
 * @index: auto-learn table entry number
 *
 * Reads the 9-word entry via the CF8/CFC indirect register pair, decodes
 * the flow tuple, VLAN tags and the priority metric, and prints them.
 */
void hw_lro_auto_tlb_dump_v1(struct seq_file *seq, u32 index)
{
	int i;
	struct mtk_lro_alt_v1 alt;
	__be32 addr;
	u32 tlb_info[9];
	u32 dw_len, cnt, priority;
	u32 entry;

	/* NOTE(review): entries above 4 are shifted down by one before the
	 * indirect address is computed - presumably a quirk of the HW entry
	 * numbering; confirm against the ALT register map.
	 */
	if (index > 4)
		index = index - 1;
	entry = (index * 9) + 1;

	/* read valid entries of the auto-learn table */
	mtk_w32(g_eth, entry, MTK_FE_ALT_CF8);

	/* each entry is nine sequential words behind the CFC data port */
	for (i = 0; i < 9; i++)
		tlb_info[i] = mtk_r32(g_eth, MTK_FE_ALT_SEQ_CFC);

	memcpy(&alt, tlb_info, sizeof(struct mtk_lro_alt_v1));

	dw_len = alt.alt_info7.dw_len;
	cnt = alt.alt_info6.cnt;

	/* the priority metric follows the global ALT counting mode */
	if (mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW0) & MTK_LRO_ALT_PKT_CNT_MODE)
		priority = cnt; /* packet count */
	else
		priority = dw_len; /* byte count */

	/* dump valid entries of the auto-learn table */
	if (index >= 4)
		seq_printf(seq, "\n===== TABLE Entry: %d (Act) =====\n", index);
	else
		seq_printf(seq, "\n===== TABLE Entry: %d (LRU) =====\n", index);

	if (alt.alt_info8.ipv4) {
		/* sip0 holds the IPv4 source address in host order */
		addr = htonl(alt.alt_info1.sip0);
		seq_printf(seq, "SIP = %pI4 (IPv4)\n", &addr);
	} else {
		seq_printf(seq, "SIP = %08X:%08X:%08X:%08X (IPv6)\n",
			   alt.alt_info4.sip3, alt.alt_info3.sip2,
			   alt.alt_info2.sip1, alt.alt_info1.sip0);
	}

	seq_printf(seq, "DIP_ID = %d\n", alt.alt_info8.dip_id);
	seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
		   alt.alt_info0.stp, alt.alt_info0.dtp);
	seq_printf(seq, "VLAN_VID_VLD = %d\n", alt.alt_info6.vlan_vid_vld);
	/* NOTE(review): VLAN3 combines (vlan_vid0 >> 24) & 0xfff, which can
	 * only contribute 8 bits from a 32-bit field - looks inconsistent
	 * with the other VLAN slots; verify against the v1 ALT bit layout.
	 */
	seq_printf(seq, "VLAN1 = %d | VLAN2 = %d | VLAN3 = %d | VLAN4 =%d\n",
		   (alt.alt_info5.vlan_vid0 & 0xfff),
		   ((alt.alt_info5.vlan_vid0 >> 12) & 0xfff),
		   ((alt.alt_info6.vlan_vid1 << 8) |
		    ((alt.alt_info5.vlan_vid0 >> 24) & 0xfff)),
		   ((alt.alt_info6.vlan_vid1 >> 4) & 0xfff));
	seq_printf(seq, "TPUT = %d | FREQ = %d\n", dw_len, cnt);
	seq_printf(seq, "PRIORITY = %d\n", priority);
}
1310
/*
 * hw_lro_auto_tlb_dump_v2 - print one auto-learn table entry (NETSYS v2)
 * @seq:   seq_file to print into
 * @index: auto-learn table entry number (1-based; 1-4 ongoing, 5+ candidate)
 *
 * Selects the entry through MTK_LRO_ALT_DBG, reads it back word by word
 * through MTK_LRO_ALT_DBG_DATA and decodes the flow tuple.
 */
void hw_lro_auto_tlb_dump_v2(struct seq_file *seq, u32 index)
{
	int i;
	struct mtk_lro_alt_v2 alt;
	u32 score = 0, ipv4 = 0;
	u32 ipv6[4] = { 0 };
	/* NOTE(review): sized for 12 words but only 11 are read below -
	 * confirm the v2 entry really is 11 words.
	 */
	u32 tlb_info[12];

	/* read valid entries of the auto-learn table */
	mtk_w32(g_eth, index << MTK_LRO_ALT_INDEX_OFFSET, MTK_LRO_ALT_DBG);

	for (i = 0; i < 11; i++)
		tlb_info[i] = mtk_r32(g_eth, MTK_LRO_ALT_DBG_DATA);

	memcpy(&alt, tlb_info, sizeof(struct mtk_lro_alt_v2));

	/* NOTE(review): 'score' is set from the ALT counting mode but never
	 * used afterwards in this function.
	 */
	if (mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW0) & MTK_LRO_ALT_PKT_CNT_MODE)
		score = 1; /* packet count */
	else
		score = 0; /* byte count */

	/* dump valid entries of the auto-learn table */
	if (alt.alt_info0.valid) {
		if (index < 5)
			seq_printf(seq,
				   "\n===== TABLE Entry: %d (onging) =====\n",
				   index);
		else
			seq_printf(seq,
				   "\n===== TABLE Entry: %d (candidate) =====\n",
				   index);

		/* NOTE(review): the high halves are shifted by 23 (IPv4) and
		 * the low halves by 9 (IPv6) when the fields are recombined;
		 * verify these widths against the mtk_lro_alt_v2 bitfields.
		 */
		if (alt.alt_info1.v4_valid) {
			ipv4 = (alt.alt_info4.sip0_h << 23) |
				alt.alt_info5.sip0_l;
			seq_printf(seq, "SIP = 0x%x: (IPv4)\n", ipv4);

			ipv4 = (alt.alt_info8.dip0_h << 23) |
				alt.alt_info9.dip0_l;
			seq_printf(seq, "DIP = 0x%x: (IPv4)\n", ipv4);
		} else if (alt.alt_info1.v6_valid) {
			ipv6[3] = (alt.alt_info1.sip3_h << 23) |
				  (alt.alt_info2.sip3_l << 9);
			ipv6[2] = (alt.alt_info2.sip2_h << 23) |
				  (alt.alt_info3.sip2_l << 9);
			ipv6[1] = (alt.alt_info3.sip1_h << 23) |
				  (alt.alt_info4.sip1_l << 9);
			ipv6[0] = (alt.alt_info4.sip0_h << 23) |
				  (alt.alt_info5.sip0_l << 9);
			seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
				   ipv6[3], ipv6[2], ipv6[1], ipv6[0]);

			ipv6[3] = (alt.alt_info5.dip3_h << 23) |
				  (alt.alt_info6.dip3_l << 9);
			ipv6[2] = (alt.alt_info6.dip2_h << 23) |
				  (alt.alt_info7.dip2_l << 9);
			ipv6[1] = (alt.alt_info7.dip1_h << 23) |
				  (alt.alt_info8.dip1_l << 9);
			ipv6[0] = (alt.alt_info8.dip0_h << 23) |
				  (alt.alt_info9.dip0_l << 9);
			seq_printf(seq, "DIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
				   ipv6[3], ipv6[2], ipv6[1], ipv6[0]);
		}

		seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
			   (alt.alt_info9.sp_h << 7) | (alt.alt_info10.sp_l),
			   alt.alt_info10.dp);
	}
}
1380
/*
 * hw_lro_auto_tlb_read - dump the LRO auto-learn table and ring settings
 * @seq: seq_file provided by the /proc read path
 * @v:   seq_file iterator cookie (unused)
 *
 * Prints the write-command usage text, then the auto-learn table entries
 * (v1 or v2 depending on the SoC), then the per-ring AGG/AGE settings
 * reconstructed from the ring control registers.  Always returns 0.
 */
int hw_lro_auto_tlb_read(struct seq_file *seq, void *v)
{
	int i;
	u32 reg_val;
	u32 reg_op1, reg_op2, reg_op3, reg_op4;
	u32 agg_cnt, agg_time, age_time;

	seq_puts(seq, "Usage of /proc/mtketh/hw_lro_auto_tlb:\n");
	seq_puts(seq, "echo [function] [setting] > /proc/mtketh/hw_lro_auto_tlb\n");
	seq_puts(seq, "Functions:\n");
	seq_puts(seq, "[0] = hwlro_agg_cnt_ctrl\n");
	seq_puts(seq, "[1] = hwlro_agg_time_ctrl\n");
	seq_puts(seq, "[2] = hwlro_age_time_ctrl\n");
	seq_puts(seq, "[3] = hwlro_threshold_ctrl\n");
	seq_puts(seq, "[4] = hwlro_ring_enable_ctrl\n");
	seq_puts(seq, "[5] = hwlro_stats_enable_ctrl\n\n");

	if (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V2)) {
		/* v2: entries 1-8, dumped unconditionally */
		for (i = 1; i <= 8; i++)
			hw_lro_auto_tlb_dump_v2(seq, i);
	} else {
		/* Read valid entries of the auto-learn table */
		mtk_w32(g_eth, 0, MTK_FE_ALT_CF8);
		reg_val = mtk_r32(g_eth, MTK_FE_ALT_SEQ_CFC);

		seq_printf(seq,
			   "HW LRO Auto-learn Table: (MTK_FE_ALT_SEQ_CFC=0x%x)\n",
			   reg_val);

		/* v1: bit i of the valid bitmap gates entry i */
		for (i = 7; i >= 0; i--) {
			if (reg_val & (1 << i))
				hw_lro_auto_tlb_dump_v1(seq, i);
		}
	}

	/* Read the agg_time/age_time/agg_cnt of LRO rings */
	seq_puts(seq, "\nHW LRO Ring Settings\n");

	for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
		reg_op1 = mtk_r32(g_eth, MTK_LRO_CTRL_DW1_CFG(i));
		reg_op2 = mtk_r32(g_eth, MTK_LRO_CTRL_DW2_CFG(i));
		reg_op3 = mtk_r32(g_eth, MTK_LRO_CTRL_DW3_CFG(i));
		reg_op4 = mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW2);

		/* fields are split across DW1-DW3: reassemble low/high parts */
		agg_cnt =
		    ((reg_op3 & 0x3) << 6) |
		    ((reg_op2 >> MTK_LRO_RING_AGG_CNT_L_OFFSET) & 0x3f);
		agg_time = (reg_op2 >> MTK_LRO_RING_AGG_TIME_OFFSET) & 0xffff;
		age_time =
		    ((reg_op2 & 0x3f) << 10) |
		    ((reg_op1 >> MTK_LRO_RING_AGE_TIME_L_OFFSET) & 0x3ff);
		/* v2 SoCs label these rings 4-7, hence the i+3 offset */
		seq_printf(seq,
			   "Ring[%d]: MAX_AGG_CNT=%d, AGG_TIME=%d, AGE_TIME=%d, Threshold=%d\n",
			   (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V2))? i+3 : i,
			   agg_cnt, agg_time, age_time, reg_op4);
	}

	seq_puts(seq, "\n");

	return 0;
}
1442
1443static int hw_lro_auto_tlb_open(struct inode *inode, struct file *file)
1444{
1445 return single_open(file, hw_lro_auto_tlb_read, NULL);
1446}
1447
/*
 * File operations for /proc/mtketh/hw_lro_auto_tlb: reads dump the
 * auto-learn table, writes dispatch through lro_dbg_func[].
 */
static const struct file_operations hw_lro_auto_tlb_fops = {
	.owner = THIS_MODULE,
	.open = hw_lro_auto_tlb_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = hw_lro_auto_tlb_write,
	.release = single_release
};
developerfd40db22021-04-29 10:08:25 +08001456
/* /proc/mtketh directory (shared with other files) and its debug entries. */
struct proc_dir_entry *proc_reg_dir;
static struct proc_dir_entry *proc_esw_cnt, *proc_dbg_regs;
1459
1460int debug_proc_init(struct mtk_eth *eth)
1461{
1462 g_eth = eth;
1463
1464 if (!proc_reg_dir)
1465 proc_reg_dir = proc_mkdir(PROCREG_DIR, NULL);
1466
1467 proc_tx_ring =
1468 proc_create(PROCREG_TXRING, 0, proc_reg_dir, &tx_ring_fops);
1469 if (!proc_tx_ring)
1470 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_TXRING);
1471
1472 proc_rx_ring =
1473 proc_create(PROCREG_RXRING, 0, proc_reg_dir, &rx_ring_fops);
1474 if (!proc_rx_ring)
1475 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RXRING);
1476
1477 proc_esw_cnt =
1478 proc_create(PROCREG_ESW_CNT, 0, proc_reg_dir, &switch_count_fops);
1479 if (!proc_esw_cnt)
1480 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_ESW_CNT);
1481
1482 proc_dbg_regs =
1483 proc_create(PROCREG_DBG_REGS, 0, proc_reg_dir, &dbg_regs_fops);
1484 if (!proc_dbg_regs)
1485 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_DBG_REGS);
1486
developer77d03a72021-06-06 00:06:00 +08001487 if (g_eth->hwlro) {
1488 proc_hw_lro_stats =
1489 proc_create(PROCREG_HW_LRO_STATS, 0, proc_reg_dir,
1490 &hw_lro_stats_fops);
1491 if (!proc_hw_lro_stats)
1492 pr_info("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_STATS);
1493
1494 proc_hw_lro_auto_tlb =
1495 proc_create(PROCREG_HW_LRO_AUTO_TLB, 0, proc_reg_dir,
1496 &hw_lro_auto_tlb_fops);
1497 if (!proc_hw_lro_auto_tlb)
1498 pr_info("!! FAIL to create %s PROC !!\n",
1499 PROCREG_HW_LRO_AUTO_TLB);
1500 }
1501
developerfd40db22021-04-29 10:08:25 +08001502 return 0;
1503}
1504
1505void debug_proc_exit(void)
1506{
1507 if (proc_tx_ring)
1508 remove_proc_entry(PROCREG_TXRING, proc_reg_dir);
1509 if (proc_rx_ring)
1510 remove_proc_entry(PROCREG_RXRING, proc_reg_dir);
1511
1512 if (proc_esw_cnt)
1513 remove_proc_entry(PROCREG_ESW_CNT, proc_reg_dir);
1514
1515 if (proc_reg_dir)
1516 remove_proc_entry(PROCREG_DIR, 0);
1517
1518 if (proc_dbg_regs)
1519 remove_proc_entry(PROCREG_DBG_REGS, proc_reg_dir);
developer77d03a72021-06-06 00:06:00 +08001520
1521 if (g_eth->hwlro) {
1522 if (proc_hw_lro_stats)
1523 remove_proc_entry(PROCREG_HW_LRO_STATS, proc_reg_dir);
1524
1525 if (proc_hw_lro_auto_tlb)
1526 remove_proc_entry(PROCREG_HW_LRO_AUTO_TLB, proc_reg_dir);
1527 }
developerfd40db22021-04-29 10:08:25 +08001528}
1529