/*
 * Copyright (C) 2018 MediaTek Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/trace_seq.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/u64_stats_sync.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/of_mdio.h>
#include <linux/of_address.h>

#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"
#include "mtk_eth_reset.h"

u32 hw_lro_agg_num_cnt[MTK_HW_LRO_RING_NUM][MTK_HW_LRO_MAX_AGG_CNT + 1];
u32 hw_lro_agg_size_cnt[MTK_HW_LRO_RING_NUM][16];
u32 hw_lro_tot_agg_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_tot_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_agg_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_age_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_seq_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_timestamp_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_norule_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 mtk_hwlro_stats_ebl;
u32 dbg_show_level;

static struct proc_dir_entry *proc_hw_lro_stats, *proc_hw_lro_auto_tlb;
typedef int (*mtk_lro_dbg_func) (int par);

struct mtk_eth_debug {
	struct dentry *root;
	void __iomem *base;
	int direct_access;
};

struct mtk_eth *g_eth;

struct mtk_eth_debug eth_debug;

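/* Map the register space of the "switch0" OF node, if present, so that
 * mt7530_mdio_r32()/mt7530_mdio_w32() can use direct MMIO access instead
 * of the indirect MDIO access path.
 */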
int mt798x_iomap(void)
{
	struct device_node *np = NULL;

	np = of_find_node_by_name(NULL, "switch0");
	if (np) {
		eth_debug.base = of_iomap(np, 0);
		if (!eth_debug.base) {
			pr_err("of_iomap failed\n");
			of_node_put(np);
			return -ENOMEM;
		}

		of_node_put(np);
		eth_debug.direct_access = 1;
	}

	return 0;
}

int mt798x_iounmap(void)
{
	eth_debug.direct_access = 0;
	if (eth_debug.base)
		iounmap(eth_debug.base);

	return 0;
}

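/* MT7530 switch registers are 32-bit while the MDIO interface moves only
 * 16 bits at a time: an access is split into a page select (phy 0x1f,
 * reg 0x1f), a low-half access and a high-half access (reg 0x10).  When
 * the switch register space has been mapped by mt798x_iomap(), plain MMIO
 * is used instead.
 */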
void mt7530_mdio_w32(struct mtk_eth *eth, u16 reg, u32 val)
{
	mutex_lock(&eth->mii_bus->mdio_lock);

	if (eth_debug.direct_access)
		__raw_writel(val, eth_debug.base + reg);
	else {
		_mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
		_mtk_mdio_write(eth, 0x1f, (reg >> 2) & 0xf, val & 0xffff);
		_mtk_mdio_write(eth, 0x1f, 0x10, val >> 16);
	}

	mutex_unlock(&eth->mii_bus->mdio_lock);
}

u32 mt7530_mdio_r32(struct mtk_eth *eth, u32 reg)
{
	u16 high, low;
	u32 ret;

	mutex_lock(&eth->mii_bus->mdio_lock);

	if (eth_debug.direct_access) {
		ret = __raw_readl(eth_debug.base + reg);
		mutex_unlock(&eth->mii_bus->mdio_lock);
		return ret;
	}
	_mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
	low = _mtk_mdio_read(eth, 0x1f, (reg >> 2) & 0xf);
	high = _mtk_mdio_read(eth, 0x1f, 0x10);

	mutex_unlock(&eth->mii_bus->mdio_lock);

	return (high << 16) | (low & 0xffff);
}

void mtk_switch_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	mtk_w32(eth, val, reg + 0x10000);
}
EXPORT_SYMBOL(mtk_switch_w32);

u32 mtk_switch_r32(struct mtk_eth *eth, unsigned reg)
{
	return mtk_r32(eth, reg + 0x10000);
}
EXPORT_SYMBOL(mtk_switch_r32);

static int mtketh_debug_show(struct seq_file *m, void *private)
{
	struct mtk_eth *eth = m->private;
	struct mtk_mac *mac = 0;
	int i = 0;

	for (i = 0 ; i < MTK_MAX_DEVS ; i++) {
		if (!eth->mac[i] ||
		    of_phy_is_fixed_link(eth->mac[i]->of_node))
			continue;
		mac = eth->mac[i];
#if 0 //FIXME
		while (j < 30) {
			d = _mtk_mdio_read(eth, mac->phy_dev->addr, j);

			seq_printf(m, "phy=%d, reg=0x%08x, data=0x%08x\n",
				   mac->phy_dev->addr, j, d);
			j++;
		}
#endif
	}
	return 0;
}

static int mtketh_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtketh_debug_show, inode->i_private);
}

static const struct file_operations mtketh_debug_fops = {
	.owner = THIS_MODULE,
	.open = mtketh_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int mtketh_mt7530sw_debug_show(struct seq_file *m, void *private)
{
	struct mtk_eth *eth = m->private;
	u32 offset, data;
	int i;
	struct mt7530_ranges {
		u32 start;
		u32 end;
	} ranges[] = {
		{0x0, 0xac},
		{0x1000, 0x10e0},
		{0x1100, 0x1140},
		{0x1200, 0x1240},
		{0x1300, 0x1340},
		{0x1400, 0x1440},
		{0x1500, 0x1540},
		{0x1600, 0x1640},
		{0x1800, 0x1848},
		{0x1900, 0x1948},
		{0x1a00, 0x1a48},
		{0x1b00, 0x1b48},
		{0x1c00, 0x1c48},
		{0x1d00, 0x1d48},
		{0x1e00, 0x1e48},
		{0x1f60, 0x1ffc},
		{0x2000, 0x212c},
		{0x2200, 0x222c},
		{0x2300, 0x232c},
		{0x2400, 0x242c},
		{0x2500, 0x252c},
		{0x2600, 0x262c},
		{0x3000, 0x3014},
		{0x30c0, 0x30f8},
		{0x3100, 0x3114},
		{0x3200, 0x3214},
		{0x3300, 0x3314},
		{0x3400, 0x3414},
		{0x3500, 0x3514},
		{0x3600, 0x3614},
		{0x4000, 0x40d4},
		{0x4100, 0x41d4},
		{0x4200, 0x42d4},
		{0x4300, 0x43d4},
		{0x4400, 0x44d4},
		{0x4500, 0x45d4},
		{0x4600, 0x46d4},
		{0x4f00, 0x461c},
		{0x7000, 0x7038},
		{0x7120, 0x7124},
		{0x7800, 0x7804},
		{0x7810, 0x7810},
		{0x7830, 0x7830},
		{0x7a00, 0x7a7c},
		{0x7b00, 0x7b04},
		{0x7e00, 0x7e04},
		{0x7ffc, 0x7ffc},
	};

	if (!mt7530_exist(eth))
		return -EOPNOTSUPP;

	if ((!eth->mac[0] || !of_phy_is_fixed_link(eth->mac[0]->of_node)) &&
	    (!eth->mac[1] || !of_phy_is_fixed_link(eth->mac[1]->of_node))) {
		seq_puts(m, "no switch found\n");
		return 0;
	}

	for (i = 0 ; i < ARRAY_SIZE(ranges) ; i++) {
		for (offset = ranges[i].start;
		     offset <= ranges[i].end; offset += 4) {
			data = mt7530_mdio_r32(eth, offset);
			seq_printf(m, "mt7530 switch reg=0x%08x, data=0x%08x\n",
				   offset, data);
		}
	}

	return 0;
}

static int mtketh_debug_mt7530sw_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtketh_mt7530sw_debug_show, inode->i_private);
}

static const struct file_operations mtketh_debug_mt7530sw_fops = {
	.owner = THIS_MODULE,
	.open = mtketh_debug_mt7530sw_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

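/* debugfs write handler for "mt7530sw_reg_w".  The input is three hex
 * fields "<phy> <reg> <val>"; the phy field is parsed but the write always
 * goes through the switch pseudo-PHY (0x1f), e.g. (illustrative values):
 *   echo 1f 7ffc 0 > /sys/kernel/debug/mtketh/mt7530sw_reg_w
 */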
static ssize_t mtketh_mt7530sw_debugfs_write(struct file *file,
					     const char __user *ptr,
					     size_t len, loff_t *off)
{
	struct mtk_eth *eth = file->private_data;
	char buf[32], *token, *p = buf;
	unsigned long reg, value, phy;
	int ret;

	if (!mt7530_exist(eth))
		return -EOPNOTSUPP;

	if (*off != 0)
		return 0;

	if (len > sizeof(buf) - 1)
		len = sizeof(buf) - 1;

	ret = strncpy_from_user(buf, ptr, len);
	if (ret < 0)
		return ret;
	buf[len] = '\0';

	token = strsep(&p, " ");
	if (!token)
		return -EINVAL;
	if (kstrtoul(token, 16, (unsigned long *)&phy))
		return -EINVAL;

	token = strsep(&p, " ");
	if (!token)
		return -EINVAL;
	if (kstrtoul(token, 16, (unsigned long *)&reg))
		return -EINVAL;

	token = strsep(&p, " ");
	if (!token)
		return -EINVAL;
	if (kstrtoul(token, 16, (unsigned long *)&value))
		return -EINVAL;

	pr_info("%s:phy=%d, reg=0x%lx, val=0x%lx\n", __func__,
		0x1f, reg, value);
	mt7530_mdio_w32(eth, reg, value);
	pr_info("%s:phy=%d, reg=0x%lx, val=0x%x confirm..\n", __func__,
		0x1f, reg, mt7530_mdio_r32(eth, reg));

	return len;
}

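/* debugfs write handler for "phy_reg_w": "<phy> <reg> <val>" in hex,
 * written to the MDIO bus as a Clause-22 access.
 */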
static ssize_t mtketh_debugfs_write(struct file *file, const char __user *ptr,
				    size_t len, loff_t *off)
{
	struct mtk_eth *eth = file->private_data;
	char buf[32], *token, *p = buf;
	unsigned long reg, value, phy;
	int ret;

	if (*off != 0)
		return 0;

	if (len > sizeof(buf) - 1)
		len = sizeof(buf) - 1;

	ret = strncpy_from_user(buf, ptr, len);
	if (ret < 0)
		return ret;
	buf[len] = '\0';

	token = strsep(&p, " ");
	if (!token)
		return -EINVAL;
	if (kstrtoul(token, 16, (unsigned long *)&phy))
		return -EINVAL;

	token = strsep(&p, " ");

	if (!token)
		return -EINVAL;
	if (kstrtoul(token, 16, (unsigned long *)&reg))
		return -EINVAL;

	token = strsep(&p, " ");

	if (!token)
		return -EINVAL;
	if (kstrtoul(token, 16, (unsigned long *)&value))
		return -EINVAL;

	pr_info("%s:phy=%lu, reg=0x%lx, val=0x%lx\n", __func__,
		phy, reg, value);

	_mtk_mdio_write(eth, phy, reg, value);

	pr_info("%s:phy=%lu, reg=0x%lx, val=0x%x confirm..\n", __func__,
		phy, reg, _mtk_mdio_read(eth, phy, reg));

	return len;
}

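/* debugfs write handler for "reset".  A single numeric level selects the
 * action: 0/2 clear/set the forced-reset flag, 1 schedules an FE and WDMA
 * reset via the pending work, 3 schedules an FE reset with traffic stopped
 * (MTK_FE_STOP_TRAFFIC), 4/5 enable/disable dumping of reset information.
 */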
static ssize_t mtketh_debugfs_reset(struct file *file, const char __user *ptr,
				    size_t len, loff_t *off)
{
	struct mtk_eth *eth = file->private_data;
	char buf[8] = "";
	int count = len;
	unsigned long dbg_level = 0;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, ptr, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoul(buf, 0, &dbg_level))
		return -EINVAL;

	switch (dbg_level) {
	case 0:
		atomic_set(&force, 0);
		break;
	case 1:
		if (atomic_read(&force) == 1)
			schedule_work(&eth->pending_work);
		else
			pr_info(" stat:disable\n");
		break;
	case 2:
		atomic_set(&force, 1);
		break;
	case 3:
		if (atomic_read(&force) == 1) {
			mtk_reset_flag = MTK_FE_STOP_TRAFFIC;
			schedule_work(&eth->pending_work);
		} else
			pr_info(" device resetting !!!\n");
		break;
	case 4:
		dbg_show_level = 1;
		break;
	case 5:
		dbg_show_level = 0;
		break;
	default:
		pr_info("Usage: echo [level] > /sys/kernel/debug/mtketh/reset\n");
		pr_info("Commands: [level]\n");
		pr_info("  0  disable reset\n");
		pr_info("  1  FE and WDMA reset\n");
		pr_info("  2  enable reset\n");
		pr_info("  3  FE reset\n");
		pr_info("  4  enable dump reset info\n");
		pr_info("  5  disable dump reset info\n");
		break;
	}
	return count;
}

static const struct file_operations fops_reg_w = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = mtketh_debugfs_write,
	.llseek = noop_llseek,
};

static const struct file_operations fops_eth_reset = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = mtketh_debugfs_reset,
	.llseek = noop_llseek,
};

static const struct file_operations fops_mt7530sw_reg_w = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = mtketh_mt7530sw_debugfs_write,
	.llseek = noop_llseek,
};

void mtketh_debugfs_exit(struct mtk_eth *eth)
{
	debugfs_remove_recursive(eth_debug.root);
}

int mtketh_debugfs_init(struct mtk_eth *eth)
{
	int ret = 0;

	eth_debug.root = debugfs_create_dir("mtketh", NULL);
	if (!eth_debug.root) {
		dev_notice(eth->dev, "%s:err at %d\n", __func__, __LINE__);
		ret = -ENOMEM;
	}

	debugfs_create_file("phy_regs", S_IRUGO,
			    eth_debug.root, eth, &mtketh_debug_fops);
	debugfs_create_file("phy_reg_w", S_IFREG | S_IWUSR,
			    eth_debug.root, eth, &fops_reg_w);
	debugfs_create_file("reset", S_IFREG | S_IWUSR,
			    eth_debug.root, eth, &fops_eth_reset);
	if (mt7530_exist(eth)) {
		debugfs_create_file("mt7530sw_regs", S_IRUGO,
				    eth_debug.root, eth,
				    &mtketh_debug_mt7530sw_fops);
		debugfs_create_file("mt7530sw_reg_w", S_IFREG | S_IWUSR,
				    eth_debug.root, eth,
				    &fops_mt7530sw_reg_w);
	}
	return ret;
}

void mii_mgr_read_combine(struct mtk_eth *eth, u32 phy_addr, u32 phy_register,
			  u32 *read_data)
{
	if (mt7530_exist(eth) && phy_addr == 31)
		*read_data = mt7530_mdio_r32(eth, phy_register);

	else
		*read_data = mdiobus_read(eth->mii_bus, phy_addr, phy_register);
}

void mii_mgr_write_combine(struct mtk_eth *eth, u16 phy_addr, u16 phy_register,
			   u32 write_data)
{
	if (mt7530_exist(eth) && phy_addr == 31)
		mt7530_mdio_w32(eth, phy_register, write_data);

	else
		mdiobus_write(eth->mii_bus, phy_addr, phy_register, write_data);
}

static void mii_mgr_read_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 *data)
{
	*data = mdiobus_read(eth->mii_bus, port, mdiobus_c45_addr(devad, reg));
}

static void mii_mgr_write_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 data)
{
	mdiobus_write(eth->mii_bus, port, mdiobus_c45_addr(devad, reg), data);
}

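/* Private ioctl handler: Clause-22 and Clause-45 MDIO read/write plus
 * MT7530 switch register read/write from user space.
 */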
int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_mii_ioctl_data mii;
	struct mtk_esw_reg reg;
	u16 val;

	switch (cmd) {
	case MTKETH_MII_READ:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		mii_mgr_read_combine(eth, mii.phy_id, mii.reg_num,
				     &mii.val_out);
		if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
			goto err_copy;

		return 0;
	case MTKETH_MII_WRITE:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		mii_mgr_write_combine(eth, mii.phy_id, mii.reg_num,
				      mii.val_in);
		return 0;
	case MTKETH_MII_READ_CL45:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		mii_mgr_read_cl45(eth,
				  mdio_phy_id_prtad(mii.phy_id),
				  mdio_phy_id_devad(mii.phy_id),
				  mii.reg_num,
				  &val);
		mii.val_out = val;
		if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
			goto err_copy;

		return 0;
	case MTKETH_MII_WRITE_CL45:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		val = mii.val_in;
		mii_mgr_write_cl45(eth,
				   mdio_phy_id_prtad(mii.phy_id),
				   mdio_phy_id_devad(mii.phy_id),
				   mii.reg_num,
				   val);
		return 0;
	case MTKETH_ESW_REG_READ:
		if (!mt7530_exist(eth))
			return -EOPNOTSUPP;
		if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg)))
			goto err_copy;
		if (reg.off > REG_ESW_MAX)
			return -EINVAL;
		reg.val = mtk_switch_r32(eth, reg.off);

		if (copy_to_user(ifr->ifr_data, &reg, sizeof(reg)))
			goto err_copy;

		return 0;
	case MTKETH_ESW_REG_WRITE:
		if (!mt7530_exist(eth))
			return -EOPNOTSUPP;
		if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg)))
			goto err_copy;
		if (reg.off > REG_ESW_MAX)
			return -EINVAL;
		mtk_switch_w32(eth, reg.val, reg.off);

		return 0;
	default:
		break;
	}

	return -EOPNOTSUPP;
err_copy:
	return -EFAULT;
}

static void gdm_reg_dump_v3(struct mtk_eth *eth, u32 gdm_id, u32 mib_base)
{
	pr_info("| GDMA%d_RX_GBCNT : %010u (Rx Good Bytes) |\n",
		gdm_id, mtk_r32(eth, mib_base));
	pr_info("| GDMA%d_RX_GPCNT : %010u (Rx Good Pkts) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x08));
	pr_info("| GDMA%d_RX_OERCNT : %010u (overflow error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x10));
	pr_info("| GDMA%d_RX_FERCNT : %010u (FCS error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x14));
	pr_info("| GDMA%d_RX_SERCNT : %010u (too short) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x18));
	pr_info("| GDMA%d_RX_LERCNT : %010u (too long) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x1C));
	pr_info("| GDMA%d_RX_CERCNT : %010u (checksum error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x20));
	pr_info("| GDMA%d_RX_FCCNT : %010u (flow control) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x24));
	pr_info("| GDMA%d_RX_VDPCNT : %010u (VID drop) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x28));
	pr_info("| GDMA%d_RX_PFCCNT : %010u (priority flow control)\n",
		gdm_id, mtk_r32(eth, mib_base + 0x2C));
	pr_info("| GDMA%d_TX_GBCNT : %010u (Tx Good Bytes) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x40));
	pr_info("| GDMA%d_TX_GPCNT : %010u (Tx Good Pkts) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x48));
	pr_info("| GDMA%d_TX_SKIPCNT: %010u (abort count) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x50));
	pr_info("| GDMA%d_TX_COLCNT : %010u (collision count)|\n",
		gdm_id, mtk_r32(eth, mib_base + 0x54));
	pr_info("| GDMA%d_TX_OERCNT : %010u (overflow error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x58));
	pr_info("| GDMA%d_TX_FCCNT : %010u (flow control) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x60));
	pr_info("| GDMA%d_TX_PFCCNT : %010u (priority flow control)\n",
		gdm_id, mtk_r32(eth, mib_base + 0x64));
	pr_info("| |\n");
}

static void gdm_reg_dump_v2(struct mtk_eth *eth, u32 gdm_id, u32 mib_base)
{
	pr_info("| GDMA%d_RX_GBCNT : %010u (Rx Good Bytes) |\n",
		gdm_id, mtk_r32(eth, mib_base));
	pr_info("| GDMA%d_RX_GPCNT : %010u (Rx Good Pkts) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x08));
	pr_info("| GDMA%d_RX_OERCNT : %010u (overflow error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x10));
	pr_info("| GDMA%d_RX_FERCNT : %010u (FCS error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x14));
	pr_info("| GDMA%d_RX_SERCNT : %010u (too short) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x18));
	pr_info("| GDMA%d_RX_LERCNT : %010u (too long) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x1C));
	pr_info("| GDMA%d_RX_CERCNT : %010u (checksum error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x20));
	pr_info("| GDMA%d_RX_FCCNT : %010u (flow control) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x24));
	pr_info("| GDMA%d_TX_SKIPCNT: %010u (abort count) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x28));
	pr_info("| GDMA%d_TX_COLCNT : %010u (collision count) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x2C));
	pr_info("| GDMA%d_TX_GBCNT : %010u (Tx Good Bytes) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x30));
	pr_info("| GDMA%d_TX_GPCNT : %010u (Tx Good Pkts) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x38));
	pr_info("| |\n");
}

static void gdm_cnt_read(struct mtk_eth *eth)
{
	u32 i, mib_base;

	pr_info("\n <<CPU>>\n");
	pr_info(" |\n");
	pr_info("+-----------------------------------------------+\n");
	pr_info("| <<PSE>> |\n");
	pr_info("+-----------------------------------------------+\n");
	pr_info(" |\n");
	pr_info("+-----------------------------------------------+\n");
	pr_info("| <<GDMA>> |\n");

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		mib_base = MTK_GDM1_TX_GBCNT + MTK_STAT_OFFSET * i;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
			gdm_reg_dump_v3(eth, i + 1, mib_base);
		else
			gdm_reg_dump_v2(eth, i + 1, mib_base);
	}

	pr_info("+-----------------------------------------------+\n");
}

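/* Print one MIB counter (at register offset "base") for switch ports 0-6.
 * On NETSYS v3 the Tx/Rx pause counter offsets 0x402C and 0x408C are
 * swapped for port 6, hence the base fix-up below.
 */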
void dump_each_port(struct seq_file *seq, struct mtk_eth *eth, u32 base)
{
	u32 pkt_cnt = 0;
	int i = 0;

	for (i = 0; i < 7; i++) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			if ((base == 0x402C) && (i == 6))
				base = 0x408C;
			else if ((base == 0x408C) && (i == 6))
				base = 0x402C;
			else
				;
		}
		pkt_cnt = mt7530_mdio_r32(eth, (base) + (i * 0x100));
		seq_printf(seq, "%8u ", pkt_cnt);
	}
	seq_puts(seq, "\n");
}

int esw_cnt_read(struct seq_file *seq, void *v)
{
	unsigned int pkt_cnt = 0;
	int i = 0;
	struct mtk_eth *eth = g_eth;

	gdm_cnt_read(eth);

	if (!mt7530_exist(eth))
		return 0;

	mt798x_iomap();

	seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n",
		   "Port0", "Port1", "Port2", "Port3", "Port4", "Port5",
		   "Port6");
	seq_puts(seq, "Tx Drop Packet :");
	dump_each_port(seq, eth, 0x4000);
	seq_puts(seq, "Tx CRC Error :");
	dump_each_port(seq, eth, 0x4004);
	seq_puts(seq, "Tx Unicast Packet :");
	dump_each_port(seq, eth, 0x4008);
	seq_puts(seq, "Tx Multicast Packet :");
	dump_each_port(seq, eth, 0x400C);
	seq_puts(seq, "Tx Broadcast Packet :");
	dump_each_port(seq, eth, 0x4010);
	seq_puts(seq, "Tx Collision Event :");
	dump_each_port(seq, eth, 0x4014);
	seq_puts(seq, "Tx Pause Packet :");
	dump_each_port(seq, eth, 0x402C);
	seq_puts(seq, "Rx Drop Packet :");
	dump_each_port(seq, eth, 0x4060);
	seq_puts(seq, "Rx Filtering Packet :");
	dump_each_port(seq, eth, 0x4064);
	seq_puts(seq, "Rx Unicast Packet :");
	dump_each_port(seq, eth, 0x4068);
	seq_puts(seq, "Rx Multicast Packet :");
	dump_each_port(seq, eth, 0x406C);
	seq_puts(seq, "Rx Broadcast Packet :");
	dump_each_port(seq, eth, 0x4070);
	seq_puts(seq, "Rx Alignment Error :");
	dump_each_port(seq, eth, 0x4074);
	seq_puts(seq, "Rx CRC Error :");
	dump_each_port(seq, eth, 0x4078);
	seq_puts(seq, "Rx Undersize Error :");
	dump_each_port(seq, eth, 0x407C);
	seq_puts(seq, "Rx Fragment Error :");
	dump_each_port(seq, eth, 0x4080);
	seq_puts(seq, "Rx Oversize Error :");
	dump_each_port(seq, eth, 0x4084);
	seq_puts(seq, "Rx Jabber Error :");
	dump_each_port(seq, eth, 0x4088);
	seq_puts(seq, "Rx Pause Packet :");
	dump_each_port(seq, eth, 0x408C);
	mt7530_mdio_w32(eth, 0x4fe0, 0xf0);
	mt7530_mdio_w32(eth, 0x4fe0, 0x800000f0);

	seq_puts(seq, "\n");

	mt798x_iounmap();

	return 0;
}

static int switch_count_open(struct inode *inode, struct file *file)
{
	return single_open(file, esw_cnt_read, 0);
}

static const struct file_operations switch_count_fops = {
	.owner = THIS_MODULE,
	.open = switch_count_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

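/* Dump the XFI MAC MIB counters of one GMAC through the
 * PRINT_FORMATTED_XFI_MIB helpers; 64-bit counters use the _MIB64 variant.
 */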
void xfi_mib_dump(struct seq_file *seq, u32 gdm_id)
{
	struct mtk_eth *eth = g_eth;

	PRINT_FORMATTED_XFI_MIB(seq, TX_PKT_CNT, GENMASK(31, 0));
	PRINT_FORMATTED_XFI_MIB(seq, TX_ETH_CNT, GENMASK(31, 0));
	PRINT_FORMATTED_XFI_MIB(seq, TX_PAUSE_CNT, GENMASK(15, 0));
	PRINT_FORMATTED_XFI_MIB(seq, TX_BYTE_CNT, GENMASK(31, 0));
	PRINT_FORMATTED_XFI_MIB64(seq, TX_UC_PKT_CNT);
	PRINT_FORMATTED_XFI_MIB64(seq, TX_MC_PKT_CNT);
	PRINT_FORMATTED_XFI_MIB64(seq, TX_BC_PKT_CNT);

	PRINT_FORMATTED_XFI_MIB(seq, RX_PKT_CNT, GENMASK(31, 0));
	PRINT_FORMATTED_XFI_MIB(seq, RX_ETH_CNT, GENMASK(31, 0));
	PRINT_FORMATTED_XFI_MIB(seq, RX_PAUSE_CNT, GENMASK(15, 0));
	PRINT_FORMATTED_XFI_MIB(seq, RX_LEN_ERR_CNT, GENMASK(15, 0));
	PRINT_FORMATTED_XFI_MIB(seq, RX_CRC_ERR_CNT, GENMASK(15, 0));
	PRINT_FORMATTED_XFI_MIB64(seq, RX_UC_PKT_CNT);
	PRINT_FORMATTED_XFI_MIB64(seq, RX_MC_PKT_CNT);
	PRINT_FORMATTED_XFI_MIB64(seq, RX_BC_PKT_CNT);
	PRINT_FORMATTED_XFI_MIB(seq, RX_UC_DROP_CNT, GENMASK(31, 0));
	PRINT_FORMATTED_XFI_MIB(seq, RX_BC_DROP_CNT, GENMASK(31, 0));
	PRINT_FORMATTED_XFI_MIB(seq, RX_MC_DROP_CNT, GENMASK(31, 0));
	PRINT_FORMATTED_XFI_MIB(seq, RX_ALL_DROP_CNT, GENMASK(31, 0));
}

int xfi_cnt_read(struct seq_file *seq, void *v)
{
	struct mtk_eth *eth = g_eth;
	int i;

	seq_puts(seq, "+------------------------------------+\n");
	seq_puts(seq, "| <<XFI MAC>> |\n");

	for (i = MTK_GMAC2_ID; i < MTK_GMAC_ID_MAX; i++) {
		xfi_mib_dump(seq, i);
		mtk_m32(eth, 0x1, 0x1, MTK_XFI_MIB_BASE(i) + MTK_XFI_CNT_CTRL);
		seq_puts(seq, "| |\n");
	}

	seq_puts(seq, "+------------------------------------+\n");

	return 0;
}

static int xfi_count_open(struct inode *inode, struct file *file)
{
	return single_open(file, xfi_cnt_read, 0);
}

static const struct file_operations xfi_count_fops = {
	.owner = THIS_MODULE,
	.open = xfi_count_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

static struct proc_dir_entry *proc_tx_ring, *proc_hwtx_ring, *proc_rx_ring;
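
/* procfs dumpers for the main Tx descriptor ring, the hardware free-queue
 * (scratch) ring and Rx ring 0: one descriptor per line, with the extended
 * txd5-txd8/rxd5-rxd8 words on NETSYS v2/v3.
 */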
int tx_ring_read(struct seq_file *seq, void *v)
{
	struct mtk_eth *eth = g_eth;
	struct mtk_tx_ring *ring = &g_eth->tx_ring;
	struct mtk_tx_dma_v2 *tx_ring;
	int i = 0;

	seq_printf(seq, "free count = %d\n", (int)atomic_read(&ring->free_count));
	seq_printf(seq, "cpu next free: %d\n", (int)(ring->next_free - ring->dma));
	seq_printf(seq, "cpu last free: %d\n", (int)(ring->last_free - ring->dma));
	for (i = 0; i < MTK_DMA_SIZE; i++) {
		dma_addr_t tmp = ring->phys +
				 i * (dma_addr_t)eth->soc->txrx.txd_size;

		tx_ring = ring->dma + i * eth->soc->txrx.txd_size;

		seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &tmp,
			   tx_ring->txd1, tx_ring->txd2,
			   tx_ring->txd3, tx_ring->txd4);

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			seq_printf(seq, " %08x %08x %08x %08x",
				   tx_ring->txd5, tx_ring->txd6,
				   tx_ring->txd7, tx_ring->txd8);
		}

		seq_printf(seq, "\n");
	}

	return 0;
}

static int tx_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, tx_ring_read, NULL);
}

static const struct file_operations tx_ring_fops = {
	.owner = THIS_MODULE,
	.open = tx_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

int hwtx_ring_read(struct seq_file *seq, void *v)
{
	struct mtk_eth *eth = g_eth;
	struct mtk_tx_dma_v2 *hwtx_ring;
	int i = 0;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		dma_addr_t addr = eth->phy_scratch_ring +
				  i * (dma_addr_t)eth->soc->txrx.txd_size;

		hwtx_ring = eth->scratch_ring + i * eth->soc->txrx.txd_size;

		seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &addr,
			   hwtx_ring->txd1, hwtx_ring->txd2,
			   hwtx_ring->txd3, hwtx_ring->txd4);

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
			seq_printf(seq, " %08x %08x %08x %08x",
				   hwtx_ring->txd5, hwtx_ring->txd6,
				   hwtx_ring->txd7, hwtx_ring->txd8);
		}

		seq_printf(seq, "\n");
	}

	return 0;
}

static int hwtx_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, hwtx_ring_read, NULL);
}

static const struct file_operations hwtx_ring_fops = {
	.owner = THIS_MODULE,
	.open = hwtx_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

int rx_ring_read(struct seq_file *seq, void *v)
{
	struct mtk_eth *eth = g_eth;
	struct mtk_rx_ring *ring = &g_eth->rx_ring[0];
	struct mtk_rx_dma_v2 *rx_ring;
	int i = 0;

	seq_printf(seq, "next to read: %d\n",
		   NEXT_DESP_IDX(ring->calc_idx, MTK_DMA_SIZE));
	for (i = 0; i < MTK_DMA_SIZE; i++) {
		rx_ring = ring->dma + i * eth->soc->txrx.rxd_size;

		seq_printf(seq, "%d: %08x %08x %08x %08x", i,
			   rx_ring->rxd1, rx_ring->rxd2,
			   rx_ring->rxd3, rx_ring->rxd4);

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
			seq_printf(seq, " %08x %08x %08x %08x",
				   rx_ring->rxd5, rx_ring->rxd6,
				   rx_ring->rxd7, rx_ring->rxd8);
		}

		seq_printf(seq, "\n");
	}

	return 0;
}

static int rx_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, rx_ring_read, NULL);
}

static const struct file_operations rx_ring_fops = {
	.owner = THIS_MODULE,
	.open = rx_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

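/* One-shot read of a register by physical address: map it, read it once,
 * unmap it.  Used for registers outside the ethernet register window
 * (SGMII efuse, SGMII false-carrier counters, WED).
 */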
static inline u32 mtk_dbg_r32(u32 reg)
{
	void __iomem *virt_reg;
	u32 val;

	virt_reg = ioremap(reg, 32);
	val = __raw_readl(virt_reg);
	iounmap(virt_reg);

	return val;
}

int dbg_regs_read(struct seq_file *seq, void *v)
{
	struct mtk_eth *eth = g_eth;

	seq_puts(seq, " <<DEBUG REG DUMP>>\n");

	seq_printf(seq, "| FE_INT_STA : %08x |\n",
		   mtk_r32(eth, MTK_FE_INT_STATUS));
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
		seq_printf(seq, "| FE_INT_STA2 : %08x |\n",
			   mtk_r32(eth, MTK_FE_INT_STATUS2));

	seq_printf(seq, "| PSE_FQFC_CFG : %08x |\n",
		   mtk_r32(eth, MTK_PSE_FQFC_CFG));
	seq_printf(seq, "| PSE_IQ_STA1 : %08x |\n",
		   mtk_r32(eth, MTK_PSE_IQ_STA(0)));
	seq_printf(seq, "| PSE_IQ_STA2 : %08x |\n",
		   mtk_r32(eth, MTK_PSE_IQ_STA(1)));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		seq_printf(seq, "| PSE_IQ_STA3 : %08x |\n",
			   mtk_r32(eth, MTK_PSE_IQ_STA(2)));
		seq_printf(seq, "| PSE_IQ_STA4 : %08x |\n",
			   mtk_r32(eth, MTK_PSE_IQ_STA(3)));
		seq_printf(seq, "| PSE_IQ_STA5 : %08x |\n",
			   mtk_r32(eth, MTK_PSE_IQ_STA(4)));
		seq_printf(seq, "| PSE_IQ_STA6 : %08x |\n",
			   mtk_r32(eth, MTK_PSE_IQ_STA(5)));
		seq_printf(seq, "| PSE_IQ_STA7 : %08x |\n",
			   mtk_r32(eth, MTK_PSE_IQ_STA(6)));
		seq_printf(seq, "| PSE_IQ_STA8 : %08x |\n",
			   mtk_r32(eth, MTK_PSE_IQ_STA(7)));
	}

	seq_printf(seq, "| PSE_OQ_STA1 : %08x |\n",
		   mtk_r32(eth, MTK_PSE_OQ_STA(0)));
	seq_printf(seq, "| PSE_OQ_STA2 : %08x |\n",
		   mtk_r32(eth, MTK_PSE_OQ_STA(1)));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		seq_printf(seq, "| PSE_OQ_STA3 : %08x |\n",
			   mtk_r32(eth, MTK_PSE_OQ_STA(2)));
		seq_printf(seq, "| PSE_OQ_STA4 : %08x |\n",
			   mtk_r32(eth, MTK_PSE_OQ_STA(3)));
		seq_printf(seq, "| PSE_OQ_STA5 : %08x |\n",
			   mtk_r32(eth, MTK_PSE_OQ_STA(4)));
		seq_printf(seq, "| PSE_OQ_STA6 : %08x |\n",
			   mtk_r32(eth, MTK_PSE_OQ_STA(5)));
		seq_printf(seq, "| PSE_OQ_STA7 : %08x |\n",
			   mtk_r32(eth, MTK_PSE_OQ_STA(6)));
		seq_printf(seq, "| PSE_OQ_STA8 : %08x |\n",
			   mtk_r32(eth, MTK_PSE_OQ_STA(7)));
	}

	seq_printf(seq, "| PDMA_CRX_IDX : %08x |\n",
		   mtk_r32(eth, MTK_PRX_CRX_IDX0));
	seq_printf(seq, "| PDMA_DRX_IDX : %08x |\n",
		   mtk_r32(eth, MTK_PRX_DRX_IDX0));
	seq_printf(seq, "| QDMA_CTX_IDX : %08x |\n",
		   mtk_r32(eth, MTK_QTX_CTX_PTR));
	seq_printf(seq, "| QDMA_DTX_IDX : %08x |\n",
		   mtk_r32(eth, MTK_QTX_DTX_PTR));
	seq_printf(seq, "| QDMA_FQ_CNT : %08x |\n",
		   mtk_r32(eth, MTK_QDMA_FQ_CNT));
	seq_printf(seq, "| QDMA_FWD_CNT : %08x |\n",
		   mtk_r32(eth, MTK_QDMA_FWD_CNT));
	seq_printf(seq, "| QDMA_FSM : %08x |\n",
		   mtk_r32(eth, MTK_QDMA_FSM));
	seq_printf(seq, "| FE_PSE_FREE : %08x |\n",
		   mtk_r32(eth, MTK_FE_PSE_FREE));
	seq_printf(seq, "| FE_DROP_FQ : %08x |\n",
		   mtk_r32(eth, MTK_FE_DROP_FQ));
	seq_printf(seq, "| FE_DROP_FC : %08x |\n",
		   mtk_r32(eth, MTK_FE_DROP_FC));
	seq_printf(seq, "| FE_DROP_PPE : %08x |\n",
		   mtk_r32(eth, MTK_FE_DROP_PPE));
	seq_printf(seq, "| GDM1_IG_CTRL : %08x |\n",
		   mtk_r32(eth, MTK_GDMA_FWD_CFG(0)));
	seq_printf(seq, "| GDM2_IG_CTRL : %08x |\n",
		   mtk_r32(eth, MTK_GDMA_FWD_CFG(1)));
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		seq_printf(seq, "| GDM3_IG_CTRL : %08x |\n",
			   mtk_r32(eth, MTK_GDMA_FWD_CFG(2)));
	}
	seq_printf(seq, "| MAC_P1_MCR : %08x |\n",
		   mtk_r32(eth, MTK_MAC_MCR(0)));
	seq_printf(seq, "| MAC_P2_MCR : %08x |\n",
		   mtk_r32(eth, MTK_MAC_MCR(1)));
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		seq_printf(seq, "| MAC_P3_MCR : %08x |\n",
			   mtk_r32(eth, MTK_MAC_MCR(2)));
	}
	seq_printf(seq, "| MAC_P1_FSM : %08x |\n",
		   mtk_r32(eth, MTK_MAC_FSM(0)));
	seq_printf(seq, "| MAC_P2_FSM : %08x |\n",
		   mtk_r32(eth, MTK_MAC_FSM(1)));
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		seq_printf(seq, "| MAC_P3_FSM : %08x |\n",
			   mtk_r32(eth, MTK_MAC_FSM(2)));
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
		seq_printf(seq, "| FE_CDM1_FSM : %08x |\n",
			   mtk_r32(eth, MTK_FE_CDM1_FSM));
		seq_printf(seq, "| FE_CDM2_FSM : %08x |\n",
			   mtk_r32(eth, MTK_FE_CDM2_FSM));
		seq_printf(seq, "| FE_CDM3_FSM : %08x |\n",
			   mtk_r32(eth, MTK_FE_CDM3_FSM));
		seq_printf(seq, "| FE_CDM4_FSM : %08x |\n",
			   mtk_r32(eth, MTK_FE_CDM4_FSM));
		seq_printf(seq, "| FE_CDM5_FSM : %08x |\n",
			   mtk_r32(eth, MTK_FE_CDM5_FSM));
		seq_printf(seq, "| FE_CDM6_FSM : %08x |\n",
			   mtk_r32(eth, MTK_FE_CDM6_FSM));
		seq_printf(seq, "| FE_GDM1_FSM : %08x |\n",
			   mtk_r32(eth, MTK_FE_GDM1_FSM));
		seq_printf(seq, "| FE_GDM2_FSM : %08x |\n",
			   mtk_r32(eth, MTK_FE_GDM2_FSM));
		seq_printf(seq, "| SGMII_EFUSE : %08x |\n",
			   mtk_dbg_r32(MTK_SGMII_EFUSE));
		seq_printf(seq, "| SGMII0_RX_CNT : %08x |\n",
			   mtk_dbg_r32(MTK_SGMII_FALSE_CARRIER_CNT(0)));
		seq_printf(seq, "| SGMII1_RX_CNT : %08x |\n",
			   mtk_dbg_r32(MTK_SGMII_FALSE_CARRIER_CNT(1)));
		seq_printf(seq, "| WED_RTQM_GLO : %08x |\n",
			   mtk_dbg_r32(MTK_WED_RTQM_GLO_CFG));
	}

	mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
		mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS2);

	return 0;
}

static int dbg_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dbg_regs_read, 0);
}

static const struct file_operations dbg_regs_fops = {
	.owner = THIS_MODULE,
	.open = dbg_regs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

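/* Per-ring HW LRO statistics.  hw_lro_stats_update() histograms the
 * aggregate count and length of every flushed LRO super-packet, while
 * hw_lro_flush_stats_update() records why it was flushed.  LRO ring
 * indices start at 4 with the v2 Rx descriptor layout and at 1 otherwise.
 */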
void hw_lro_stats_update(u32 ring_no, struct mtk_rx_dma_v2 *rxd)
{
	struct mtk_eth *eth = g_eth;
	u32 idx, agg_cnt, agg_size;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
		idx = ring_no - 4;
		agg_cnt = RX_DMA_GET_AGG_CNT_V2(rxd->rxd6);
	} else {
		idx = ring_no - 1;
		agg_cnt = RX_DMA_GET_AGG_CNT(rxd->rxd2);
	}

	if (idx >= MTK_HW_LRO_RING_NUM)
		return;

	agg_size = RX_DMA_GET_PLEN0(rxd->rxd2);

	hw_lro_agg_size_cnt[idx][agg_size / 5000]++;
	hw_lro_agg_num_cnt[idx][agg_cnt]++;
	hw_lro_tot_flush_cnt[idx]++;
	hw_lro_tot_agg_cnt[idx] += agg_cnt;
}

void hw_lro_flush_stats_update(u32 ring_no, struct mtk_rx_dma_v2 *rxd)
{
	struct mtk_eth *eth = g_eth;
	u32 idx, flush_reason;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
		idx = ring_no - 4;
		flush_reason = RX_DMA_GET_FLUSH_RSN_V2(rxd->rxd6);
	} else {
		idx = ring_no - 1;
		flush_reason = RX_DMA_GET_REV(rxd->rxd2);
	}

	if (idx >= MTK_HW_LRO_RING_NUM)
		return;

	if ((flush_reason & 0x7) == MTK_HW_LRO_AGG_FLUSH)
		hw_lro_agg_flush_cnt[idx]++;
	else if ((flush_reason & 0x7) == MTK_HW_LRO_AGE_FLUSH)
		hw_lro_age_flush_cnt[idx]++;
	else if ((flush_reason & 0x7) == MTK_HW_LRO_NOT_IN_SEQ_FLUSH)
		hw_lro_seq_flush_cnt[idx]++;
	else if ((flush_reason & 0x7) == MTK_HW_LRO_TIMESTAMP_FLUSH)
		hw_lro_timestamp_flush_cnt[idx]++;
	else if ((flush_reason & 0x7) == MTK_HW_LRO_NON_RULE_FLUSH)
		hw_lro_norule_flush_cnt[idx]++;
}

ssize_t hw_lro_stats_write(struct file *file, const char __user *buffer,
			   size_t count, loff_t *data)
{
	memset(hw_lro_agg_num_cnt, 0, sizeof(hw_lro_agg_num_cnt));
	memset(hw_lro_agg_size_cnt, 0, sizeof(hw_lro_agg_size_cnt));
	memset(hw_lro_tot_agg_cnt, 0, sizeof(hw_lro_tot_agg_cnt));
	memset(hw_lro_tot_flush_cnt, 0, sizeof(hw_lro_tot_flush_cnt));
	memset(hw_lro_agg_flush_cnt, 0, sizeof(hw_lro_agg_flush_cnt));
	memset(hw_lro_age_flush_cnt, 0, sizeof(hw_lro_age_flush_cnt));
	memset(hw_lro_seq_flush_cnt, 0, sizeof(hw_lro_seq_flush_cnt));
	memset(hw_lro_timestamp_flush_cnt, 0,
	       sizeof(hw_lro_timestamp_flush_cnt));
	memset(hw_lro_norule_flush_cnt, 0, sizeof(hw_lro_norule_flush_cnt));

	pr_info("clear hw lro cnt table\n");

	return count;
}

int hw_lro_stats_read_v1(struct seq_file *seq, void *v)
{
	int i;

	seq_puts(seq, "HW LRO statistic dump:\n");

	/* Agg number count */
	seq_puts(seq, "Cnt: RING1 | RING2 | RING3 | Total\n");
	for (i = 0; i <= MTK_HW_LRO_MAX_AGG_CNT; i++) {
		seq_printf(seq, " %d : %d %d %d %d\n",
			   i, hw_lro_agg_num_cnt[0][i],
			   hw_lro_agg_num_cnt[1][i], hw_lro_agg_num_cnt[2][i],
			   hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] +
			   hw_lro_agg_num_cnt[2][i]);
	}

	/* Total agg count */
	seq_puts(seq, "Total agg: RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, " %d %d %d %d\n",
		   hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1],
		   hw_lro_tot_agg_cnt[2],
		   hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		   hw_lro_tot_agg_cnt[2]);

	/* Total flush count */
	seq_puts(seq, "Total flush: RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, " %d %d %d %d\n",
		   hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1],
		   hw_lro_tot_flush_cnt[2],
		   hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		   hw_lro_tot_flush_cnt[2]);

	/* Avg agg count */
	seq_puts(seq, "Avg agg: RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, " %d %d %d %d\n",
		   (hw_lro_tot_flush_cnt[0]) ?
		   hw_lro_tot_agg_cnt[0] / hw_lro_tot_flush_cnt[0] : 0,
		   (hw_lro_tot_flush_cnt[1]) ?
		   hw_lro_tot_agg_cnt[1] / hw_lro_tot_flush_cnt[1] : 0,
		   (hw_lro_tot_flush_cnt[2]) ?
		   hw_lro_tot_agg_cnt[2] / hw_lro_tot_flush_cnt[2] : 0,
		   (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		    hw_lro_tot_flush_cnt[2]) ?
		   ((hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		     hw_lro_tot_agg_cnt[2]) / (hw_lro_tot_flush_cnt[0] +
		    hw_lro_tot_flush_cnt[1] + hw_lro_tot_flush_cnt[2])) : 0);

	/* Statistics of aggregation size counts */
	seq_puts(seq, "HW LRO flush pkt len:\n");
	seq_puts(seq, " Length | RING1 | RING2 | RING3 | Total\n");
	for (i = 0; i < 15; i++) {
		seq_printf(seq, "%d~%d: %d %d %d %d\n", i * 5000,
			   (i + 1) * 5000, hw_lro_agg_size_cnt[0][i],
			   hw_lro_agg_size_cnt[1][i], hw_lro_agg_size_cnt[2][i],
			   hw_lro_agg_size_cnt[0][i] +
			   hw_lro_agg_size_cnt[1][i] +
			   hw_lro_agg_size_cnt[2][i]);
	}

	seq_puts(seq, "Flush reason: RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, "AGG timeout: %d %d %d %d\n",
		   hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1],
		   hw_lro_agg_flush_cnt[2],
		   (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] +
		    hw_lro_agg_flush_cnt[2]));

	seq_printf(seq, "AGE timeout: %d %d %d %d\n",
		   hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1],
		   hw_lro_age_flush_cnt[2],
		   (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] +
		    hw_lro_age_flush_cnt[2]));

	seq_printf(seq, "Not in-sequence: %d %d %d %d\n",
		   hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1],
		   hw_lro_seq_flush_cnt[2],
		   (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] +
		    hw_lro_seq_flush_cnt[2]));

	seq_printf(seq, "Timestamp: %d %d %d %d\n",
		   hw_lro_timestamp_flush_cnt[0],
		   hw_lro_timestamp_flush_cnt[1],
		   hw_lro_timestamp_flush_cnt[2],
		   (hw_lro_timestamp_flush_cnt[0] +
		    hw_lro_timestamp_flush_cnt[1] +
		    hw_lro_timestamp_flush_cnt[2]));

	seq_printf(seq, "No LRO rule: %d %d %d %d\n",
		   hw_lro_norule_flush_cnt[0],
		   hw_lro_norule_flush_cnt[1],
		   hw_lro_norule_flush_cnt[2],
		   (hw_lro_norule_flush_cnt[0] +
		    hw_lro_norule_flush_cnt[1] +
		    hw_lro_norule_flush_cnt[2]));

	return 0;
}

int hw_lro_stats_read_v2(struct seq_file *seq, void *v)
{
	int i;

	seq_puts(seq, "HW LRO statistic dump:\n");

	/* Agg number count */
	seq_puts(seq, "Cnt: RING4 | RING5 | RING6 | RING7 Total\n");
	for (i = 0; i <= MTK_HW_LRO_MAX_AGG_CNT; i++) {
		seq_printf(seq,
			   " %d : %d %d %d %d %d\n",
			   i, hw_lro_agg_num_cnt[0][i], hw_lro_agg_num_cnt[1][i],
			   hw_lro_agg_num_cnt[2][i], hw_lro_agg_num_cnt[3][i],
			   hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] +
			   hw_lro_agg_num_cnt[2][i] + hw_lro_agg_num_cnt[3][i]);
	}

	/* Total agg count */
	seq_puts(seq, "Total agg: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, " %d %d %d %d %d\n",
		   hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1],
		   hw_lro_tot_agg_cnt[2], hw_lro_tot_agg_cnt[3],
		   hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		   hw_lro_tot_agg_cnt[2] + hw_lro_tot_agg_cnt[3]);

	/* Total flush count */
	seq_puts(seq, "Total flush: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, " %d %d %d %d %d\n",
		   hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1],
		   hw_lro_tot_flush_cnt[2], hw_lro_tot_flush_cnt[3],
		   hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		   hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3]);

	/* Avg agg count */
	seq_puts(seq, "Avg agg: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, " %d %d %d %d %d\n",
		   (hw_lro_tot_flush_cnt[0]) ?
		   hw_lro_tot_agg_cnt[0] / hw_lro_tot_flush_cnt[0] : 0,
		   (hw_lro_tot_flush_cnt[1]) ?
		   hw_lro_tot_agg_cnt[1] / hw_lro_tot_flush_cnt[1] : 0,
		   (hw_lro_tot_flush_cnt[2]) ?
		   hw_lro_tot_agg_cnt[2] / hw_lro_tot_flush_cnt[2] : 0,
		   (hw_lro_tot_flush_cnt[3]) ?
		   hw_lro_tot_agg_cnt[3] / hw_lro_tot_flush_cnt[3] : 0,
		   (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		    hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3]) ?
		   ((hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		     hw_lro_tot_agg_cnt[2] + hw_lro_tot_agg_cnt[3]) /
		    (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		     hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3])) : 0);

	/* Statistics of aggregation size counts */
	seq_puts(seq, "HW LRO flush pkt len:\n");
	seq_puts(seq, " Length | RING4 | RING5 | RING6 | RING7 Total\n");
	for (i = 0; i < 15; i++) {
		seq_printf(seq, "%d~%d: %d %d %d %d %d\n",
			   i * 5000, (i + 1) * 5000,
			   hw_lro_agg_size_cnt[0][i], hw_lro_agg_size_cnt[1][i],
			   hw_lro_agg_size_cnt[2][i], hw_lro_agg_size_cnt[3][i],
			   hw_lro_agg_size_cnt[0][i] +
			   hw_lro_agg_size_cnt[1][i] +
			   hw_lro_agg_size_cnt[2][i] +
			   hw_lro_agg_size_cnt[3][i]);
	}

	seq_puts(seq, "Flush reason: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, "AGG timeout: %d %d %d %d %d\n",
		   hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1],
		   hw_lro_agg_flush_cnt[2], hw_lro_agg_flush_cnt[3],
		   (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] +
		    hw_lro_agg_flush_cnt[2] + hw_lro_agg_flush_cnt[3]));

	seq_printf(seq, "AGE timeout: %d %d %d %d %d\n",
		   hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1],
		   hw_lro_age_flush_cnt[2], hw_lro_age_flush_cnt[3],
		   (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] +
		    hw_lro_age_flush_cnt[2] + hw_lro_age_flush_cnt[3]));

	seq_printf(seq, "Not in-sequence: %d %d %d %d %d\n",
		   hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1],
		   hw_lro_seq_flush_cnt[2], hw_lro_seq_flush_cnt[3],
		   (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] +
		    hw_lro_seq_flush_cnt[2] + hw_lro_seq_flush_cnt[3]));

	seq_printf(seq, "Timestamp: %d %d %d %d %d\n",
		   hw_lro_timestamp_flush_cnt[0],
		   hw_lro_timestamp_flush_cnt[1],
		   hw_lro_timestamp_flush_cnt[2],
		   hw_lro_timestamp_flush_cnt[3],
		   (hw_lro_timestamp_flush_cnt[0] +
		    hw_lro_timestamp_flush_cnt[1] +
		    hw_lro_timestamp_flush_cnt[2] +
		    hw_lro_timestamp_flush_cnt[3]));

	seq_printf(seq, "No LRO rule: %d %d %d %d %d\n",
		   hw_lro_norule_flush_cnt[0],
		   hw_lro_norule_flush_cnt[1],
		   hw_lro_norule_flush_cnt[2],
		   hw_lro_norule_flush_cnt[3],
		   (hw_lro_norule_flush_cnt[0] +
		    hw_lro_norule_flush_cnt[1] +
		    hw_lro_norule_flush_cnt[2] +
		    hw_lro_norule_flush_cnt[3]));

	return 0;
}

int hw_lro_stats_read_wrapper(struct seq_file *seq, void *v)
{
	struct mtk_eth *eth = g_eth;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
		hw_lro_stats_read_v2(seq, v);
	else
		hw_lro_stats_read_v1(seq, v);

	return 0;
}

static int hw_lro_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, hw_lro_stats_read_wrapper, NULL);
}

static const struct file_operations hw_lro_stats_fops = {
	.owner = THIS_MODULE,
	.open = hw_lro_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = hw_lro_stats_write,
	.release = single_release
};

int hwlro_agg_cnt_ctrl(int cnt)
{
	int i;

	for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
		SET_PDMA_RXRING_MAX_AGG_CNT(g_eth, i, cnt);

	return 0;
}

int hwlro_agg_time_ctrl(int time)
{
	int i;

	for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
		SET_PDMA_RXRING_AGG_TIME(g_eth, i, time);

	return 0;
}

int hwlro_age_time_ctrl(int time)
{
	int i;

	for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
		SET_PDMA_RXRING_AGE_TIME(g_eth, i, time);

	return 0;
}

int hwlro_threshold_ctrl(int bandwidth)
{
	SET_PDMA_LRO_BW_THRESHOLD(g_eth, bandwidth);

	return 0;
}

int hwlro_ring_enable_ctrl(int enable)
{
	int i;

	pr_info("[%s] %s HW LRO rings\n", __func__, (enable) ? "Enable" : "Disable");

	for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
		SET_PDMA_RXRING_VALID(g_eth, i, enable);

	return 0;
}

int hwlro_stats_enable_ctrl(int enable)
{
	pr_info("[%s] %s HW LRO statistics\n", __func__, (enable) ? "Enable" : "Disable");
	mtk_hwlro_stats_ebl = enable;

	return 0;
}

static const mtk_lro_dbg_func lro_dbg_func[] = {
	[0] = hwlro_agg_cnt_ctrl,
	[1] = hwlro_agg_time_ctrl,
	[2] = hwlro_age_time_ctrl,
	[3] = hwlro_threshold_ctrl,
	[4] = hwlro_ring_enable_ctrl,
	[5] = hwlro_stats_enable_ctrl,
};

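/* procfs write handler for the HW LRO auto-learn table.  The input is
 * "<func> <val>": func indexes lro_dbg_func[] (0 agg cnt, 1 agg time,
 * 2 age time, 3 bandwidth threshold, 4 ring enable, 5 stats enable) and
 * val is the argument passed to it; e.g. writing "5 1" to the auto-TLB
 * proc entry enables LRO statistics (illustrative).
 */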
ssize_t hw_lro_auto_tlb_write(struct file *file, const char __user *buffer,
			      size_t count, loff_t *data)
{
	char buf[32];
	char *p_buf;
	char *p_token = NULL;
	char *p_delimiter = " \t";
	long x = 0, y = 0;
	u32 len = count;
	int ret;

	if (len >= sizeof(buf)) {
		pr_info("Input handling fail!\n");
		return -1;
	}

	if (copy_from_user(buf, buffer, len))
		return -EFAULT;

	buf[len] = '\0';

	p_buf = buf;
	p_token = strsep(&p_buf, p_delimiter);
	if (!p_token)
		x = 0;
	else
		ret = kstrtol(p_token, 10, &x);

	p_token = strsep(&p_buf, "\t\n ");
	if (p_token)
		ret = kstrtol(p_token, 10, &y);

	if (x >= 0 && x < ARRAY_SIZE(lro_dbg_func) && lro_dbg_func[x])
		(*lro_dbg_func[x]) (y);

	return count;
}
1531
void hw_lro_auto_tlb_dump_v1(struct seq_file *seq, u32 index)
{
	int i;
	struct mtk_lro_alt_v1 alt;
	__be32 addr;
	u32 tlb_info[9];
	u32 dw_len, cnt, priority;
	u32 entry;

	if (index > 4)
		index = index - 1;
	entry = (index * 9) + 1;

	/* read valid entries of the auto-learn table */
	mtk_w32(g_eth, entry, MTK_FE_ALT_CF8);

	for (i = 0; i < 9; i++)
		tlb_info[i] = mtk_r32(g_eth, MTK_FE_ALT_SEQ_CFC);

	memcpy(&alt, tlb_info, sizeof(struct mtk_lro_alt_v1));

	dw_len = alt.alt_info7.dw_len;
	cnt = alt.alt_info6.cnt;

	if (mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW0) & MTK_LRO_ALT_PKT_CNT_MODE)
		priority = cnt;		/* packet count */
	else
		priority = dw_len;	/* byte count */

	/* dump valid entries of the auto-learn table */
	if (index >= 4)
		seq_printf(seq, "\n===== TABLE Entry: %d (Act) =====\n", index);
	else
		seq_printf(seq, "\n===== TABLE Entry: %d (LRU) =====\n", index);

	if (alt.alt_info8.ipv4) {
		addr = htonl(alt.alt_info1.sip0);
		seq_printf(seq, "SIP = %pI4 (IPv4)\n", &addr);
	} else {
		seq_printf(seq, "SIP = %08X:%08X:%08X:%08X (IPv6)\n",
			   alt.alt_info4.sip3, alt.alt_info3.sip2,
			   alt.alt_info2.sip1, alt.alt_info1.sip0);
	}

	seq_printf(seq, "DIP_ID = %d\n", alt.alt_info8.dip_id);
	seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
		   alt.alt_info0.stp, alt.alt_info0.dtp);
	seq_printf(seq, "VLAN_VID_VLD = %d\n", alt.alt_info6.vlan_vid_vld);
	seq_printf(seq, "VLAN1 = %d | VLAN2 = %d | VLAN3 = %d | VLAN4 = %d\n",
		   (alt.alt_info5.vlan_vid0 & 0xfff),
		   ((alt.alt_info5.vlan_vid0 >> 12) & 0xfff),
		   ((alt.alt_info6.vlan_vid1 << 8) |
		    ((alt.alt_info5.vlan_vid0 >> 24) & 0xfff)),
		   ((alt.alt_info6.vlan_vid1 >> 4) & 0xfff));
	seq_printf(seq, "TPUT = %d | FREQ = %d\n", dw_len, cnt);
	seq_printf(seq, "PRIORITY = %d\n", priority);
}

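/*
 * Dump one entry of the V2 auto-learn table: the entry index is selected
 * through MTK_LRO_ALT_DBG and the raw words are read back from
 * MTK_LRO_ALT_DBG_DATA before being decoded through struct mtk_lro_alt_v2.
 */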
void hw_lro_auto_tlb_dump_v2(struct seq_file *seq, u32 index)
{
	int i;
	struct mtk_lro_alt_v2 alt;
	u32 score = 0, ipv4 = 0;
	u32 ipv6[4] = { 0 };
	u32 tlb_info[12];

	/* read valid entries of the auto-learn table */
	mtk_w32(g_eth, index << MTK_LRO_ALT_INDEX_OFFSET, MTK_LRO_ALT_DBG);

	for (i = 0; i < 11; i++)
		tlb_info[i] = mtk_r32(g_eth, MTK_LRO_ALT_DBG_DATA);

	memcpy(&alt, tlb_info, sizeof(struct mtk_lro_alt_v2));

	if (mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW0) & MTK_LRO_ALT_PKT_CNT_MODE)
		score = 1;	/* packet count */
	else
		score = 0;	/* byte count */

	/* dump valid entries of the auto-learn table */
	if (alt.alt_info0.valid) {
		if (index < 5)
			seq_printf(seq,
				   "\n===== TABLE Entry: %d (ongoing) =====\n",
				   index);
		else
			seq_printf(seq,
				   "\n===== TABLE Entry: %d (candidate) =====\n",
				   index);

		if (alt.alt_info1.v4_valid) {
			ipv4 = (alt.alt_info4.sip0_h << 23) |
				alt.alt_info5.sip0_l;
			seq_printf(seq, "SIP = 0x%x: (IPv4)\n", ipv4);

			ipv4 = (alt.alt_info8.dip0_h << 23) |
				alt.alt_info9.dip0_l;
			seq_printf(seq, "DIP = 0x%x: (IPv4)\n", ipv4);
		} else if (alt.alt_info1.v6_valid) {
			ipv6[3] = (alt.alt_info1.sip3_h << 23) |
				  (alt.alt_info2.sip3_l << 9);
			ipv6[2] = (alt.alt_info2.sip2_h << 23) |
				  (alt.alt_info3.sip2_l << 9);
			ipv6[1] = (alt.alt_info3.sip1_h << 23) |
				  (alt.alt_info4.sip1_l << 9);
			ipv6[0] = (alt.alt_info4.sip0_h << 23) |
				  (alt.alt_info5.sip0_l << 9);
			seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
				   ipv6[3], ipv6[2], ipv6[1], ipv6[0]);

			ipv6[3] = (alt.alt_info5.dip3_h << 23) |
				  (alt.alt_info6.dip3_l << 9);
			ipv6[2] = (alt.alt_info6.dip2_h << 23) |
				  (alt.alt_info7.dip2_l << 9);
			ipv6[1] = (alt.alt_info7.dip1_h << 23) |
				  (alt.alt_info8.dip1_l << 9);
			ipv6[0] = (alt.alt_info8.dip0_h << 23) |
				  (alt.alt_info9.dip0_l << 9);
			seq_printf(seq, "DIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
				   ipv6[3], ipv6[2], ipv6[1], ipv6[0]);
		}

		seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
			   (alt.alt_info9.sp_h << 7) | (alt.alt_info10.sp_l),
			   alt.alt_info10.dp);
	}
}

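/*
 * Reading /proc/mtketh/hw_lro_auto_tlb prints the write-interface usage,
 * dumps the valid auto-learn table entries (V1 or V2 layout depending on
 * the NETSYS RX version) and reports the per-ring aggregation settings.
 */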
int hw_lro_auto_tlb_read(struct seq_file *seq, void *v)
{
	int i;
	u32 reg_val;
	u32 reg_op1, reg_op2, reg_op3, reg_op4;
	u32 agg_cnt, agg_time, age_time;

	seq_puts(seq, "Usage of /proc/mtketh/hw_lro_auto_tlb:\n");
	seq_puts(seq, "echo [function] [setting] > /proc/mtketh/hw_lro_auto_tlb\n");
	seq_puts(seq, "Functions:\n");
	seq_puts(seq, "[0] = hwlro_agg_cnt_ctrl\n");
	seq_puts(seq, "[1] = hwlro_agg_time_ctrl\n");
	seq_puts(seq, "[2] = hwlro_age_time_ctrl\n");
	seq_puts(seq, "[3] = hwlro_threshold_ctrl\n");
	seq_puts(seq, "[4] = hwlro_ring_enable_ctrl\n");
	seq_puts(seq, "[5] = hwlro_stats_enable_ctrl\n\n");

	if (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_RX_V2)) {
		for (i = 1; i <= 8; i++)
			hw_lro_auto_tlb_dump_v2(seq, i);
	} else {
		/* Read valid entries of the auto-learn table */
		mtk_w32(g_eth, 0, MTK_FE_ALT_CF8);
		reg_val = mtk_r32(g_eth, MTK_FE_ALT_SEQ_CFC);

		seq_printf(seq,
			   "HW LRO Auto-learn Table: (MTK_FE_ALT_SEQ_CFC=0x%x)\n",
			   reg_val);

		for (i = 7; i >= 0; i--) {
			if (reg_val & (1 << i))
				hw_lro_auto_tlb_dump_v1(seq, i);
		}
	}

	/* Read the agg_time/age_time/agg_cnt of LRO rings */
	seq_puts(seq, "\nHW LRO Ring Settings\n");

	for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
		reg_op1 = mtk_r32(g_eth, MTK_LRO_CTRL_DW1_CFG(i));
		reg_op2 = mtk_r32(g_eth, MTK_LRO_CTRL_DW2_CFG(i));
		reg_op3 = mtk_r32(g_eth, MTK_LRO_CTRL_DW3_CFG(i));
		reg_op4 = mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW2);

		agg_cnt =
		    ((reg_op3 & 0x3) << 6) |
		    ((reg_op2 >> MTK_LRO_RING_AGG_CNT_L_OFFSET) & 0x3f);
		agg_time = (reg_op2 >> MTK_LRO_RING_AGG_TIME_OFFSET) & 0xffff;
		age_time =
		    ((reg_op2 & 0x3f) << 10) |
		    ((reg_op1 >> MTK_LRO_RING_AGE_TIME_L_OFFSET) & 0x3ff);
		seq_printf(seq,
			   "Ring[%d]: MAX_AGG_CNT=%d, AGG_TIME=%d, AGE_TIME=%d, Threshold=%d\n",
			   (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_RX_V2)) ?
			   i : i + 3,
			   agg_cnt, agg_time, age_time, reg_op4);
	}

	seq_puts(seq, "\n");

	return 0;
}

static int hw_lro_auto_tlb_open(struct inode *inode, struct file *file)
{
	return single_open(file, hw_lro_auto_tlb_read, NULL);
}

static const struct file_operations hw_lro_auto_tlb_fops = {
	.owner = THIS_MODULE,
	.open = hw_lro_auto_tlb_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = hw_lro_auto_tlb_write,
	.release = single_release
};

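/*
 * PROCREG_RESET_EVENT: reading the proc entry dumps the per-cause reset
 * counters collected in eth->reset_event; any write clears them.
 */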
int reset_event_read(struct seq_file *seq, void *v)
{
	struct mtk_eth *eth = g_eth;
	struct mtk_reset_event reset_event = eth->reset_event;

	seq_printf(seq, "[Event] [Count]\n");
	seq_printf(seq, " FQ Empty: %d\n",
		   reset_event.count[MTK_EVENT_FQ_EMPTY]);
	seq_printf(seq, " TSO Fail: %d\n",
		   reset_event.count[MTK_EVENT_TSO_FAIL]);
	seq_printf(seq, " TSO Illegal: %d\n",
		   reset_event.count[MTK_EVENT_TSO_ILLEGAL]);
	seq_printf(seq, " TSO Align: %d\n",
		   reset_event.count[MTK_EVENT_TSO_ALIGN]);
	seq_printf(seq, " RFIFO OV: %d\n",
		   reset_event.count[MTK_EVENT_RFIFO_OV]);
	seq_printf(seq, " RFIFO UF: %d\n",
		   reset_event.count[MTK_EVENT_RFIFO_UF]);
	seq_printf(seq, " Force: %d\n",
		   reset_event.count[MTK_EVENT_FORCE]);
	seq_printf(seq, "----------------------------\n");
	seq_printf(seq, " Warm Cnt: %d\n",
		   reset_event.count[MTK_EVENT_WARM_CNT]);
	seq_printf(seq, " Cold Cnt: %d\n",
		   reset_event.count[MTK_EVENT_COLD_CNT]);
	seq_printf(seq, " Total Cnt: %d\n",
		   reset_event.count[MTK_EVENT_TOTAL_CNT]);

	return 0;
}

static int reset_event_open(struct inode *inode, struct file *file)
{
	return single_open(file, reset_event_read, NULL);
}

ssize_t reset_event_write(struct file *file, const char __user *buffer,
			  size_t count, loff_t *data)
{
	struct mtk_eth *eth = g_eth;
	struct mtk_reset_event *reset_event = &eth->reset_event;

	memset(reset_event, 0, sizeof(struct mtk_reset_event));
	pr_info("MTK reset event counter is cleared!\n");

	return count;
}

static const struct file_operations reset_event_fops = {
	.owner = THIS_MODULE,
	.open = reset_event_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = reset_event_write,
	.release = single_release
};

struct proc_dir_entry *proc_reg_dir;
static struct proc_dir_entry *proc_esw_cnt, *proc_xfi_cnt,
			     *proc_dbg_regs, *proc_reset_event;

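/*
 * Create the debug proc entries under PROCREG_DIR. Entries tied to optional
 * features (XFI counters on NETSYS v3, the HW LRO files when eth->hwlro is
 * set) are only created when the corresponding feature is present.
 */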
int debug_proc_init(struct mtk_eth *eth)
{
	g_eth = eth;

	if (!proc_reg_dir)
		proc_reg_dir = proc_mkdir(PROCREG_DIR, NULL);

	proc_tx_ring =
	    proc_create(PROCREG_TXRING, 0, proc_reg_dir, &tx_ring_fops);
	if (!proc_tx_ring)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_TXRING);

	proc_hwtx_ring =
	    proc_create(PROCREG_HWTXRING, 0, proc_reg_dir, &hwtx_ring_fops);
	if (!proc_hwtx_ring)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_HWTXRING);

	proc_rx_ring =
	    proc_create(PROCREG_RXRING, 0, proc_reg_dir, &rx_ring_fops);
	if (!proc_rx_ring)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RXRING);

	proc_esw_cnt =
	    proc_create(PROCREG_ESW_CNT, 0, proc_reg_dir, &switch_count_fops);
	if (!proc_esw_cnt)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_ESW_CNT);

	if (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V3)) {
		proc_xfi_cnt =
		    proc_create(PROCREG_XFI_CNT, 0,
				proc_reg_dir, &xfi_count_fops);
		if (!proc_xfi_cnt)
			pr_notice("!! FAIL to create %s PROC !!\n",
				  PROCREG_XFI_CNT);
	}

	proc_dbg_regs =
	    proc_create(PROCREG_DBG_REGS, 0, proc_reg_dir, &dbg_regs_fops);
	if (!proc_dbg_regs)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_DBG_REGS);

	if (g_eth->hwlro) {
		proc_hw_lro_stats =
		    proc_create(PROCREG_HW_LRO_STATS, 0, proc_reg_dir,
				&hw_lro_stats_fops);
		if (!proc_hw_lro_stats)
			pr_info("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_STATS);

		proc_hw_lro_auto_tlb =
		    proc_create(PROCREG_HW_LRO_AUTO_TLB, 0, proc_reg_dir,
				&hw_lro_auto_tlb_fops);
		if (!proc_hw_lro_auto_tlb)
			pr_info("!! FAIL to create %s PROC !!\n",
				PROCREG_HW_LRO_AUTO_TLB);
	}

	proc_reset_event =
	    proc_create(PROCREG_RESET_EVENT, 0, proc_reg_dir, &reset_event_fops);
	if (!proc_reset_event)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RESET_EVENT);

	dbg_show_level = 1;

	return 0;
}

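/*
 * Tear down everything created by debug_proc_init(); individual entries are
 * removed before the PROCREG_DIR directory itself.
 */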
void debug_proc_exit(void)
{
	if (proc_tx_ring)
		remove_proc_entry(PROCREG_TXRING, proc_reg_dir);
	if (proc_hwtx_ring)
		remove_proc_entry(PROCREG_HWTXRING, proc_reg_dir);
	if (proc_rx_ring)
		remove_proc_entry(PROCREG_RXRING, proc_reg_dir);

	if (proc_esw_cnt)
		remove_proc_entry(PROCREG_ESW_CNT, proc_reg_dir);

	if (proc_xfi_cnt)
		remove_proc_entry(PROCREG_XFI_CNT, proc_reg_dir);

	if (proc_dbg_regs)
		remove_proc_entry(PROCREG_DBG_REGS, proc_reg_dir);

	if (g_eth->hwlro) {
		if (proc_hw_lro_stats)
			remove_proc_entry(PROCREG_HW_LRO_STATS, proc_reg_dir);

		if (proc_hw_lro_auto_tlb)
			remove_proc_entry(PROCREG_HW_LRO_AUTO_TLB, proc_reg_dir);
	}

	if (proc_reset_event)
		remove_proc_entry(PROCREG_RESET_EVENT, proc_reg_dir);

	/* remove the parent directory only after all of its entries are gone */
	if (proc_reg_dir)
		remove_proc_entry(PROCREG_DIR, NULL);
}