/*
 * Copyright (C) 2018 MediaTek Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/trace_seq.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/u64_stats_sync.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/of_mdio.h>
#include <linux/of_address.h>

#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"
#include "mtk_eth_reset.h"

u32 hw_lro_agg_num_cnt[MTK_HW_LRO_RING_NUM][MTK_HW_LRO_MAX_AGG_CNT + 1];
u32 hw_lro_agg_size_cnt[MTK_HW_LRO_RING_NUM][16];
u32 hw_lro_tot_agg_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_tot_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_agg_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_age_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_seq_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_timestamp_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_norule_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 mtk_hwlro_stats_ebl;
u32 dbg_show_level;
u32 cur_rss_num;

static struct proc_dir_entry *proc_hw_lro_stats, *proc_hw_lro_auto_tlb,
			     *proc_rss_ctrl;
typedef int (*mtk_lro_dbg_func) (int par);

struct mtk_eth_debug {
	struct dentry *root;
	void __iomem *base;
	int direct_access;
};

struct mtk_eth *g_eth;

struct mtk_eth_debug eth_debug;

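/*
 * Map the embedded switch register space described by the "switch0" DT
 * node so switch registers can be accessed directly via MMIO. When the
 * node is absent, direct_access stays 0 and register access falls back
 * to the indirect MDIO path in mt7530_mdio_w32()/mt7530_mdio_r32().
 */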
int mt798x_iomap(void)
{
	struct device_node *np = NULL;

	np = of_find_node_by_name(NULL, "switch0");
	if (np) {
		eth_debug.base = of_iomap(np, 0);
		if (!eth_debug.base) {
			pr_err("of_iomap failed\n");
			of_node_put(np);
			return -ENOMEM;
		}

		of_node_put(np);
		eth_debug.direct_access = 1;
	}

	return 0;
}

int mt798x_iounmap(void)
{
	eth_debug.direct_access = 0;
	if (eth_debug.base)
		iounmap(eth_debug.base);

	return 0;
}

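/*
 * Indirect MT7530 register access over MDIO: select the register page
 * via PHY 0x1f / reg 0x1f, then transfer the 32-bit value as two 16-bit
 * halves (low half at reg (reg >> 2) & 0xf, high half at reg 0x10).
 * When mt798x_iomap() succeeded, a plain MMIO access is used instead.
 */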
void mt7530_mdio_w32(struct mtk_eth *eth, u16 reg, u32 val)
{
	mutex_lock(&eth->mii_bus->mdio_lock);

	if (eth_debug.direct_access)
		__raw_writel(val, eth_debug.base + reg);
	else {
		_mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
		_mtk_mdio_write(eth, 0x1f, (reg >> 2) & 0xf, val & 0xffff);
		_mtk_mdio_write(eth, 0x1f, 0x10, val >> 16);
	}

	mutex_unlock(&eth->mii_bus->mdio_lock);
}

u32 mt7530_mdio_r32(struct mtk_eth *eth, u32 reg)
{
	u16 high, low;
	u32 ret;

	mutex_lock(&eth->mii_bus->mdio_lock);

	if (eth_debug.direct_access) {
		ret = __raw_readl(eth_debug.base + reg);
		mutex_unlock(&eth->mii_bus->mdio_lock);
		return ret;
	}
	_mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
	low = _mtk_mdio_read(eth, 0x1f, (reg >> 2) & 0xf);
	high = _mtk_mdio_read(eth, 0x1f, 0x10);

	mutex_unlock(&eth->mii_bus->mdio_lock);

	return (high << 16) | (low & 0xffff);
}

void mtk_switch_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	mtk_w32(eth, val, reg + 0x10000);
}
EXPORT_SYMBOL(mtk_switch_w32);

u32 mtk_switch_r32(struct mtk_eth *eth, unsigned reg)
{
	return mtk_r32(eth, reg + 0x10000);
}
EXPORT_SYMBOL(mtk_switch_r32);

137static int mtketh_debug_show(struct seq_file *m, void *private)
138{
139 struct mtk_eth *eth = m->private;
140 struct mtk_mac *mac = 0;
141 int i = 0;
142
143 for (i = 0 ; i < MTK_MAX_DEVS ; i++) {
144 if (!eth->mac[i] ||
145 of_phy_is_fixed_link(eth->mac[i]->of_node))
146 continue;
147 mac = eth->mac[i];
148#if 0 //FIXME
149 while (j < 30) {
150 d = _mtk_mdio_read(eth, mac->phy_dev->addr, j);
151
152 seq_printf(m, "phy=%d, reg=0x%08x, data=0x%08x\n",
153 mac->phy_dev->addr, j, d);
154 j++;
155 }
156#endif
157 }
158 return 0;
159}
160
161static int mtketh_debug_open(struct inode *inode, struct file *file)
162{
163 return single_open(file, mtketh_debug_show, inode->i_private);
164}
165
166static const struct file_operations mtketh_debug_fops = {
developere86c3ec2022-10-11 10:29:18 +0800167 .owner = THIS_MODULE,
developerec4ebe42022-04-12 11:17:45 +0800168 .open = mtketh_debug_open,
169 .read = seq_read,
170 .llseek = seq_lseek,
171 .release = single_release,
172};
173
174static int mtketh_mt7530sw_debug_show(struct seq_file *m, void *private)
175{
176 struct mtk_eth *eth = m->private;
177 u32 offset, data;
178 int i;
179 struct mt7530_ranges {
180 u32 start;
181 u32 end;
182 } ranges[] = {
183 {0x0, 0xac},
184 {0x1000, 0x10e0},
185 {0x1100, 0x1140},
186 {0x1200, 0x1240},
187 {0x1300, 0x1340},
188 {0x1400, 0x1440},
189 {0x1500, 0x1540},
190 {0x1600, 0x1640},
191 {0x1800, 0x1848},
192 {0x1900, 0x1948},
193 {0x1a00, 0x1a48},
194 {0x1b00, 0x1b48},
195 {0x1c00, 0x1c48},
196 {0x1d00, 0x1d48},
197 {0x1e00, 0x1e48},
198 {0x1f60, 0x1ffc},
199 {0x2000, 0x212c},
200 {0x2200, 0x222c},
201 {0x2300, 0x232c},
202 {0x2400, 0x242c},
203 {0x2500, 0x252c},
204 {0x2600, 0x262c},
205 {0x3000, 0x3014},
206 {0x30c0, 0x30f8},
207 {0x3100, 0x3114},
208 {0x3200, 0x3214},
209 {0x3300, 0x3314},
210 {0x3400, 0x3414},
211 {0x3500, 0x3514},
212 {0x3600, 0x3614},
213 {0x4000, 0x40d4},
214 {0x4100, 0x41d4},
215 {0x4200, 0x42d4},
216 {0x4300, 0x43d4},
217 {0x4400, 0x44d4},
218 {0x4500, 0x45d4},
219 {0x4600, 0x46d4},
220 {0x4f00, 0x461c},
221 {0x7000, 0x7038},
222 {0x7120, 0x7124},
223 {0x7800, 0x7804},
224 {0x7810, 0x7810},
225 {0x7830, 0x7830},
226 {0x7a00, 0x7a7c},
227 {0x7b00, 0x7b04},
228 {0x7e00, 0x7e04},
229 {0x7ffc, 0x7ffc},
230 };
231
232 if (!mt7530_exist(eth))
233 return -EOPNOTSUPP;
234
235 if ((!eth->mac[0] || !of_phy_is_fixed_link(eth->mac[0]->of_node)) &&
236 (!eth->mac[1] || !of_phy_is_fixed_link(eth->mac[1]->of_node))) {
237 seq_puts(m, "no switch found\n");
238 return 0;
239 }
240
241 for (i = 0 ; i < ARRAY_SIZE(ranges) ; i++) {
242 for (offset = ranges[i].start;
243 offset <= ranges[i].end; offset += 4) {
244 data = mt7530_mdio_r32(eth, offset);
245 seq_printf(m, "mt7530 switch reg=0x%08x, data=0x%08x\n",
246 offset, data);
247 }
248 }
249
250 return 0;
251}
252
253static int mtketh_debug_mt7530sw_open(struct inode *inode, struct file *file)
254{
255 return single_open(file, mtketh_mt7530sw_debug_show, inode->i_private);
256}
257
258static const struct file_operations mtketh_debug_mt7530sw_fops = {
developere86c3ec2022-10-11 10:29:18 +0800259 .owner = THIS_MODULE,
developerec4ebe42022-04-12 11:17:45 +0800260 .open = mtketh_debug_mt7530sw_open,
261 .read = seq_read,
262 .llseek = seq_lseek,
263 .release = single_release,
264};
265
266static ssize_t mtketh_mt7530sw_debugfs_write(struct file *file,
267 const char __user *ptr,
268 size_t len, loff_t *off)
269{
270 struct mtk_eth *eth = file->private_data;
271 char buf[32], *token, *p = buf;
developer3d5faf22022-11-29 18:07:22 +0800272 unsigned long reg, value, phy;
developerec4ebe42022-04-12 11:17:45 +0800273 int ret;
274
275 if (!mt7530_exist(eth))
276 return -EOPNOTSUPP;
277
278 if (*off != 0)
279 return 0;
280
281 if (len > sizeof(buf) - 1)
282 len = sizeof(buf) - 1;
283
284 ret = strncpy_from_user(buf, ptr, len);
285 if (ret < 0)
286 return ret;
287 buf[len] = '\0';
288
289 token = strsep(&p, " ");
290 if (!token)
291 return -EINVAL;
292 if (kstrtoul(token, 16, (unsigned long *)&phy))
293 return -EINVAL;
294
295 token = strsep(&p, " ");
296 if (!token)
297 return -EINVAL;
298 if (kstrtoul(token, 16, (unsigned long *)&reg))
299 return -EINVAL;
300
301 token = strsep(&p, " ");
302 if (!token)
303 return -EINVAL;
304 if (kstrtoul(token, 16, (unsigned long *)&value))
305 return -EINVAL;
306
developerbd1b38a2023-06-19 11:13:22 +0800307 pr_info("%s:phy=%d, reg=0x%lx, val=0x%lx\n", __func__,
developerec4ebe42022-04-12 11:17:45 +0800308 0x1f, reg, value);
309 mt7530_mdio_w32(eth, reg, value);
developerbd1b38a2023-06-19 11:13:22 +0800310 pr_info("%s:phy=%d, reg=0x%lx, val=0x%x confirm..\n", __func__,
developerec4ebe42022-04-12 11:17:45 +0800311 0x1f, reg, mt7530_mdio_r32(eth, reg));
312
313 return len;
314}
315
316static ssize_t mtketh_debugfs_write(struct file *file, const char __user *ptr,
317 size_t len, loff_t *off)
318{
319 struct mtk_eth *eth = file->private_data;
320 char buf[32], *token, *p = buf;
developer3d5faf22022-11-29 18:07:22 +0800321 unsigned long reg, value, phy;
developerec4ebe42022-04-12 11:17:45 +0800322 int ret;
323
324 if (*off != 0)
325 return 0;
326
327 if (len > sizeof(buf) - 1)
328 len = sizeof(buf) - 1;
329
330 ret = strncpy_from_user(buf, ptr, len);
331 if (ret < 0)
332 return ret;
333 buf[len] = '\0';
334
335 token = strsep(&p, " ");
336 if (!token)
337 return -EINVAL;
338 if (kstrtoul(token, 16, (unsigned long *)&phy))
339 return -EINVAL;
340
341 token = strsep(&p, " ");
342
343 if (!token)
344 return -EINVAL;
345 if (kstrtoul(token, 16, (unsigned long *)&reg))
346 return -EINVAL;
347
348 token = strsep(&p, " ");
349
350 if (!token)
351 return -EINVAL;
352 if (kstrtoul(token, 16, (unsigned long *)&value))
353 return -EINVAL;
354
developerbd1b38a2023-06-19 11:13:22 +0800355 pr_info("%s:phy=%ld, reg=0x%lx, val=0x%lx\n", __func__,
developerec4ebe42022-04-12 11:17:45 +0800356 phy, reg, value);
357
358 _mtk_mdio_write(eth, phy, reg, value);
359
developerbd1b38a2023-06-19 11:13:22 +0800360 pr_info("%s:phy=%ld, reg=0x%lx, val=0x%x confirm..\n", __func__,
developerec4ebe42022-04-12 11:17:45 +0800361 phy, reg, _mtk_mdio_read(eth, phy, reg));
362
363 return len;
364}
365
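/*
 * debugfs "reset" write handler. The accepted levels mirror the usage
 * text printed in the default branch below; for example (assuming
 * debugfs is mounted at the usual location):
 *
 *   echo 2 > /sys/kernel/debug/mtketh/reset   # enable reset
 *   echo 3 > /sys/kernel/debug/mtketh/reset   # trigger an FE reset
 */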
static ssize_t mtketh_debugfs_reset(struct file *file, const char __user *ptr,
				    size_t len, loff_t *off)
{
	struct mtk_eth *eth = file->private_data;
	char buf[8] = "";
	int count = len;
	unsigned long dbg_level = 0;

	len = min((size_t)count, sizeof(buf) - 1);
	if (copy_from_user(buf, ptr, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoul(buf, 0, &dbg_level))
		return -EINVAL;

	switch (dbg_level) {
	case 0:
		atomic_set(&force, 0);
		break;
	case 1:
		if (atomic_read(&force) == 1)
			schedule_work(&eth->pending_work);
		else
			pr_info(" stat:disable\n");
		break;
	case 2:
		atomic_set(&force, 1);
		break;
	case 3:
		if (atomic_read(&force) == 1) {
			mtk_reset_flag = MTK_FE_STOP_TRAFFIC;
			schedule_work(&eth->pending_work);
		} else
			pr_info(" device resetting !!!\n");
		break;
	case 4:
		dbg_show_level = 1;
		break;
	case 5:
		dbg_show_level = 0;
		break;
	default:
		pr_info("Usage: echo [level] > /sys/kernel/debug/mtketh/reset\n");
		pr_info("Commands:   [level]\n");
		pr_info("            0       disable reset\n");
		pr_info("            1       FE and WDMA reset\n");
		pr_info("            2       enable reset\n");
		pr_info("            3       FE reset\n");
		pr_info("            4       enable dump reset info\n");
		pr_info("            5       disable dump reset info\n");
		break;
	}
	return count;
}
422
developer722ab5f2024-02-22 11:01:46 +0800423static int pppq_toggle_read(struct seq_file *m, void *private)
424{
425 struct mtk_eth *eth = m->private;
426
427 pr_info("value=%d, pppq is %s now!\n",
428 eth->pppq_toggle, (eth->pppq_toggle) ? "enabled" : "disabled");
429
430 return 0;
431}
432
433static int pppq_toggle_open(struct inode *inode, struct file *file)
434{
435 return single_open(file, pppq_toggle_read, inode->i_private);
436}
437
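/*
 * Accepts "1"/"0" to enable or disable per-port-per-queue (pppq) mode.
 * Example (run as root; path assumes the default debugfs mount and the
 * "pppq_toggle" entry created in mtketh_debugfs_init()):
 *
 *   echo 1 > /sys/kernel/debug/mtketh/pppq_toggle
 */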
438static ssize_t pppq_toggle_write(struct file *file, const char __user *ptr,
439 size_t len, loff_t *off)
440{
441 struct seq_file *m = file->private_data;
442 struct mtk_eth *eth = m->private;
443 char buf[8] = {0};
444
445 if ((len > 8) || copy_from_user(buf, ptr, len))
446 return -EFAULT;
447
448 if (buf[0] == '1' && !eth->pppq_toggle) {
449 eth->pppq_toggle = 1;
450 pr_info("pppq is enabled!\n");
451 } else if (buf[0] == '0' && eth->pppq_toggle) {
452 eth->pppq_toggle = 0;
453 pr_info("pppq is disabled!\n");
454 }
455
456 return len;
457}
458
developerec4ebe42022-04-12 11:17:45 +0800459static const struct file_operations fops_reg_w = {
460 .owner = THIS_MODULE,
461 .open = simple_open,
462 .write = mtketh_debugfs_write,
463 .llseek = noop_llseek,
464};
465
466static const struct file_operations fops_eth_reset = {
467 .owner = THIS_MODULE,
468 .open = simple_open,
469 .write = mtketh_debugfs_reset,
470 .llseek = noop_llseek,
471};
472
developer722ab5f2024-02-22 11:01:46 +0800473static const struct file_operations fops_pppq_toggle = {
474 .owner = THIS_MODULE,
475 .open = pppq_toggle_open,
476 .read = seq_read,
477 .llseek = seq_lseek,
478 .write = pppq_toggle_write,
479 .release = single_release,
480};
481
developerec4ebe42022-04-12 11:17:45 +0800482static const struct file_operations fops_mt7530sw_reg_w = {
483 .owner = THIS_MODULE,
484 .open = simple_open,
485 .write = mtketh_mt7530sw_debugfs_write,
486 .llseek = noop_llseek,
487};
488
489void mtketh_debugfs_exit(struct mtk_eth *eth)
490{
491 debugfs_remove_recursive(eth_debug.root);
492}
493
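/*
 * Create the "mtketh" debugfs directory and its entries: pppq_toggle,
 * phy_regs, phy_reg_w and reset, plus mt7530sw_regs/mt7530sw_reg_w when
 * an MT7530 switch is present.
 */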
494int mtketh_debugfs_init(struct mtk_eth *eth)
495{
496 int ret = 0;
497
498 eth_debug.root = debugfs_create_dir("mtketh", NULL);
499 if (!eth_debug.root) {
500 dev_notice(eth->dev, "%s:err at %d\n", __func__, __LINE__);
501 ret = -ENOMEM;
502 }
503
developer722ab5f2024-02-22 11:01:46 +0800504 debugfs_create_file("pppq_toggle", 0444,
505 eth_debug.root, eth, &fops_pppq_toggle);
developerec4ebe42022-04-12 11:17:45 +0800506 debugfs_create_file("phy_regs", S_IRUGO,
507 eth_debug.root, eth, &mtketh_debug_fops);
508 debugfs_create_file("phy_reg_w", S_IFREG | S_IWUSR,
509 eth_debug.root, eth, &fops_reg_w);
510 debugfs_create_file("reset", S_IFREG | S_IWUSR,
511 eth_debug.root, eth, &fops_eth_reset);
512 if (mt7530_exist(eth)) {
513 debugfs_create_file("mt7530sw_regs", S_IRUGO,
514 eth_debug.root, eth,
515 &mtketh_debug_mt7530sw_fops);
516 debugfs_create_file("mt7530sw_reg_w", S_IFREG | S_IWUSR,
517 eth_debug.root, eth,
518 &fops_mt7530sw_reg_w);
519 }
520 return ret;
521}
522
523void mii_mgr_read_combine(struct mtk_eth *eth, u32 phy_addr, u32 phy_register,
524 u32 *read_data)
525{
526 if (mt7530_exist(eth) && phy_addr == 31)
527 *read_data = mt7530_mdio_r32(eth, phy_register);
528
529 else
developere86c3ec2022-10-11 10:29:18 +0800530 *read_data = mdiobus_read(eth->mii_bus, phy_addr, phy_register);
developerec4ebe42022-04-12 11:17:45 +0800531}
532
533void mii_mgr_write_combine(struct mtk_eth *eth, u16 phy_addr, u16 phy_register,
534 u32 write_data)
535{
536 if (mt7530_exist(eth) && phy_addr == 31)
537 mt7530_mdio_w32(eth, phy_register, write_data);
538
539 else
developere86c3ec2022-10-11 10:29:18 +0800540 mdiobus_write(eth->mii_bus, phy_addr, phy_register, write_data);
developerec4ebe42022-04-12 11:17:45 +0800541}
542
543static void mii_mgr_read_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 *data)
544{
developere86c3ec2022-10-11 10:29:18 +0800545 *data = mdiobus_read(eth->mii_bus, port, mdiobus_c45_addr(devad, reg));
developerec4ebe42022-04-12 11:17:45 +0800546}
547
548static void mii_mgr_write_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 data)
549{
developere86c3ec2022-10-11 10:29:18 +0800550 mdiobus_write(eth->mii_bus, port, mdiobus_c45_addr(devad, reg), data);
developerec4ebe42022-04-12 11:17:45 +0800551}
552
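/*
 * Private ioctl entry point: MII clause-22/clause-45 register reads and
 * writes (MTKETH_MII_*) and embedded switch register access
 * (MTKETH_ESW_REG_*), exchanging mtk_mii_ioctl_data/mtk_esw_reg
 * structures with user space through ifr->ifr_data.
 */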
553int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
554{
555 struct mtk_mac *mac = netdev_priv(dev);
556 struct mtk_eth *eth = mac->hw;
557 struct mtk_mii_ioctl_data mii;
558 struct mtk_esw_reg reg;
559 u16 val;
560
561 switch (cmd) {
562 case MTKETH_MII_READ:
563 if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
564 goto err_copy;
565 mii_mgr_read_combine(eth, mii.phy_id, mii.reg_num,
566 &mii.val_out);
567 if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
568 goto err_copy;
569
570 return 0;
571 case MTKETH_MII_WRITE:
572 if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
573 goto err_copy;
574 mii_mgr_write_combine(eth, mii.phy_id, mii.reg_num,
575 mii.val_in);
576 return 0;
577 case MTKETH_MII_READ_CL45:
578 if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
579 goto err_copy;
580 mii_mgr_read_cl45(eth,
581 mdio_phy_id_prtad(mii.phy_id),
582 mdio_phy_id_devad(mii.phy_id),
583 mii.reg_num,
584 &val);
585 mii.val_out = val;
586 if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
587 goto err_copy;
588
589 return 0;
590 case MTKETH_MII_WRITE_CL45:
591 if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
592 goto err_copy;
593 val = mii.val_in;
594 mii_mgr_write_cl45(eth,
595 mdio_phy_id_prtad(mii.phy_id),
596 mdio_phy_id_devad(mii.phy_id),
597 mii.reg_num,
598 val);
599 return 0;
600 case MTKETH_ESW_REG_READ:
601 if (!mt7530_exist(eth))
602 return -EOPNOTSUPP;
603 if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg)))
604 goto err_copy;
605 if (reg.off > REG_ESW_MAX)
606 return -EINVAL;
607 reg.val = mtk_switch_r32(eth, reg.off);
608
609 if (copy_to_user(ifr->ifr_data, &reg, sizeof(reg)))
610 goto err_copy;
611
612 return 0;
613 case MTKETH_ESW_REG_WRITE:
614 if (!mt7530_exist(eth))
615 return -EOPNOTSUPP;
616 if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg)))
617 goto err_copy;
618 if (reg.off > REG_ESW_MAX)
619 return -EINVAL;
620 mtk_switch_w32(eth, reg.val, reg.off);
621
622 return 0;
623 default:
624 break;
625 }
626
627 return -EOPNOTSUPP;
628err_copy:
629 return -EFAULT;
630}
631
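/*
 * Pretty-print the per-GDM MIB counters. The NETSYS v3 layout and the
 * v2 layout below differ in which counters are exposed and in their
 * offsets within the MIB block.
 */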
developere86c3ec2022-10-11 10:29:18 +0800632static void gdm_reg_dump_v3(struct mtk_eth *eth, u32 gdm_id, u32 mib_base)
633{
634 pr_info("| GDMA%d_RX_GBCNT : %010u (Rx Good Bytes) |\n",
635 gdm_id, mtk_r32(eth, mib_base));
636 pr_info("| GDMA%d_RX_GPCNT : %010u (Rx Good Pkts) |\n",
637 gdm_id, mtk_r32(eth, mib_base + 0x08));
638 pr_info("| GDMA%d_RX_OERCNT : %010u (overflow error) |\n",
639 gdm_id, mtk_r32(eth, mib_base + 0x10));
640 pr_info("| GDMA%d_RX_FERCNT : %010u (FCS error) |\n",
641 gdm_id, mtk_r32(eth, mib_base + 0x14));
642 pr_info("| GDMA%d_RX_SERCNT : %010u (too short) |\n",
643 gdm_id, mtk_r32(eth, mib_base + 0x18));
644 pr_info("| GDMA%d_RX_LERCNT : %010u (too long) |\n",
645 gdm_id, mtk_r32(eth, mib_base + 0x1C));
646 pr_info("| GDMA%d_RX_CERCNT : %010u (checksum error) |\n",
647 gdm_id, mtk_r32(eth, mib_base + 0x20));
648 pr_info("| GDMA%d_RX_FCCNT : %010u (flow control) |\n",
649 gdm_id, mtk_r32(eth, mib_base + 0x24));
650 pr_info("| GDMA%d_RX_VDPCNT : %010u (VID drop) |\n",
651 gdm_id, mtk_r32(eth, mib_base + 0x28));
652 pr_info("| GDMA%d_RX_PFCCNT : %010u (priority flow control)\n",
653 gdm_id, mtk_r32(eth, mib_base + 0x2C));
654 pr_info("| GDMA%d_TX_GBCNT : %010u (Tx Good Bytes) |\n",
655 gdm_id, mtk_r32(eth, mib_base + 0x40));
656 pr_info("| GDMA%d_TX_GPCNT : %010u (Tx Good Pkts) |\n",
657 gdm_id, mtk_r32(eth, mib_base + 0x48));
658 pr_info("| GDMA%d_TX_SKIPCNT: %010u (abort count) |\n",
659 gdm_id, mtk_r32(eth, mib_base + 0x50));
660 pr_info("| GDMA%d_TX_COLCNT : %010u (collision count)|\n",
661 gdm_id, mtk_r32(eth, mib_base + 0x54));
662 pr_info("| GDMA%d_TX_OERCNT : %010u (overflow error) |\n",
663 gdm_id, mtk_r32(eth, mib_base + 0x58));
664 pr_info("| GDMA%d_TX_FCCNT : %010u (flow control) |\n",
665 gdm_id, mtk_r32(eth, mib_base + 0x60));
666 pr_info("| GDMA%d_TX_PFCCNT : %010u (priority flow control)\n",
667 gdm_id, mtk_r32(eth, mib_base + 0x64));
668 pr_info("| |\n");
669}
670
671static void gdm_reg_dump_v2(struct mtk_eth *eth, u32 gdm_id, u32 mib_base)
672{
673 pr_info("| GDMA%d_RX_GBCNT : %010u (Rx Good Bytes) |\n",
674 gdm_id, mtk_r32(eth, mib_base));
675 pr_info("| GDMA%d_RX_GPCNT : %010u (Rx Good Pkts) |\n",
676 gdm_id, mtk_r32(eth, mib_base + 0x08));
677 pr_info("| GDMA%d_RX_OERCNT : %010u (overflow error) |\n",
678 gdm_id, mtk_r32(eth, mib_base + 0x10));
679 pr_info("| GDMA%d_RX_FERCNT : %010u (FCS error) |\n",
680 gdm_id, mtk_r32(eth, mib_base + 0x14));
681 pr_info("| GDMA%d_RX_SERCNT : %010u (too short) |\n",
682 gdm_id, mtk_r32(eth, mib_base + 0x18));
683 pr_info("| GDMA%d_RX_LERCNT : %010u (too long) |\n",
684 gdm_id, mtk_r32(eth, mib_base + 0x1C));
685 pr_info("| GDMA%d_RX_CERCNT : %010u (checksum error) |\n",
686 gdm_id, mtk_r32(eth, mib_base + 0x20));
687 pr_info("| GDMA%d_RX_FCCNT : %010u (flow control) |\n",
688 gdm_id, mtk_r32(eth, mib_base + 0x24));
689 pr_info("| GDMA%d_TX_SKIPCNT: %010u (abort count) |\n",
690 gdm_id, mtk_r32(eth, mib_base + 0x28));
691 pr_info("| GDMA%d_TX_COLCNT : %010u (collision count) |\n",
692 gdm_id, mtk_r32(eth, mib_base + 0x2C));
693 pr_info("| GDMA%d_TX_GBCNT : %010u (Tx Good Bytes) |\n",
694 gdm_id, mtk_r32(eth, mib_base + 0x30));
695 pr_info("| GDMA%d_TX_GPCNT : %010u (Tx Good Pkts) |\n",
696 gdm_id, mtk_r32(eth, mib_base + 0x38));
697 pr_info("| |\n");
698}
699
700static void gdm_cnt_read(struct mtk_eth *eth)
701{
702 u32 i, mib_base;
703
704 pr_info("\n <<CPU>>\n");
705 pr_info(" |\n");
706 pr_info("+-----------------------------------------------+\n");
707 pr_info("| <<PSE>> |\n");
708 pr_info("+-----------------------------------------------+\n");
709 pr_info(" |\n");
710 pr_info("+-----------------------------------------------+\n");
711 pr_info("| <<GDMA>> |\n");
712
713 for (i = 0; i < MTK_MAC_COUNT; i++) {
714 mib_base = MTK_GDM1_TX_GBCNT + MTK_STAT_OFFSET * i;
715
716 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
717 gdm_reg_dump_v3(eth, i + 1, mib_base);
718 else
719 gdm_reg_dump_v2(eth, i + 1, mib_base);
720 }
721
722 pr_info("+-----------------------------------------------+\n");
723}
724
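/*
 * Print one switch MIB counter (register offset "base") for ports 0-6.
 * On NETSYS v3 the Tx/Rx pause counter bases (0x402C/0x408C) are
 * swapped for port 6, hence the base switch inside the loop.
 */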
developer02f1f2e2022-10-19 12:38:30 +0800725void dump_each_port(struct seq_file *seq, struct mtk_eth *eth, u32 base)
726{
727 u32 pkt_cnt = 0;
728 int i = 0;
729
730 for (i = 0; i < 7; i++) {
731 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
732 if ((base == 0x402C) && (i == 6))
733 base = 0x408C;
734 else if ((base == 0x408C) && (i == 6))
735 base = 0x402C;
736 else
737 ;
738 }
739 pkt_cnt = mt7530_mdio_r32(eth, (base) + (i * 0x100));
740 seq_printf(seq, "%8u ", pkt_cnt);
741 }
742 seq_puts(seq, "\n");
743}
744
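/*
 * /proc handler: dump the GDM counters and, when an MT7530 switch is
 * present, the per-port switch MIB counters via dump_each_port(), using
 * the direct MMIO mapping set up by mt798x_iomap() for the duration of
 * the dump.
 */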
developerec4ebe42022-04-12 11:17:45 +0800745int esw_cnt_read(struct seq_file *seq, void *v)
746{
developerec4ebe42022-04-12 11:17:45 +0800747 struct mtk_eth *eth = g_eth;
developerec4ebe42022-04-12 11:17:45 +0800748
developere86c3ec2022-10-11 10:29:18 +0800749 gdm_cnt_read(eth);
developerec4ebe42022-04-12 11:17:45 +0800750
751 if (!mt7530_exist(eth))
752 return 0;
753
developere86c3ec2022-10-11 10:29:18 +0800754 mt798x_iomap();
755
developerec4ebe42022-04-12 11:17:45 +0800756 seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n",
757 "Port0", "Port1", "Port2", "Port3", "Port4", "Port5",
758 "Port6");
759 seq_puts(seq, "Tx Drop Packet :");
developer02f1f2e2022-10-19 12:38:30 +0800760 dump_each_port(seq, eth, 0x4000);
developerec4ebe42022-04-12 11:17:45 +0800761 seq_puts(seq, "Tx CRC Error :");
developer02f1f2e2022-10-19 12:38:30 +0800762 dump_each_port(seq, eth, 0x4004);
developerec4ebe42022-04-12 11:17:45 +0800763 seq_puts(seq, "Tx Unicast Packet :");
developer02f1f2e2022-10-19 12:38:30 +0800764 dump_each_port(seq, eth, 0x4008);
developerec4ebe42022-04-12 11:17:45 +0800765 seq_puts(seq, "Tx Multicast Packet :");
developer02f1f2e2022-10-19 12:38:30 +0800766 dump_each_port(seq, eth, 0x400C);
developerec4ebe42022-04-12 11:17:45 +0800767 seq_puts(seq, "Tx Broadcast Packet :");
developer02f1f2e2022-10-19 12:38:30 +0800768 dump_each_port(seq, eth, 0x4010);
developerec4ebe42022-04-12 11:17:45 +0800769 seq_puts(seq, "Tx Collision Event :");
developer02f1f2e2022-10-19 12:38:30 +0800770 dump_each_port(seq, eth, 0x4014);
developerec4ebe42022-04-12 11:17:45 +0800771 seq_puts(seq, "Tx Pause Packet :");
developer02f1f2e2022-10-19 12:38:30 +0800772 dump_each_port(seq, eth, 0x402C);
developerec4ebe42022-04-12 11:17:45 +0800773 seq_puts(seq, "Rx Drop Packet :");
developer02f1f2e2022-10-19 12:38:30 +0800774 dump_each_port(seq, eth, 0x4060);
developerec4ebe42022-04-12 11:17:45 +0800775 seq_puts(seq, "Rx Filtering Packet :");
developer02f1f2e2022-10-19 12:38:30 +0800776 dump_each_port(seq, eth, 0x4064);
developerec4ebe42022-04-12 11:17:45 +0800777 seq_puts(seq, "Rx Unicast Packet :");
developer02f1f2e2022-10-19 12:38:30 +0800778 dump_each_port(seq, eth, 0x4068);
developerec4ebe42022-04-12 11:17:45 +0800779 seq_puts(seq, "Rx Multicast Packet :");
developer02f1f2e2022-10-19 12:38:30 +0800780 dump_each_port(seq, eth, 0x406C);
developerec4ebe42022-04-12 11:17:45 +0800781 seq_puts(seq, "Rx Broadcast Packet :");
developer02f1f2e2022-10-19 12:38:30 +0800782 dump_each_port(seq, eth, 0x4070);
developerec4ebe42022-04-12 11:17:45 +0800783 seq_puts(seq, "Rx Alignment Error :");
developer02f1f2e2022-10-19 12:38:30 +0800784 dump_each_port(seq, eth, 0x4074);
developerec4ebe42022-04-12 11:17:45 +0800785 seq_puts(seq, "Rx CRC Error :");
developer02f1f2e2022-10-19 12:38:30 +0800786 dump_each_port(seq, eth, 0x4078);
developerec4ebe42022-04-12 11:17:45 +0800787 seq_puts(seq, "Rx Undersize Error :");
developer02f1f2e2022-10-19 12:38:30 +0800788 dump_each_port(seq, eth, 0x407C);
developerec4ebe42022-04-12 11:17:45 +0800789 seq_puts(seq, "Rx Fragment Error :");
developer02f1f2e2022-10-19 12:38:30 +0800790 dump_each_port(seq, eth, 0x4080);
developerec4ebe42022-04-12 11:17:45 +0800791 seq_puts(seq, "Rx Oversize Error :");
developer02f1f2e2022-10-19 12:38:30 +0800792 dump_each_port(seq, eth, 0x4084);
developerec4ebe42022-04-12 11:17:45 +0800793 seq_puts(seq, "Rx Jabber Error :");
developer02f1f2e2022-10-19 12:38:30 +0800794 dump_each_port(seq, eth, 0x4088);
developerec4ebe42022-04-12 11:17:45 +0800795 seq_puts(seq, "Rx Pause Packet :");
developer02f1f2e2022-10-19 12:38:30 +0800796 dump_each_port(seq, eth, 0x408C);
developerec4ebe42022-04-12 11:17:45 +0800797 mt7530_mdio_w32(eth, 0x4fe0, 0xf0);
798 mt7530_mdio_w32(eth, 0x4fe0, 0x800000f0);
799
800 seq_puts(seq, "\n");
801
developere86c3ec2022-10-11 10:29:18 +0800802 mt798x_iounmap();
803
developerec4ebe42022-04-12 11:17:45 +0800804 return 0;
805}
806
807static int switch_count_open(struct inode *inode, struct file *file)
808{
809 return single_open(file, esw_cnt_read, 0);
810}
811
812static const struct file_operations switch_count_fops = {
813 .owner = THIS_MODULE,
814 .open = switch_count_open,
815 .read = seq_read,
816 .llseek = seq_lseek,
817 .release = single_release
818};
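/*
 * Dump the XFI MAC MIB counters for one GMAC through the
 * PRINT_FORMATTED_XFI_MIB helpers; xfi_cnt_read() below walks
 * MTK_GMAC2_ID..MTK_GMAC_ID_MAX and sets bit 0 of MTK_XFI_CNT_CTRL
 * after each dump.
 */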
819
developerc3d2b632023-01-13 11:32:11 +0800820void xfi_mib_dump(struct seq_file *seq, u32 gdm_id)
821{
822 struct mtk_eth *eth = g_eth;
823
824 PRINT_FORMATTED_XFI_MIB(seq, TX_PKT_CNT, GENMASK(31, 0));
825 PRINT_FORMATTED_XFI_MIB(seq, TX_ETH_CNT, GENMASK(31, 0));
826 PRINT_FORMATTED_XFI_MIB(seq, TX_PAUSE_CNT, GENMASK(15, 0));
827 PRINT_FORMATTED_XFI_MIB(seq, TX_BYTE_CNT, GENMASK(31, 0));
828 PRINT_FORMATTED_XFI_MIB64(seq, TX_UC_PKT_CNT);
829 PRINT_FORMATTED_XFI_MIB64(seq, TX_MC_PKT_CNT);
830 PRINT_FORMATTED_XFI_MIB64(seq, TX_BC_PKT_CNT);
831
832 PRINT_FORMATTED_XFI_MIB(seq, RX_PKT_CNT, GENMASK(31, 0));
833 PRINT_FORMATTED_XFI_MIB(seq, RX_ETH_CNT, GENMASK(31, 0));
834 PRINT_FORMATTED_XFI_MIB(seq, RX_PAUSE_CNT, GENMASK(15, 0));
835 PRINT_FORMATTED_XFI_MIB(seq, RX_LEN_ERR_CNT, GENMASK(15, 0));
836 PRINT_FORMATTED_XFI_MIB(seq, RX_CRC_ERR_CNT, GENMASK(15, 0));
837 PRINT_FORMATTED_XFI_MIB64(seq, RX_UC_PKT_CNT);
838 PRINT_FORMATTED_XFI_MIB64(seq, RX_MC_PKT_CNT);
839 PRINT_FORMATTED_XFI_MIB64(seq, RX_BC_PKT_CNT);
840 PRINT_FORMATTED_XFI_MIB(seq, RX_UC_DROP_CNT, GENMASK(31, 0));
841 PRINT_FORMATTED_XFI_MIB(seq, RX_BC_DROP_CNT, GENMASK(31, 0));
842 PRINT_FORMATTED_XFI_MIB(seq, RX_MC_DROP_CNT, GENMASK(31, 0));
843 PRINT_FORMATTED_XFI_MIB(seq, RX_ALL_DROP_CNT, GENMASK(31, 0));
844}
845
846int xfi_cnt_read(struct seq_file *seq, void *v)
847{
848 struct mtk_eth *eth = g_eth;
849 int i;
850
851 seq_puts(seq, "+------------------------------------+\n");
852 seq_puts(seq, "| <<XFI MAC>> |\n");
853
854 for (i = MTK_GMAC2_ID; i < MTK_GMAC_ID_MAX; i++) {
855 xfi_mib_dump(seq, i);
856 mtk_m32(eth, 0x1, 0x1, MTK_XFI_MIB_BASE(i) + MTK_XFI_CNT_CTRL);
857 seq_puts(seq, "| |\n");
858 }
859
860 seq_puts(seq, "+------------------------------------+\n");
861
862 return 0;
863}
864
865static int xfi_count_open(struct inode *inode, struct file *file)
866{
867 return single_open(file, xfi_cnt_read, 0);
868}
869
870static const struct file_operations xfi_count_fops = {
871 .owner = THIS_MODULE,
872 .open = xfi_count_open,
873 .read = seq_read,
874 .llseek = seq_lseek,
875 .release = single_release
876};
877
developer3d2dd692022-04-19 12:53:29 +0800878static struct proc_dir_entry *proc_tx_ring, *proc_hwtx_ring, *proc_rx_ring;
developerec4ebe42022-04-12 11:17:45 +0800879
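/*
 * /proc handler: dump every QDMA TX descriptor in the ring (txd1-txd4,
 * plus txd5-txd8 on NETSYS v2/v3) together with the ring's free count
 * and the CPU next/last free indexes.
 */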
880int tx_ring_read(struct seq_file *seq, void *v)
881{
developer29f66b32022-07-12 15:23:20 +0800882 struct mtk_eth *eth = g_eth;
developerec4ebe42022-04-12 11:17:45 +0800883 struct mtk_tx_ring *ring = &g_eth->tx_ring;
developer29f66b32022-07-12 15:23:20 +0800884 struct mtk_tx_dma_v2 *tx_ring;
developerec4ebe42022-04-12 11:17:45 +0800885 int i = 0;
886
developerec4ebe42022-04-12 11:17:45 +0800887 seq_printf(seq, "free count = %d\n", (int)atomic_read(&ring->free_count));
888 seq_printf(seq, "cpu next free: %d\n", (int)(ring->next_free - ring->dma));
889 seq_printf(seq, "cpu last free: %d\n", (int)(ring->last_free - ring->dma));
developer64376db2024-04-08 14:04:38 +0800890 for (i = 0; i < eth->soc->txrx.tx_dma_size; i++) {
developer3d5faf22022-11-29 18:07:22 +0800891 dma_addr_t tmp = ring->phys +
892 i * (dma_addr_t)eth->soc->txrx.txd_size;
developer29f66b32022-07-12 15:23:20 +0800893
894 tx_ring = ring->dma + i * eth->soc->txrx.txd_size;
developerec4ebe42022-04-12 11:17:45 +0800895
896 seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &tmp,
developer29f66b32022-07-12 15:23:20 +0800897 tx_ring->txd1, tx_ring->txd2,
898 tx_ring->txd3, tx_ring->txd4);
899
developere86c3ec2022-10-11 10:29:18 +0800900 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
901 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer29f66b32022-07-12 15:23:20 +0800902 seq_printf(seq, " %08x %08x %08x %08x",
903 tx_ring->txd5, tx_ring->txd6,
904 tx_ring->txd7, tx_ring->txd8);
905 }
906
developerec4ebe42022-04-12 11:17:45 +0800907 seq_printf(seq, "\n");
908 }
909
developerec4ebe42022-04-12 11:17:45 +0800910 return 0;
911}
912
913static int tx_ring_open(struct inode *inode, struct file *file)
914{
915 return single_open(file, tx_ring_read, NULL);
916}
917
918static const struct file_operations tx_ring_fops = {
919 .owner = THIS_MODULE,
920 .open = tx_ring_open,
921 .read = seq_read,
922 .llseek = seq_lseek,
923 .release = single_release
924};
925
developer3d2dd692022-04-19 12:53:29 +0800926int hwtx_ring_read(struct seq_file *seq, void *v)
927{
928 struct mtk_eth *eth = g_eth;
developer29f66b32022-07-12 15:23:20 +0800929 struct mtk_tx_dma_v2 *hwtx_ring;
developer3d2dd692022-04-19 12:53:29 +0800930 int i = 0;
931
developer64376db2024-04-08 14:04:38 +0800932 for (i = 0; i < eth->soc->txrx.fq_dma_size; i++) {
developer3d5faf22022-11-29 18:07:22 +0800933 dma_addr_t addr = eth->phy_scratch_ring +
934 i * (dma_addr_t)eth->soc->txrx.txd_size;
developer29f66b32022-07-12 15:23:20 +0800935
936 hwtx_ring = eth->scratch_ring + i * eth->soc->txrx.txd_size;
developer3d2dd692022-04-19 12:53:29 +0800937
938 seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &addr,
developer29f66b32022-07-12 15:23:20 +0800939 hwtx_ring->txd1, hwtx_ring->txd2,
940 hwtx_ring->txd3, hwtx_ring->txd4);
941
developere86c3ec2022-10-11 10:29:18 +0800942 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
943 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developer29f66b32022-07-12 15:23:20 +0800944 seq_printf(seq, " %08x %08x %08x %08x",
945 hwtx_ring->txd5, hwtx_ring->txd6,
946 hwtx_ring->txd7, hwtx_ring->txd8);
947 }
948
developer3d2dd692022-04-19 12:53:29 +0800949 seq_printf(seq, "\n");
950 }
951
developer3d2dd692022-04-19 12:53:29 +0800952 return 0;
953}
954
955static int hwtx_ring_open(struct inode *inode, struct file *file)
956{
957 return single_open(file, hwtx_ring_read, NULL);
958}
959
960static const struct file_operations hwtx_ring_fops = {
961 .owner = THIS_MODULE,
962 .open = hwtx_ring_open,
963 .read = seq_read,
964 .llseek = seq_lseek,
965 .release = single_release
966};
967
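/*
 * /proc handler: walk all allocated PDMA RX rings and dump each
 * descriptor (rxd1-rxd4, plus rxd5-rxd8 when the SoC uses the v2 RX
 * descriptor format).
 */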
developerec4ebe42022-04-12 11:17:45 +0800968int rx_ring_read(struct seq_file *seq, void *v)
969{
developer29f66b32022-07-12 15:23:20 +0800970 struct mtk_eth *eth = g_eth;
developer3c9c74d2023-09-11 11:36:12 +0800971 struct mtk_rx_ring *ring;
developer29f66b32022-07-12 15:23:20 +0800972 struct mtk_rx_dma_v2 *rx_ring;
developer3c9c74d2023-09-11 11:36:12 +0800973 int i = 0, j = 0;
developerec4ebe42022-04-12 11:17:45 +0800974
developerfce0d152024-01-11 13:37:13 +0800975 for (j = 0; j < MTK_MAX_RX_RING_NUM; j++) {
developer3c9c74d2023-09-11 11:36:12 +0800976 ring = &g_eth->rx_ring[j];
developerfce0d152024-01-11 13:37:13 +0800977 if (!ring->dma)
978 continue;
developer29f66b32022-07-12 15:23:20 +0800979
developer3c9c74d2023-09-11 11:36:12 +0800980 seq_printf(seq, "[Ring%d] next to read: %d\n", j,
developer64376db2024-04-08 14:04:38 +0800981 NEXT_DESP_IDX(ring->calc_idx, eth->soc->txrx.rx_dma_size));
developerfce0d152024-01-11 13:37:13 +0800982 for (i = 0; i < ring->dma_size; i++) {
developer3c9c74d2023-09-11 11:36:12 +0800983 rx_ring = ring->dma + i * eth->soc->txrx.rxd_size;
developer29f66b32022-07-12 15:23:20 +0800984
developer3c9c74d2023-09-11 11:36:12 +0800985 seq_printf(seq, "%d: %08x %08x %08x %08x", i,
986 rx_ring->rxd1, rx_ring->rxd2,
987 rx_ring->rxd3, rx_ring->rxd4);
developer29f66b32022-07-12 15:23:20 +0800988
developer3c9c74d2023-09-11 11:36:12 +0800989 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
990 seq_printf(seq, " %08x %08x %08x %08x",
991 rx_ring->rxd5, rx_ring->rxd6,
992 rx_ring->rxd7, rx_ring->rxd8);
993 }
994
995 seq_puts(seq, "\n");
996 }
developerec4ebe42022-04-12 11:17:45 +0800997 }
998
developerec4ebe42022-04-12 11:17:45 +0800999 return 0;
1000}
1001
1002static int rx_ring_open(struct inode *inode, struct file *file)
1003{
1004 return single_open(file, rx_ring_read, NULL);
1005}
1006
1007static const struct file_operations rx_ring_fops = {
1008 .owner = THIS_MODULE,
1009 .open = rx_ring_open,
1010 .read = seq_read,
1011 .llseek = seq_lseek,
1012 .release = single_release
1013};
1014
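/* One-off read of a physical register address via a temporary ioremap. */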
1015static inline u32 mtk_dbg_r32(u32 reg)
1016{
1017 void __iomem *virt_reg;
1018 u32 val;
1019
1020 virt_reg = ioremap(reg, 32);
1021 val = __raw_readl(virt_reg);
1022 iounmap(virt_reg);
1023
1024 return val;
1025}
1026
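/*
 * Dump frame-engine debug state: FE interrupt status, PSE input/output
 * queue status, PDMA/QDMA ring indexes, drop counters, MAC state and,
 * on NETSYS v2/v3, the CDM/GDM FSMs plus SGMII/WED registers. The FE
 * interrupt status registers are cleared at the end of the dump.
 */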
1027int dbg_regs_read(struct seq_file *seq, void *v)
1028{
1029 struct mtk_eth *eth = g_eth;
developer722ab5f2024-02-22 11:01:46 +08001030 u32 i;
developerec4ebe42022-04-12 11:17:45 +08001031
1032 seq_puts(seq, " <<DEBUG REG DUMP>>\n");
1033
1034 seq_printf(seq, "| FE_INT_STA : %08x |\n",
developer3d2dd692022-04-19 12:53:29 +08001035 mtk_r32(eth, MTK_FE_INT_STATUS));
developere86c3ec2022-10-11 10:29:18 +08001036 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1037 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developerec4ebe42022-04-12 11:17:45 +08001038 seq_printf(seq, "| FE_INT_STA2 : %08x |\n",
developer3d2dd692022-04-19 12:53:29 +08001039 mtk_r32(eth, MTK_FE_INT_STATUS2));
developerec4ebe42022-04-12 11:17:45 +08001040
1041 seq_printf(seq, "| PSE_FQFC_CFG : %08x |\n",
1042 mtk_r32(eth, MTK_PSE_FQFC_CFG));
1043 seq_printf(seq, "| PSE_IQ_STA1 : %08x |\n",
1044 mtk_r32(eth, MTK_PSE_IQ_STA(0)));
1045 seq_printf(seq, "| PSE_IQ_STA2 : %08x |\n",
1046 mtk_r32(eth, MTK_PSE_IQ_STA(1)));
1047
developere86c3ec2022-10-11 10:29:18 +08001048 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1049 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developerec4ebe42022-04-12 11:17:45 +08001050 seq_printf(seq, "| PSE_IQ_STA3 : %08x |\n",
1051 mtk_r32(eth, MTK_PSE_IQ_STA(2)));
1052 seq_printf(seq, "| PSE_IQ_STA4 : %08x |\n",
1053 mtk_r32(eth, MTK_PSE_IQ_STA(3)));
1054 seq_printf(seq, "| PSE_IQ_STA5 : %08x |\n",
1055 mtk_r32(eth, MTK_PSE_IQ_STA(4)));
developere86c3ec2022-10-11 10:29:18 +08001056 seq_printf(seq, "| PSE_IQ_STA6 : %08x |\n",
1057 mtk_r32(eth, MTK_PSE_IQ_STA(5)));
1058 seq_printf(seq, "| PSE_IQ_STA7 : %08x |\n",
1059 mtk_r32(eth, MTK_PSE_IQ_STA(6)));
1060 seq_printf(seq, "| PSE_IQ_STA8 : %08x |\n",
1061 mtk_r32(eth, MTK_PSE_IQ_STA(7)));
developerec4ebe42022-04-12 11:17:45 +08001062 }
1063
1064 seq_printf(seq, "| PSE_OQ_STA1 : %08x |\n",
1065 mtk_r32(eth, MTK_PSE_OQ_STA(0)));
1066 seq_printf(seq, "| PSE_OQ_STA2 : %08x |\n",
1067 mtk_r32(eth, MTK_PSE_OQ_STA(1)));
1068
developere86c3ec2022-10-11 10:29:18 +08001069 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1070 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developerec4ebe42022-04-12 11:17:45 +08001071 seq_printf(seq, "| PSE_OQ_STA3 : %08x |\n",
1072 mtk_r32(eth, MTK_PSE_OQ_STA(2)));
1073 seq_printf(seq, "| PSE_OQ_STA4 : %08x |\n",
1074 mtk_r32(eth, MTK_PSE_OQ_STA(3)));
1075 seq_printf(seq, "| PSE_OQ_STA5 : %08x |\n",
1076 mtk_r32(eth, MTK_PSE_OQ_STA(4)));
developere86c3ec2022-10-11 10:29:18 +08001077 seq_printf(seq, "| PSE_OQ_STA6 : %08x |\n",
1078 mtk_r32(eth, MTK_PSE_OQ_STA(5)));
1079 seq_printf(seq, "| PSE_OQ_STA7 : %08x |\n",
1080 mtk_r32(eth, MTK_PSE_OQ_STA(6)));
1081 seq_printf(seq, "| PSE_OQ_STA8 : %08x |\n",
1082 mtk_r32(eth, MTK_PSE_OQ_STA(7)));
developerec4ebe42022-04-12 11:17:45 +08001083 }
1084
1085 seq_printf(seq, "| PDMA_CRX_IDX : %08x |\n",
1086 mtk_r32(eth, MTK_PRX_CRX_IDX0));
1087 seq_printf(seq, "| PDMA_DRX_IDX : %08x |\n",
1088 mtk_r32(eth, MTK_PRX_DRX_IDX0));
developer722ab5f2024-02-22 11:01:46 +08001089 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
1090 for (i = 1; i < eth->soc->rss_num; i++) {
1091 seq_printf(seq, "| PDMA_CRX_IDX%d : %08x |\n",
1092 i, mtk_r32(eth, MTK_PRX_CRX_IDX_CFG(i)));
1093 seq_printf(seq, "| PDMA_DRX_IDX%d : %08x |\n",
1094 i, mtk_r32(eth, MTK_PRX_DRX_IDX_CFG(i)));
1095 }
1096 }
1097 if (MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO)) {
1098 for (i = 0; i < MTK_HW_LRO_RING_NUM; i++) {
1099 seq_printf(seq, "| PDMA_CRX_IDX%d : %08x |\n",
1100 MTK_HW_LRO_RING(i),
1101 mtk_r32(eth, MTK_PRX_CRX_IDX_CFG(MTK_HW_LRO_RING(i))));
1102 seq_printf(seq, "| PDMA_DRX_IDX%d : %08x |\n",
1103 MTK_HW_LRO_RING(i),
1104 mtk_r32(eth, MTK_PRX_DRX_IDX_CFG(MTK_HW_LRO_RING(i))));
1105 }
1106 }
1107
1108 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1109 seq_printf(seq, "| QDMA_CTX_IDX : %08x |\n",
1110 mtk_r32(eth, MTK_QTX_CTX_PTR));
1111 seq_printf(seq, "| QDMA_DTX_IDX : %08x |\n",
1112 mtk_r32(eth, MTK_QTX_DTX_PTR));
1113 seq_printf(seq, "| QDMA_FQ_CNT : %08x |\n",
1114 mtk_r32(eth, MTK_QDMA_FQ_CNT));
1115 seq_printf(seq, "| QDMA_FWD_CNT : %08x |\n",
1116 mtk_r32(eth, MTK_QDMA_FWD_CNT));
1117 seq_printf(seq, "| QDMA_FSM : %08x |\n",
1118 mtk_r32(eth, MTK_QDMA_FSM));
1119 } else {
1120 seq_printf(seq, "| PDMA_CTX_IDX : %08x |\n",
1121 mtk_r32(eth, MTK_PTX_CTX_IDX0));
1122 seq_printf(seq, "| PDMA_DTX_IDX : %08x |\n",
1123 mtk_r32(eth, MTK_PTX_DTX_IDX0));
1124 }
1125
developerec4ebe42022-04-12 11:17:45 +08001126 seq_printf(seq, "| FE_PSE_FREE : %08x |\n",
1127 mtk_r32(eth, MTK_FE_PSE_FREE));
1128 seq_printf(seq, "| FE_DROP_FQ : %08x |\n",
1129 mtk_r32(eth, MTK_FE_DROP_FQ));
1130 seq_printf(seq, "| FE_DROP_FC : %08x |\n",
1131 mtk_r32(eth, MTK_FE_DROP_FC));
1132 seq_printf(seq, "| FE_DROP_PPE : %08x |\n",
1133 mtk_r32(eth, MTK_FE_DROP_PPE));
1134 seq_printf(seq, "| GDM1_IG_CTRL : %08x |\n",
1135 mtk_r32(eth, MTK_GDMA_FWD_CFG(0)));
1136 seq_printf(seq, "| GDM2_IG_CTRL : %08x |\n",
1137 mtk_r32(eth, MTK_GDMA_FWD_CFG(1)));
developere86c3ec2022-10-11 10:29:18 +08001138 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
1139 seq_printf(seq, "| GDM3_IG_CTRL : %08x |\n",
1140 mtk_r32(eth, MTK_GDMA_FWD_CFG(2)));
1141 }
developerec4ebe42022-04-12 11:17:45 +08001142 seq_printf(seq, "| MAC_P1_MCR : %08x |\n",
1143 mtk_r32(eth, MTK_MAC_MCR(0)));
1144 seq_printf(seq, "| MAC_P2_MCR : %08x |\n",
1145 mtk_r32(eth, MTK_MAC_MCR(1)));
developere86c3ec2022-10-11 10:29:18 +08001146 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
1147 seq_printf(seq, "| MAC_P3_MCR : %08x |\n",
1148 mtk_r32(eth, MTK_MAC_MCR(2)));
1149 }
developerec4ebe42022-04-12 11:17:45 +08001150 seq_printf(seq, "| MAC_P1_FSM : %08x |\n",
1151 mtk_r32(eth, MTK_MAC_FSM(0)));
1152 seq_printf(seq, "| MAC_P2_FSM : %08x |\n",
1153 mtk_r32(eth, MTK_MAC_FSM(1)));
developere86c3ec2022-10-11 10:29:18 +08001154 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
1155 seq_printf(seq, "| MAC_P3_FSM : %08x |\n",
1156 mtk_r32(eth, MTK_MAC_FSM(2)));
1157 }
developerec4ebe42022-04-12 11:17:45 +08001158
developere86c3ec2022-10-11 10:29:18 +08001159 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1160 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
developerec4ebe42022-04-12 11:17:45 +08001161 seq_printf(seq, "| FE_CDM1_FSM : %08x |\n",
1162 mtk_r32(eth, MTK_FE_CDM1_FSM));
1163 seq_printf(seq, "| FE_CDM2_FSM : %08x |\n",
1164 mtk_r32(eth, MTK_FE_CDM2_FSM));
1165 seq_printf(seq, "| FE_CDM3_FSM : %08x |\n",
1166 mtk_r32(eth, MTK_FE_CDM3_FSM));
1167 seq_printf(seq, "| FE_CDM4_FSM : %08x |\n",
1168 mtk_r32(eth, MTK_FE_CDM4_FSM));
developere86c3ec2022-10-11 10:29:18 +08001169 seq_printf(seq, "| FE_CDM5_FSM : %08x |\n",
1170 mtk_r32(eth, MTK_FE_CDM5_FSM));
1171 seq_printf(seq, "| FE_CDM6_FSM : %08x |\n",
1172 mtk_r32(eth, MTK_FE_CDM6_FSM));
developerfce0d152024-01-11 13:37:13 +08001173 seq_printf(seq, "| FE_CDM7_FSM : %08x |\n",
1174 mtk_r32(eth, MTK_FE_CDM7_FSM));
developerec4ebe42022-04-12 11:17:45 +08001175 seq_printf(seq, "| FE_GDM1_FSM : %08x |\n",
1176 mtk_r32(eth, MTK_FE_GDM1_FSM));
1177 seq_printf(seq, "| FE_GDM2_FSM : %08x |\n",
1178 mtk_r32(eth, MTK_FE_GDM2_FSM));
developerfce0d152024-01-11 13:37:13 +08001179 seq_printf(seq, "| FE_GDM3_FSM : %08x |\n",
1180 mtk_r32(eth, MTK_FE_GDM3_FSM));
developerec4ebe42022-04-12 11:17:45 +08001181 seq_printf(seq, "| SGMII_EFUSE : %08x |\n",
1182 mtk_dbg_r32(MTK_SGMII_EFUSE));
1183 seq_printf(seq, "| SGMII0_RX_CNT : %08x |\n",
1184 mtk_dbg_r32(MTK_SGMII_FALSE_CARRIER_CNT(0)));
1185 seq_printf(seq, "| SGMII1_RX_CNT : %08x |\n",
1186 mtk_dbg_r32(MTK_SGMII_FALSE_CARRIER_CNT(1)));
1187 seq_printf(seq, "| WED_RTQM_GLO : %08x |\n",
1188 mtk_dbg_r32(MTK_WED_RTQM_GLO_CFG));
1189 }
1190
developer3d2dd692022-04-19 12:53:29 +08001191 mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS);
developere86c3ec2022-10-11 10:29:18 +08001192 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
1193 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
developer3d2dd692022-04-19 12:53:29 +08001194 mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS2);
developerec4ebe42022-04-12 11:17:45 +08001195
1196 return 0;
1197}
1198
1199static int dbg_regs_open(struct inode *inode, struct file *file)
1200{
1201 return single_open(file, dbg_regs_read, 0);
1202}
1203
1204static const struct file_operations dbg_regs_fops = {
1205 .owner = THIS_MODULE,
1206 .open = dbg_regs_open,
1207 .read = seq_read,
1208 .llseek = seq_lseek,
1209 .release = single_release
1210};
1211
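/*
 * Spread the RSS indirection table round-robin across "num" RX rings
 * (entry i gets ring i % num) and program it 16 entries per
 * MTK_RSS_INDR_TABLE_DW register. For example, num = 2 yields the
 * pattern 0,1,0,1,... across all MTK_RSS_MAX_INDIRECTION_TABLE entries.
 */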
developer55392d12023-07-10 12:54:02 +08001212static int mtk_rss_set_indr_tbl(struct mtk_eth *eth, int num)
1213{
1214 struct mtk_rss_params *rss_params = &eth->rss_params;
1215 u32 i;
1216
1217 if (num <= 0 || num > MTK_RX_NAPI_NUM)
1218 return -EOPNOTSUPP;
1219
1220 for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE; i++)
1221 rss_params->indirection_table[i] = i % num;
1222
1223 for (i = 0; i < MTK_RSS_MAX_INDIRECTION_TABLE / 16; i++)
1224 mtk_w32(eth, mtk_rss_indr_table(rss_params, i),
1225 MTK_RSS_INDR_TABLE_DW(i));
1226
1227 return 0;
1228}
1229
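/*
 * proc write handler for the rss_ctrl entry (registered elsewhere as
 * proc_rss_ctrl): parses the requested ring count (defaulting to 4) and
 * reprograms the indirection table; cur_rss_num tracks the value that
 * was actually applied.
 */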
developeredbe69e2023-06-08 11:08:46 +08001230ssize_t rss_ctrl_write(struct file *file, const char __user *buffer,
1231 size_t count, loff_t *data)
1232{
1233 char buf[32];
1234 char *p_buf;
1235 char *p_token = NULL;
1236 char *p_delimiter = " \t";
1237 long num = 4;
1238 u32 len = count;
1239 int ret;
1240
1241 if (len >= sizeof(buf)) {
1242 pr_info("Input handling fail!\n");
1243 return -1;
1244 }
1245
1246 if (copy_from_user(buf, buffer, len))
1247 return -EFAULT;
1248
1249 buf[len] = '\0';
1250
1251 p_buf = buf;
1252 p_token = strsep(&p_buf, p_delimiter);
1253 if (!p_token)
1254 num = 4;
1255 else
1256 ret = kstrtol(p_token, 10, &num);
1257
1258 if (!mtk_rss_set_indr_tbl(g_eth, num))
1259 cur_rss_num = num;
1260
1261 return count;
1262}
1263
1264int rss_ctrl_read(struct seq_file *seq, void *v)
1265{
1266 pr_info("ADMA is using %d-RSS.\n", cur_rss_num);
1267 return 0;
1268}
1269
1270static int rss_ctrl_open(struct inode *inode, struct file *file)
1271{
1272 return single_open(file, rss_ctrl_read, 0);
1273}
1274
1275static const struct file_operations rss_ctrl_fops = {
1276 .owner = THIS_MODULE,
1277 .open = rss_ctrl_open,
1278 .read = seq_read,
1279 .llseek = seq_lseek,
1280 .write = rss_ctrl_write,
1281 .release = single_release
1282};
1283
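/*
 * Account one flushed HW LRO aggregate. Ring numbers map to stats slots
 * differently for the v2 RX descriptor format (rings 4-7) and v1
 * (rings 1-3), and the aggregate count is taken from rxd6 or rxd2
 * accordingly.
 */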
developer29f66b32022-07-12 15:23:20 +08001284void hw_lro_stats_update(u32 ring_no, struct mtk_rx_dma_v2 *rxd)
developerec4ebe42022-04-12 11:17:45 +08001285{
developer29f66b32022-07-12 15:23:20 +08001286 struct mtk_eth *eth = g_eth;
developerec4ebe42022-04-12 11:17:45 +08001287 u32 idx, agg_cnt, agg_size;
1288
developerb35f4fa2023-03-14 13:24:47 +08001289 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
developer29f66b32022-07-12 15:23:20 +08001290 idx = ring_no - 4;
1291 agg_cnt = RX_DMA_GET_AGG_CNT_V2(rxd->rxd6);
1292 } else {
1293 idx = ring_no - 1;
1294 agg_cnt = RX_DMA_GET_AGG_CNT(rxd->rxd2);
1295 }
developerec4ebe42022-04-12 11:17:45 +08001296
developer3d5faf22022-11-29 18:07:22 +08001297 if (idx >= MTK_HW_LRO_RING_NUM)
1298 return;
1299
developerec4ebe42022-04-12 11:17:45 +08001300 agg_size = RX_DMA_GET_PLEN0(rxd->rxd2);
1301
1302 hw_lro_agg_size_cnt[idx][agg_size / 5000]++;
1303 hw_lro_agg_num_cnt[idx][agg_cnt]++;
1304 hw_lro_tot_flush_cnt[idx]++;
1305 hw_lro_tot_agg_cnt[idx] += agg_cnt;
1306}
1307
developer29f66b32022-07-12 15:23:20 +08001308void hw_lro_flush_stats_update(u32 ring_no, struct mtk_rx_dma_v2 *rxd)
developerec4ebe42022-04-12 11:17:45 +08001309{
developer29f66b32022-07-12 15:23:20 +08001310 struct mtk_eth *eth = g_eth;
developerec4ebe42022-04-12 11:17:45 +08001311 u32 idx, flush_reason;
1312
developerb35f4fa2023-03-14 13:24:47 +08001313 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
developer29f66b32022-07-12 15:23:20 +08001314 idx = ring_no - 4;
1315 flush_reason = RX_DMA_GET_FLUSH_RSN_V2(rxd->rxd6);
1316 } else {
1317 idx = ring_no - 1;
1318 flush_reason = RX_DMA_GET_REV(rxd->rxd2);
1319 }
developerec4ebe42022-04-12 11:17:45 +08001320
developer3d5faf22022-11-29 18:07:22 +08001321 if (idx >= MTK_HW_LRO_RING_NUM)
1322 return;
1323
developerec4ebe42022-04-12 11:17:45 +08001324 if ((flush_reason & 0x7) == MTK_HW_LRO_AGG_FLUSH)
1325 hw_lro_agg_flush_cnt[idx]++;
1326 else if ((flush_reason & 0x7) == MTK_HW_LRO_AGE_FLUSH)
1327 hw_lro_age_flush_cnt[idx]++;
1328 else if ((flush_reason & 0x7) == MTK_HW_LRO_NOT_IN_SEQ_FLUSH)
1329 hw_lro_seq_flush_cnt[idx]++;
1330 else if ((flush_reason & 0x7) == MTK_HW_LRO_TIMESTAMP_FLUSH)
1331 hw_lro_timestamp_flush_cnt[idx]++;
1332 else if ((flush_reason & 0x7) == MTK_HW_LRO_NON_RULE_FLUSH)
1333 hw_lro_norule_flush_cnt[idx]++;
1334}
1335
1336ssize_t hw_lro_stats_write(struct file *file, const char __user *buffer,
1337 size_t count, loff_t *data)
1338{
1339 memset(hw_lro_agg_num_cnt, 0, sizeof(hw_lro_agg_num_cnt));
1340 memset(hw_lro_agg_size_cnt, 0, sizeof(hw_lro_agg_size_cnt));
1341 memset(hw_lro_tot_agg_cnt, 0, sizeof(hw_lro_tot_agg_cnt));
1342 memset(hw_lro_tot_flush_cnt, 0, sizeof(hw_lro_tot_flush_cnt));
1343 memset(hw_lro_agg_flush_cnt, 0, sizeof(hw_lro_agg_flush_cnt));
1344 memset(hw_lro_age_flush_cnt, 0, sizeof(hw_lro_age_flush_cnt));
1345 memset(hw_lro_seq_flush_cnt, 0, sizeof(hw_lro_seq_flush_cnt));
1346 memset(hw_lro_timestamp_flush_cnt, 0,
1347 sizeof(hw_lro_timestamp_flush_cnt));
1348 memset(hw_lro_norule_flush_cnt, 0, sizeof(hw_lro_norule_flush_cnt));
1349
1350 pr_info("clear hw lro cnt table\n");
1351
1352 return count;
1353}
1354
1355int hw_lro_stats_read_v1(struct seq_file *seq, void *v)
1356{
1357 int i;
1358
1359 seq_puts(seq, "HW LRO statistic dump:\n");
1360
1361 /* Agg number count */
1362 seq_puts(seq, "Cnt: RING1 | RING2 | RING3 | Total\n");
1363 for (i = 0; i <= MTK_HW_LRO_MAX_AGG_CNT; i++) {
1364 seq_printf(seq, " %d : %d %d %d %d\n",
1365 i, hw_lro_agg_num_cnt[0][i],
1366 hw_lro_agg_num_cnt[1][i], hw_lro_agg_num_cnt[2][i],
1367 hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] +
1368 hw_lro_agg_num_cnt[2][i]);
1369 }
1370
1371 /* Total agg count */
1372 seq_puts(seq, "Total agg: RING1 | RING2 | RING3 | Total\n");
1373 seq_printf(seq, " %d %d %d %d\n",
1374 hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1],
1375 hw_lro_tot_agg_cnt[2],
1376 hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
1377 hw_lro_tot_agg_cnt[2]);
1378
1379 /* Total flush count */
1380 seq_puts(seq, "Total flush: RING1 | RING2 | RING3 | Total\n");
1381 seq_printf(seq, " %d %d %d %d\n",
1382 hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1],
1383 hw_lro_tot_flush_cnt[2],
1384 hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
1385 hw_lro_tot_flush_cnt[2]);
1386
1387 /* Avg agg count */
1388 seq_puts(seq, "Avg agg: RING1 | RING2 | RING3 | Total\n");
1389 seq_printf(seq, " %d %d %d %d\n",
1390 (hw_lro_tot_flush_cnt[0]) ?
1391 hw_lro_tot_agg_cnt[0] / hw_lro_tot_flush_cnt[0] : 0,
1392 (hw_lro_tot_flush_cnt[1]) ?
1393 hw_lro_tot_agg_cnt[1] / hw_lro_tot_flush_cnt[1] : 0,
1394 (hw_lro_tot_flush_cnt[2]) ?
1395 hw_lro_tot_agg_cnt[2] / hw_lro_tot_flush_cnt[2] : 0,
1396 (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
1397 hw_lro_tot_flush_cnt[2]) ?
1398 ((hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
1399 hw_lro_tot_agg_cnt[2]) / (hw_lro_tot_flush_cnt[0] +
1400 hw_lro_tot_flush_cnt[1] + hw_lro_tot_flush_cnt[2])) : 0);
1401
1402 /* Statistics of aggregation size counts */
1403 seq_puts(seq, "HW LRO flush pkt len:\n");
1404 seq_puts(seq, " Length | RING1 | RING2 | RING3 | Total\n");
1405 for (i = 0; i < 15; i++) {
1406 seq_printf(seq, "%d~%d: %d %d %d %d\n", i * 5000,
1407 (i + 1) * 5000, hw_lro_agg_size_cnt[0][i],
1408 hw_lro_agg_size_cnt[1][i], hw_lro_agg_size_cnt[2][i],
1409 hw_lro_agg_size_cnt[0][i] +
1410 hw_lro_agg_size_cnt[1][i] +
1411 hw_lro_agg_size_cnt[2][i]);
1412 }
1413
1414 seq_puts(seq, "Flush reason: RING1 | RING2 | RING3 | Total\n");
1415 seq_printf(seq, "AGG timeout: %d %d %d %d\n",
1416 hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1],
1417 hw_lro_agg_flush_cnt[2],
1418 (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] +
1419 hw_lro_agg_flush_cnt[2]));
1420
1421 seq_printf(seq, "AGE timeout: %d %d %d %d\n",
1422 hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1],
1423 hw_lro_age_flush_cnt[2],
1424 (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] +
1425 hw_lro_age_flush_cnt[2]));
1426
1427 seq_printf(seq, "Not in-sequence: %d %d %d %d\n",
1428 hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1],
1429 hw_lro_seq_flush_cnt[2],
1430 (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] +
1431 hw_lro_seq_flush_cnt[2]));
1432
1433 seq_printf(seq, "Timestamp: %d %d %d %d\n",
1434 hw_lro_timestamp_flush_cnt[0],
1435 hw_lro_timestamp_flush_cnt[1],
1436 hw_lro_timestamp_flush_cnt[2],
1437 (hw_lro_timestamp_flush_cnt[0] +
1438 hw_lro_timestamp_flush_cnt[1] +
1439 hw_lro_timestamp_flush_cnt[2]));
1440
1441 seq_printf(seq, "No LRO rule: %d %d %d %d\n",
1442 hw_lro_norule_flush_cnt[0],
1443 hw_lro_norule_flush_cnt[1],
1444 hw_lro_norule_flush_cnt[2],
1445 (hw_lro_norule_flush_cnt[0] +
1446 hw_lro_norule_flush_cnt[1] +
1447 hw_lro_norule_flush_cnt[2]));
1448
1449 return 0;
1450}
1451
1452int hw_lro_stats_read_v2(struct seq_file *seq, void *v)
1453{
1454 int i;
1455
1456 seq_puts(seq, "HW LRO statistic dump:\n");
1457
1458 /* Agg number count */
1459 seq_puts(seq, "Cnt: RING4 | RING5 | RING6 | RING7 Total\n");
1460 for (i = 0; i <= MTK_HW_LRO_MAX_AGG_CNT; i++) {
1461 seq_printf(seq,
1462 " %d : %d %d %d %d %d\n",
1463 i, hw_lro_agg_num_cnt[0][i], hw_lro_agg_num_cnt[1][i],
1464 hw_lro_agg_num_cnt[2][i], hw_lro_agg_num_cnt[3][i],
1465 hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] +
1466 hw_lro_agg_num_cnt[2][i] + hw_lro_agg_num_cnt[3][i]);
1467 }
1468
1469 /* Total agg count */
1470 seq_puts(seq, "Total agg: RING4 | RING5 | RING6 | RING7 Total\n");
1471 seq_printf(seq, " %d %d %d %d %d\n",
1472 hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1],
1473 hw_lro_tot_agg_cnt[2], hw_lro_tot_agg_cnt[3],
1474 hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
1475 hw_lro_tot_agg_cnt[2] + hw_lro_tot_agg_cnt[3]);
1476
1477 /* Total flush count */
1478 seq_puts(seq, "Total flush: RING4 | RING5 | RING6 | RING7 Total\n");
1479 seq_printf(seq, " %d %d %d %d %d\n",
1480 hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1],
1481 hw_lro_tot_flush_cnt[2], hw_lro_tot_flush_cnt[3],
1482 hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
1483 hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3]);
1484
1485 /* Avg agg count */
1486 seq_puts(seq, "Avg agg: RING4 | RING5 | RING6 | RING7 Total\n");
1487 seq_printf(seq, " %d %d %d %d %d\n",
1488 (hw_lro_tot_flush_cnt[0]) ?
1489 hw_lro_tot_agg_cnt[0] / hw_lro_tot_flush_cnt[0] : 0,
1490 (hw_lro_tot_flush_cnt[1]) ?
1491 hw_lro_tot_agg_cnt[1] / hw_lro_tot_flush_cnt[1] : 0,
1492 (hw_lro_tot_flush_cnt[2]) ?
1493 hw_lro_tot_agg_cnt[2] / hw_lro_tot_flush_cnt[2] : 0,
1494 (hw_lro_tot_flush_cnt[3]) ?
1495 hw_lro_tot_agg_cnt[3] / hw_lro_tot_flush_cnt[3] : 0,
1496 (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
1497 hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3]) ?
1498 ((hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
1499 hw_lro_tot_agg_cnt[2] + hw_lro_tot_agg_cnt[3]) /
1500 (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
1501 hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3])) : 0);
1502
1503 /* Statistics of aggregation size counts */
1504 seq_puts(seq, "HW LRO flush pkt len:\n");
1505 seq_puts(seq, " Length | RING4 | RING5 | RING6 | RING7 Total\n");
1506 for (i = 0; i < 15; i++) {
1507 seq_printf(seq, "%d~%d: %d %d %d %d %d\n",
1508 i * 5000, (i + 1) * 5000,
1509 hw_lro_agg_size_cnt[0][i], hw_lro_agg_size_cnt[1][i],
1510 hw_lro_agg_size_cnt[2][i], hw_lro_agg_size_cnt[3][i],
1511 hw_lro_agg_size_cnt[0][i] +
1512 hw_lro_agg_size_cnt[1][i] +
1513 hw_lro_agg_size_cnt[2][i] +
1514 hw_lro_agg_size_cnt[3][i]);
1515 }
1516
1517 seq_puts(seq, "Flush reason: RING4 | RING5 | RING6 | RING7 Total\n");
1518 seq_printf(seq, "AGG timeout: %d %d %d %d %d\n",
1519 hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1],
1520 hw_lro_agg_flush_cnt[2], hw_lro_agg_flush_cnt[3],
1521 (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] +
1522 hw_lro_agg_flush_cnt[2] + hw_lro_agg_flush_cnt[3]));
1523
1524 seq_printf(seq, "AGE timeout: %d %d %d %d %d\n",
1525 hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1],
1526 hw_lro_age_flush_cnt[2], hw_lro_age_flush_cnt[3],
1527 (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] +
1528 hw_lro_age_flush_cnt[2] + hw_lro_age_flush_cnt[3]));
1529
1530 seq_printf(seq, "Not in-sequence: %d %d %d %d %d\n",
1531 hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1],
1532 hw_lro_seq_flush_cnt[2], hw_lro_seq_flush_cnt[3],
1533 (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] +
1534 hw_lro_seq_flush_cnt[2] + hw_lro_seq_flush_cnt[3]));
1535
1536 seq_printf(seq, "Timestamp: %d %d %d %d %d\n",
1537 hw_lro_timestamp_flush_cnt[0],
1538 hw_lro_timestamp_flush_cnt[1],
1539 hw_lro_timestamp_flush_cnt[2],
1540 hw_lro_timestamp_flush_cnt[3],
1541 (hw_lro_timestamp_flush_cnt[0] +
1542 hw_lro_timestamp_flush_cnt[1] +
1543 hw_lro_timestamp_flush_cnt[2] +
1544 hw_lro_timestamp_flush_cnt[3]));
1545
1546 seq_printf(seq, "No LRO rule: %d %d %d %d %d\n",
1547 hw_lro_norule_flush_cnt[0],
1548 hw_lro_norule_flush_cnt[1],
1549 hw_lro_norule_flush_cnt[2],
1550 hw_lro_norule_flush_cnt[3],
1551 (hw_lro_norule_flush_cnt[0] +
1552 hw_lro_norule_flush_cnt[1] +
1553 hw_lro_norule_flush_cnt[2] +
1554 hw_lro_norule_flush_cnt[3]));
1555
1556 return 0;
1557}
1558
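/* Pick the statistics formatter that matches the SoC's HW LRO ring layout. */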
1559int hw_lro_stats_read_wrapper(struct seq_file *seq, void *v)
1560{
1561 struct mtk_eth *eth = g_eth;
1562
developerb35f4fa2023-03-14 13:24:47 +08001563 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
developerec4ebe42022-04-12 11:17:45 +08001564 hw_lro_stats_read_v2(seq, v);
1565 else
1566 hw_lro_stats_read_v1(seq, v);
1567
1568 return 0;
1569}
1570
1571static int hw_lro_stats_open(struct inode *inode, struct file *file)
1572{
1573 return single_open(file, hw_lro_stats_read_wrapper, NULL);
1574}
1575
1576static const struct file_operations hw_lro_stats_fops = {
1577 .owner = THIS_MODULE,
1578 .open = hw_lro_stats_open,
1579 .read = seq_read,
1580 .llseek = seq_lseek,
1581 .write = hw_lro_stats_write,
1582 .release = single_release
1583};
1584
1585int hwlro_agg_cnt_ctrl(int cnt)
1586{
1587 int i;
1588
1589 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1590 SET_PDMA_RXRING_MAX_AGG_CNT(g_eth, i, cnt);
1591
1592 return 0;
1593}
1594
1595int hwlro_agg_time_ctrl(int time)
1596{
1597 int i;
1598
1599 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1600 SET_PDMA_RXRING_AGG_TIME(g_eth, i, time);
1601
1602 return 0;
1603}
1604
1605int hwlro_age_time_ctrl(int time)
1606{
1607 int i;
1608
1609 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1610 SET_PDMA_RXRING_AGE_TIME(g_eth, i, time);
1611
1612 return 0;
1613}
1614
1615int hwlro_threshold_ctrl(int bandwidth)
1616{
1617 SET_PDMA_LRO_BW_THRESHOLD(g_eth, bandwidth);
1618
1619 return 0;
1620}
1621
1622int hwlro_ring_enable_ctrl(int enable)
1623{
1624 int i;
1625
1626 pr_info("[%s] %s HW LRO rings\n", __func__, (enable) ? "Enable" : "Disable");
1627
1628 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1629 SET_PDMA_RXRING_VALID(g_eth, i, enable);
1630
1631 return 0;
1632}
1633
1634int hwlro_stats_enable_ctrl(int enable)
1635{
1636 pr_info("[%s] %s HW LRO statistics\n", __func__, (enable) ? "Enable" : "Disable");
1637 mtk_hwlro_stats_ebl = enable;
1638
1639 return 0;
1640}
1641
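/*
 * Dispatch table for hw_lro_auto_tlb writes: the first token selects one of
 * the control helpers above, the second token is passed to it as the argument
 * (e.g. "echo 4 0" disables every HW LRO ring via hwlro_ring_enable_ctrl).
 */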
1642static const mtk_lro_dbg_func lro_dbg_func[] = {
1643 [0] = hwlro_agg_cnt_ctrl,
1644 [1] = hwlro_agg_time_ctrl,
1645 [2] = hwlro_age_time_ctrl,
1646 [3] = hwlro_threshold_ctrl,
1647 [4] = hwlro_ring_enable_ctrl,
1648 [5] = hwlro_stats_enable_ctrl,
1649};
1650
1651ssize_t hw_lro_auto_tlb_write(struct file *file, const char __user *buffer,
1652 size_t count, loff_t *data)
1653{
1654 char buf[32];
1655 char *p_buf;
1656 char *p_token = NULL;
1657 char *p_delimiter = " \t";
1658 long x = 0, y = 0;
1659 u32 len = count;
1660 int ret;
1661
1662	if (len >= sizeof(buf)) {
1663		pr_info("Input string is too long!\n");
1664		return -EINVAL;
1665	}
1666
1667 if (copy_from_user(buf, buffer, len))
1668 return -EFAULT;
1669
1670 buf[len] = '\0';
1671
1672 p_buf = buf;
1673 p_token = strsep(&p_buf, p_delimiter);
1674 if (!p_token)
1675 x = 0;
1676 else
1677 ret = kstrtol(p_token, 10, &x);
1678
1679 p_token = strsep(&p_buf, "\t\n ");
1680 if (p_token)
1681 ret = kstrtol(p_token, 10, &y);
1682
1683	if (x >= 0 && x < ARRAY_SIZE(lro_dbg_func) && lro_dbg_func[x])
1684 (*lro_dbg_func[x]) (y);
1685
1686 return count;
1687}
1688
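/*
 * Dump one v1 auto-learn table entry: the entry index is written to
 * MTK_FE_ALT_CF8, the nine raw words are read back through
 * MTK_FE_ALT_SEQ_CFC, and the result is decoded as struct mtk_lro_alt_v1.
 */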
1689void hw_lro_auto_tlb_dump_v1(struct seq_file *seq, u32 index)
1690{
1691 int i;
1692 struct mtk_lro_alt_v1 alt;
1693 __be32 addr;
1694 u32 tlb_info[9];
1695 u32 dw_len, cnt, priority;
1696 u32 entry;
1697
1698 if (index > 4)
1699 index = index - 1;
1700 entry = (index * 9) + 1;
1701
1702 /* read valid entries of the auto-learn table */
1703 mtk_w32(g_eth, entry, MTK_FE_ALT_CF8);
1704
1705 for (i = 0; i < 9; i++)
1706 tlb_info[i] = mtk_r32(g_eth, MTK_FE_ALT_SEQ_CFC);
1707
1708 memcpy(&alt, tlb_info, sizeof(struct mtk_lro_alt_v1));
1709
1710 dw_len = alt.alt_info7.dw_len;
1711 cnt = alt.alt_info6.cnt;
1712
1713 if (mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW0) & MTK_LRO_ALT_PKT_CNT_MODE)
1714 priority = cnt; /* packet count */
1715 else
1716 priority = dw_len; /* byte count */
1717
1718 /* dump valid entries of the auto-learn table */
1719 if (index >= 4)
1720 seq_printf(seq, "\n===== TABLE Entry: %d (Act) =====\n", index);
1721 else
1722 seq_printf(seq, "\n===== TABLE Entry: %d (LRU) =====\n", index);
1723
1724 if (alt.alt_info8.ipv4) {
1725 addr = htonl(alt.alt_info1.sip0);
1726 seq_printf(seq, "SIP = %pI4 (IPv4)\n", &addr);
1727 } else {
1728 seq_printf(seq, "SIP = %08X:%08X:%08X:%08X (IPv6)\n",
1729 alt.alt_info4.sip3, alt.alt_info3.sip2,
1730 alt.alt_info2.sip1, alt.alt_info1.sip0);
1731 }
1732
1733 seq_printf(seq, "DIP_ID = %d\n", alt.alt_info8.dip_id);
1734 seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
1735 alt.alt_info0.stp, alt.alt_info0.dtp);
1736 seq_printf(seq, "VLAN_VID_VLD = %d\n", alt.alt_info6.vlan_vid_vld);
1737	seq_printf(seq, "VLAN1 = %d | VLAN2 = %d | VLAN3 = %d | VLAN4 = %d\n",
1738 (alt.alt_info5.vlan_vid0 & 0xfff),
1739 ((alt.alt_info5.vlan_vid0 >> 12) & 0xfff),
1740 ((alt.alt_info6.vlan_vid1 << 8) |
1741 ((alt.alt_info5.vlan_vid0 >> 24) & 0xfff)),
1742 ((alt.alt_info6.vlan_vid1 >> 4) & 0xfff));
1743 seq_printf(seq, "TPUT = %d | FREQ = %d\n", dw_len, cnt);
1744 seq_printf(seq, "PRIORITY = %d\n", priority);
1745}
1746
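/*
 * Dump one v2 auto-learn table entry: the index is selected through
 * MTK_LRO_ALT_DBG, the raw words are read from MTK_LRO_ALT_DBG_DATA and
 * decoded as struct mtk_lro_alt_v2; only entries flagged valid are printed.
 */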
1747void hw_lro_auto_tlb_dump_v2(struct seq_file *seq, u32 index)
1748{
1749 int i;
1750 struct mtk_lro_alt_v2 alt;
1751 u32 score = 0, ipv4 = 0;
1752 u32 ipv6[4] = { 0 };
1753 u32 tlb_info[12];
1754
1755 /* read valid entries of the auto-learn table */
1756 mtk_w32(g_eth, index << MTK_LRO_ALT_INDEX_OFFSET, MTK_LRO_ALT_DBG);
1757
1758 for (i = 0; i < 11; i++)
1759 tlb_info[i] = mtk_r32(g_eth, MTK_LRO_ALT_DBG_DATA);
1760
1761 memcpy(&alt, tlb_info, sizeof(struct mtk_lro_alt_v2));
1762
1763 if (mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW0) & MTK_LRO_ALT_PKT_CNT_MODE)
1764 score = 1; /* packet count */
1765 else
1766 score = 0; /* byte count */
1767
1768 /* dump valid entries of the auto-learn table */
1769 if (alt.alt_info0.valid) {
1770 if (index < 5)
1771 seq_printf(seq,
1772				   "\n===== TABLE Entry: %d (ongoing) =====\n",
1773 index);
1774 else
1775 seq_printf(seq,
1776 "\n===== TABLE Entry: %d (candidate) =====\n",
1777 index);
1778
1779 if (alt.alt_info1.v4_valid) {
1780 ipv4 = (alt.alt_info4.sip0_h << 23) |
1781 alt.alt_info5.sip0_l;
1782 seq_printf(seq, "SIP = 0x%x: (IPv4)\n", ipv4);
1783
1784 ipv4 = (alt.alt_info8.dip0_h << 23) |
1785 alt.alt_info9.dip0_l;
1786 seq_printf(seq, "DIP = 0x%x: (IPv4)\n", ipv4);
1787 } else if (alt.alt_info1.v6_valid) {
1788 ipv6[3] = (alt.alt_info1.sip3_h << 23) |
1789 (alt.alt_info2.sip3_l << 9);
1790 ipv6[2] = (alt.alt_info2.sip2_h << 23) |
1791 (alt.alt_info3.sip2_l << 9);
1792 ipv6[1] = (alt.alt_info3.sip1_h << 23) |
1793 (alt.alt_info4.sip1_l << 9);
1794 ipv6[0] = (alt.alt_info4.sip0_h << 23) |
1795 (alt.alt_info5.sip0_l << 9);
1796 seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
1797 ipv6[3], ipv6[2], ipv6[1], ipv6[0]);
1798
1799 ipv6[3] = (alt.alt_info5.dip3_h << 23) |
1800 (alt.alt_info6.dip3_l << 9);
1801 ipv6[2] = (alt.alt_info6.dip2_h << 23) |
1802 (alt.alt_info7.dip2_l << 9);
1803 ipv6[1] = (alt.alt_info7.dip1_h << 23) |
1804 (alt.alt_info8.dip1_l << 9);
1805 ipv6[0] = (alt.alt_info8.dip0_h << 23) |
1806 (alt.alt_info9.dip0_l << 9);
1807 seq_printf(seq, "DIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
1808 ipv6[3], ipv6[2], ipv6[1], ipv6[0]);
1809 }
1810
1811 seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
1812 (alt.alt_info9.sp_h << 7) | (alt.alt_info10.sp_l),
1813 alt.alt_info10.dp);
1814 }
1815}
1816
1817int hw_lro_auto_tlb_read(struct seq_file *seq, void *v)
1818{
1819 int i;
1820 u32 reg_val;
1821 u32 reg_op1, reg_op2, reg_op3, reg_op4;
1822 u32 agg_cnt, agg_time, age_time;
1823
1824 seq_puts(seq, "Usage of /proc/mtketh/hw_lro_auto_tlb:\n");
1825 seq_puts(seq, "echo [function] [setting] > /proc/mtketh/hw_lro_auto_tlb\n");
1826 seq_puts(seq, "Functions:\n");
1827 seq_puts(seq, "[0] = hwlro_agg_cnt_ctrl\n");
1828 seq_puts(seq, "[1] = hwlro_agg_time_ctrl\n");
1829 seq_puts(seq, "[2] = hwlro_age_time_ctrl\n");
1830 seq_puts(seq, "[3] = hwlro_threshold_ctrl\n");
1831 seq_puts(seq, "[4] = hwlro_ring_enable_ctrl\n");
1832 seq_puts(seq, "[5] = hwlro_stats_enable_ctrl\n\n");
1833
developerb35f4fa2023-03-14 13:24:47 +08001834 if (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_RX_V2)) {
developerec4ebe42022-04-12 11:17:45 +08001835 for (i = 1; i <= 8; i++)
1836 hw_lro_auto_tlb_dump_v2(seq, i);
1837 } else {
1838 /* Read valid entries of the auto-learn table */
1839 mtk_w32(g_eth, 0, MTK_FE_ALT_CF8);
1840 reg_val = mtk_r32(g_eth, MTK_FE_ALT_SEQ_CFC);
1841
1842 seq_printf(seq,
1843 "HW LRO Auto-learn Table: (MTK_FE_ALT_SEQ_CFC=0x%x)\n",
1844 reg_val);
1845
1846 for (i = 7; i >= 0; i--) {
1847 if (reg_val & (1 << i))
1848 hw_lro_auto_tlb_dump_v1(seq, i);
1849 }
1850 }
1851
1852 /* Read the agg_time/age_time/agg_cnt of LRO rings */
1853 seq_puts(seq, "\nHW LRO Ring Settings\n");
1854
1855 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
1856 reg_op1 = mtk_r32(g_eth, MTK_LRO_CTRL_DW1_CFG(i));
1857 reg_op2 = mtk_r32(g_eth, MTK_LRO_CTRL_DW2_CFG(i));
1858 reg_op3 = mtk_r32(g_eth, MTK_LRO_CTRL_DW3_CFG(i));
1859 reg_op4 = mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW2);
1860
1861 agg_cnt =
1862 ((reg_op3 & 0x3) << 6) |
1863 ((reg_op2 >> MTK_LRO_RING_AGG_CNT_L_OFFSET) & 0x3f);
1864 agg_time = (reg_op2 >> MTK_LRO_RING_AGG_TIME_OFFSET) & 0xffff;
1865 age_time =
1866 ((reg_op2 & 0x3f) << 10) |
1867 ((reg_op1 >> MTK_LRO_RING_AGE_TIME_L_OFFSET) & 0x3ff);
1868 seq_printf(seq,
1869 "Ring[%d]: MAX_AGG_CNT=%d, AGG_TIME=%d, AGE_TIME=%d, Threshold=%d\n",
developer3c9c74d2023-09-11 11:36:12 +08001870 !(MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_RX_V2)) ?
developerb35f4fa2023-03-14 13:24:47 +08001871 i : i+3,
developerec4ebe42022-04-12 11:17:45 +08001872 agg_cnt, agg_time, age_time, reg_op4);
1873 }
1874
1875 seq_puts(seq, "\n");
1876
1877 return 0;
1878}
1879
1880static int hw_lro_auto_tlb_open(struct inode *inode, struct file *file)
1881{
1882 return single_open(file, hw_lro_auto_tlb_read, NULL);
1883}
1884
1885static const struct file_operations hw_lro_auto_tlb_fops = {
1886 .owner = THIS_MODULE,
1887 .open = hw_lro_auto_tlb_open,
1888 .read = seq_read,
1889 .llseek = seq_lseek,
1890 .write = hw_lro_auto_tlb_write,
1891 .release = single_release
1892};
1893
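/*
 * Report the frame engine reset event counters accumulated in
 * eth->reset_event; writing anything to this proc entry clears them.
 */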
developer3d2dd692022-04-19 12:53:29 +08001894int reset_event_read(struct seq_file *seq, void *v)
1895{
1896 struct mtk_eth *eth = g_eth;
1897 struct mtk_reset_event reset_event = eth->reset_event;
1898
1899	seq_puts(seq, "[Event] [Count]\n");
1900 seq_printf(seq, " FQ Empty: %d\n",
1901 reset_event.count[MTK_EVENT_FQ_EMPTY]);
1902 seq_printf(seq, " TSO Fail: %d\n",
1903 reset_event.count[MTK_EVENT_TSO_FAIL]);
1904 seq_printf(seq, " TSO Illegal: %d\n",
1905 reset_event.count[MTK_EVENT_TSO_ILLEGAL]);
1906 seq_printf(seq, " TSO Align: %d\n",
1907 reset_event.count[MTK_EVENT_TSO_ALIGN]);
1908 seq_printf(seq, " RFIFO OV: %d\n",
1909 reset_event.count[MTK_EVENT_RFIFO_OV]);
1910 seq_printf(seq, " RFIFO UF: %d\n",
1911 reset_event.count[MTK_EVENT_RFIFO_UF]);
1912 seq_printf(seq, " Force: %d\n",
1913 reset_event.count[MTK_EVENT_FORCE]);
1914	seq_puts(seq, "----------------------------\n");
1915 seq_printf(seq, " Warm Cnt: %d\n",
1916 reset_event.count[MTK_EVENT_WARM_CNT]);
1917 seq_printf(seq, " Cold Cnt: %d\n",
1918 reset_event.count[MTK_EVENT_COLD_CNT]);
1919 seq_printf(seq, " Total Cnt: %d\n",
1920 reset_event.count[MTK_EVENT_TOTAL_CNT]);
1921
1922 return 0;
1923}
1924
1925static int reset_event_open(struct inode *inode, struct file *file)
1926{
1927	return single_open(file, reset_event_read, NULL);
1928}
1929
1930ssize_t reset_event_write(struct file *file, const char __user *buffer,
1931 size_t count, loff_t *data)
1932{
1933 struct mtk_eth *eth = g_eth;
1934 struct mtk_reset_event *reset_event = &eth->reset_event;
1935
1936 memset(reset_event, 0, sizeof(struct mtk_reset_event));
1937	pr_info("MTK reset event counters are cleared!\n");
1938
1939 return count;
1940}
1941
1942static const struct file_operations reset_event_fops = {
1943 .owner = THIS_MODULE,
1944 .open = reset_event_open,
1945 .read = seq_read,
1946 .llseek = seq_lseek,
1947 .write = reset_event_write,
1948 .release = single_release
1949};
1950
1951
developerec4ebe42022-04-12 11:17:45 +08001952struct proc_dir_entry *proc_reg_dir;
developerc3d2b632023-01-13 11:32:11 +08001953static struct proc_dir_entry *proc_esw_cnt, *proc_xfi_cnt,
1954 *proc_dbg_regs, *proc_reset_event;
developerec4ebe42022-04-12 11:17:45 +08001955
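/*
 * Create the driver's debug entries under PROCREG_DIR (the /proc/mtketh
 * directory referenced by the usage text above); a failed entry only logs
 * a notice and does not abort initialisation.
 */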
1956int debug_proc_init(struct mtk_eth *eth)
1957{
1958 g_eth = eth;
1959
1960 if (!proc_reg_dir)
1961 proc_reg_dir = proc_mkdir(PROCREG_DIR, NULL);
1962
1963 proc_tx_ring =
1964 proc_create(PROCREG_TXRING, 0, proc_reg_dir, &tx_ring_fops);
1965 if (!proc_tx_ring)
1966 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_TXRING);
1967
developer3d2dd692022-04-19 12:53:29 +08001968 proc_hwtx_ring =
1969 proc_create(PROCREG_HWTXRING, 0, proc_reg_dir, &hwtx_ring_fops);
1970 if (!proc_hwtx_ring)
1971 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_HWTXRING);
1972
developerec4ebe42022-04-12 11:17:45 +08001973 proc_rx_ring =
1974 proc_create(PROCREG_RXRING, 0, proc_reg_dir, &rx_ring_fops);
1975 if (!proc_rx_ring)
1976 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RXRING);
1977
1978 proc_esw_cnt =
1979 proc_create(PROCREG_ESW_CNT, 0, proc_reg_dir, &switch_count_fops);
1980 if (!proc_esw_cnt)
1981 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_ESW_CNT);
1982
developerc3d2b632023-01-13 11:32:11 +08001983 if (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V3)) {
1984 proc_xfi_cnt =
1985 proc_create(PROCREG_XFI_CNT, 0,
1986 proc_reg_dir, &xfi_count_fops);
1987 if (!proc_xfi_cnt)
1988 pr_notice("!! FAIL to create %s PROC !!\n",
1989 PROCREG_XFI_CNT);
1990 }
1991
developerec4ebe42022-04-12 11:17:45 +08001992 proc_dbg_regs =
1993 proc_create(PROCREG_DBG_REGS, 0, proc_reg_dir, &dbg_regs_fops);
1994 if (!proc_dbg_regs)
1995 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_DBG_REGS);
1996
developeredbe69e2023-06-08 11:08:46 +08001997 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) {
1998 proc_rss_ctrl =
1999 proc_create(PROCREG_RSS_CTRL, 0, proc_reg_dir,
2000 &rss_ctrl_fops);
2001 if (!proc_rss_ctrl)
2002 pr_info("!! FAIL to create %s PROC !!\n",
2003 PROCREG_RSS_CTRL);
2004
2005 cur_rss_num = g_eth->soc->rss_num;
2006 }
2007
developerec4ebe42022-04-12 11:17:45 +08002008 if (g_eth->hwlro) {
2009 proc_hw_lro_stats =
2010 proc_create(PROCREG_HW_LRO_STATS, 0, proc_reg_dir,
2011 &hw_lro_stats_fops);
2012 if (!proc_hw_lro_stats)
2013 pr_info("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_STATS);
2014
2015 proc_hw_lro_auto_tlb =
2016 proc_create(PROCREG_HW_LRO_AUTO_TLB, 0, proc_reg_dir,
2017 &hw_lro_auto_tlb_fops);
2018 if (!proc_hw_lro_auto_tlb)
2019 pr_info("!! FAIL to create %s PROC !!\n",
2020 PROCREG_HW_LRO_AUTO_TLB);
2021 }
2022
developer3d2dd692022-04-19 12:53:29 +08002023 proc_reset_event =
2024 proc_create(PROCREG_RESET_EVENT, 0, proc_reg_dir, &reset_event_fops);
2025 if (!proc_reset_event)
2026 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RESET_EVENT);
developer8b8f87d2023-04-27 11:01:26 +08002027 dbg_show_level = 1;
developerec4ebe42022-04-12 11:17:45 +08002028 return 0;
2029}
2030
2031void debug_proc_exit(void)
2032{
2033 if (proc_tx_ring)
2034 remove_proc_entry(PROCREG_TXRING, proc_reg_dir);
developer3d2dd692022-04-19 12:53:29 +08002035 if (proc_hwtx_ring)
2036 remove_proc_entry(PROCREG_HWTXRING, proc_reg_dir);
developerec4ebe42022-04-12 11:17:45 +08002037 if (proc_rx_ring)
2038 remove_proc_entry(PROCREG_RXRING, proc_reg_dir);
2039
2040 if (proc_esw_cnt)
2041 remove_proc_entry(PROCREG_ESW_CNT, proc_reg_dir);
2042
developerc3d2b632023-01-13 11:32:11 +08002043 if (proc_xfi_cnt)
2044 remove_proc_entry(PROCREG_XFI_CNT, proc_reg_dir);
2045
developerec4ebe42022-04-12 11:17:45 +08002046 if (proc_reg_dir)
2047 remove_proc_entry(PROCREG_DIR, 0);
2048
2049 if (proc_dbg_regs)
2050 remove_proc_entry(PROCREG_DBG_REGS, proc_reg_dir);
2051
developeredbe69e2023-06-08 11:08:46 +08002052 if (proc_rss_ctrl)
2053 remove_proc_entry(PROCREG_RSS_CTRL, proc_reg_dir);
2054
developerec4ebe42022-04-12 11:17:45 +08002055 if (g_eth->hwlro) {
2056 if (proc_hw_lro_stats)
2057 remove_proc_entry(PROCREG_HW_LRO_STATS, proc_reg_dir);
2058
2059 if (proc_hw_lro_auto_tlb)
2060 remove_proc_entry(PROCREG_HW_LRO_AUTO_TLB, proc_reg_dir);
2061 }
developer3d2dd692022-04-19 12:53:29 +08002062
2063 if (proc_reset_event)
2064 remove_proc_entry(PROCREG_RESET_EVENT, proc_reg_dir);
developerec4ebe42022-04-12 11:17:45 +08002065}
2066