blob: 0d5ca16e4f913875f84d7d35c421651f5edc79f8 [file] [log] [blame]
developerfd40db22021-04-29 10:08:25 +08001/*
2 * Copyright (C) 2018 MediaTek Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
14 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
15 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
16 */
17
18#include <linux/trace_seq.h>
19#include <linux/seq_file.h>
20#include <linux/proc_fs.h>
21#include <linux/u64_stats_sync.h>
22#include <linux/dma-mapping.h>
23#include <linux/netdevice.h>
24#include <linux/ctype.h>
25#include <linux/debugfs.h>
26#include <linux/of_mdio.h>
27
28#include "mtk_eth_soc.h"
29#include "mtk_eth_dbg.h"
developer8051e042022-04-08 13:26:36 +080030#include "mtk_eth_reset.h"
developerfd40db22021-04-29 10:08:25 +080031
developer77d03a72021-06-06 00:06:00 +080032u32 hw_lro_agg_num_cnt[MTK_HW_LRO_RING_NUM][MTK_HW_LRO_MAX_AGG_CNT + 1];
33u32 hw_lro_agg_size_cnt[MTK_HW_LRO_RING_NUM][16];
34u32 hw_lro_tot_agg_cnt[MTK_HW_LRO_RING_NUM];
35u32 hw_lro_tot_flush_cnt[MTK_HW_LRO_RING_NUM];
36u32 hw_lro_agg_flush_cnt[MTK_HW_LRO_RING_NUM];
37u32 hw_lro_age_flush_cnt[MTK_HW_LRO_RING_NUM];
38u32 hw_lro_seq_flush_cnt[MTK_HW_LRO_RING_NUM];
39u32 hw_lro_timestamp_flush_cnt[MTK_HW_LRO_RING_NUM];
40u32 hw_lro_norule_flush_cnt[MTK_HW_LRO_RING_NUM];
41u32 mtk_hwlro_stats_ebl;
42static struct proc_dir_entry *proc_hw_lro_stats, *proc_hw_lro_auto_tlb;
43typedef int (*mtk_lro_dbg_func) (int par);
44
developerfd40db22021-04-29 10:08:25 +080045struct mtk_eth_debug {
46 struct dentry *root;
47};
48
49struct mtk_eth *g_eth;
50
51struct mtk_eth_debug eth_debug;
52
developer3957a912021-05-13 16:44:31 +080053void mt7530_mdio_w32(struct mtk_eth *eth, u16 reg, u32 val)
developerfd40db22021-04-29 10:08:25 +080054{
55 mutex_lock(&eth->mii_bus->mdio_lock);
56
57 _mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
58 _mtk_mdio_write(eth, 0x1f, (reg >> 2) & 0xf, val & 0xffff);
59 _mtk_mdio_write(eth, 0x1f, 0x10, val >> 16);
60
61 mutex_unlock(&eth->mii_bus->mdio_lock);
62}
63
64u32 mt7530_mdio_r32(struct mtk_eth *eth, u32 reg)
65{
66 u16 high, low;
67
68 mutex_lock(&eth->mii_bus->mdio_lock);
69
70 _mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
71 low = _mtk_mdio_read(eth, 0x1f, (reg >> 2) & 0xf);
72 high = _mtk_mdio_read(eth, 0x1f, 0x10);
73
74 mutex_unlock(&eth->mii_bus->mdio_lock);
75
76 return (high << 16) | (low & 0xffff);
77}
78
79void mtk_switch_w32(struct mtk_eth *eth, u32 val, unsigned reg)
80{
81 mtk_w32(eth, val, reg + 0x10000);
82}
83EXPORT_SYMBOL(mtk_switch_w32);
84
85u32 mtk_switch_r32(struct mtk_eth *eth, unsigned reg)
86{
87 return mtk_r32(eth, reg + 0x10000);
88}
89EXPORT_SYMBOL(mtk_switch_r32);
90
91static int mtketh_debug_show(struct seq_file *m, void *private)
92{
93 struct mtk_eth *eth = m->private;
94 struct mtk_mac *mac = 0;
developer77d03a72021-06-06 00:06:00 +080095 int i = 0;
developerfd40db22021-04-29 10:08:25 +080096
97 for (i = 0 ; i < MTK_MAX_DEVS ; i++) {
98 if (!eth->mac[i] ||
99 of_phy_is_fixed_link(eth->mac[i]->of_node))
100 continue;
101 mac = eth->mac[i];
102#if 0 //FIXME
103 while (j < 30) {
104 d = _mtk_mdio_read(eth, mac->phy_dev->addr, j);
105
106 seq_printf(m, "phy=%d, reg=0x%08x, data=0x%08x\n",
107 mac->phy_dev->addr, j, d);
108 j++;
109 }
110#endif
111 }
112 return 0;
113}
114
115static int mtketh_debug_open(struct inode *inode, struct file *file)
116{
117 return single_open(file, mtketh_debug_show, inode->i_private);
118}
119
120static const struct file_operations mtketh_debug_fops = {
121 .open = mtketh_debug_open,
122 .read = seq_read,
123 .llseek = seq_lseek,
124 .release = single_release,
125};
126
127static int mtketh_mt7530sw_debug_show(struct seq_file *m, void *private)
128{
129 struct mtk_eth *eth = m->private;
130 u32 offset, data;
131 int i;
132 struct mt7530_ranges {
133 u32 start;
134 u32 end;
135 } ranges[] = {
136 {0x0, 0xac},
137 {0x1000, 0x10e0},
138 {0x1100, 0x1140},
139 {0x1200, 0x1240},
140 {0x1300, 0x1340},
141 {0x1400, 0x1440},
142 {0x1500, 0x1540},
143 {0x1600, 0x1640},
144 {0x1800, 0x1848},
145 {0x1900, 0x1948},
146 {0x1a00, 0x1a48},
147 {0x1b00, 0x1b48},
148 {0x1c00, 0x1c48},
149 {0x1d00, 0x1d48},
150 {0x1e00, 0x1e48},
151 {0x1f60, 0x1ffc},
152 {0x2000, 0x212c},
153 {0x2200, 0x222c},
154 {0x2300, 0x232c},
155 {0x2400, 0x242c},
156 {0x2500, 0x252c},
157 {0x2600, 0x262c},
158 {0x3000, 0x3014},
159 {0x30c0, 0x30f8},
160 {0x3100, 0x3114},
161 {0x3200, 0x3214},
162 {0x3300, 0x3314},
163 {0x3400, 0x3414},
164 {0x3500, 0x3514},
165 {0x3600, 0x3614},
166 {0x4000, 0x40d4},
167 {0x4100, 0x41d4},
168 {0x4200, 0x42d4},
169 {0x4300, 0x43d4},
170 {0x4400, 0x44d4},
171 {0x4500, 0x45d4},
172 {0x4600, 0x46d4},
173 {0x4f00, 0x461c},
174 {0x7000, 0x7038},
175 {0x7120, 0x7124},
176 {0x7800, 0x7804},
177 {0x7810, 0x7810},
178 {0x7830, 0x7830},
179 {0x7a00, 0x7a7c},
180 {0x7b00, 0x7b04},
181 {0x7e00, 0x7e04},
182 {0x7ffc, 0x7ffc},
183 };
184
185 if (!mt7530_exist(eth))
186 return -EOPNOTSUPP;
187
188 if ((!eth->mac[0] || !of_phy_is_fixed_link(eth->mac[0]->of_node)) &&
189 (!eth->mac[1] || !of_phy_is_fixed_link(eth->mac[1]->of_node))) {
190 seq_puts(m, "no switch found\n");
191 return 0;
192 }
193
194 for (i = 0 ; i < ARRAY_SIZE(ranges) ; i++) {
195 for (offset = ranges[i].start;
196 offset <= ranges[i].end; offset += 4) {
197 data = mt7530_mdio_r32(eth, offset);
198 seq_printf(m, "mt7530 switch reg=0x%08x, data=0x%08x\n",
199 offset, data);
200 }
201 }
202
203 return 0;
204}
205
206static int mtketh_debug_mt7530sw_open(struct inode *inode, struct file *file)
207{
208 return single_open(file, mtketh_mt7530sw_debug_show, inode->i_private);
209}
210
211static const struct file_operations mtketh_debug_mt7530sw_fops = {
212 .open = mtketh_debug_mt7530sw_open,
213 .read = seq_read,
214 .llseek = seq_lseek,
215 .release = single_release,
216};
217
218static ssize_t mtketh_mt7530sw_debugfs_write(struct file *file,
219 const char __user *ptr,
220 size_t len, loff_t *off)
221{
222 struct mtk_eth *eth = file->private_data;
223 char buf[32], *token, *p = buf;
224 u32 reg, value, phy;
225 int ret;
226
227 if (!mt7530_exist(eth))
228 return -EOPNOTSUPP;
229
230 if (*off != 0)
231 return 0;
232
233 if (len > sizeof(buf) - 1)
234 len = sizeof(buf) - 1;
235
236 ret = strncpy_from_user(buf, ptr, len);
237 if (ret < 0)
238 return ret;
239 buf[len] = '\0';
240
241 token = strsep(&p, " ");
242 if (!token)
243 return -EINVAL;
244 if (kstrtoul(token, 16, (unsigned long *)&phy))
245 return -EINVAL;
246
247 token = strsep(&p, " ");
248 if (!token)
249 return -EINVAL;
250 if (kstrtoul(token, 16, (unsigned long *)&reg))
251 return -EINVAL;
252
253 token = strsep(&p, " ");
254 if (!token)
255 return -EINVAL;
256 if (kstrtoul(token, 16, (unsigned long *)&value))
257 return -EINVAL;
258
259 pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
260 0x1f, reg, value);
261 mt7530_mdio_w32(eth, reg, value);
262 pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
263 0x1f, reg, mt7530_mdio_r32(eth, reg));
264
265 return len;
266}
267
268static ssize_t mtketh_debugfs_write(struct file *file, const char __user *ptr,
269 size_t len, loff_t *off)
270{
271 struct mtk_eth *eth = file->private_data;
272 char buf[32], *token, *p = buf;
273 u32 reg, value, phy;
274 int ret;
275
276 if (*off != 0)
277 return 0;
278
279 if (len > sizeof(buf) - 1)
280 len = sizeof(buf) - 1;
281
282 ret = strncpy_from_user(buf, ptr, len);
283 if (ret < 0)
284 return ret;
285 buf[len] = '\0';
286
287 token = strsep(&p, " ");
288 if (!token)
289 return -EINVAL;
290 if (kstrtoul(token, 16, (unsigned long *)&phy))
291 return -EINVAL;
292
293 token = strsep(&p, " ");
294
295 if (!token)
296 return -EINVAL;
297 if (kstrtoul(token, 16, (unsigned long *)&reg))
298 return -EINVAL;
299
300 token = strsep(&p, " ");
301
302 if (!token)
303 return -EINVAL;
304 if (kstrtoul(token, 16, (unsigned long *)&value))
305 return -EINVAL;
306
307 pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
308 phy, reg, value);
309
310 _mtk_mdio_write(eth, phy, reg, value);
311
312 pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
313 phy, reg, _mtk_mdio_read(eth, phy, reg));
314
315 return len;
316}
317
318static ssize_t mtketh_debugfs_reset(struct file *file, const char __user *ptr,
319 size_t len, loff_t *off)
320{
321 struct mtk_eth *eth = file->private_data;
developerbd42c172022-07-18 17:51:30 +0800322 char buf[8] = "";
323 int count = len;
324 unsigned long dbg_level = 0;
325
326 len = min(count, sizeof(buf) - 1);
327 if (copy_from_user(buf, ptr, len))
328 return -EFAULT;
329
330 buf[len] = '\0';
331 if (kstrtoul(buf, 0, &dbg_level))
332 return -EINVAL;
developerfd40db22021-04-29 10:08:25 +0800333
developerbd42c172022-07-18 17:51:30 +0800334 switch(dbg_level)
335 {
336 case 0:
337 if (atomic_read(&reset_lock) == 0)
338 atomic_inc(&reset_lock);
339 break;
340 case 1:
341 if (atomic_read(&force) == 0)
342 atomic_inc(&force);
343 schedule_work(&eth->pending_work);
344 break;
345 case 2:
346 if (atomic_read(&reset_lock) == 1)
347 atomic_dec(&reset_lock);
348 break;
349 default:
350 pr_info("Usage: echo [level] > /sys/kernel/debug/mtketh/reset\n");
351 pr_info("Commands: [level] \n");
352 pr_info(" 0 disable reset \n");
353 pr_info(" 1 force reset \n");
354 pr_info(" 2 enable reset\n");
355 break;
356 }
357 return count;
developerfd40db22021-04-29 10:08:25 +0800358}
359
360static const struct file_operations fops_reg_w = {
361 .owner = THIS_MODULE,
362 .open = simple_open,
363 .write = mtketh_debugfs_write,
364 .llseek = noop_llseek,
365};
366
367static const struct file_operations fops_eth_reset = {
368 .owner = THIS_MODULE,
369 .open = simple_open,
370 .write = mtketh_debugfs_reset,
371 .llseek = noop_llseek,
372};
373
374static const struct file_operations fops_mt7530sw_reg_w = {
375 .owner = THIS_MODULE,
376 .open = simple_open,
377 .write = mtketh_mt7530sw_debugfs_write,
378 .llseek = noop_llseek,
379};
380
381void mtketh_debugfs_exit(struct mtk_eth *eth)
382{
383 debugfs_remove_recursive(eth_debug.root);
384}
385
386int mtketh_debugfs_init(struct mtk_eth *eth)
387{
388 int ret = 0;
389
390 eth_debug.root = debugfs_create_dir("mtketh", NULL);
391 if (!eth_debug.root) {
392 dev_notice(eth->dev, "%s:err at %d\n", __func__, __LINE__);
393 ret = -ENOMEM;
394 }
395
396 debugfs_create_file("phy_regs", S_IRUGO,
397 eth_debug.root, eth, &mtketh_debug_fops);
398 debugfs_create_file("phy_reg_w", S_IFREG | S_IWUSR,
399 eth_debug.root, eth, &fops_reg_w);
400 debugfs_create_file("reset", S_IFREG | S_IWUSR,
401 eth_debug.root, eth, &fops_eth_reset);
402 if (mt7530_exist(eth)) {
403 debugfs_create_file("mt7530sw_regs", S_IRUGO,
404 eth_debug.root, eth,
405 &mtketh_debug_mt7530sw_fops);
406 debugfs_create_file("mt7530sw_reg_w", S_IFREG | S_IWUSR,
407 eth_debug.root, eth,
408 &fops_mt7530sw_reg_w);
409 }
410 return ret;
411}
412
413void mii_mgr_read_combine(struct mtk_eth *eth, u32 phy_addr, u32 phy_register,
414 u32 *read_data)
415{
416 if (mt7530_exist(eth) && phy_addr == 31)
417 *read_data = mt7530_mdio_r32(eth, phy_register);
418
419 else
420 *read_data = _mtk_mdio_read(eth, phy_addr, phy_register);
421}
422
developer3957a912021-05-13 16:44:31 +0800423void mii_mgr_write_combine(struct mtk_eth *eth, u16 phy_addr, u16 phy_register,
developerfd40db22021-04-29 10:08:25 +0800424 u32 write_data)
425{
426 if (mt7530_exist(eth) && phy_addr == 31)
427 mt7530_mdio_w32(eth, phy_register, write_data);
428
429 else
430 _mtk_mdio_write(eth, phy_addr, phy_register, write_data);
431}
432
developer3957a912021-05-13 16:44:31 +0800433static void mii_mgr_read_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 *data)
developerfd40db22021-04-29 10:08:25 +0800434{
developer599cda42022-05-24 15:13:31 +0800435 *data = _mtk_mdio_read(eth, port, mdiobus_c45_addr(devad, reg));
developerfd40db22021-04-29 10:08:25 +0800436}
437
developer3957a912021-05-13 16:44:31 +0800438static void mii_mgr_write_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 data)
developerfd40db22021-04-29 10:08:25 +0800439{
developer599cda42022-05-24 15:13:31 +0800440 _mtk_mdio_write(eth, port, mdiobus_c45_addr(devad, reg), data);
developerfd40db22021-04-29 10:08:25 +0800441}
442
443int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
444{
445 struct mtk_mac *mac = netdev_priv(dev);
446 struct mtk_eth *eth = mac->hw;
447 struct mtk_mii_ioctl_data mii;
448 struct mtk_esw_reg reg;
developerba2d1eb2021-05-25 19:26:45 +0800449 u16 val;
developerfd40db22021-04-29 10:08:25 +0800450
451 switch (cmd) {
452 case MTKETH_MII_READ:
453 if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
454 goto err_copy;
455 mii_mgr_read_combine(eth, mii.phy_id, mii.reg_num,
456 &mii.val_out);
457 if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
458 goto err_copy;
459
460 return 0;
461 case MTKETH_MII_WRITE:
462 if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
463 goto err_copy;
464 mii_mgr_write_combine(eth, mii.phy_id, mii.reg_num,
465 mii.val_in);
developerfd40db22021-04-29 10:08:25 +0800466 return 0;
467 case MTKETH_MII_READ_CL45:
468 if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
469 goto err_copy;
developer3957a912021-05-13 16:44:31 +0800470 mii_mgr_read_cl45(eth,
471 mdio_phy_id_prtad(mii.phy_id),
472 mdio_phy_id_devad(mii.phy_id),
473 mii.reg_num,
developerba2d1eb2021-05-25 19:26:45 +0800474 &val);
475 mii.val_out = val;
developerfd40db22021-04-29 10:08:25 +0800476 if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
477 goto err_copy;
478
479 return 0;
480 case MTKETH_MII_WRITE_CL45:
481 if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
482 goto err_copy;
developerba2d1eb2021-05-25 19:26:45 +0800483 val = mii.val_in;
developer3957a912021-05-13 16:44:31 +0800484 mii_mgr_write_cl45(eth,
485 mdio_phy_id_prtad(mii.phy_id),
486 mdio_phy_id_devad(mii.phy_id),
487 mii.reg_num,
developerba2d1eb2021-05-25 19:26:45 +0800488 val);
developerfd40db22021-04-29 10:08:25 +0800489 return 0;
490 case MTKETH_ESW_REG_READ:
491 if (!mt7530_exist(eth))
492 return -EOPNOTSUPP;
493 if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg)))
494 goto err_copy;
495 if (reg.off > REG_ESW_MAX)
496 return -EINVAL;
497 reg.val = mtk_switch_r32(eth, reg.off);
498
499 if (copy_to_user(ifr->ifr_data, &reg, sizeof(reg)))
500 goto err_copy;
501
502 return 0;
503 case MTKETH_ESW_REG_WRITE:
504 if (!mt7530_exist(eth))
505 return -EOPNOTSUPP;
506 if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg)))
507 goto err_copy;
508 if (reg.off > REG_ESW_MAX)
509 return -EINVAL;
510 mtk_switch_w32(eth, reg.val, reg.off);
511
512 return 0;
513 default:
514 break;
515 }
516
517 return -EOPNOTSUPP;
518err_copy:
519 return -EFAULT;
520}
521
522int esw_cnt_read(struct seq_file *seq, void *v)
523{
524 unsigned int pkt_cnt = 0;
525 int i = 0;
526 struct mtk_eth *eth = g_eth;
527 unsigned int mib_base = MTK_GDM1_TX_GBCNT;
528
529 seq_puts(seq, "\n <<CPU>>\n");
530 seq_puts(seq, " |\n");
531 seq_puts(seq, "+-----------------------------------------------+\n");
532 seq_puts(seq, "| <<PSE>> |\n");
533 seq_puts(seq, "+-----------------------------------------------+\n");
534 seq_puts(seq, " |\n");
535 seq_puts(seq, "+-----------------------------------------------+\n");
536 seq_puts(seq, "| <<GDMA>> |\n");
537 seq_printf(seq, "| GDMA1_RX_GBCNT : %010u (Rx Good Bytes) |\n",
538 mtk_r32(eth, mib_base));
539 seq_printf(seq, "| GDMA1_RX_GPCNT : %010u (Rx Good Pkts) |\n",
540 mtk_r32(eth, mib_base+0x08));
541 seq_printf(seq, "| GDMA1_RX_OERCNT : %010u (overflow error) |\n",
542 mtk_r32(eth, mib_base+0x10));
543 seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error) |\n",
544 mtk_r32(eth, mib_base+0x14));
545 seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short) |\n",
546 mtk_r32(eth, mib_base+0x18));
547 seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long) |\n",
548 mtk_r32(eth, mib_base+0x1C));
549 seq_printf(seq, "| GDMA1_RX_CERCNT : %010u (checksum error) |\n",
550 mtk_r32(eth, mib_base+0x20));
551 seq_printf(seq, "| GDMA1_RX_FCCNT : %010u (flow control) |\n",
552 mtk_r32(eth, mib_base+0x24));
553 seq_printf(seq, "| GDMA1_TX_SKIPCNT: %010u (about count) |\n",
554 mtk_r32(eth, mib_base+0x28));
555 seq_printf(seq, "| GDMA1_TX_COLCNT : %010u (collision count) |\n",
556 mtk_r32(eth, mib_base+0x2C));
557 seq_printf(seq, "| GDMA1_TX_GBCNT : %010u (Tx Good Bytes) |\n",
558 mtk_r32(eth, mib_base+0x30));
559 seq_printf(seq, "| GDMA1_TX_GPCNT : %010u (Tx Good Pkts) |\n",
560 mtk_r32(eth, mib_base+0x38));
561 seq_puts(seq, "| |\n");
562 seq_printf(seq, "| GDMA2_RX_GBCNT : %010u (Rx Good Bytes) |\n",
563 mtk_r32(eth, mib_base+0x40));
564 seq_printf(seq, "| GDMA2_RX_GPCNT : %010u (Rx Good Pkts) |\n",
565 mtk_r32(eth, mib_base+0x48));
566 seq_printf(seq, "| GDMA2_RX_OERCNT : %010u (overflow error) |\n",
567 mtk_r32(eth, mib_base+0x50));
568 seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error) |\n",
569 mtk_r32(eth, mib_base+0x54));
570 seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short) |\n",
571 mtk_r32(eth, mib_base+0x58));
572 seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long) |\n",
573 mtk_r32(eth, mib_base+0x5C));
574 seq_printf(seq, "| GDMA2_RX_CERCNT : %010u (checksum error) |\n",
575 mtk_r32(eth, mib_base+0x60));
576 seq_printf(seq, "| GDMA2_RX_FCCNT : %010u (flow control) |\n",
577 mtk_r32(eth, mib_base+0x64));
578 seq_printf(seq, "| GDMA2_TX_SKIPCNT: %010u (skip) |\n",
579 mtk_r32(eth, mib_base+0x68));
580 seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision) |\n",
581 mtk_r32(eth, mib_base+0x6C));
582 seq_printf(seq, "| GDMA2_TX_GBCNT : %010u (Tx Good Bytes) |\n",
583 mtk_r32(eth, mib_base+0x70));
584 seq_printf(seq, "| GDMA2_TX_GPCNT : %010u (Tx Good Pkts) |\n",
585 mtk_r32(eth, mib_base+0x78));
586 seq_puts(seq, "+-----------------------------------------------+\n");
587
588 if (!mt7530_exist(eth))
589 return 0;
590
591#define DUMP_EACH_PORT(base) \
592 do { \
593 for (i = 0; i < 7; i++) { \
594 pkt_cnt = mt7530_mdio_r32(eth, (base) + (i * 0x100));\
595 seq_printf(seq, "%8u ", pkt_cnt); \
596 } \
597 seq_puts(seq, "\n"); \
598 } while (0)
599
600 seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n",
601 "Port0", "Port1", "Port2", "Port3", "Port4", "Port5",
602 "Port6");
603 seq_puts(seq, "Tx Drop Packet :");
604 DUMP_EACH_PORT(0x4000);
605 seq_puts(seq, "Tx CRC Error :");
606 DUMP_EACH_PORT(0x4004);
607 seq_puts(seq, "Tx Unicast Packet :");
608 DUMP_EACH_PORT(0x4008);
609 seq_puts(seq, "Tx Multicast Packet :");
610 DUMP_EACH_PORT(0x400C);
611 seq_puts(seq, "Tx Broadcast Packet :");
612 DUMP_EACH_PORT(0x4010);
613 seq_puts(seq, "Tx Collision Event :");
614 DUMP_EACH_PORT(0x4014);
615 seq_puts(seq, "Tx Pause Packet :");
616 DUMP_EACH_PORT(0x402C);
617 seq_puts(seq, "Rx Drop Packet :");
618 DUMP_EACH_PORT(0x4060);
619 seq_puts(seq, "Rx Filtering Packet :");
620 DUMP_EACH_PORT(0x4064);
621 seq_puts(seq, "Rx Unicast Packet :");
622 DUMP_EACH_PORT(0x4068);
623 seq_puts(seq, "Rx Multicast Packet :");
624 DUMP_EACH_PORT(0x406C);
625 seq_puts(seq, "Rx Broadcast Packet :");
626 DUMP_EACH_PORT(0x4070);
627 seq_puts(seq, "Rx Alignment Error :");
628 DUMP_EACH_PORT(0x4074);
629 seq_puts(seq, "Rx CRC Error :");
630 DUMP_EACH_PORT(0x4078);
631 seq_puts(seq, "Rx Undersize Error :");
632 DUMP_EACH_PORT(0x407C);
633 seq_puts(seq, "Rx Fragment Error :");
634 DUMP_EACH_PORT(0x4080);
635 seq_puts(seq, "Rx Oversize Error :");
636 DUMP_EACH_PORT(0x4084);
637 seq_puts(seq, "Rx Jabber Error :");
638 DUMP_EACH_PORT(0x4088);
639 seq_puts(seq, "Rx Pause Packet :");
640 DUMP_EACH_PORT(0x408C);
641 mt7530_mdio_w32(eth, 0x4fe0, 0xf0);
642 mt7530_mdio_w32(eth, 0x4fe0, 0x800000f0);
643
644 seq_puts(seq, "\n");
645
646 return 0;
647}
648
649static int switch_count_open(struct inode *inode, struct file *file)
650{
651 return single_open(file, esw_cnt_read, 0);
652}
653
654static const struct file_operations switch_count_fops = {
655 .owner = THIS_MODULE,
656 .open = switch_count_open,
657 .read = seq_read,
658 .llseek = seq_lseek,
659 .release = single_release
660};
661
/* /proc entries for the TX, HW-TX (free queue) and RX ring dumps. */
static struct proc_dir_entry *proc_tx_ring, *proc_hwtx_ring, *proc_rx_ring;
developerfd40db22021-04-29 10:08:25 +0800663
664int tx_ring_read(struct seq_file *seq, void *v)
665{
developere9356982022-07-04 09:03:20 +0800666 struct mtk_eth *eth = g_eth;
developerfd40db22021-04-29 10:08:25 +0800667 struct mtk_tx_ring *ring = &g_eth->tx_ring;
developere9356982022-07-04 09:03:20 +0800668 struct mtk_tx_dma_v2 *tx_ring;
developerfd40db22021-04-29 10:08:25 +0800669 int i = 0;
670
developerfd40db22021-04-29 10:08:25 +0800671 seq_printf(seq, "free count = %d\n", (int)atomic_read(&ring->free_count));
672 seq_printf(seq, "cpu next free: %d\n", (int)(ring->next_free - ring->dma));
673 seq_printf(seq, "cpu last free: %d\n", (int)(ring->last_free - ring->dma));
674 for (i = 0; i < MTK_DMA_SIZE; i++) {
developere9356982022-07-04 09:03:20 +0800675 dma_addr_t tmp = ring->phys + i * eth->soc->txrx.txd_size;
676
677 tx_ring = ring->dma + i * eth->soc->txrx.txd_size;
developerfd40db22021-04-29 10:08:25 +0800678
679 seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &tmp,
developere9356982022-07-04 09:03:20 +0800680 tx_ring->txd1, tx_ring->txd2,
681 tx_ring->txd3, tx_ring->txd4);
682
683 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
684 seq_printf(seq, " %08x %08x %08x %08x",
685 tx_ring->txd5, tx_ring->txd6,
686 tx_ring->txd7, tx_ring->txd8);
687 }
688
developerfd40db22021-04-29 10:08:25 +0800689 seq_printf(seq, "\n");
690 }
691
developerfd40db22021-04-29 10:08:25 +0800692 return 0;
693}
694
695static int tx_ring_open(struct inode *inode, struct file *file)
696{
697 return single_open(file, tx_ring_read, NULL);
698}
699
700static const struct file_operations tx_ring_fops = {
701 .owner = THIS_MODULE,
702 .open = tx_ring_open,
703 .read = seq_read,
704 .llseek = seq_lseek,
705 .release = single_release
706};
707
developer8051e042022-04-08 13:26:36 +0800708int hwtx_ring_read(struct seq_file *seq, void *v)
709{
710 struct mtk_eth *eth = g_eth;
developere9356982022-07-04 09:03:20 +0800711 struct mtk_tx_dma_v2 *hwtx_ring;
developer8051e042022-04-08 13:26:36 +0800712 int i = 0;
713
developer8051e042022-04-08 13:26:36 +0800714 for (i = 0; i < MTK_DMA_SIZE; i++) {
developere9356982022-07-04 09:03:20 +0800715 dma_addr_t addr = eth->phy_scratch_ring + i * eth->soc->txrx.txd_size;
716
717 hwtx_ring = eth->scratch_ring + i * eth->soc->txrx.txd_size;
developer8051e042022-04-08 13:26:36 +0800718
719 seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &addr,
developere9356982022-07-04 09:03:20 +0800720 hwtx_ring->txd1, hwtx_ring->txd2,
721 hwtx_ring->txd3, hwtx_ring->txd4);
722
723 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
724 seq_printf(seq, " %08x %08x %08x %08x",
725 hwtx_ring->txd5, hwtx_ring->txd6,
726 hwtx_ring->txd7, hwtx_ring->txd8);
727 }
728
developer8051e042022-04-08 13:26:36 +0800729 seq_printf(seq, "\n");
730 }
731
developer8051e042022-04-08 13:26:36 +0800732 return 0;
733}
734
735static int hwtx_ring_open(struct inode *inode, struct file *file)
736{
737 return single_open(file, hwtx_ring_read, NULL);
738}
739
740static const struct file_operations hwtx_ring_fops = {
741 .owner = THIS_MODULE,
742 .open = hwtx_ring_open,
743 .read = seq_read,
744 .llseek = seq_lseek,
745 .release = single_release
746};
747
developerfd40db22021-04-29 10:08:25 +0800748int rx_ring_read(struct seq_file *seq, void *v)
749{
developere9356982022-07-04 09:03:20 +0800750 struct mtk_eth *eth = g_eth;
developerfd40db22021-04-29 10:08:25 +0800751 struct mtk_rx_ring *ring = &g_eth->rx_ring[0];
developere9356982022-07-04 09:03:20 +0800752 struct mtk_rx_dma_v2 *rx_ring;
developerfd40db22021-04-29 10:08:25 +0800753 int i = 0;
754
developerfd40db22021-04-29 10:08:25 +0800755 seq_printf(seq, "next to read: %d\n",
756 NEXT_DESP_IDX(ring->calc_idx, MTK_DMA_SIZE));
757 for (i = 0; i < MTK_DMA_SIZE; i++) {
developere9356982022-07-04 09:03:20 +0800758 rx_ring = ring->dma + i * eth->soc->txrx.rxd_size;
759
developerfd40db22021-04-29 10:08:25 +0800760 seq_printf(seq, "%d: %08x %08x %08x %08x", i,
developere9356982022-07-04 09:03:20 +0800761 rx_ring->rxd1, rx_ring->rxd2,
762 rx_ring->rxd3, rx_ring->rxd4);
763
764 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
765 seq_printf(seq, " %08x %08x %08x %08x",
766 rx_ring->rxd5, rx_ring->rxd6,
767 rx_ring->rxd7, rx_ring->rxd8);
768 }
769
developerfd40db22021-04-29 10:08:25 +0800770 seq_printf(seq, "\n");
771 }
772
developerfd40db22021-04-29 10:08:25 +0800773 return 0;
774}
775
776static int rx_ring_open(struct inode *inode, struct file *file)
777{
778 return single_open(file, rx_ring_read, NULL);
779}
780
781static const struct file_operations rx_ring_fops = {
782 .owner = THIS_MODULE,
783 .open = rx_ring_open,
784 .read = seq_read,
785 .llseek = seq_lseek,
786 .release = single_release
787};
788
developer77f3fd42021-10-05 15:16:05 +0800789static inline u32 mtk_dbg_r32(u32 reg)
790{
791 void __iomem *virt_reg;
792 u32 val;
793
794 virt_reg = ioremap(reg, 32);
795 val = __raw_readl(virt_reg);
796 iounmap(virt_reg);
797
798 return val;
799}
800
developerfd40db22021-04-29 10:08:25 +0800801int dbg_regs_read(struct seq_file *seq, void *v)
802{
803 struct mtk_eth *eth = g_eth;
804
developer77f3fd42021-10-05 15:16:05 +0800805 seq_puts(seq, " <<DEBUG REG DUMP>>\n");
806
807 seq_printf(seq, "| FE_INT_STA : %08x |\n",
developer8051e042022-04-08 13:26:36 +0800808 mtk_r32(eth, MTK_FE_INT_STATUS));
developer77f3fd42021-10-05 15:16:05 +0800809 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
810 seq_printf(seq, "| FE_INT_STA2 : %08x |\n",
developer8051e042022-04-08 13:26:36 +0800811 mtk_r32(eth, MTK_FE_INT_STATUS2));
developer77f3fd42021-10-05 15:16:05 +0800812
developerfd40db22021-04-29 10:08:25 +0800813 seq_printf(seq, "| PSE_FQFC_CFG : %08x |\n",
814 mtk_r32(eth, MTK_PSE_FQFC_CFG));
815 seq_printf(seq, "| PSE_IQ_STA1 : %08x |\n",
816 mtk_r32(eth, MTK_PSE_IQ_STA(0)));
817 seq_printf(seq, "| PSE_IQ_STA2 : %08x |\n",
818 mtk_r32(eth, MTK_PSE_IQ_STA(1)));
819
developera2bdbd52021-05-31 19:10:17 +0800820 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfd40db22021-04-29 10:08:25 +0800821 seq_printf(seq, "| PSE_IQ_STA3 : %08x |\n",
822 mtk_r32(eth, MTK_PSE_IQ_STA(2)));
823 seq_printf(seq, "| PSE_IQ_STA4 : %08x |\n",
824 mtk_r32(eth, MTK_PSE_IQ_STA(3)));
developer77f3fd42021-10-05 15:16:05 +0800825 seq_printf(seq, "| PSE_IQ_STA5 : %08x |\n",
826 mtk_r32(eth, MTK_PSE_IQ_STA(4)));
developerfd40db22021-04-29 10:08:25 +0800827 }
828
829 seq_printf(seq, "| PSE_OQ_STA1 : %08x |\n",
830 mtk_r32(eth, MTK_PSE_OQ_STA(0)));
831 seq_printf(seq, "| PSE_OQ_STA2 : %08x |\n",
832 mtk_r32(eth, MTK_PSE_OQ_STA(1)));
833
developera2bdbd52021-05-31 19:10:17 +0800834 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfd40db22021-04-29 10:08:25 +0800835 seq_printf(seq, "| PSE_OQ_STA3 : %08x |\n",
836 mtk_r32(eth, MTK_PSE_OQ_STA(2)));
837 seq_printf(seq, "| PSE_OQ_STA4 : %08x |\n",
838 mtk_r32(eth, MTK_PSE_OQ_STA(3)));
developer77f3fd42021-10-05 15:16:05 +0800839 seq_printf(seq, "| PSE_OQ_STA5 : %08x |\n",
840 mtk_r32(eth, MTK_PSE_OQ_STA(4)));
developerfd40db22021-04-29 10:08:25 +0800841 }
842
developer77f3fd42021-10-05 15:16:05 +0800843 seq_printf(seq, "| PDMA_CRX_IDX : %08x |\n",
844 mtk_r32(eth, MTK_PRX_CRX_IDX0));
845 seq_printf(seq, "| PDMA_DRX_IDX : %08x |\n",
846 mtk_r32(eth, MTK_PRX_DRX_IDX0));
847 seq_printf(seq, "| QDMA_CTX_IDX : %08x |\n",
848 mtk_r32(eth, MTK_QTX_CTX_PTR));
849 seq_printf(seq, "| QDMA_DTX_IDX : %08x |\n",
850 mtk_r32(eth, MTK_QTX_DTX_PTR));
developerfd40db22021-04-29 10:08:25 +0800851 seq_printf(seq, "| QDMA_FQ_CNT : %08x |\n",
852 mtk_r32(eth, MTK_QDMA_FQ_CNT));
853 seq_printf(seq, "| FE_PSE_FREE : %08x |\n",
854 mtk_r32(eth, MTK_FE_PSE_FREE));
855 seq_printf(seq, "| FE_DROP_FQ : %08x |\n",
856 mtk_r32(eth, MTK_FE_DROP_FQ));
857 seq_printf(seq, "| FE_DROP_FC : %08x |\n",
858 mtk_r32(eth, MTK_FE_DROP_FC));
859 seq_printf(seq, "| FE_DROP_PPE : %08x |\n",
860 mtk_r32(eth, MTK_FE_DROP_PPE));
861 seq_printf(seq, "| GDM1_IG_CTRL : %08x |\n",
862 mtk_r32(eth, MTK_GDMA_FWD_CFG(0)));
863 seq_printf(seq, "| GDM2_IG_CTRL : %08x |\n",
864 mtk_r32(eth, MTK_GDMA_FWD_CFG(1)));
865 seq_printf(seq, "| MAC_P1_MCR : %08x |\n",
866 mtk_r32(eth, MTK_MAC_MCR(0)));
867 seq_printf(seq, "| MAC_P2_MCR : %08x |\n",
868 mtk_r32(eth, MTK_MAC_MCR(1)));
developer77f3fd42021-10-05 15:16:05 +0800869 seq_printf(seq, "| MAC_P1_FSM : %08x |\n",
870 mtk_r32(eth, MTK_MAC_FSM(0)));
871 seq_printf(seq, "| MAC_P2_FSM : %08x |\n",
872 mtk_r32(eth, MTK_MAC_FSM(1)));
developerfd40db22021-04-29 10:08:25 +0800873
developera2bdbd52021-05-31 19:10:17 +0800874 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfd40db22021-04-29 10:08:25 +0800875 seq_printf(seq, "| FE_CDM1_FSM : %08x |\n",
876 mtk_r32(eth, MTK_FE_CDM1_FSM));
877 seq_printf(seq, "| FE_CDM2_FSM : %08x |\n",
878 mtk_r32(eth, MTK_FE_CDM2_FSM));
developer77f3fd42021-10-05 15:16:05 +0800879 seq_printf(seq, "| FE_CDM3_FSM : %08x |\n",
880 mtk_r32(eth, MTK_FE_CDM3_FSM));
881 seq_printf(seq, "| FE_CDM4_FSM : %08x |\n",
882 mtk_r32(eth, MTK_FE_CDM4_FSM));
developerfd40db22021-04-29 10:08:25 +0800883 seq_printf(seq, "| FE_GDM1_FSM : %08x |\n",
884 mtk_r32(eth, MTK_FE_GDM1_FSM));
885 seq_printf(seq, "| FE_GDM2_FSM : %08x |\n",
886 mtk_r32(eth, MTK_FE_GDM2_FSM));
developer77f3fd42021-10-05 15:16:05 +0800887 seq_printf(seq, "| SGMII_EFUSE : %08x |\n",
888 mtk_dbg_r32(MTK_SGMII_EFUSE));
889 seq_printf(seq, "| SGMII0_RX_CNT : %08x |\n",
890 mtk_dbg_r32(MTK_SGMII_FALSE_CARRIER_CNT(0)));
891 seq_printf(seq, "| SGMII1_RX_CNT : %08x |\n",
892 mtk_dbg_r32(MTK_SGMII_FALSE_CARRIER_CNT(1)));
893 seq_printf(seq, "| WED_RTQM_GLO : %08x |\n",
894 mtk_dbg_r32(MTK_WED_RTQM_GLO_CFG));
developerfd40db22021-04-29 10:08:25 +0800895 }
896
developer8051e042022-04-08 13:26:36 +0800897 mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS);
developer77f3fd42021-10-05 15:16:05 +0800898 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
developer8051e042022-04-08 13:26:36 +0800899 mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS2);
developer77f3fd42021-10-05 15:16:05 +0800900
developerfd40db22021-04-29 10:08:25 +0800901 return 0;
902}
903
904static int dbg_regs_open(struct inode *inode, struct file *file)
905{
906 return single_open(file, dbg_regs_read, 0);
907}
908
909static const struct file_operations dbg_regs_fops = {
910 .owner = THIS_MODULE,
911 .open = dbg_regs_open,
912 .read = seq_read,
913 .llseek = seq_lseek,
developer77d03a72021-06-06 00:06:00 +0800914 .release = single_release
915};
916
developere9356982022-07-04 09:03:20 +0800917void hw_lro_stats_update(u32 ring_no, struct mtk_rx_dma_v2 *rxd)
developer77d03a72021-06-06 00:06:00 +0800918{
developere9356982022-07-04 09:03:20 +0800919 struct mtk_eth *eth = g_eth;
developer77d03a72021-06-06 00:06:00 +0800920 u32 idx, agg_cnt, agg_size;
921
developere9356982022-07-04 09:03:20 +0800922 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
923 idx = ring_no - 4;
924 agg_cnt = RX_DMA_GET_AGG_CNT_V2(rxd->rxd6);
925 } else {
926 idx = ring_no - 1;
927 agg_cnt = RX_DMA_GET_AGG_CNT(rxd->rxd2);
928 }
developer77d03a72021-06-06 00:06:00 +0800929
930 agg_size = RX_DMA_GET_PLEN0(rxd->rxd2);
931
932 hw_lro_agg_size_cnt[idx][agg_size / 5000]++;
933 hw_lro_agg_num_cnt[idx][agg_cnt]++;
934 hw_lro_tot_flush_cnt[idx]++;
935 hw_lro_tot_agg_cnt[idx] += agg_cnt;
936}
937
developere9356982022-07-04 09:03:20 +0800938void hw_lro_flush_stats_update(u32 ring_no, struct mtk_rx_dma_v2 *rxd)
developer77d03a72021-06-06 00:06:00 +0800939{
developere9356982022-07-04 09:03:20 +0800940 struct mtk_eth *eth = g_eth;
developer77d03a72021-06-06 00:06:00 +0800941 u32 idx, flush_reason;
942
developere9356982022-07-04 09:03:20 +0800943 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
944 idx = ring_no - 4;
945 flush_reason = RX_DMA_GET_FLUSH_RSN_V2(rxd->rxd6);
946 } else {
947 idx = ring_no - 1;
948 flush_reason = RX_DMA_GET_REV(rxd->rxd2);
949 }
developer77d03a72021-06-06 00:06:00 +0800950
951 if ((flush_reason & 0x7) == MTK_HW_LRO_AGG_FLUSH)
952 hw_lro_agg_flush_cnt[idx]++;
953 else if ((flush_reason & 0x7) == MTK_HW_LRO_AGE_FLUSH)
954 hw_lro_age_flush_cnt[idx]++;
955 else if ((flush_reason & 0x7) == MTK_HW_LRO_NOT_IN_SEQ_FLUSH)
956 hw_lro_seq_flush_cnt[idx]++;
957 else if ((flush_reason & 0x7) == MTK_HW_LRO_TIMESTAMP_FLUSH)
958 hw_lro_timestamp_flush_cnt[idx]++;
959 else if ((flush_reason & 0x7) == MTK_HW_LRO_NON_RULE_FLUSH)
960 hw_lro_norule_flush_cnt[idx]++;
961}
962
/* procfs write handler: ANY write to the hw_lro_stats entry clears every
 * statistic table; the written payload itself is ignored.
 *
 * Returns @count so the writer sees the whole buffer as consumed.
 */
ssize_t hw_lro_stats_write(struct file *file, const char __user *buffer,
			   size_t count, loff_t *data)
{
	memset(hw_lro_agg_num_cnt, 0, sizeof(hw_lro_agg_num_cnt));
	memset(hw_lro_agg_size_cnt, 0, sizeof(hw_lro_agg_size_cnt));
	memset(hw_lro_tot_agg_cnt, 0, sizeof(hw_lro_tot_agg_cnt));
	memset(hw_lro_tot_flush_cnt, 0, sizeof(hw_lro_tot_flush_cnt));
	memset(hw_lro_agg_flush_cnt, 0, sizeof(hw_lro_agg_flush_cnt));
	memset(hw_lro_age_flush_cnt, 0, sizeof(hw_lro_age_flush_cnt));
	memset(hw_lro_seq_flush_cnt, 0, sizeof(hw_lro_seq_flush_cnt));
	memset(hw_lro_timestamp_flush_cnt, 0,
	       sizeof(hw_lro_timestamp_flush_cnt));
	memset(hw_lro_norule_flush_cnt, 0, sizeof(hw_lro_norule_flush_cnt));

	pr_info("clear hw lro cnt table\n");

	return count;
}
981
/* seq_file show handler: dump HW LRO statistics for NETSYS v1 SoCs, which
 * use LRO rings 1-3.  Table index [0..2] maps to RING1..RING3 and every row
 * prints a cross-ring total in its last column.  Counters are filled by
 * hw_lro_stats_update()/hw_lro_flush_stats_update().
 */
int hw_lro_stats_read_v1(struct seq_file *seq, void *v)
{
	int i;

	seq_puts(seq, "HW LRO statistic dump:\n");

	/* Agg number count: one row per possible aggregated-packet count. */
	seq_puts(seq, "Cnt: RING1 | RING2 | RING3 | Total\n");
	for (i = 0; i <= MTK_HW_LRO_MAX_AGG_CNT; i++) {
		seq_printf(seq, " %d : %d %d %d %d\n",
			   i, hw_lro_agg_num_cnt[0][i],
			   hw_lro_agg_num_cnt[1][i], hw_lro_agg_num_cnt[2][i],
			   hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] +
			   hw_lro_agg_num_cnt[2][i]);
	}

	/* Total agg count */
	seq_puts(seq, "Total agg: RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, " %d %d %d %d\n",
		   hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1],
		   hw_lro_tot_agg_cnt[2],
		   hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		   hw_lro_tot_agg_cnt[2]);

	/* Total flush count */
	seq_puts(seq, "Total flush: RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, " %d %d %d %d\n",
		   hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1],
		   hw_lro_tot_flush_cnt[2],
		   hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		   hw_lro_tot_flush_cnt[2]);

	/* Avg agg count: integer division, guarded against divide-by-zero
	 * when a ring has never flushed.
	 */
	seq_puts(seq, "Avg agg: RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, " %d %d %d %d\n",
		   (hw_lro_tot_flush_cnt[0]) ?
		   hw_lro_tot_agg_cnt[0] / hw_lro_tot_flush_cnt[0] : 0,
		   (hw_lro_tot_flush_cnt[1]) ?
		   hw_lro_tot_agg_cnt[1] / hw_lro_tot_flush_cnt[1] : 0,
		   (hw_lro_tot_flush_cnt[2]) ?
		   hw_lro_tot_agg_cnt[2] / hw_lro_tot_flush_cnt[2] : 0,
		   (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		    hw_lro_tot_flush_cnt[2]) ?
		   ((hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		     hw_lro_tot_agg_cnt[2]) / (hw_lro_tot_flush_cnt[0] +
		    hw_lro_tot_flush_cnt[1] + hw_lro_tot_flush_cnt[2])) : 0);

	/* Statistics of aggregation size counts: 5000-byte buckets, matching
	 * the "agg_size / 5000" binning in hw_lro_stats_update().
	 */
	seq_puts(seq, "HW LRO flush pkt len:\n");
	seq_puts(seq, " Length | RING1 | RING2 | RING3 | Total\n");
	for (i = 0; i < 15; i++) {
		seq_printf(seq, "%d~%d: %d %d %d %d\n", i * 5000,
			   (i + 1) * 5000, hw_lro_agg_size_cnt[0][i],
			   hw_lro_agg_size_cnt[1][i], hw_lro_agg_size_cnt[2][i],
			   hw_lro_agg_size_cnt[0][i] +
			   hw_lro_agg_size_cnt[1][i] +
			   hw_lro_agg_size_cnt[2][i]);
	}

	/* Per-reason flush counters, one row per hardware flush cause. */
	seq_puts(seq, "Flush reason: RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, "AGG timeout: %d %d %d %d\n",
		   hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1],
		   hw_lro_agg_flush_cnt[2],
		   (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] +
		    hw_lro_agg_flush_cnt[2]));

	seq_printf(seq, "AGE timeout: %d %d %d %d\n",
		   hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1],
		   hw_lro_age_flush_cnt[2],
		   (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] +
		    hw_lro_age_flush_cnt[2]));

	seq_printf(seq, "Not in-sequence: %d %d %d %d\n",
		   hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1],
		   hw_lro_seq_flush_cnt[2],
		   (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] +
		    hw_lro_seq_flush_cnt[2]));

	seq_printf(seq, "Timestamp: %d %d %d %d\n",
		   hw_lro_timestamp_flush_cnt[0],
		   hw_lro_timestamp_flush_cnt[1],
		   hw_lro_timestamp_flush_cnt[2],
		   (hw_lro_timestamp_flush_cnt[0] +
		    hw_lro_timestamp_flush_cnt[1] +
		    hw_lro_timestamp_flush_cnt[2]));

	seq_printf(seq, "No LRO rule: %d %d %d %d\n",
		   hw_lro_norule_flush_cnt[0],
		   hw_lro_norule_flush_cnt[1],
		   hw_lro_norule_flush_cnt[2],
		   (hw_lro_norule_flush_cnt[0] +
		    hw_lro_norule_flush_cnt[1] +
		    hw_lro_norule_flush_cnt[2]));

	return 0;
}
1078
/* seq_file show handler: dump HW LRO statistics for NETSYS v2 SoCs, which
 * use LRO rings 4-7.  Table index [0..3] maps to RING4..RING7 (the same
 * arrays the v1 dump reads as RING1..RING3); every row ends with a
 * cross-ring total.
 */
int hw_lro_stats_read_v2(struct seq_file *seq, void *v)
{
	int i;

	seq_puts(seq, "HW LRO statistic dump:\n");

	/* Agg number count: one row per possible aggregated-packet count. */
	seq_puts(seq, "Cnt: RING4 | RING5 | RING6 | RING7 Total\n");
	for (i = 0; i <= MTK_HW_LRO_MAX_AGG_CNT; i++) {
		seq_printf(seq,
			   " %d : %d %d %d %d %d\n",
			   i, hw_lro_agg_num_cnt[0][i], hw_lro_agg_num_cnt[1][i],
			   hw_lro_agg_num_cnt[2][i], hw_lro_agg_num_cnt[3][i],
			   hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] +
			   hw_lro_agg_num_cnt[2][i] + hw_lro_agg_num_cnt[3][i]);
	}

	/* Total agg count */
	seq_puts(seq, "Total agg: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, " %d %d %d %d %d\n",
		   hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1],
		   hw_lro_tot_agg_cnt[2], hw_lro_tot_agg_cnt[3],
		   hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		   hw_lro_tot_agg_cnt[2] + hw_lro_tot_agg_cnt[3]);

	/* Total flush count */
	seq_puts(seq, "Total flush: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, " %d %d %d %d %d\n",
		   hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1],
		   hw_lro_tot_flush_cnt[2], hw_lro_tot_flush_cnt[3],
		   hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		   hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3]);

	/* Avg agg count: integer division, guarded against divide-by-zero
	 * when a ring has never flushed.
	 */
	seq_puts(seq, "Avg agg: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, " %d %d %d %d %d\n",
		   (hw_lro_tot_flush_cnt[0]) ?
		   hw_lro_tot_agg_cnt[0] / hw_lro_tot_flush_cnt[0] : 0,
		   (hw_lro_tot_flush_cnt[1]) ?
		   hw_lro_tot_agg_cnt[1] / hw_lro_tot_flush_cnt[1] : 0,
		   (hw_lro_tot_flush_cnt[2]) ?
		   hw_lro_tot_agg_cnt[2] / hw_lro_tot_flush_cnt[2] : 0,
		   (hw_lro_tot_flush_cnt[3]) ?
		   hw_lro_tot_agg_cnt[3] / hw_lro_tot_flush_cnt[3] : 0,
		   (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		    hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3]) ?
		   ((hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		     hw_lro_tot_agg_cnt[2] + hw_lro_tot_agg_cnt[3]) /
		    (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		     hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3])) : 0);

	/* Statistics of aggregation size counts: 5000-byte buckets. */
	seq_puts(seq, "HW LRO flush pkt len:\n");
	seq_puts(seq, " Length | RING4 | RING5 | RING6 | RING7 Total\n");
	for (i = 0; i < 15; i++) {
		seq_printf(seq, "%d~%d: %d %d %d %d %d\n",
			   i * 5000, (i + 1) * 5000,
			   hw_lro_agg_size_cnt[0][i], hw_lro_agg_size_cnt[1][i],
			   hw_lro_agg_size_cnt[2][i], hw_lro_agg_size_cnt[3][i],
			   hw_lro_agg_size_cnt[0][i] +
			   hw_lro_agg_size_cnt[1][i] +
			   hw_lro_agg_size_cnt[2][i] +
			   hw_lro_agg_size_cnt[3][i]);
	}

	/* Per-reason flush counters, one row per hardware flush cause. */
	seq_puts(seq, "Flush reason: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, "AGG timeout: %d %d %d %d %d\n",
		   hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1],
		   hw_lro_agg_flush_cnt[2], hw_lro_agg_flush_cnt[3],
		   (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] +
		    hw_lro_agg_flush_cnt[2] + hw_lro_agg_flush_cnt[3]));

	seq_printf(seq, "AGE timeout: %d %d %d %d %d\n",
		   hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1],
		   hw_lro_age_flush_cnt[2], hw_lro_age_flush_cnt[3],
		   (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] +
		    hw_lro_age_flush_cnt[2] + hw_lro_age_flush_cnt[3]));

	seq_printf(seq, "Not in-sequence: %d %d %d %d %d\n",
		   hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1],
		   hw_lro_seq_flush_cnt[2], hw_lro_seq_flush_cnt[3],
		   (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] +
		    hw_lro_seq_flush_cnt[2] + hw_lro_seq_flush_cnt[3]));

	seq_printf(seq, "Timestamp: %d %d %d %d %d\n",
		   hw_lro_timestamp_flush_cnt[0],
		   hw_lro_timestamp_flush_cnt[1],
		   hw_lro_timestamp_flush_cnt[2],
		   hw_lro_timestamp_flush_cnt[3],
		   (hw_lro_timestamp_flush_cnt[0] +
		    hw_lro_timestamp_flush_cnt[1] +
		    hw_lro_timestamp_flush_cnt[2] +
		    hw_lro_timestamp_flush_cnt[3]));

	seq_printf(seq, "No LRO rule: %d %d %d %d %d\n",
		   hw_lro_norule_flush_cnt[0],
		   hw_lro_norule_flush_cnt[1],
		   hw_lro_norule_flush_cnt[2],
		   hw_lro_norule_flush_cnt[3],
		   (hw_lro_norule_flush_cnt[0] +
		    hw_lro_norule_flush_cnt[1] +
		    hw_lro_norule_flush_cnt[2] +
		    hw_lro_norule_flush_cnt[3]));

	return 0;
}
1185
1186int hw_lro_stats_read_wrapper(struct seq_file *seq, void *v)
1187{
1188 struct mtk_eth *eth = g_eth;
1189
1190 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1191 hw_lro_stats_read_v2(seq, v);
1192 else
1193 hw_lro_stats_read_v1(seq, v);
1194
1195 return 0;
1196}
1197
/* Open hook for the hw_lro_stats procfs entry: no private data, show via
 * hw_lro_stats_read_wrapper().
 */
static int hw_lro_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, hw_lro_stats_read_wrapper, NULL);
}
1202
/* File operations for hw_lro_stats: reads dump the counters, writes clear
 * them (see hw_lro_stats_write()).
 */
static const struct file_operations hw_lro_stats_fops = {
	.owner = THIS_MODULE,
	.open = hw_lro_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = hw_lro_stats_write,
	.release = single_release
};
1211
developer77d03a72021-06-06 00:06:00 +08001212int hwlro_agg_cnt_ctrl(int cnt)
1213{
1214 int i;
1215
1216 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1217 SET_PDMA_RXRING_MAX_AGG_CNT(g_eth, i, cnt);
1218
1219 return 0;
1220}
1221
1222int hwlro_agg_time_ctrl(int time)
1223{
1224 int i;
1225
1226 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1227 SET_PDMA_RXRING_AGG_TIME(g_eth, i, time);
1228
1229 return 0;
1230}
1231
1232int hwlro_age_time_ctrl(int time)
1233{
1234 int i;
1235
1236 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1237 SET_PDMA_RXRING_AGE_TIME(g_eth, i, time);
1238
1239 return 0;
1240}
1241
/* Set the global LRO bandwidth threshold (applies to the PDMA block as a
 * whole, not per ring).  Always returns 0.
 */
int hwlro_threshold_ctrl(int bandwidth)
{
	SET_PDMA_LRO_BW_THRESHOLD(g_eth, bandwidth);

	return 0;
}
1248
1249int hwlro_ring_enable_ctrl(int enable)
1250{
1251 int i;
1252
1253 pr_info("[%s] %s HW LRO rings\n", __func__, (enable) ? "Enable" : "Disable");
1254
1255 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1256 SET_PDMA_RXRING_VALID(g_eth, i, enable);
1257
1258 return 0;
1259}
1260
/* Toggle software-side HW LRO statistic collection by setting the global
 * mtk_hwlro_stats_ebl flag.  Always returns 0.
 */
int hwlro_stats_enable_ctrl(int enable)
{
	pr_info("[%s] %s HW LRO statistics\n", __func__, (enable) ? "Enable" : "Disable");
	mtk_hwlro_stats_ebl = enable;

	return 0;
}
1268
/* Dispatch table for hw_lro_auto_tlb_write(): the first token of the
 * written command selects the entry, the second token is its argument.
 */
static const mtk_lro_dbg_func lro_dbg_func[] = {
	[0] = hwlro_agg_cnt_ctrl,	/* max aggregated packet count */
	[1] = hwlro_agg_time_ctrl,	/* aggregation timeout */
	[2] = hwlro_age_time_ctrl,	/* session age timeout */
	[3] = hwlro_threshold_ctrl,	/* bandwidth threshold */
	[4] = hwlro_ring_enable_ctrl,	/* ring valid/invalid */
	[5] = hwlro_stats_enable_ctrl,	/* stats collection on/off */
};
1277
1278ssize_t hw_lro_auto_tlb_write(struct file *file, const char __user *buffer,
1279 size_t count, loff_t *data)
1280{
1281 char buf[32];
1282 char *p_buf;
1283 char *p_token = NULL;
1284 char *p_delimiter = " \t";
1285 long x = 0, y = 0;
developer4c32b7a2021-11-13 16:46:43 +08001286 u32 len = count;
developer77d03a72021-06-06 00:06:00 +08001287 int ret;
1288
1289 if (len >= sizeof(buf)) {
1290 pr_info("Input handling fail!\n");
developer77d03a72021-06-06 00:06:00 +08001291 return -1;
1292 }
1293
1294 if (copy_from_user(buf, buffer, len))
1295 return -EFAULT;
1296
1297 buf[len] = '\0';
1298
1299 p_buf = buf;
1300 p_token = strsep(&p_buf, p_delimiter);
1301 if (!p_token)
1302 x = 0;
1303 else
1304 ret = kstrtol(p_token, 10, &x);
1305
1306 p_token = strsep(&p_buf, "\t\n ");
1307 if (p_token)
1308 ret = kstrtol(p_token, 10, &y);
1309
1310 if (lro_dbg_func[x] && (ARRAY_SIZE(lro_dbg_func) > x))
1311 (*lro_dbg_func[x]) (y);
1312
1313 return count;
1314}
1315
/* Dump one entry of the v1 HW LRO auto-learn table to the seq_file.
 *
 * The entry is fetched through the CF8/CFC indirect-access register pair:
 * writing the entry address to MTK_FE_ALT_CF8 makes consecutive reads of
 * MTK_FE_ALT_SEQ_CFC return the nine 32-bit words of the entry, which are
 * then overlaid onto struct mtk_lro_alt_v1 for field access.
 *
 * NOTE(review): the "index > 4 -> index - 1" adjustment and the 9-word
 * stride come from the hardware table layout — confirm against the SoC
 * programming guide before changing.
 */
void hw_lro_auto_tlb_dump_v1(struct seq_file *seq, u32 index)
{
	int i;
	struct mtk_lro_alt_v1 alt;
	__be32 addr;
	u32 tlb_info[9];
	u32 dw_len, cnt, priority;
	u32 entry;

	if (index > 4)
		index = index - 1;
	entry = (index * 9) + 1;

	/* read valid entries of the auto-learn table */
	mtk_w32(g_eth, entry, MTK_FE_ALT_CF8);

	for (i = 0; i < 9; i++)
		tlb_info[i] = mtk_r32(g_eth, MTK_FE_ALT_SEQ_CFC);

	memcpy(&alt, tlb_info, sizeof(struct mtk_lro_alt_v1));

	dw_len = alt.alt_info7.dw_len;
	cnt = alt.alt_info6.cnt;

	/* Priority metric depends on the configured ALT counting mode. */
	if (mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW0) & MTK_LRO_ALT_PKT_CNT_MODE)
		priority = cnt;		/* packet count */
	else
		priority = dw_len;	/* byte count */

	/* dump valid entries of the auto-learn table */
	if (index >= 4)
		seq_printf(seq, "\n===== TABLE Entry: %d (Act) =====\n", index);
	else
		seq_printf(seq, "\n===== TABLE Entry: %d (LRU) =====\n", index);

	if (alt.alt_info8.ipv4) {
		addr = htonl(alt.alt_info1.sip0);
		seq_printf(seq, "SIP = %pI4 (IPv4)\n", &addr);
	} else {
		seq_printf(seq, "SIP = %08X:%08X:%08X:%08X (IPv6)\n",
			   alt.alt_info4.sip3, alt.alt_info3.sip2,
			   alt.alt_info2.sip1, alt.alt_info1.sip0);
	}

	seq_printf(seq, "DIP_ID = %d\n", alt.alt_info8.dip_id);
	seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
		   alt.alt_info0.stp, alt.alt_info0.dtp);
	seq_printf(seq, "VLAN_VID_VLD = %d\n", alt.alt_info6.vlan_vid_vld);
	/* VLAN tags are packed across the vlan_vid0/vlan_vid1 bitfields;
	 * the shifts/masks below unpack four 12-bit VIDs.
	 */
	seq_printf(seq, "VLAN1 = %d | VLAN2 = %d | VLAN3 = %d | VLAN4 =%d\n",
		   (alt.alt_info5.vlan_vid0 & 0xfff),
		   ((alt.alt_info5.vlan_vid0 >> 12) & 0xfff),
		   ((alt.alt_info6.vlan_vid1 << 8) |
		    ((alt.alt_info5.vlan_vid0 >> 24) & 0xfff)),
		   ((alt.alt_info6.vlan_vid1 >> 4) & 0xfff));
	seq_printf(seq, "TPUT = %d | FREQ = %d\n", dw_len, cnt);
	seq_printf(seq, "PRIORITY = %d\n", priority);
}
1373
/* Dump one entry of the v2 (NETSYS v2) HW LRO auto-learn table.
 *
 * The entry index is written to MTK_LRO_ALT_DBG and the entry words are then
 * streamed out of MTK_LRO_ALT_DBG_DATA, overlaid onto struct mtk_lro_alt_v2.
 * Only entries with the valid bit set are printed.
 *
 * NOTE(review): only 11 words are read but tlb_info has 12 slots and the
 * memcpy covers sizeof(struct mtk_lro_alt_v2) — if that struct is 12 words,
 * the last word is copied uninitialized.  Confirm the entry width against
 * the register spec.
 * NOTE(review): "score" is computed from the counting mode but never used
 * afterwards — presumably leftover from an earlier revision.
 */
void hw_lro_auto_tlb_dump_v2(struct seq_file *seq, u32 index)
{
	int i;
	struct mtk_lro_alt_v2 alt;
	u32 score = 0, ipv4 = 0;
	u32 ipv6[4] = { 0 };
	u32 tlb_info[12];

	/* read valid entries of the auto-learn table */
	mtk_w32(g_eth, index << MTK_LRO_ALT_INDEX_OFFSET, MTK_LRO_ALT_DBG);

	for (i = 0; i < 11; i++)
		tlb_info[i] = mtk_r32(g_eth, MTK_LRO_ALT_DBG_DATA);

	memcpy(&alt, tlb_info, sizeof(struct mtk_lro_alt_v2));

	if (mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW0) & MTK_LRO_ALT_PKT_CNT_MODE)
		score = 1;	/* packet count */
	else
		score = 0;	/* byte count */

	/* dump valid entries of the auto-learn table */
	if (alt.alt_info0.valid) {
		if (index < 5)
			seq_printf(seq,
				   "\n===== TABLE Entry: %d (onging) =====\n",
				   index);
		else
			seq_printf(seq,
				   "\n===== TABLE Entry: %d (candidate) =====\n",
				   index);

		/* Addresses are split across _h/_l bitfields; the shift
		 * constants below reassemble them.  NOTE(review): verify the
		 * 23/9-bit split against the struct's bitfield widths.
		 */
		if (alt.alt_info1.v4_valid) {
			ipv4 = (alt.alt_info4.sip0_h << 23) |
				alt.alt_info5.sip0_l;
			seq_printf(seq, "SIP = 0x%x: (IPv4)\n", ipv4);

			ipv4 = (alt.alt_info8.dip0_h << 23) |
				alt.alt_info9.dip0_l;
			seq_printf(seq, "DIP = 0x%x: (IPv4)\n", ipv4);
		} else if (alt.alt_info1.v6_valid) {
			ipv6[3] = (alt.alt_info1.sip3_h << 23) |
				  (alt.alt_info2.sip3_l << 9);
			ipv6[2] = (alt.alt_info2.sip2_h << 23) |
				  (alt.alt_info3.sip2_l << 9);
			ipv6[1] = (alt.alt_info3.sip1_h << 23) |
				  (alt.alt_info4.sip1_l << 9);
			ipv6[0] = (alt.alt_info4.sip0_h << 23) |
				  (alt.alt_info5.sip0_l << 9);
			seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
				   ipv6[3], ipv6[2], ipv6[1], ipv6[0]);

			ipv6[3] = (alt.alt_info5.dip3_h << 23) |
				  (alt.alt_info6.dip3_l << 9);
			ipv6[2] = (alt.alt_info6.dip2_h << 23) |
				  (alt.alt_info7.dip2_l << 9);
			ipv6[1] = (alt.alt_info7.dip1_h << 23) |
				  (alt.alt_info8.dip1_l << 9);
			ipv6[0] = (alt.alt_info8.dip0_h << 23) |
				  (alt.alt_info9.dip0_l << 9);
			seq_printf(seq, "DIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
				   ipv6[3], ipv6[2], ipv6[1], ipv6[0]);
		}

		seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
			   (alt.alt_info9.sp_h << 7) | (alt.alt_info10.sp_l),
			   alt.alt_info10.dp);
	}
}
1443
/* seq_file show handler for hw_lro_auto_tlb: print a usage banner, dump the
 * auto-learn table (v1 or v2 layout depending on the SoC), then decode the
 * per-ring agg/age/cnt settings from the LRO control registers.
 */
int hw_lro_auto_tlb_read(struct seq_file *seq, void *v)
{
	int i;
	u32 reg_val;
	u32 reg_op1, reg_op2, reg_op3, reg_op4;
	u32 agg_cnt, agg_time, age_time;

	seq_puts(seq, "Usage of /proc/mtketh/hw_lro_auto_tlb:\n");
	seq_puts(seq, "echo [function] [setting] > /proc/mtketh/hw_lro_auto_tlb\n");
	seq_puts(seq, "Functions:\n");
	seq_puts(seq, "[0] = hwlro_agg_cnt_ctrl\n");
	seq_puts(seq, "[1] = hwlro_agg_time_ctrl\n");
	seq_puts(seq, "[2] = hwlro_age_time_ctrl\n");
	seq_puts(seq, "[3] = hwlro_threshold_ctrl\n");
	seq_puts(seq, "[4] = hwlro_ring_enable_ctrl\n");
	seq_puts(seq, "[5] = hwlro_stats_enable_ctrl\n\n");

	if (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V2)) {
		/* v2: entries 1..8, each dumped if its valid bit is set. */
		for (i = 1; i <= 8; i++)
			hw_lro_auto_tlb_dump_v2(seq, i);
	} else {
		/* v1: a read at entry address 0 returns a bitmap of valid
		 * entries; dump only the set bits.
		 */
		mtk_w32(g_eth, 0, MTK_FE_ALT_CF8);
		reg_val = mtk_r32(g_eth, MTK_FE_ALT_SEQ_CFC);

		seq_printf(seq,
			   "HW LRO Auto-learn Table: (MTK_FE_ALT_SEQ_CFC=0x%x)\n",
			   reg_val);

		for (i = 7; i >= 0; i--) {
			if (reg_val & (1 << i))
				hw_lro_auto_tlb_dump_v1(seq, i);
		}
	}

	/* Read the agg_time/age_time/agg_cnt of LRO rings */
	seq_puts(seq, "\nHW LRO Ring Settings\n");

	for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
		reg_op1 = mtk_r32(g_eth, MTK_LRO_CTRL_DW1_CFG(i));
		reg_op2 = mtk_r32(g_eth, MTK_LRO_CTRL_DW2_CFG(i));
		reg_op3 = mtk_r32(g_eth, MTK_LRO_CTRL_DW3_CFG(i));
		reg_op4 = mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW2);

		/* Fields straddle register boundaries: 8-bit agg_cnt and
		 * 16-bit age_time are split across DW1/DW2/DW3.
		 */
		agg_cnt =
		    ((reg_op3 & 0x3) << 6) |
		    ((reg_op2 >> MTK_LRO_RING_AGG_CNT_L_OFFSET) & 0x3f);
		agg_time = (reg_op2 >> MTK_LRO_RING_AGG_TIME_OFFSET) & 0xffff;
		age_time =
		    ((reg_op2 & 0x3f) << 10) |
		    ((reg_op1 >> MTK_LRO_RING_AGE_TIME_L_OFFSET) & 0x3ff);
		/* v2 SoCs label the rings 4..7, v1 SoCs 1..3. */
		seq_printf(seq,
			   "Ring[%d]: MAX_AGG_CNT=%d, AGG_TIME=%d, AGE_TIME=%d, Threshold=%d\n",
			   (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V2))? i+3 : i,
			   agg_cnt, agg_time, age_time, reg_op4);
	}

	seq_puts(seq, "\n");

	return 0;
}
1505
/* Open hook for the hw_lro_auto_tlb procfs entry: no private data, show via
 * hw_lro_auto_tlb_read().
 */
static int hw_lro_auto_tlb_open(struct inode *inode, struct file *file)
{
	return single_open(file, hw_lro_auto_tlb_read, NULL);
}
1510
/* File operations for hw_lro_auto_tlb: reads dump the auto-learn table and
 * ring settings, writes dispatch control commands (hw_lro_auto_tlb_write()).
 */
static const struct file_operations hw_lro_auto_tlb_fops = {
	.owner = THIS_MODULE,
	.open = hw_lro_auto_tlb_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = hw_lro_auto_tlb_write,
	.release = single_release
};
developerfd40db22021-04-29 10:08:25 +08001519
/* seq_file show handler: print every reset-event counter kept in
 * eth->reset_event.  The struct is copied by value first, so the dump is a
 * consistent snapshot even if counters keep incrementing.
 */
int reset_event_read(struct seq_file *seq, void *v)
{
	struct mtk_eth *eth = g_eth;
	struct mtk_reset_event reset_event = eth->reset_event;

	seq_printf(seq, "[Event]		[Count]\n");
	seq_printf(seq, " FQ Empty:	%d\n",
		   reset_event.count[MTK_EVENT_FQ_EMPTY]);
	seq_printf(seq, " TSO Fail:	%d\n",
		   reset_event.count[MTK_EVENT_TSO_FAIL]);
	seq_printf(seq, " TSO Illegal:	%d\n",
		   reset_event.count[MTK_EVENT_TSO_ILLEGAL]);
	seq_printf(seq, " TSO Align:	%d\n",
		   reset_event.count[MTK_EVENT_TSO_ALIGN]);
	seq_printf(seq, " RFIFO OV:	%d\n",
		   reset_event.count[MTK_EVENT_RFIFO_OV]);
	seq_printf(seq, " RFIFO UF:	%d\n",
		   reset_event.count[MTK_EVENT_RFIFO_UF]);
	seq_printf(seq, " Force:		%d\n",
		   reset_event.count[MTK_EVENT_FORCE]);
	seq_printf(seq, "----------------------------\n");
	seq_printf(seq, " Warm Cnt:	%d\n",
		   reset_event.count[MTK_EVENT_WARM_CNT]);
	seq_printf(seq, " Cold Cnt:	%d\n",
		   reset_event.count[MTK_EVENT_COLD_CNT]);
	seq_printf(seq, " Total Cnt:	%d\n",
		   reset_event.count[MTK_EVENT_TOTAL_CNT]);

	return 0;
}
1550
1551static int reset_event_open(struct inode *inode, struct file *file)
1552{
1553 return single_open(file, reset_event_read, 0);
1554}
1555
1556ssize_t reset_event_write(struct file *file, const char __user *buffer,
1557 size_t count, loff_t *data)
1558{
1559 struct mtk_eth *eth = g_eth;
1560 struct mtk_reset_event *reset_event = &eth->reset_event;
1561
1562 memset(reset_event, 0, sizeof(struct mtk_reset_event));
1563 pr_info("MTK reset event counter is cleared !\n");
1564
1565 return count;
1566}
1567
/* File operations for reset_event: reads dump the counters, writes clear
 * them (see reset_event_write()).
 */
static const struct file_operations reset_event_fops = {
	.owner = THIS_MODULE,
	.open = reset_event_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = reset_event_write,
	.release = single_release
};
1576
1577
/* Root /proc/mtketh directory (shared with other files, hence non-static)
 * and the file-local entries created by debug_proc_init().
 */
struct proc_dir_entry *proc_reg_dir;
static struct proc_dir_entry *proc_esw_cnt, *proc_dbg_regs, *proc_reset_event;
developerfd40db22021-04-29 10:08:25 +08001580
/* Create the /proc/mtketh debug directory and all of its entries.
 *
 * Also stashes @eth in the file-global g_eth used by every handler above.
 * Creation failures are logged but not fatal; always returns 0.  The HW LRO
 * entries are created only when the device actually has LRO enabled.
 */
int debug_proc_init(struct mtk_eth *eth)
{
	g_eth = eth;

	if (!proc_reg_dir)
		proc_reg_dir = proc_mkdir(PROCREG_DIR, NULL);

	proc_tx_ring =
	    proc_create(PROCREG_TXRING, 0, proc_reg_dir, &tx_ring_fops);
	if (!proc_tx_ring)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_TXRING);

	proc_hwtx_ring =
	    proc_create(PROCREG_HWTXRING, 0, proc_reg_dir, &hwtx_ring_fops);
	if (!proc_hwtx_ring)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_HWTXRING);

	proc_rx_ring =
	    proc_create(PROCREG_RXRING, 0, proc_reg_dir, &rx_ring_fops);
	if (!proc_rx_ring)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RXRING);

	proc_esw_cnt =
	    proc_create(PROCREG_ESW_CNT, 0, proc_reg_dir, &switch_count_fops);
	if (!proc_esw_cnt)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_ESW_CNT);

	proc_dbg_regs =
	    proc_create(PROCREG_DBG_REGS, 0, proc_reg_dir, &dbg_regs_fops);
	if (!proc_dbg_regs)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_DBG_REGS);

	if (g_eth->hwlro) {
		proc_hw_lro_stats =
		    proc_create(PROCREG_HW_LRO_STATS, 0, proc_reg_dir,
				&hw_lro_stats_fops);
		if (!proc_hw_lro_stats)
			pr_info("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_STATS);

		proc_hw_lro_auto_tlb =
		    proc_create(PROCREG_HW_LRO_AUTO_TLB, 0, proc_reg_dir,
				&hw_lro_auto_tlb_fops);
		if (!proc_hw_lro_auto_tlb)
			pr_info("!! FAIL to create %s PROC !!\n",
				PROCREG_HW_LRO_AUTO_TLB);
	}

	proc_reset_event =
	    proc_create(PROCREG_RESET_EVENT, 0, proc_reg_dir, &reset_event_fops);
	if (!proc_reset_event)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RESET_EVENT);

	return 0;
}
1635
1636void debug_proc_exit(void)
1637{
1638 if (proc_tx_ring)
1639 remove_proc_entry(PROCREG_TXRING, proc_reg_dir);
developer8051e042022-04-08 13:26:36 +08001640 if (proc_hwtx_ring)
1641 remove_proc_entry(PROCREG_HWTXRING, proc_reg_dir);
developerfd40db22021-04-29 10:08:25 +08001642 if (proc_rx_ring)
1643 remove_proc_entry(PROCREG_RXRING, proc_reg_dir);
1644
1645 if (proc_esw_cnt)
1646 remove_proc_entry(PROCREG_ESW_CNT, proc_reg_dir);
1647
1648 if (proc_reg_dir)
1649 remove_proc_entry(PROCREG_DIR, 0);
1650
1651 if (proc_dbg_regs)
1652 remove_proc_entry(PROCREG_DBG_REGS, proc_reg_dir);
developer77d03a72021-06-06 00:06:00 +08001653
1654 if (g_eth->hwlro) {
1655 if (proc_hw_lro_stats)
1656 remove_proc_entry(PROCREG_HW_LRO_STATS, proc_reg_dir);
1657
1658 if (proc_hw_lro_auto_tlb)
1659 remove_proc_entry(PROCREG_HW_LRO_AUTO_TLB, proc_reg_dir);
1660 }
developer8051e042022-04-08 13:26:36 +08001661
1662 if (proc_reset_event)
1663 remove_proc_entry(PROCREG_RESET_EVENT, proc_reg_dir);
developerfd40db22021-04-29 10:08:25 +08001664}
1665