blob: 38d2b5378f1350cd00e3231d37619a9928de5ccb [file] [log] [blame]
developerfd40db22021-04-29 10:08:25 +08001/*
2 * Copyright (C) 2018 MediaTek Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
14 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
15 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
16 */
17
18#include <linux/trace_seq.h>
19#include <linux/seq_file.h>
20#include <linux/proc_fs.h>
21#include <linux/u64_stats_sync.h>
22#include <linux/dma-mapping.h>
23#include <linux/netdevice.h>
24#include <linux/ctype.h>
25#include <linux/debugfs.h>
26#include <linux/of_mdio.h>
27
28#include "mtk_eth_soc.h"
29#include "mtk_eth_dbg.h"
developer8051e042022-04-08 13:26:36 +080030#include "mtk_eth_reset.h"
developerfd40db22021-04-29 10:08:25 +080031
developer77d03a72021-06-06 00:06:00 +080032u32 hw_lro_agg_num_cnt[MTK_HW_LRO_RING_NUM][MTK_HW_LRO_MAX_AGG_CNT + 1];
33u32 hw_lro_agg_size_cnt[MTK_HW_LRO_RING_NUM][16];
34u32 hw_lro_tot_agg_cnt[MTK_HW_LRO_RING_NUM];
35u32 hw_lro_tot_flush_cnt[MTK_HW_LRO_RING_NUM];
36u32 hw_lro_agg_flush_cnt[MTK_HW_LRO_RING_NUM];
37u32 hw_lro_age_flush_cnt[MTK_HW_LRO_RING_NUM];
38u32 hw_lro_seq_flush_cnt[MTK_HW_LRO_RING_NUM];
39u32 hw_lro_timestamp_flush_cnt[MTK_HW_LRO_RING_NUM];
40u32 hw_lro_norule_flush_cnt[MTK_HW_LRO_RING_NUM];
41u32 mtk_hwlro_stats_ebl;
42static struct proc_dir_entry *proc_hw_lro_stats, *proc_hw_lro_auto_tlb;
43typedef int (*mtk_lro_dbg_func) (int par);
44
developerfd40db22021-04-29 10:08:25 +080045struct mtk_eth_debug {
46 struct dentry *root;
47};
48
49struct mtk_eth *g_eth;
50
51struct mtk_eth_debug eth_debug;
52
developer3957a912021-05-13 16:44:31 +080053void mt7530_mdio_w32(struct mtk_eth *eth, u16 reg, u32 val)
developerfd40db22021-04-29 10:08:25 +080054{
55 mutex_lock(&eth->mii_bus->mdio_lock);
56
57 _mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
58 _mtk_mdio_write(eth, 0x1f, (reg >> 2) & 0xf, val & 0xffff);
59 _mtk_mdio_write(eth, 0x1f, 0x10, val >> 16);
60
61 mutex_unlock(&eth->mii_bus->mdio_lock);
62}
63
/*
 * Read a 32-bit MT7530 switch register over the MDIO bus.
 *
 * Mirror of mt7530_mdio_w32(): select the page, read the low half from
 * the in-page register, then the high half from register 0x10, all under
 * the MDIO bus lock.
 */
u32 mt7530_mdio_r32(struct mtk_eth *eth, u32 reg)
{
	u16 high, low;

	mutex_lock(&eth->mii_bus->mdio_lock);

	_mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
	low = _mtk_mdio_read(eth, 0x1f, (reg >> 2) & 0xf);
	high = _mtk_mdio_read(eth, 0x1f, 0x10);

	mutex_unlock(&eth->mii_bus->mdio_lock);

	return (high << 16) | (low & 0xffff);
}
78
/* Write a register in the switch window, located at offset 0x10000 of the
 * ethernet MMIO space.
 */
void mtk_switch_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	mtk_w32(eth, val, reg + 0x10000);
}
EXPORT_SYMBOL(mtk_switch_w32);
84
/* Read a register from the switch window at offset 0x10000 (see
 * mtk_switch_w32()).
 */
u32 mtk_switch_r32(struct mtk_eth *eth, unsigned reg)
{
	return mtk_r32(eth, reg + 0x10000);
}
EXPORT_SYMBOL(mtk_switch_r32);
90
/*
 * seq_file show callback for the "phy_regs" debugfs file.
 *
 * Iterates over all MACs that are not fixed-link; the actual PHY register
 * dump is currently compiled out (#if 0), so this only walks the list and
 * produces no output.
 */
static int mtketh_debug_show(struct seq_file *m, void *private)
{
	struct mtk_eth *eth = m->private;
	struct mtk_mac *mac = 0;
	int i = 0;

	for (i = 0 ; i < MTK_MAX_DEVS ; i++) {
		if (!eth->mac[i] ||
		    of_phy_is_fixed_link(eth->mac[i]->of_node))
			continue;
		mac = eth->mac[i];
		/* Dead code: references j/d/phy_dev that no longer exist;
		 * kept disabled pending a rewrite (see FIXME).
		 */
#if 0 //FIXME
		while (j < 30) {
			d =  _mtk_mdio_read(eth, mac->phy_dev->addr, j);

			seq_printf(m, "phy=%d, reg=0x%08x, data=0x%08x\n",
				   mac->phy_dev->addr, j, d);
			j++;
		}
#endif
	}
	return 0;
}
114
/* open() hook wiring mtketh_debug_show() into the seq_file machinery. */
static int mtketh_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtketh_debug_show, inode->i_private);
}
119
/* File operations for the read-only "phy_regs" debugfs entry. */
static const struct file_operations mtketh_debug_fops = {
	.open = mtketh_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
126
/*
 * seq_file show callback for "mt7530sw_regs": dumps all documented MT7530
 * switch register ranges over indirect MDIO.
 *
 * Returns -EOPNOTSUPP when no MT7530 is present; prints a notice and
 * returns 0 when neither GMAC is a fixed link (i.e. no switch attached).
 */
static int mtketh_mt7530sw_debug_show(struct seq_file *m, void *private)
{
	struct mtk_eth *eth = m->private;
	u32 offset, data;
	int i;
	/* Known-valid MT7530 register windows; everything else is skipped. */
	struct mt7530_ranges {
		u32 start;
		u32 end;
	} ranges[] = {
		{0x0, 0xac},
		{0x1000, 0x10e0},
		{0x1100, 0x1140},
		{0x1200, 0x1240},
		{0x1300, 0x1340},
		{0x1400, 0x1440},
		{0x1500, 0x1540},
		{0x1600, 0x1640},
		{0x1800, 0x1848},
		{0x1900, 0x1948},
		{0x1a00, 0x1a48},
		{0x1b00, 0x1b48},
		{0x1c00, 0x1c48},
		{0x1d00, 0x1d48},
		{0x1e00, 0x1e48},
		{0x1f60, 0x1ffc},
		{0x2000, 0x212c},
		{0x2200, 0x222c},
		{0x2300, 0x232c},
		{0x2400, 0x242c},
		{0x2500, 0x252c},
		{0x2600, 0x262c},
		{0x3000, 0x3014},
		{0x30c0, 0x30f8},
		{0x3100, 0x3114},
		{0x3200, 0x3214},
		{0x3300, 0x3314},
		{0x3400, 0x3414},
		{0x3500, 0x3514},
		{0x3600, 0x3614},
		{0x4000, 0x40d4},
		{0x4100, 0x41d4},
		{0x4200, 0x42d4},
		{0x4300, 0x43d4},
		{0x4400, 0x44d4},
		{0x4500, 0x45d4},
		{0x4600, 0x46d4},
		{0x4f00, 0x461c},
		{0x7000, 0x7038},
		{0x7120, 0x7124},
		{0x7800, 0x7804},
		{0x7810, 0x7810},
		{0x7830, 0x7830},
		{0x7a00, 0x7a7c},
		{0x7b00, 0x7b04},
		{0x7e00, 0x7e04},
		{0x7ffc, 0x7ffc},
	};

	if (!mt7530_exist(eth))
		return -EOPNOTSUPP;

	/* A switch is attached via a fixed link on GMAC0 or GMAC1. */
	if ((!eth->mac[0] || !of_phy_is_fixed_link(eth->mac[0]->of_node)) &&
	    (!eth->mac[1] || !of_phy_is_fixed_link(eth->mac[1]->of_node))) {
		seq_puts(m, "no switch found\n");
		return 0;
	}

	for (i = 0 ; i < ARRAY_SIZE(ranges) ; i++) {
		for (offset = ranges[i].start;
		     offset <= ranges[i].end; offset += 4) {
			data = mt7530_mdio_r32(eth, offset);
			seq_printf(m, "mt7530 switch reg=0x%08x, data=0x%08x\n",
				   offset, data);
		}
	}

	return 0;
}
205
/* open() hook wiring mtketh_mt7530sw_debug_show() into seq_file. */
static int mtketh_debug_mt7530sw_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtketh_mt7530sw_debug_show, inode->i_private);
}
210
/* File operations for the read-only "mt7530sw_regs" debugfs entry. */
static const struct file_operations mtketh_debug_mt7530sw_fops = {
	.open = mtketh_debug_mt7530sw_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
217
218static ssize_t mtketh_mt7530sw_debugfs_write(struct file *file,
219 const char __user *ptr,
220 size_t len, loff_t *off)
221{
222 struct mtk_eth *eth = file->private_data;
223 char buf[32], *token, *p = buf;
224 u32 reg, value, phy;
225 int ret;
226
227 if (!mt7530_exist(eth))
228 return -EOPNOTSUPP;
229
230 if (*off != 0)
231 return 0;
232
233 if (len > sizeof(buf) - 1)
234 len = sizeof(buf) - 1;
235
236 ret = strncpy_from_user(buf, ptr, len);
237 if (ret < 0)
238 return ret;
239 buf[len] = '\0';
240
241 token = strsep(&p, " ");
242 if (!token)
243 return -EINVAL;
244 if (kstrtoul(token, 16, (unsigned long *)&phy))
245 return -EINVAL;
246
247 token = strsep(&p, " ");
248 if (!token)
249 return -EINVAL;
250 if (kstrtoul(token, 16, (unsigned long *)&reg))
251 return -EINVAL;
252
253 token = strsep(&p, " ");
254 if (!token)
255 return -EINVAL;
256 if (kstrtoul(token, 16, (unsigned long *)&value))
257 return -EINVAL;
258
259 pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
260 0x1f, reg, value);
261 mt7530_mdio_w32(eth, reg, value);
262 pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
263 0x1f, reg, mt7530_mdio_r32(eth, reg));
264
265 return len;
266}
267
268static ssize_t mtketh_debugfs_write(struct file *file, const char __user *ptr,
269 size_t len, loff_t *off)
270{
271 struct mtk_eth *eth = file->private_data;
272 char buf[32], *token, *p = buf;
273 u32 reg, value, phy;
274 int ret;
275
276 if (*off != 0)
277 return 0;
278
279 if (len > sizeof(buf) - 1)
280 len = sizeof(buf) - 1;
281
282 ret = strncpy_from_user(buf, ptr, len);
283 if (ret < 0)
284 return ret;
285 buf[len] = '\0';
286
287 token = strsep(&p, " ");
288 if (!token)
289 return -EINVAL;
290 if (kstrtoul(token, 16, (unsigned long *)&phy))
291 return -EINVAL;
292
293 token = strsep(&p, " ");
294
295 if (!token)
296 return -EINVAL;
297 if (kstrtoul(token, 16, (unsigned long *)&reg))
298 return -EINVAL;
299
300 token = strsep(&p, " ");
301
302 if (!token)
303 return -EINVAL;
304 if (kstrtoul(token, 16, (unsigned long *)&value))
305 return -EINVAL;
306
307 pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
308 phy, reg, value);
309
310 _mtk_mdio_write(eth, phy, reg, value);
311
312 pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
313 phy, reg, _mtk_mdio_read(eth, phy, reg));
314
315 return len;
316}
317
/*
 * Write handler for the "reset" debugfs file: any write forces a full
 * ethernet reset by bumping the reset-force counter (declared in
 * mtk_eth_reset.h) and scheduling the driver's pending work.
 */
static ssize_t mtketh_debugfs_reset(struct file *file, const char __user *ptr,
				    size_t len, loff_t *off)
{
	struct mtk_eth *eth = file->private_data;

	atomic_inc(&force);
	schedule_work(&eth->pending_work);
	return len;
}
327
/* File operations for the write-only "phy_reg_w" debugfs entry. */
static const struct file_operations fops_reg_w = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = mtketh_debugfs_write,
	.llseek = noop_llseek,
};
334
/* File operations for the write-only "reset" debugfs entry. */
static const struct file_operations fops_eth_reset = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = mtketh_debugfs_reset,
	.llseek = noop_llseek,
};
341
/* File operations for the write-only "mt7530sw_reg_w" debugfs entry. */
static const struct file_operations fops_mt7530sw_reg_w = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = mtketh_mt7530sw_debugfs_write,
	.llseek = noop_llseek,
};
348
/* Tear down the whole "mtketh" debugfs tree created at init. */
void mtketh_debugfs_exit(struct mtk_eth *eth)
{
	debugfs_remove_recursive(eth_debug.root);
}
353
354int mtketh_debugfs_init(struct mtk_eth *eth)
355{
356 int ret = 0;
357
358 eth_debug.root = debugfs_create_dir("mtketh", NULL);
359 if (!eth_debug.root) {
360 dev_notice(eth->dev, "%s:err at %d\n", __func__, __LINE__);
361 ret = -ENOMEM;
362 }
363
364 debugfs_create_file("phy_regs", S_IRUGO,
365 eth_debug.root, eth, &mtketh_debug_fops);
366 debugfs_create_file("phy_reg_w", S_IFREG | S_IWUSR,
367 eth_debug.root, eth, &fops_reg_w);
368 debugfs_create_file("reset", S_IFREG | S_IWUSR,
369 eth_debug.root, eth, &fops_eth_reset);
370 if (mt7530_exist(eth)) {
371 debugfs_create_file("mt7530sw_regs", S_IRUGO,
372 eth_debug.root, eth,
373 &mtketh_debug_mt7530sw_fops);
374 debugfs_create_file("mt7530sw_reg_w", S_IFREG | S_IWUSR,
375 eth_debug.root, eth,
376 &fops_mt7530sw_reg_w);
377 }
378 return ret;
379}
380
381void mii_mgr_read_combine(struct mtk_eth *eth, u32 phy_addr, u32 phy_register,
382 u32 *read_data)
383{
384 if (mt7530_exist(eth) && phy_addr == 31)
385 *read_data = mt7530_mdio_r32(eth, phy_register);
386
387 else
388 *read_data = _mtk_mdio_read(eth, phy_addr, phy_register);
389}
390
developer3957a912021-05-13 16:44:31 +0800391void mii_mgr_write_combine(struct mtk_eth *eth, u16 phy_addr, u16 phy_register,
developerfd40db22021-04-29 10:08:25 +0800392 u32 write_data)
393{
394 if (mt7530_exist(eth) && phy_addr == 31)
395 mt7530_mdio_w32(eth, phy_register, write_data);
396
397 else
398 _mtk_mdio_write(eth, phy_addr, phy_register, write_data);
399}
400
developer3957a912021-05-13 16:44:31 +0800401static void mii_mgr_read_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 *data)
developerfd40db22021-04-29 10:08:25 +0800402{
developer599cda42022-05-24 15:13:31 +0800403 *data = _mtk_mdio_read(eth, port, mdiobus_c45_addr(devad, reg));
developerfd40db22021-04-29 10:08:25 +0800404}
405
developer3957a912021-05-13 16:44:31 +0800406static void mii_mgr_write_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 data)
developerfd40db22021-04-29 10:08:25 +0800407{
developer599cda42022-05-24 15:13:31 +0800408 _mtk_mdio_write(eth, port, mdiobus_c45_addr(devad, reg), data);
developerfd40db22021-04-29 10:08:25 +0800409}
410
/*
 * Private ioctl dispatcher for the ethernet netdev.
 *
 * Supported commands:
 *   MTKETH_MII_READ/WRITE        - clause-22 MDIO access (switch-aware,
 *                                  see mii_mgr_*_combine()).
 *   MTKETH_MII_READ/WRITE_CL45   - clause-45 MDIO access; the prtad/devad
 *                                  pair is packed into mii.phy_id.
 *   MTKETH_ESW_REG_READ/WRITE    - embedded switch MMIO access, bounded
 *                                  by REG_ESW_MAX; requires an MT7530.
 *
 * Returns 0 on success, -EFAULT on user-copy failure, -EINVAL for an
 * out-of-range switch offset, -EOPNOTSUPP for unknown commands or switch
 * access without a switch.
 */
int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_mii_ioctl_data mii;
	struct mtk_esw_reg reg;
	u16 val;

	switch (cmd) {
	case MTKETH_MII_READ:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		mii_mgr_read_combine(eth, mii.phy_id, mii.reg_num,
				     &mii.val_out);
		if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
			goto err_copy;

		return 0;
	case MTKETH_MII_WRITE:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		mii_mgr_write_combine(eth, mii.phy_id, mii.reg_num,
				      mii.val_in);
		return 0;
	case MTKETH_MII_READ_CL45:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		/* cl45 helpers operate on u16; widen back for user space */
		mii_mgr_read_cl45(eth,
				  mdio_phy_id_prtad(mii.phy_id),
				  mdio_phy_id_devad(mii.phy_id),
				  mii.reg_num,
				  &val);
		mii.val_out = val;
		if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
			goto err_copy;

		return 0;
	case MTKETH_MII_WRITE_CL45:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		val = mii.val_in;
		mii_mgr_write_cl45(eth,
				   mdio_phy_id_prtad(mii.phy_id),
				   mdio_phy_id_devad(mii.phy_id),
				   mii.reg_num,
				   val);
		return 0;
	case MTKETH_ESW_REG_READ:
		if (!mt7530_exist(eth))
			return -EOPNOTSUPP;
		if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg)))
			goto err_copy;
		if (reg.off > REG_ESW_MAX)
			return -EINVAL;
		reg.val = mtk_switch_r32(eth, reg.off);

		if (copy_to_user(ifr->ifr_data, &reg, sizeof(reg)))
			goto err_copy;

		return 0;
	case MTKETH_ESW_REG_WRITE:
		if (!mt7530_exist(eth))
			return -EOPNOTSUPP;
		if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg)))
			goto err_copy;
		if (reg.off > REG_ESW_MAX)
			return -EINVAL;
		mtk_switch_w32(eth, reg.val, reg.off);

		return 0;
	default:
		break;
	}

	return -EOPNOTSUPP;
err_copy:
	return -EFAULT;
}
489
/*
 * seq_file show callback dumping the frame-engine GDMA MIB counters and,
 * when an MT7530 switch is present, the per-port switch MIB counters.
 *
 * The GDMA counters are read directly from the MIB register block at
 * MTK_GDM1_TX_GBCNT; the switch counters are read over indirect MDIO and
 * cleared at the end via the 0x4fe0 control writes.
 */
int esw_cnt_read(struct seq_file *seq, void *v)
{
	unsigned int pkt_cnt = 0;
	int i = 0;
	struct mtk_eth *eth = g_eth;
	unsigned int mib_base = MTK_GDM1_TX_GBCNT;

	seq_puts(seq, "\n		  <<CPU>>\n");
	seq_puts(seq, "		    |\n");
	seq_puts(seq, "+-----------------------------------------------+\n");
	seq_puts(seq, "|		  <<PSE>>		        |\n");
	seq_puts(seq, "+-----------------------------------------------+\n");
	seq_puts(seq, "		   |\n");
	seq_puts(seq, "+-----------------------------------------------+\n");
	seq_puts(seq, "|		  <<GDMA>>		        |\n");
	seq_printf(seq, "| GDMA1_RX_GBCNT  : %010u (Rx Good Bytes)	|\n",
		   mtk_r32(eth, mib_base));
	seq_printf(seq, "| GDMA1_RX_GPCNT  : %010u (Rx Good Pkts)	|\n",
		   mtk_r32(eth, mib_base+0x08));
	seq_printf(seq, "| GDMA1_RX_OERCNT : %010u (overflow error)	|\n",
		   mtk_r32(eth, mib_base+0x10));
	seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error)	|\n",
		   mtk_r32(eth, mib_base+0x14));
	seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short)	|\n",
		   mtk_r32(eth, mib_base+0x18));
	seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long)	|\n",
		   mtk_r32(eth, mib_base+0x1C));
	seq_printf(seq, "| GDMA1_RX_CERCNT : %010u (checksum error)	|\n",
		   mtk_r32(eth, mib_base+0x20));
	seq_printf(seq, "| GDMA1_RX_FCCNT  : %010u (flow control)	|\n",
		   mtk_r32(eth, mib_base+0x24));
	seq_printf(seq, "| GDMA1_TX_SKIPCNT: %010u (about count)	|\n",
		   mtk_r32(eth, mib_base+0x28));
	seq_printf(seq, "| GDMA1_TX_COLCNT : %010u (collision count)	|\n",
		   mtk_r32(eth, mib_base+0x2C));
	seq_printf(seq, "| GDMA1_TX_GBCNT  : %010u (Tx Good Bytes)	|\n",
		   mtk_r32(eth, mib_base+0x30));
	seq_printf(seq, "| GDMA1_TX_GPCNT  : %010u (Tx Good Pkts)	|\n",
		   mtk_r32(eth, mib_base+0x38));
	seq_puts(seq, "|						|\n");
	seq_printf(seq, "| GDMA2_RX_GBCNT  : %010u (Rx Good Bytes)	|\n",
		   mtk_r32(eth, mib_base+0x40));
	seq_printf(seq, "| GDMA2_RX_GPCNT  : %010u (Rx Good Pkts)	|\n",
		   mtk_r32(eth, mib_base+0x48));
	seq_printf(seq, "| GDMA2_RX_OERCNT : %010u (overflow error)	|\n",
		   mtk_r32(eth, mib_base+0x50));
	seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error)	|\n",
		   mtk_r32(eth, mib_base+0x54));
	seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short)	|\n",
		   mtk_r32(eth, mib_base+0x58));
	seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long)	|\n",
		   mtk_r32(eth, mib_base+0x5C));
	seq_printf(seq, "| GDMA2_RX_CERCNT : %010u (checksum error)	|\n",
		   mtk_r32(eth, mib_base+0x60));
	seq_printf(seq, "| GDMA2_RX_FCCNT  : %010u (flow control)	|\n",
		   mtk_r32(eth, mib_base+0x64));
	seq_printf(seq, "| GDMA2_TX_SKIPCNT: %010u (skip)		|\n",
		   mtk_r32(eth, mib_base+0x68));
	seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision)	|\n",
		   mtk_r32(eth, mib_base+0x6C));
	seq_printf(seq, "| GDMA2_TX_GBCNT  : %010u (Tx Good Bytes)	|\n",
		   mtk_r32(eth, mib_base+0x70));
	seq_printf(seq, "| GDMA2_TX_GPCNT  : %010u (Tx Good Pkts)	|\n",
		   mtk_r32(eth, mib_base+0x78));
	seq_puts(seq, "+-----------------------------------------------+\n");

	if (!mt7530_exist(eth))
		return 0;

/* Print one counter register for each of the 7 switch ports; per-port
 * registers are spaced 0x100 apart.
 */
#define DUMP_EACH_PORT(base)					\
	do { \
		for (i = 0; i < 7; i++) {			\
			pkt_cnt = mt7530_mdio_r32(eth, (base) + (i * 0x100));\
			seq_printf(seq, "%8u ", pkt_cnt);	\
		}						\
		seq_puts(seq, "\n"); \
	} while (0)

	seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n",
		   "Port0", "Port1", "Port2", "Port3", "Port4", "Port5",
		   "Port6");
	seq_puts(seq, "Tx Drop Packet      :");
	DUMP_EACH_PORT(0x4000);
	seq_puts(seq, "Tx CRC Error        :");
	DUMP_EACH_PORT(0x4004);
	seq_puts(seq, "Tx Unicast Packet   :");
	DUMP_EACH_PORT(0x4008);
	seq_puts(seq, "Tx Multicast Packet :");
	DUMP_EACH_PORT(0x400C);
	seq_puts(seq, "Tx Broadcast Packet :");
	DUMP_EACH_PORT(0x4010);
	seq_puts(seq, "Tx Collision Event  :");
	DUMP_EACH_PORT(0x4014);
	seq_puts(seq, "Tx Pause Packet     :");
	DUMP_EACH_PORT(0x402C);
	seq_puts(seq, "Rx Drop Packet      :");
	DUMP_EACH_PORT(0x4060);
	seq_puts(seq, "Rx Filtering Packet :");
	DUMP_EACH_PORT(0x4064);
	seq_puts(seq, "Rx Unicast Packet   :");
	DUMP_EACH_PORT(0x4068);
	seq_puts(seq, "Rx Multicast Packet :");
	DUMP_EACH_PORT(0x406C);
	seq_puts(seq, "Rx Broadcast Packet :");
	DUMP_EACH_PORT(0x4070);
	seq_puts(seq, "Rx Alignment Error  :");
	DUMP_EACH_PORT(0x4074);
	seq_puts(seq, "Rx CRC Error	    :");
	DUMP_EACH_PORT(0x4078);
	seq_puts(seq, "Rx Undersize Error  :");
	DUMP_EACH_PORT(0x407C);
	seq_puts(seq, "Rx Fragment Error   :");
	DUMP_EACH_PORT(0x4080);
	seq_puts(seq, "Rx Oversize Error   :");
	DUMP_EACH_PORT(0x4084);
	seq_puts(seq, "Rx Jabber Error     :");
	DUMP_EACH_PORT(0x4088);
	seq_puts(seq, "Rx Pause Packet     :");
	DUMP_EACH_PORT(0x408C);
	/* Clear the switch MIB counters after the dump. */
	mt7530_mdio_w32(eth, 0x4fe0, 0xf0);
	mt7530_mdio_w32(eth, 0x4fe0, 0x800000f0);

	seq_puts(seq, "\n");

	return 0;
}
616
617static int switch_count_open(struct inode *inode, struct file *file)
618{
619 return single_open(file, esw_cnt_read, 0);
620}
621
/* File operations for the read-only switch/GDMA counter proc entry. */
static const struct file_operations switch_count_fops = {
	.owner = THIS_MODULE,
	.open = switch_count_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
629
developer8051e042022-04-08 13:26:36 +0800630static struct proc_dir_entry *proc_tx_ring, *proc_hwtx_ring, *proc_rx_ring;
developerfd40db22021-04-29 10:08:25 +0800631
/*
 * seq_file show callback dumping every QDMA TX descriptor.
 *
 * Descriptors are addressed by byte offset (i * txd_size) because the
 * descriptor size differs between NETSYS v1 (4 words) and v2 (8 words);
 * the extra words txd5..txd8 are printed on v2 only.
 */
int tx_ring_read(struct seq_file *seq, void *v)
{
	struct mtk_eth *eth = g_eth;
	struct mtk_tx_ring *ring = &g_eth->tx_ring;
	struct mtk_tx_dma_v2 *tx_ring;
	int i = 0;

	seq_printf(seq, "free count = %d\n", (int)atomic_read(&ring->free_count));
	/* next_free/last_free are byte offsets into the ring here, since
	 * ring->dma is untyped (descriptor size is runtime-dependent).
	 */
	seq_printf(seq, "cpu next free: %d\n", (int)(ring->next_free - ring->dma));
	seq_printf(seq, "cpu last free: %d\n", (int)(ring->last_free - ring->dma));
	for (i = 0; i < MTK_DMA_SIZE; i++) {
		dma_addr_t tmp = ring->phys + i * eth->soc->txrx.txd_size;

		tx_ring = ring->dma + i * eth->soc->txrx.txd_size;

		seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &tmp,
			   tx_ring->txd1, tx_ring->txd2,
			   tx_ring->txd3, tx_ring->txd4);

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
			seq_printf(seq, " %08x %08x %08x %08x",
				   tx_ring->txd5, tx_ring->txd6,
				   tx_ring->txd7, tx_ring->txd8);
		}

		seq_printf(seq, "\n");
	}

	return 0;
}
662
/* open() hook wiring tx_ring_read() into seq_file. */
static int tx_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, tx_ring_read, NULL);
}
667
/* File operations for the read-only TX-ring proc entry. */
static const struct file_operations tx_ring_fops = {
	.owner = THIS_MODULE,
	.open = tx_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
675
developer8051e042022-04-08 13:26:36 +0800676int hwtx_ring_read(struct seq_file *seq, void *v)
677{
678 struct mtk_eth *eth = g_eth;
developere9356982022-07-04 09:03:20 +0800679 struct mtk_tx_dma_v2 *hwtx_ring;
developer8051e042022-04-08 13:26:36 +0800680 int i = 0;
681
developer8051e042022-04-08 13:26:36 +0800682 for (i = 0; i < MTK_DMA_SIZE; i++) {
developere9356982022-07-04 09:03:20 +0800683 dma_addr_t addr = eth->phy_scratch_ring + i * eth->soc->txrx.txd_size;
684
685 hwtx_ring = eth->scratch_ring + i * eth->soc->txrx.txd_size;
developer8051e042022-04-08 13:26:36 +0800686
687 seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &addr,
developere9356982022-07-04 09:03:20 +0800688 hwtx_ring->txd1, hwtx_ring->txd2,
689 hwtx_ring->txd3, hwtx_ring->txd4);
690
691 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
692 seq_printf(seq, " %08x %08x %08x %08x",
693 hwtx_ring->txd5, hwtx_ring->txd6,
694 hwtx_ring->txd7, hwtx_ring->txd8);
695 }
696
developer8051e042022-04-08 13:26:36 +0800697 seq_printf(seq, "\n");
698 }
699
developer8051e042022-04-08 13:26:36 +0800700 return 0;
701}
702
/* open() hook wiring hwtx_ring_read() into seq_file. */
static int hwtx_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, hwtx_ring_read, NULL);
}
707
/* File operations for the read-only HW TX-ring proc entry. */
static const struct file_operations hwtx_ring_fops = {
	.owner = THIS_MODULE,
	.open = hwtx_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
715
developerfd40db22021-04-29 10:08:25 +0800716int rx_ring_read(struct seq_file *seq, void *v)
717{
developere9356982022-07-04 09:03:20 +0800718 struct mtk_eth *eth = g_eth;
developerfd40db22021-04-29 10:08:25 +0800719 struct mtk_rx_ring *ring = &g_eth->rx_ring[0];
developere9356982022-07-04 09:03:20 +0800720 struct mtk_rx_dma_v2 *rx_ring;
developerfd40db22021-04-29 10:08:25 +0800721 int i = 0;
722
developerfd40db22021-04-29 10:08:25 +0800723 seq_printf(seq, "next to read: %d\n",
724 NEXT_DESP_IDX(ring->calc_idx, MTK_DMA_SIZE));
725 for (i = 0; i < MTK_DMA_SIZE; i++) {
developere9356982022-07-04 09:03:20 +0800726 rx_ring = ring->dma + i * eth->soc->txrx.rxd_size;
727
developerfd40db22021-04-29 10:08:25 +0800728 seq_printf(seq, "%d: %08x %08x %08x %08x", i,
developere9356982022-07-04 09:03:20 +0800729 rx_ring->rxd1, rx_ring->rxd2,
730 rx_ring->rxd3, rx_ring->rxd4);
731
732 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
733 seq_printf(seq, " %08x %08x %08x %08x",
734 rx_ring->rxd5, rx_ring->rxd6,
735 rx_ring->rxd7, rx_ring->rxd8);
736 }
737
developerfd40db22021-04-29 10:08:25 +0800738 seq_printf(seq, "\n");
739 }
740
developerfd40db22021-04-29 10:08:25 +0800741 return 0;
742}
743
/* open() hook wiring rx_ring_read() into seq_file. */
static int rx_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, rx_ring_read, NULL);
}
748
/* File operations for the read-only RX-ring proc entry. */
static const struct file_operations rx_ring_fops = {
	.owner = THIS_MODULE,
	.open = rx_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
756
developer77f3fd42021-10-05 15:16:05 +0800757static inline u32 mtk_dbg_r32(u32 reg)
758{
759 void __iomem *virt_reg;
760 u32 val;
761
762 virt_reg = ioremap(reg, 32);
763 val = __raw_readl(virt_reg);
764 iounmap(virt_reg);
765
766 return val;
767}
768
developerfd40db22021-04-29 10:08:25 +0800769int dbg_regs_read(struct seq_file *seq, void *v)
770{
771 struct mtk_eth *eth = g_eth;
772
developer77f3fd42021-10-05 15:16:05 +0800773 seq_puts(seq, " <<DEBUG REG DUMP>>\n");
774
775 seq_printf(seq, "| FE_INT_STA : %08x |\n",
developer8051e042022-04-08 13:26:36 +0800776 mtk_r32(eth, MTK_FE_INT_STATUS));
developer77f3fd42021-10-05 15:16:05 +0800777 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
778 seq_printf(seq, "| FE_INT_STA2 : %08x |\n",
developer8051e042022-04-08 13:26:36 +0800779 mtk_r32(eth, MTK_FE_INT_STATUS2));
developer77f3fd42021-10-05 15:16:05 +0800780
developerfd40db22021-04-29 10:08:25 +0800781 seq_printf(seq, "| PSE_FQFC_CFG : %08x |\n",
782 mtk_r32(eth, MTK_PSE_FQFC_CFG));
783 seq_printf(seq, "| PSE_IQ_STA1 : %08x |\n",
784 mtk_r32(eth, MTK_PSE_IQ_STA(0)));
785 seq_printf(seq, "| PSE_IQ_STA2 : %08x |\n",
786 mtk_r32(eth, MTK_PSE_IQ_STA(1)));
787
developera2bdbd52021-05-31 19:10:17 +0800788 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfd40db22021-04-29 10:08:25 +0800789 seq_printf(seq, "| PSE_IQ_STA3 : %08x |\n",
790 mtk_r32(eth, MTK_PSE_IQ_STA(2)));
791 seq_printf(seq, "| PSE_IQ_STA4 : %08x |\n",
792 mtk_r32(eth, MTK_PSE_IQ_STA(3)));
developer77f3fd42021-10-05 15:16:05 +0800793 seq_printf(seq, "| PSE_IQ_STA5 : %08x |\n",
794 mtk_r32(eth, MTK_PSE_IQ_STA(4)));
developerfd40db22021-04-29 10:08:25 +0800795 }
796
797 seq_printf(seq, "| PSE_OQ_STA1 : %08x |\n",
798 mtk_r32(eth, MTK_PSE_OQ_STA(0)));
799 seq_printf(seq, "| PSE_OQ_STA2 : %08x |\n",
800 mtk_r32(eth, MTK_PSE_OQ_STA(1)));
801
developera2bdbd52021-05-31 19:10:17 +0800802 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfd40db22021-04-29 10:08:25 +0800803 seq_printf(seq, "| PSE_OQ_STA3 : %08x |\n",
804 mtk_r32(eth, MTK_PSE_OQ_STA(2)));
805 seq_printf(seq, "| PSE_OQ_STA4 : %08x |\n",
806 mtk_r32(eth, MTK_PSE_OQ_STA(3)));
developer77f3fd42021-10-05 15:16:05 +0800807 seq_printf(seq, "| PSE_OQ_STA5 : %08x |\n",
808 mtk_r32(eth, MTK_PSE_OQ_STA(4)));
developerfd40db22021-04-29 10:08:25 +0800809 }
810
developer77f3fd42021-10-05 15:16:05 +0800811 seq_printf(seq, "| PDMA_CRX_IDX : %08x |\n",
812 mtk_r32(eth, MTK_PRX_CRX_IDX0));
813 seq_printf(seq, "| PDMA_DRX_IDX : %08x |\n",
814 mtk_r32(eth, MTK_PRX_DRX_IDX0));
815 seq_printf(seq, "| QDMA_CTX_IDX : %08x |\n",
816 mtk_r32(eth, MTK_QTX_CTX_PTR));
817 seq_printf(seq, "| QDMA_DTX_IDX : %08x |\n",
818 mtk_r32(eth, MTK_QTX_DTX_PTR));
developerfd40db22021-04-29 10:08:25 +0800819 seq_printf(seq, "| QDMA_FQ_CNT : %08x |\n",
820 mtk_r32(eth, MTK_QDMA_FQ_CNT));
821 seq_printf(seq, "| FE_PSE_FREE : %08x |\n",
822 mtk_r32(eth, MTK_FE_PSE_FREE));
823 seq_printf(seq, "| FE_DROP_FQ : %08x |\n",
824 mtk_r32(eth, MTK_FE_DROP_FQ));
825 seq_printf(seq, "| FE_DROP_FC : %08x |\n",
826 mtk_r32(eth, MTK_FE_DROP_FC));
827 seq_printf(seq, "| FE_DROP_PPE : %08x |\n",
828 mtk_r32(eth, MTK_FE_DROP_PPE));
829 seq_printf(seq, "| GDM1_IG_CTRL : %08x |\n",
830 mtk_r32(eth, MTK_GDMA_FWD_CFG(0)));
831 seq_printf(seq, "| GDM2_IG_CTRL : %08x |\n",
832 mtk_r32(eth, MTK_GDMA_FWD_CFG(1)));
833 seq_printf(seq, "| MAC_P1_MCR : %08x |\n",
834 mtk_r32(eth, MTK_MAC_MCR(0)));
835 seq_printf(seq, "| MAC_P2_MCR : %08x |\n",
836 mtk_r32(eth, MTK_MAC_MCR(1)));
developer77f3fd42021-10-05 15:16:05 +0800837 seq_printf(seq, "| MAC_P1_FSM : %08x |\n",
838 mtk_r32(eth, MTK_MAC_FSM(0)));
839 seq_printf(seq, "| MAC_P2_FSM : %08x |\n",
840 mtk_r32(eth, MTK_MAC_FSM(1)));
developerfd40db22021-04-29 10:08:25 +0800841
developera2bdbd52021-05-31 19:10:17 +0800842 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
developerfd40db22021-04-29 10:08:25 +0800843 seq_printf(seq, "| FE_CDM1_FSM : %08x |\n",
844 mtk_r32(eth, MTK_FE_CDM1_FSM));
845 seq_printf(seq, "| FE_CDM2_FSM : %08x |\n",
846 mtk_r32(eth, MTK_FE_CDM2_FSM));
developer77f3fd42021-10-05 15:16:05 +0800847 seq_printf(seq, "| FE_CDM3_FSM : %08x |\n",
848 mtk_r32(eth, MTK_FE_CDM3_FSM));
849 seq_printf(seq, "| FE_CDM4_FSM : %08x |\n",
850 mtk_r32(eth, MTK_FE_CDM4_FSM));
developerfd40db22021-04-29 10:08:25 +0800851 seq_printf(seq, "| FE_GDM1_FSM : %08x |\n",
852 mtk_r32(eth, MTK_FE_GDM1_FSM));
853 seq_printf(seq, "| FE_GDM2_FSM : %08x |\n",
854 mtk_r32(eth, MTK_FE_GDM2_FSM));
developer77f3fd42021-10-05 15:16:05 +0800855 seq_printf(seq, "| SGMII_EFUSE : %08x |\n",
856 mtk_dbg_r32(MTK_SGMII_EFUSE));
857 seq_printf(seq, "| SGMII0_RX_CNT : %08x |\n",
858 mtk_dbg_r32(MTK_SGMII_FALSE_CARRIER_CNT(0)));
859 seq_printf(seq, "| SGMII1_RX_CNT : %08x |\n",
860 mtk_dbg_r32(MTK_SGMII_FALSE_CARRIER_CNT(1)));
861 seq_printf(seq, "| WED_RTQM_GLO : %08x |\n",
862 mtk_dbg_r32(MTK_WED_RTQM_GLO_CFG));
developerfd40db22021-04-29 10:08:25 +0800863 }
864
developer8051e042022-04-08 13:26:36 +0800865 mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS);
developer77f3fd42021-10-05 15:16:05 +0800866 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
developer8051e042022-04-08 13:26:36 +0800867 mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS2);
developer77f3fd42021-10-05 15:16:05 +0800868
developerfd40db22021-04-29 10:08:25 +0800869 return 0;
870}
871
872static int dbg_regs_open(struct inode *inode, struct file *file)
873{
874 return single_open(file, dbg_regs_read, 0);
875}
876
/* File operations for the read-only debug-register proc entry. */
static const struct file_operations dbg_regs_fops = {
	.owner = THIS_MODULE,
	.open = dbg_regs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
884
developere9356982022-07-04 09:03:20 +0800885void hw_lro_stats_update(u32 ring_no, struct mtk_rx_dma_v2 *rxd)
developer77d03a72021-06-06 00:06:00 +0800886{
developere9356982022-07-04 09:03:20 +0800887 struct mtk_eth *eth = g_eth;
developer77d03a72021-06-06 00:06:00 +0800888 u32 idx, agg_cnt, agg_size;
889
developere9356982022-07-04 09:03:20 +0800890 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
891 idx = ring_no - 4;
892 agg_cnt = RX_DMA_GET_AGG_CNT_V2(rxd->rxd6);
893 } else {
894 idx = ring_no - 1;
895 agg_cnt = RX_DMA_GET_AGG_CNT(rxd->rxd2);
896 }
developer77d03a72021-06-06 00:06:00 +0800897
898 agg_size = RX_DMA_GET_PLEN0(rxd->rxd2);
899
900 hw_lro_agg_size_cnt[idx][agg_size / 5000]++;
901 hw_lro_agg_num_cnt[idx][agg_cnt]++;
902 hw_lro_tot_flush_cnt[idx]++;
903 hw_lro_tot_agg_cnt[idx] += agg_cnt;
904}
905
developere9356982022-07-04 09:03:20 +0800906void hw_lro_flush_stats_update(u32 ring_no, struct mtk_rx_dma_v2 *rxd)
developer77d03a72021-06-06 00:06:00 +0800907{
developere9356982022-07-04 09:03:20 +0800908 struct mtk_eth *eth = g_eth;
developer77d03a72021-06-06 00:06:00 +0800909 u32 idx, flush_reason;
910
developere9356982022-07-04 09:03:20 +0800911 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
912 idx = ring_no - 4;
913 flush_reason = RX_DMA_GET_FLUSH_RSN_V2(rxd->rxd6);
914 } else {
915 idx = ring_no - 1;
916 flush_reason = RX_DMA_GET_REV(rxd->rxd2);
917 }
developer77d03a72021-06-06 00:06:00 +0800918
919 if ((flush_reason & 0x7) == MTK_HW_LRO_AGG_FLUSH)
920 hw_lro_agg_flush_cnt[idx]++;
921 else if ((flush_reason & 0x7) == MTK_HW_LRO_AGE_FLUSH)
922 hw_lro_age_flush_cnt[idx]++;
923 else if ((flush_reason & 0x7) == MTK_HW_LRO_NOT_IN_SEQ_FLUSH)
924 hw_lro_seq_flush_cnt[idx]++;
925 else if ((flush_reason & 0x7) == MTK_HW_LRO_TIMESTAMP_FLUSH)
926 hw_lro_timestamp_flush_cnt[idx]++;
927 else if ((flush_reason & 0x7) == MTK_HW_LRO_NON_RULE_FLUSH)
928 hw_lro_norule_flush_cnt[idx]++;
929}
930
931ssize_t hw_lro_stats_write(struct file *file, const char __user *buffer,
932 size_t count, loff_t *data)
933{
934 memset(hw_lro_agg_num_cnt, 0, sizeof(hw_lro_agg_num_cnt));
935 memset(hw_lro_agg_size_cnt, 0, sizeof(hw_lro_agg_size_cnt));
936 memset(hw_lro_tot_agg_cnt, 0, sizeof(hw_lro_tot_agg_cnt));
937 memset(hw_lro_tot_flush_cnt, 0, sizeof(hw_lro_tot_flush_cnt));
938 memset(hw_lro_agg_flush_cnt, 0, sizeof(hw_lro_agg_flush_cnt));
939 memset(hw_lro_age_flush_cnt, 0, sizeof(hw_lro_age_flush_cnt));
940 memset(hw_lro_seq_flush_cnt, 0, sizeof(hw_lro_seq_flush_cnt));
941 memset(hw_lro_timestamp_flush_cnt, 0,
942 sizeof(hw_lro_timestamp_flush_cnt));
943 memset(hw_lro_norule_flush_cnt, 0, sizeof(hw_lro_norule_flush_cnt));
944
945 pr_info("clear hw lro cnt table\n");
946
947 return count;
948}
949
/* Dump the HW LRO statistics for NETSYS v1 SoCs (LRO rings 1-3) into a
 * seq_file.  Table index 0-2 maps to ring 1-3.  Always returns 0.
 */
int hw_lro_stats_read_v1(struct seq_file *seq, void *v)
{
	int i;

	seq_puts(seq, "HW LRO statistic dump:\n");

	/* Agg number count */
	seq_puts(seq, "Cnt: RING1 | RING2 | RING3 | Total\n");
	for (i = 0; i <= MTK_HW_LRO_MAX_AGG_CNT; i++) {
		seq_printf(seq, " %d : %d %d %d %d\n",
			   i, hw_lro_agg_num_cnt[0][i],
			   hw_lro_agg_num_cnt[1][i], hw_lro_agg_num_cnt[2][i],
			   hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] +
			   hw_lro_agg_num_cnt[2][i]);
	}

	/* Total agg count */
	seq_puts(seq, "Total agg: RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, " %d %d %d %d\n",
		   hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1],
		   hw_lro_tot_agg_cnt[2],
		   hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		   hw_lro_tot_agg_cnt[2]);

	/* Total flush count */
	seq_puts(seq, "Total flush: RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, " %d %d %d %d\n",
		   hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1],
		   hw_lro_tot_flush_cnt[2],
		   hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		   hw_lro_tot_flush_cnt[2]);

	/* Avg agg count; each ternary guards against divide-by-zero when a
	 * ring has seen no flushes yet
	 */
	seq_puts(seq, "Avg agg: RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, " %d %d %d %d\n",
		   (hw_lro_tot_flush_cnt[0]) ?
		   hw_lro_tot_agg_cnt[0] / hw_lro_tot_flush_cnt[0] : 0,
		   (hw_lro_tot_flush_cnt[1]) ?
		   hw_lro_tot_agg_cnt[1] / hw_lro_tot_flush_cnt[1] : 0,
		   (hw_lro_tot_flush_cnt[2]) ?
		   hw_lro_tot_agg_cnt[2] / hw_lro_tot_flush_cnt[2] : 0,
		   (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		   hw_lro_tot_flush_cnt[2]) ?
		   ((hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		   hw_lro_tot_agg_cnt[2]) / (hw_lro_tot_flush_cnt[0] +
		   hw_lro_tot_flush_cnt[1] + hw_lro_tot_flush_cnt[2])) : 0);

	/* Statistics of aggregation size counts (5000-byte buckets) */
	seq_puts(seq, "HW LRO flush pkt len:\n");
	seq_puts(seq, " Length | RING1 | RING2 | RING3 | Total\n");
	for (i = 0; i < 15; i++) {
		seq_printf(seq, "%d~%d: %d %d %d %d\n", i * 5000,
			   (i + 1) * 5000, hw_lro_agg_size_cnt[0][i],
			   hw_lro_agg_size_cnt[1][i], hw_lro_agg_size_cnt[2][i],
			   hw_lro_agg_size_cnt[0][i] +
			   hw_lro_agg_size_cnt[1][i] +
			   hw_lro_agg_size_cnt[2][i]);
	}

	/* Per-ring flush reason breakdown */
	seq_puts(seq, "Flush reason: RING1 | RING2 | RING3 | Total\n");
	seq_printf(seq, "AGG timeout: %d %d %d %d\n",
		   hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1],
		   hw_lro_agg_flush_cnt[2],
		   (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] +
		   hw_lro_agg_flush_cnt[2]));

	seq_printf(seq, "AGE timeout: %d %d %d %d\n",
		   hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1],
		   hw_lro_age_flush_cnt[2],
		   (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] +
		   hw_lro_age_flush_cnt[2]));

	seq_printf(seq, "Not in-sequence: %d %d %d %d\n",
		   hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1],
		   hw_lro_seq_flush_cnt[2],
		   (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] +
		   hw_lro_seq_flush_cnt[2]));

	seq_printf(seq, "Timestamp: %d %d %d %d\n",
		   hw_lro_timestamp_flush_cnt[0],
		   hw_lro_timestamp_flush_cnt[1],
		   hw_lro_timestamp_flush_cnt[2],
		   (hw_lro_timestamp_flush_cnt[0] +
		   hw_lro_timestamp_flush_cnt[1] +
		   hw_lro_timestamp_flush_cnt[2]));

	seq_printf(seq, "No LRO rule: %d %d %d %d\n",
		   hw_lro_norule_flush_cnt[0],
		   hw_lro_norule_flush_cnt[1],
		   hw_lro_norule_flush_cnt[2],
		   (hw_lro_norule_flush_cnt[0] +
		   hw_lro_norule_flush_cnt[1] +
		   hw_lro_norule_flush_cnt[2]));

	return 0;
}
1046
/* Dump the HW LRO statistics for NETSYS v2 SoCs (LRO rings 4-7) into a
 * seq_file.  Table index 0-3 maps to ring 4-7.  Always returns 0.
 */
int hw_lro_stats_read_v2(struct seq_file *seq, void *v)
{
	int i;

	seq_puts(seq, "HW LRO statistic dump:\n");

	/* Agg number count */
	seq_puts(seq, "Cnt: RING4 | RING5 | RING6 | RING7 Total\n");
	for (i = 0; i <= MTK_HW_LRO_MAX_AGG_CNT; i++) {
		seq_printf(seq,
			   " %d : %d %d %d %d %d\n",
			   i, hw_lro_agg_num_cnt[0][i], hw_lro_agg_num_cnt[1][i],
			   hw_lro_agg_num_cnt[2][i], hw_lro_agg_num_cnt[3][i],
			   hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] +
			   hw_lro_agg_num_cnt[2][i] + hw_lro_agg_num_cnt[3][i]);
	}

	/* Total agg count */
	seq_puts(seq, "Total agg: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, " %d %d %d %d %d\n",
		   hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1],
		   hw_lro_tot_agg_cnt[2], hw_lro_tot_agg_cnt[3],
		   hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		   hw_lro_tot_agg_cnt[2] + hw_lro_tot_agg_cnt[3]);

	/* Total flush count */
	seq_puts(seq, "Total flush: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, " %d %d %d %d %d\n",
		   hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1],
		   hw_lro_tot_flush_cnt[2], hw_lro_tot_flush_cnt[3],
		   hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		   hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3]);

	/* Avg agg count; each ternary guards against divide-by-zero when a
	 * ring has seen no flushes yet
	 */
	seq_puts(seq, "Avg agg: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, " %d %d %d %d %d\n",
		   (hw_lro_tot_flush_cnt[0]) ?
		   hw_lro_tot_agg_cnt[0] / hw_lro_tot_flush_cnt[0] : 0,
		   (hw_lro_tot_flush_cnt[1]) ?
		   hw_lro_tot_agg_cnt[1] / hw_lro_tot_flush_cnt[1] : 0,
		   (hw_lro_tot_flush_cnt[2]) ?
		   hw_lro_tot_agg_cnt[2] / hw_lro_tot_flush_cnt[2] : 0,
		   (hw_lro_tot_flush_cnt[3]) ?
		   hw_lro_tot_agg_cnt[3] / hw_lro_tot_flush_cnt[3] : 0,
		   (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		   hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3]) ?
		   ((hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
		   hw_lro_tot_agg_cnt[2] + hw_lro_tot_agg_cnt[3]) /
		   (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
		   hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3])) : 0);

	/* Statistics of aggregation size counts (5000-byte buckets) */
	seq_puts(seq, "HW LRO flush pkt len:\n");
	seq_puts(seq, " Length | RING4 | RING5 | RING6 | RING7 Total\n");
	for (i = 0; i < 15; i++) {
		seq_printf(seq, "%d~%d: %d %d %d %d %d\n",
			   i * 5000, (i + 1) * 5000,
			   hw_lro_agg_size_cnt[0][i], hw_lro_agg_size_cnt[1][i],
			   hw_lro_agg_size_cnt[2][i], hw_lro_agg_size_cnt[3][i],
			   hw_lro_agg_size_cnt[0][i] +
			   hw_lro_agg_size_cnt[1][i] +
			   hw_lro_agg_size_cnt[2][i] +
			   hw_lro_agg_size_cnt[3][i]);
	}

	/* Per-ring flush reason breakdown */
	seq_puts(seq, "Flush reason: RING4 | RING5 | RING6 | RING7 Total\n");
	seq_printf(seq, "AGG timeout: %d %d %d %d %d\n",
		   hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1],
		   hw_lro_agg_flush_cnt[2], hw_lro_agg_flush_cnt[3],
		   (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] +
		   hw_lro_agg_flush_cnt[2] + hw_lro_agg_flush_cnt[3]));

	seq_printf(seq, "AGE timeout: %d %d %d %d %d\n",
		   hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1],
		   hw_lro_age_flush_cnt[2], hw_lro_age_flush_cnt[3],
		   (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] +
		   hw_lro_age_flush_cnt[2] + hw_lro_age_flush_cnt[3]));

	seq_printf(seq, "Not in-sequence: %d %d %d %d %d\n",
		   hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1],
		   hw_lro_seq_flush_cnt[2], hw_lro_seq_flush_cnt[3],
		   (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] +
		   hw_lro_seq_flush_cnt[2] + hw_lro_seq_flush_cnt[3]));

	seq_printf(seq, "Timestamp: %d %d %d %d %d\n",
		   hw_lro_timestamp_flush_cnt[0],
		   hw_lro_timestamp_flush_cnt[1],
		   hw_lro_timestamp_flush_cnt[2],
		   hw_lro_timestamp_flush_cnt[3],
		   (hw_lro_timestamp_flush_cnt[0] +
		   hw_lro_timestamp_flush_cnt[1] +
		   hw_lro_timestamp_flush_cnt[2] +
		   hw_lro_timestamp_flush_cnt[3]));

	seq_printf(seq, "No LRO rule: %d %d %d %d %d\n",
		   hw_lro_norule_flush_cnt[0],
		   hw_lro_norule_flush_cnt[1],
		   hw_lro_norule_flush_cnt[2],
		   hw_lro_norule_flush_cnt[3],
		   (hw_lro_norule_flush_cnt[0] +
		   hw_lro_norule_flush_cnt[1] +
		   hw_lro_norule_flush_cnt[2] +
		   hw_lro_norule_flush_cnt[3]));

	return 0;
}
1153
1154int hw_lro_stats_read_wrapper(struct seq_file *seq, void *v)
1155{
1156 struct mtk_eth *eth = g_eth;
1157
1158 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1159 hw_lro_stats_read_v2(seq, v);
1160 else
1161 hw_lro_stats_read_v1(seq, v);
1162
1163 return 0;
1164}
1165
/* open() hook for /proc/mtketh/hw_lro_stats: bind the seq_file show
 * callback; no private data is needed.
 */
static int hw_lro_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, hw_lro_stats_read_wrapper, NULL);
}
1170
/* File operations for /proc/mtketh/hw_lro_stats: reads dump the counter
 * tables via seq_file, any write clears them.
 */
static const struct file_operations hw_lro_stats_fops = {
	.owner = THIS_MODULE,
	.open = hw_lro_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = hw_lro_stats_write,
	.release = single_release
};
1179
developer77d03a72021-06-06 00:06:00 +08001180int hwlro_agg_cnt_ctrl(int cnt)
1181{
1182 int i;
1183
1184 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1185 SET_PDMA_RXRING_MAX_AGG_CNT(g_eth, i, cnt);
1186
1187 return 0;
1188}
1189
1190int hwlro_agg_time_ctrl(int time)
1191{
1192 int i;
1193
1194 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1195 SET_PDMA_RXRING_AGG_TIME(g_eth, i, time);
1196
1197 return 0;
1198}
1199
1200int hwlro_age_time_ctrl(int time)
1201{
1202 int i;
1203
1204 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1205 SET_PDMA_RXRING_AGE_TIME(g_eth, i, time);
1206
1207 return 0;
1208}
1209
/* Program the global LRO bandwidth threshold register.
 * Always returns 0.
 */
int hwlro_threshold_ctrl(int bandwidth)
{
	SET_PDMA_LRO_BW_THRESHOLD(g_eth, bandwidth);

	return 0;
}
1216
1217int hwlro_ring_enable_ctrl(int enable)
1218{
1219 int i;
1220
1221 pr_info("[%s] %s HW LRO rings\n", __func__, (enable) ? "Enable" : "Disable");
1222
1223 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1224 SET_PDMA_RXRING_VALID(g_eth, i, enable);
1225
1226 return 0;
1227}
1228
1229int hwlro_stats_enable_ctrl(int enable)
1230{
1231 pr_info("[%s] %s HW LRO statistics\n", __func__, (enable) ? "Enable" : "Disable");
1232 mtk_hwlro_stats_ebl = enable;
1233
1234 return 0;
1235}
1236
/* Dispatch table for /proc/mtketh/hw_lro_auto_tlb writes: the first
 * token of the written string selects the entry, the second token is
 * passed as its argument.
 */
static const mtk_lro_dbg_func lro_dbg_func[] = {
	[0] = hwlro_agg_cnt_ctrl,
	[1] = hwlro_agg_time_ctrl,
	[2] = hwlro_age_time_ctrl,
	[3] = hwlro_threshold_ctrl,
	[4] = hwlro_ring_enable_ctrl,
	[5] = hwlro_stats_enable_ctrl,
};
1245
1246ssize_t hw_lro_auto_tlb_write(struct file *file, const char __user *buffer,
1247 size_t count, loff_t *data)
1248{
1249 char buf[32];
1250 char *p_buf;
1251 char *p_token = NULL;
1252 char *p_delimiter = " \t";
1253 long x = 0, y = 0;
developer4c32b7a2021-11-13 16:46:43 +08001254 u32 len = count;
developer77d03a72021-06-06 00:06:00 +08001255 int ret;
1256
1257 if (len >= sizeof(buf)) {
1258 pr_info("Input handling fail!\n");
developer77d03a72021-06-06 00:06:00 +08001259 return -1;
1260 }
1261
1262 if (copy_from_user(buf, buffer, len))
1263 return -EFAULT;
1264
1265 buf[len] = '\0';
1266
1267 p_buf = buf;
1268 p_token = strsep(&p_buf, p_delimiter);
1269 if (!p_token)
1270 x = 0;
1271 else
1272 ret = kstrtol(p_token, 10, &x);
1273
1274 p_token = strsep(&p_buf, "\t\n ");
1275 if (p_token)
1276 ret = kstrtol(p_token, 10, &y);
1277
1278 if (lro_dbg_func[x] && (ARRAY_SIZE(lro_dbg_func) > x))
1279 (*lro_dbg_func[x]) (y);
1280
1281 return count;
1282}
1283
/* Dump one entry of the NETSYS v1 HW LRO auto-learn table to a seq_file.
 *
 * @seq: destination seq_file
 * @index: raw entry index taken from the table's valid bitmap (0-7)
 *
 * The entry is read indirectly: an address is written to the CF8
 * register, then 9 dwords are streamed out of the CFC window and
 * reinterpreted as struct mtk_lro_alt_v1.
 */
void hw_lro_auto_tlb_dump_v1(struct seq_file *seq, u32 index)
{
	int i;
	struct mtk_lro_alt_v1 alt;
	__be32 addr;
	u32 tlb_info[9];
	u32 dw_len, cnt, priority;
	u32 entry;

	/* NOTE(review): indices above 4 are shifted down by one before the
	 * CF8 address is formed - presumably to skip a reserved slot;
	 * confirm against the PDMA auto-learn table layout.
	 */
	if (index > 4)
		index = index - 1;
	entry = (index * 9) + 1;

	/* read valid entries of the auto-learn table */
	mtk_w32(g_eth, entry, MTK_FE_ALT_CF8);

	/* each table entry is 9 dwords wide */
	for (i = 0; i < 9; i++)
		tlb_info[i] = mtk_r32(g_eth, MTK_FE_ALT_SEQ_CFC);

	memcpy(&alt, tlb_info, sizeof(struct mtk_lro_alt_v1));

	dw_len = alt.alt_info7.dw_len;
	cnt = alt.alt_info6.cnt;

	/* the priority metric follows the configured ALT counting mode */
	if (mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW0) & MTK_LRO_ALT_PKT_CNT_MODE)
		priority = cnt; /* packet count */
	else
		priority = dw_len; /* byte count */

	/* dump valid entries of the auto-learn table */
	if (index >= 4)
		seq_printf(seq, "\n===== TABLE Entry: %d (Act) =====\n", index);
	else
		seq_printf(seq, "\n===== TABLE Entry: %d (LRU) =====\n", index);

	if (alt.alt_info8.ipv4) {
		addr = htonl(alt.alt_info1.sip0);
		seq_printf(seq, "SIP = %pI4 (IPv4)\n", &addr);
	} else {
		seq_printf(seq, "SIP = %08X:%08X:%08X:%08X (IPv6)\n",
			   alt.alt_info4.sip3, alt.alt_info3.sip2,
			   alt.alt_info2.sip1, alt.alt_info1.sip0);
	}

	seq_printf(seq, "DIP_ID = %d\n", alt.alt_info8.dip_id);
	seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
		   alt.alt_info0.stp, alt.alt_info0.dtp);
	seq_printf(seq, "VLAN_VID_VLD = %d\n", alt.alt_info6.vlan_vid_vld);
	seq_printf(seq, "VLAN1 = %d | VLAN2 = %d | VLAN3 = %d | VLAN4 =%d\n",
		   (alt.alt_info5.vlan_vid0 & 0xfff),
		   ((alt.alt_info5.vlan_vid0 >> 12) & 0xfff),
		   ((alt.alt_info6.vlan_vid1 << 8) |
		   ((alt.alt_info5.vlan_vid0 >> 24) & 0xfff)),
		   ((alt.alt_info6.vlan_vid1 >> 4) & 0xfff));
	seq_printf(seq, "TPUT = %d | FREQ = %d\n", dw_len, cnt);
	seq_printf(seq, "PRIORITY = %d\n", priority);
}
1341
/* Dump one entry of the NETSYS v2 HW LRO auto-learn table to a seq_file.
 *
 * @seq: destination seq_file
 * @index: entry index written into the ALT debug register
 *
 * The entry is streamed out of the ALT debug data register and
 * reinterpreted as struct mtk_lro_alt_v2; only valid entries are
 * printed.
 */
void hw_lro_auto_tlb_dump_v2(struct seq_file *seq, u32 index)
{
	int i;
	struct mtk_lro_alt_v2 alt;
	u32 score = 0, ipv4 = 0;
	u32 ipv6[4] = { 0 };
	/* NOTE(review): 12 dwords are reserved but only 11 are read below;
	 * if sizeof(struct mtk_lro_alt_v2) covers all 12, the last copied
	 * word is uninitialized - confirm the entry width.
	 */
	u32 tlb_info[12];

	/* read valid entries of the auto-learn table */
	mtk_w32(g_eth, index << MTK_LRO_ALT_INDEX_OFFSET, MTK_LRO_ALT_DBG);

	for (i = 0; i < 11; i++)
		tlb_info[i] = mtk_r32(g_eth, MTK_LRO_ALT_DBG_DATA);

	memcpy(&alt, tlb_info, sizeof(struct mtk_lro_alt_v2));

	/* NOTE(review): score is set from the ALT counting mode but never
	 * printed below - looks vestigial.
	 */
	if (mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW0) & MTK_LRO_ALT_PKT_CNT_MODE)
		score = 1;	/* packet count */
	else
		score = 0;	/* byte count */

	/* dump valid entries of the auto-learn table */
	if (alt.alt_info0.valid) {
		if (index < 5)
			seq_printf(seq,
				   "\n===== TABLE Entry: %d (onging) =====\n",
				   index);
		else
			seq_printf(seq,
				   "\n===== TABLE Entry: %d (candidate) =====\n",
				   index);

		if (alt.alt_info1.v4_valid) {
			ipv4 = (alt.alt_info4.sip0_h << 23) |
				alt.alt_info5.sip0_l;
			seq_printf(seq, "SIP = 0x%x: (IPv4)\n", ipv4);

			ipv4 = (alt.alt_info8.dip0_h << 23) |
				alt.alt_info9.dip0_l;
			seq_printf(seq, "DIP = 0x%x: (IPv4)\n", ipv4);
		} else if (alt.alt_info1.v6_valid) {
			ipv6[3] = (alt.alt_info1.sip3_h << 23) |
				  (alt.alt_info2.sip3_l << 9);
			ipv6[2] = (alt.alt_info2.sip2_h << 23) |
				  (alt.alt_info3.sip2_l << 9);
			ipv6[1] = (alt.alt_info3.sip1_h << 23) |
				  (alt.alt_info4.sip1_l << 9);
			ipv6[0] = (alt.alt_info4.sip0_h << 23) |
				  (alt.alt_info5.sip0_l << 9);
			seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
				   ipv6[3], ipv6[2], ipv6[1], ipv6[0]);

			ipv6[3] = (alt.alt_info5.dip3_h << 23) |
				  (alt.alt_info6.dip3_l << 9);
			ipv6[2] = (alt.alt_info6.dip2_h << 23) |
				  (alt.alt_info7.dip2_l << 9);
			ipv6[1] = (alt.alt_info7.dip1_h << 23) |
				  (alt.alt_info8.dip1_l << 9);
			ipv6[0] = (alt.alt_info8.dip0_h << 23) |
				  (alt.alt_info9.dip0_l << 9);
			seq_printf(seq, "DIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
				   ipv6[3], ipv6[2], ipv6[1], ipv6[0]);
		}

		seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
			   (alt.alt_info9.sp_h << 7) | (alt.alt_info10.sp_l),
			   alt.alt_info10.dp);
	}
}
1411
/* seq_file show callback for /proc/mtketh/hw_lro_auto_tlb: print usage
 * help, dump every valid auto-learn table entry (v1 or v2 layout per
 * SoC), then the per-ring agg/age/threshold settings.  Always returns 0.
 */
int hw_lro_auto_tlb_read(struct seq_file *seq, void *v)
{
	int i;
	u32 reg_val;
	u32 reg_op1, reg_op2, reg_op3, reg_op4;
	u32 agg_cnt, agg_time, age_time;

	seq_puts(seq, "Usage of /proc/mtketh/hw_lro_auto_tlb:\n");
	seq_puts(seq, "echo [function] [setting] > /proc/mtketh/hw_lro_auto_tlb\n");
	seq_puts(seq, "Functions:\n");
	seq_puts(seq, "[0] = hwlro_agg_cnt_ctrl\n");
	seq_puts(seq, "[1] = hwlro_agg_time_ctrl\n");
	seq_puts(seq, "[2] = hwlro_age_time_ctrl\n");
	seq_puts(seq, "[3] = hwlro_threshold_ctrl\n");
	seq_puts(seq, "[4] = hwlro_ring_enable_ctrl\n");
	seq_puts(seq, "[5] = hwlro_stats_enable_ctrl\n\n");

	if (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V2)) {
		/* v2 table entries are addressed directly by index */
		for (i = 1; i <= 8; i++)
			hw_lro_auto_tlb_dump_v2(seq, i);
	} else {
		/* Read valid entries of the auto-learn table */
		mtk_w32(g_eth, 0, MTK_FE_ALT_CF8);
		reg_val = mtk_r32(g_eth, MTK_FE_ALT_SEQ_CFC);

		seq_printf(seq,
			   "HW LRO Auto-learn Table: (MTK_FE_ALT_SEQ_CFC=0x%x)\n",
			   reg_val);

		/* bit i of the CFC readback marks entry i as valid */
		for (i = 7; i >= 0; i--) {
			if (reg_val & (1 << i))
				hw_lro_auto_tlb_dump_v1(seq, i);
		}
	}

	/* Read the agg_time/age_time/agg_cnt of LRO rings */
	seq_puts(seq, "\nHW LRO Ring Settings\n");

	for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
		reg_op1 = mtk_r32(g_eth, MTK_LRO_CTRL_DW1_CFG(i));
		reg_op2 = mtk_r32(g_eth, MTK_LRO_CTRL_DW2_CFG(i));
		reg_op3 = mtk_r32(g_eth, MTK_LRO_CTRL_DW3_CFG(i));
		reg_op4 = mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW2);

		/* fields straddle register boundaries; reassemble them */
		agg_cnt =
		    ((reg_op3 & 0x3) << 6) |
		    ((reg_op2 >> MTK_LRO_RING_AGG_CNT_L_OFFSET) & 0x3f);
		agg_time = (reg_op2 >> MTK_LRO_RING_AGG_TIME_OFFSET) & 0xffff;
		age_time =
		    ((reg_op2 & 0x3f) << 10) |
		    ((reg_op1 >> MTK_LRO_RING_AGE_TIME_L_OFFSET) & 0x3ff);
		/* on NETSYS v2 the printed ring number is offset by 3 */
		seq_printf(seq,
			   "Ring[%d]: MAX_AGG_CNT=%d, AGG_TIME=%d, AGE_TIME=%d, Threshold=%d\n",
			   (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V2))? i+3 : i,
			   agg_cnt, agg_time, age_time, reg_op4);
	}

	seq_puts(seq, "\n");

	return 0;
}
1473
/* open() hook for /proc/mtketh/hw_lro_auto_tlb: bind the seq_file show
 * callback; no private data is needed.
 */
static int hw_lro_auto_tlb_open(struct inode *inode, struct file *file)
{
	return single_open(file, hw_lro_auto_tlb_read, NULL);
}
1478
/* File operations for /proc/mtketh/hw_lro_auto_tlb: reads dump the
 * auto-learn table and ring settings, writes dispatch debug commands.
 */
static const struct file_operations hw_lro_auto_tlb_fops = {
	.owner = THIS_MODULE,
	.open = hw_lro_auto_tlb_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = hw_lro_auto_tlb_write,
	.release = single_release
};
developerfd40db22021-04-29 10:08:25 +08001487
developer8051e042022-04-08 13:26:36 +08001488int reset_event_read(struct seq_file *seq, void *v)
1489{
1490 struct mtk_eth *eth = g_eth;
1491 struct mtk_reset_event reset_event = eth->reset_event;
1492
1493 seq_printf(seq, "[Event] [Count]\n");
1494 seq_printf(seq, " FQ Empty: %d\n",
1495 reset_event.count[MTK_EVENT_FQ_EMPTY]);
1496 seq_printf(seq, " TSO Fail: %d\n",
1497 reset_event.count[MTK_EVENT_TSO_FAIL]);
1498 seq_printf(seq, " TSO Illegal: %d\n",
1499 reset_event.count[MTK_EVENT_TSO_ILLEGAL]);
1500 seq_printf(seq, " TSO Align: %d\n",
1501 reset_event.count[MTK_EVENT_TSO_ALIGN]);
1502 seq_printf(seq, " RFIFO OV: %d\n",
1503 reset_event.count[MTK_EVENT_RFIFO_OV]);
1504 seq_printf(seq, " RFIFO UF: %d\n",
1505 reset_event.count[MTK_EVENT_RFIFO_UF]);
1506 seq_printf(seq, " Force: %d\n",
1507 reset_event.count[MTK_EVENT_FORCE]);
1508 seq_printf(seq, "----------------------------\n");
1509 seq_printf(seq, " Warm Cnt: %d\n",
1510 reset_event.count[MTK_EVENT_WARM_CNT]);
1511 seq_printf(seq, " Cold Cnt: %d\n",
1512 reset_event.count[MTK_EVENT_COLD_CNT]);
1513 seq_printf(seq, " Total Cnt: %d\n",
1514 reset_event.count[MTK_EVENT_TOTAL_CNT]);
1515
1516 return 0;
1517}
1518
/* open() hook for /proc/mtketh/reset_event: bind the seq_file show
 * callback; no private data is needed.
 */
static int reset_event_open(struct inode *inode, struct file *file)
{
	return single_open(file, reset_event_read, 0);
}
1523
1524ssize_t reset_event_write(struct file *file, const char __user *buffer,
1525 size_t count, loff_t *data)
1526{
1527 struct mtk_eth *eth = g_eth;
1528 struct mtk_reset_event *reset_event = &eth->reset_event;
1529
1530 memset(reset_event, 0, sizeof(struct mtk_reset_event));
1531 pr_info("MTK reset event counter is cleared !\n");
1532
1533 return count;
1534}
1535
/* File operations for /proc/mtketh/reset_event: reads dump the counters,
 * any write clears them.
 */
static const struct file_operations reset_event_fops = {
	.owner = THIS_MODULE,
	.open = reset_event_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = reset_event_write,
	.release = single_release
};
1544
1545
/* Root /proc/mtketh directory and the entries created beneath it by
 * debug_proc_init() (other entries are declared elsewhere in the file).
 */
struct proc_dir_entry *proc_reg_dir;
static struct proc_dir_entry *proc_esw_cnt, *proc_dbg_regs, *proc_reset_event;
developerfd40db22021-04-29 10:08:25 +08001548
/* Create the /proc/mtketh debug entries and cache the mtk_eth handle in
 * g_eth for the other debug hooks.  Creation failures are only logged,
 * never fatal; always returns 0.
 */
int debug_proc_init(struct mtk_eth *eth)
{
	g_eth = eth;

	if (!proc_reg_dir)
		proc_reg_dir = proc_mkdir(PROCREG_DIR, NULL);

	proc_tx_ring =
	    proc_create(PROCREG_TXRING, 0, proc_reg_dir, &tx_ring_fops);
	if (!proc_tx_ring)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_TXRING);

	proc_hwtx_ring =
	    proc_create(PROCREG_HWTXRING, 0, proc_reg_dir, &hwtx_ring_fops);
	if (!proc_hwtx_ring)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_HWTXRING);

	proc_rx_ring =
	    proc_create(PROCREG_RXRING, 0, proc_reg_dir, &rx_ring_fops);
	if (!proc_rx_ring)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RXRING);

	proc_esw_cnt =
	    proc_create(PROCREG_ESW_CNT, 0, proc_reg_dir, &switch_count_fops);
	if (!proc_esw_cnt)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_ESW_CNT);

	proc_dbg_regs =
	    proc_create(PROCREG_DBG_REGS, 0, proc_reg_dir, &dbg_regs_fops);
	if (!proc_dbg_regs)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_DBG_REGS);

	/* LRO debug entries only exist when the SoC has HW LRO enabled */
	if (g_eth->hwlro) {
		proc_hw_lro_stats =
			proc_create(PROCREG_HW_LRO_STATS, 0, proc_reg_dir,
				    &hw_lro_stats_fops);
		if (!proc_hw_lro_stats)
			pr_info("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_STATS);

		proc_hw_lro_auto_tlb =
			proc_create(PROCREG_HW_LRO_AUTO_TLB, 0, proc_reg_dir,
				    &hw_lro_auto_tlb_fops);
		if (!proc_hw_lro_auto_tlb)
			pr_info("!! FAIL to create %s PROC !!\n",
				PROCREG_HW_LRO_AUTO_TLB);
	}

	proc_reset_event =
	    proc_create(PROCREG_RESET_EVENT, 0, proc_reg_dir, &reset_event_fops);
	if (!proc_reset_event)
		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RESET_EVENT);

	return 0;
}
1603
1604void debug_proc_exit(void)
1605{
1606 if (proc_tx_ring)
1607 remove_proc_entry(PROCREG_TXRING, proc_reg_dir);
developer8051e042022-04-08 13:26:36 +08001608 if (proc_hwtx_ring)
1609 remove_proc_entry(PROCREG_HWTXRING, proc_reg_dir);
developerfd40db22021-04-29 10:08:25 +08001610 if (proc_rx_ring)
1611 remove_proc_entry(PROCREG_RXRING, proc_reg_dir);
1612
1613 if (proc_esw_cnt)
1614 remove_proc_entry(PROCREG_ESW_CNT, proc_reg_dir);
1615
1616 if (proc_reg_dir)
1617 remove_proc_entry(PROCREG_DIR, 0);
1618
1619 if (proc_dbg_regs)
1620 remove_proc_entry(PROCREG_DBG_REGS, proc_reg_dir);
developer77d03a72021-06-06 00:06:00 +08001621
1622 if (g_eth->hwlro) {
1623 if (proc_hw_lro_stats)
1624 remove_proc_entry(PROCREG_HW_LRO_STATS, proc_reg_dir);
1625
1626 if (proc_hw_lro_auto_tlb)
1627 remove_proc_entry(PROCREG_HW_LRO_AUTO_TLB, proc_reg_dir);
1628 }
developer8051e042022-04-08 13:26:36 +08001629
1630 if (proc_reset_event)
1631 remove_proc_entry(PROCREG_RESET_EVENT, proc_reg_dir);
developerfd40db22021-04-29 10:08:25 +08001632}
1633