blob: 7ba9a01e8db094aab8e0d366a0d7c1bc91afb47b [file] [log] [blame]
developerfd40db22021-04-29 10:08:25 +08001/*
2 * Copyright (C) 2018 MediaTek Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
14 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
15 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
16 */
17
18#include <linux/trace_seq.h>
19#include <linux/seq_file.h>
20#include <linux/proc_fs.h>
21#include <linux/u64_stats_sync.h>
22#include <linux/dma-mapping.h>
23#include <linux/netdevice.h>
24#include <linux/ctype.h>
25#include <linux/debugfs.h>
26#include <linux/of_mdio.h>
27
28#include "mtk_eth_soc.h"
29#include "mtk_eth_dbg.h"
developer8051e042022-04-08 13:26:36 +080030#include "mtk_eth_reset.h"
developerfd40db22021-04-29 10:08:25 +080031
/* HW LRO statistics: per-ring counters updated from the RX path
 * (hw_lro_stats_update / hw_lro_flush_stats_update below) and
 * dumped or cleared through the /proc entries created in this file. */
u32 hw_lro_agg_num_cnt[MTK_HW_LRO_RING_NUM][MTK_HW_LRO_MAX_AGG_CNT + 1];
u32 hw_lro_agg_size_cnt[MTK_HW_LRO_RING_NUM][16];
u32 hw_lro_tot_agg_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_tot_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_agg_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_age_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_seq_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_timestamp_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 hw_lro_norule_flush_cnt[MTK_HW_LRO_RING_NUM];
u32 mtk_hwlro_stats_ebl;
static struct proc_dir_entry *proc_hw_lro_stats, *proc_hw_lro_auto_tlb;
typedef int (*mtk_lro_dbg_func) (int par);

/* Holds the debugfs root directory created by mtketh_debugfs_init(). */
struct mtk_eth_debug {
	struct dentry *root;
};

/* Global eth context used by the /proc and debugfs handlers in this
 * file; presumably assigned at probe time elsewhere — not set here. */
struct mtk_eth *g_eth;

struct mtk_eth_debug eth_debug;
52
developer3957a912021-05-13 16:44:31 +080053void mt7530_mdio_w32(struct mtk_eth *eth, u16 reg, u32 val)
developerfd40db22021-04-29 10:08:25 +080054{
55 mutex_lock(&eth->mii_bus->mdio_lock);
56
57 _mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
58 _mtk_mdio_write(eth, 0x1f, (reg >> 2) & 0xf, val & 0xffff);
59 _mtk_mdio_write(eth, 0x1f, 0x10, val >> 16);
60
61 mutex_unlock(&eth->mii_bus->mdio_lock);
62}
63
64u32 mt7530_mdio_r32(struct mtk_eth *eth, u32 reg)
65{
66 u16 high, low;
67
68 mutex_lock(&eth->mii_bus->mdio_lock);
69
70 _mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
71 low = _mtk_mdio_read(eth, 0x1f, (reg >> 2) & 0xf);
72 high = _mtk_mdio_read(eth, 0x1f, 0x10);
73
74 mutex_unlock(&eth->mii_bus->mdio_lock);
75
76 return (high << 16) | (low & 0xffff);
77}
78
/* Write a switch register; the switch register window sits at a
 * 0x10000 offset inside the eth register space. */
void mtk_switch_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	mtk_w32(eth, val, reg + 0x10000);
}
EXPORT_SYMBOL(mtk_switch_w32);

/* Read a switch register from the 0x10000 window (see above). */
u32 mtk_switch_r32(struct mtk_eth *eth, unsigned reg)
{
	return mtk_r32(eth, reg + 0x10000);
}
EXPORT_SYMBOL(mtk_switch_r32);
90
/* debugfs "phy_regs" show handler: walks every MAC that is not a
 * fixed link.  The actual PHY register dump is currently compiled
 * out (#if 0 below), so this produces no output. */
static int mtketh_debug_show(struct seq_file *m, void *private)
{
	struct mtk_eth *eth = m->private;
	struct mtk_mac *mac = 0;
	int i = 0;

	for (i = 0 ; i < MTK_MAX_DEVS ; i++) {
		if (!eth->mac[i] ||
		    of_phy_is_fixed_link(eth->mac[i]->of_node))
			continue;
		mac = eth->mac[i];
#if 0 //FIXME
		/* Dead code: dumped PHY regs 0..29 via mac->phy_dev;
		 * references (j, d, phy_dev) are not declared in the
		 * current function, so this cannot be re-enabled as-is. */
		while (j < 30) {
			d = _mtk_mdio_read(eth, mac->phy_dev->addr, j);

			seq_printf(m, "phy=%d, reg=0x%08x, data=0x%08x\n",
				   mac->phy_dev->addr, j, d);
			j++;
		}
#endif
	}
	return 0;
}
114
/* Open hook for the "phy_regs" debugfs file (seq_file single-shot). */
static int mtketh_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtketh_debug_show, inode->i_private);
}

/* File operations for debugfs "phy_regs" (read-only dump). */
static const struct file_operations mtketh_debug_fops = {
	.open = mtketh_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
126
/* debugfs "mt7530sw_regs" show handler: dump the MT7530 switch
 * register file over indirect MDIO, one 32-bit word per line, for
 * each of the known register ranges below. */
static int mtketh_mt7530sw_debug_show(struct seq_file *m, void *private)
{
	struct mtk_eth *eth = m->private;
	u32 offset, data;
	int i;
	/* Inclusive [start, end] register ranges of the MT7530 map. */
	struct mt7530_ranges {
		u32 start;
		u32 end;
	} ranges[] = {
		{0x0, 0xac},
		{0x1000, 0x10e0},
		{0x1100, 0x1140},
		{0x1200, 0x1240},
		{0x1300, 0x1340},
		{0x1400, 0x1440},
		{0x1500, 0x1540},
		{0x1600, 0x1640},
		{0x1800, 0x1848},
		{0x1900, 0x1948},
		{0x1a00, 0x1a48},
		{0x1b00, 0x1b48},
		{0x1c00, 0x1c48},
		{0x1d00, 0x1d48},
		{0x1e00, 0x1e48},
		{0x1f60, 0x1ffc},
		{0x2000, 0x212c},
		{0x2200, 0x222c},
		{0x2300, 0x232c},
		{0x2400, 0x242c},
		{0x2500, 0x252c},
		{0x2600, 0x262c},
		{0x3000, 0x3014},
		{0x30c0, 0x30f8},
		{0x3100, 0x3114},
		{0x3200, 0x3214},
		{0x3300, 0x3314},
		{0x3400, 0x3414},
		{0x3500, 0x3514},
		{0x3600, 0x3614},
		{0x4000, 0x40d4},
		{0x4100, 0x41d4},
		{0x4200, 0x42d4},
		{0x4300, 0x43d4},
		{0x4400, 0x44d4},
		{0x4500, 0x45d4},
		{0x4600, 0x46d4},
		{0x4f00, 0x461c},
		{0x7000, 0x7038},
		{0x7120, 0x7124},
		{0x7800, 0x7804},
		{0x7810, 0x7810},
		{0x7830, 0x7830},
		{0x7a00, 0x7a7c},
		{0x7b00, 0x7b04},
		{0x7e00, 0x7e04},
		{0x7ffc, 0x7ffc},
	};

	if (!mt7530_exist(eth))
		return -EOPNOTSUPP;

	/* A switch is expected behind a fixed-link MAC; bail out if
	 * neither MAC 0 nor MAC 1 is a fixed link. */
	if ((!eth->mac[0] || !of_phy_is_fixed_link(eth->mac[0]->of_node)) &&
	    (!eth->mac[1] || !of_phy_is_fixed_link(eth->mac[1]->of_node))) {
		seq_puts(m, "no switch found\n");
		return 0;
	}

	for (i = 0 ; i < ARRAY_SIZE(ranges) ; i++) {
		for (offset = ranges[i].start;
		     offset <= ranges[i].end; offset += 4) {
			data = mt7530_mdio_r32(eth, offset);
			seq_printf(m, "mt7530 switch reg=0x%08x, data=0x%08x\n",
				   offset, data);
		}
	}

	return 0;
}
205
/* Open hook for the "mt7530sw_regs" debugfs file. */
static int mtketh_debug_mt7530sw_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtketh_mt7530sw_debug_show, inode->i_private);
}

/* File operations for debugfs "mt7530sw_regs" (read-only dump). */
static const struct file_operations mtketh_debug_mt7530sw_fops = {
	.open = mtketh_debug_mt7530sw_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
217
218static ssize_t mtketh_mt7530sw_debugfs_write(struct file *file,
219 const char __user *ptr,
220 size_t len, loff_t *off)
221{
222 struct mtk_eth *eth = file->private_data;
223 char buf[32], *token, *p = buf;
224 u32 reg, value, phy;
225 int ret;
226
227 if (!mt7530_exist(eth))
228 return -EOPNOTSUPP;
229
230 if (*off != 0)
231 return 0;
232
233 if (len > sizeof(buf) - 1)
234 len = sizeof(buf) - 1;
235
236 ret = strncpy_from_user(buf, ptr, len);
237 if (ret < 0)
238 return ret;
239 buf[len] = '\0';
240
241 token = strsep(&p, " ");
242 if (!token)
243 return -EINVAL;
244 if (kstrtoul(token, 16, (unsigned long *)&phy))
245 return -EINVAL;
246
247 token = strsep(&p, " ");
248 if (!token)
249 return -EINVAL;
250 if (kstrtoul(token, 16, (unsigned long *)&reg))
251 return -EINVAL;
252
253 token = strsep(&p, " ");
254 if (!token)
255 return -EINVAL;
256 if (kstrtoul(token, 16, (unsigned long *)&value))
257 return -EINVAL;
258
259 pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
260 0x1f, reg, value);
261 mt7530_mdio_w32(eth, reg, value);
262 pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
263 0x1f, reg, mt7530_mdio_r32(eth, reg));
264
265 return len;
266}
267
268static ssize_t mtketh_debugfs_write(struct file *file, const char __user *ptr,
269 size_t len, loff_t *off)
270{
271 struct mtk_eth *eth = file->private_data;
272 char buf[32], *token, *p = buf;
273 u32 reg, value, phy;
274 int ret;
275
276 if (*off != 0)
277 return 0;
278
279 if (len > sizeof(buf) - 1)
280 len = sizeof(buf) - 1;
281
282 ret = strncpy_from_user(buf, ptr, len);
283 if (ret < 0)
284 return ret;
285 buf[len] = '\0';
286
287 token = strsep(&p, " ");
288 if (!token)
289 return -EINVAL;
290 if (kstrtoul(token, 16, (unsigned long *)&phy))
291 return -EINVAL;
292
293 token = strsep(&p, " ");
294
295 if (!token)
296 return -EINVAL;
297 if (kstrtoul(token, 16, (unsigned long *)&reg))
298 return -EINVAL;
299
300 token = strsep(&p, " ");
301
302 if (!token)
303 return -EINVAL;
304 if (kstrtoul(token, 16, (unsigned long *)&value))
305 return -EINVAL;
306
307 pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
308 phy, reg, value);
309
310 _mtk_mdio_write(eth, phy, reg, value);
311
312 pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
313 phy, reg, _mtk_mdio_read(eth, phy, reg));
314
315 return len;
316}
317
/* debugfs "reset" write handler: any write bumps the (externally
 * defined) "force" counter and schedules the driver's pending reset
 * work.  The written data itself is ignored. */
static ssize_t mtketh_debugfs_reset(struct file *file, const char __user *ptr,
				    size_t len, loff_t *off)
{
	struct mtk_eth *eth = file->private_data;

	atomic_inc(&force);
	schedule_work(&eth->pending_work);
	return len;
}
327
/* File operations for debugfs "phy_reg_w" (write-only MDIO poke). */
static const struct file_operations fops_reg_w = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = mtketh_debugfs_write,
	.llseek = noop_llseek,
};

/* File operations for debugfs "reset" (write-only reset trigger). */
static const struct file_operations fops_eth_reset = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = mtketh_debugfs_reset,
	.llseek = noop_llseek,
};

/* File operations for debugfs "mt7530sw_reg_w" (write-only switch poke). */
static const struct file_operations fops_mt7530sw_reg_w = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = mtketh_mt7530sw_debugfs_write,
	.llseek = noop_llseek,
};
348
/* Tear down the whole "mtketh" debugfs tree created at init. */
void mtketh_debugfs_exit(struct mtk_eth *eth)
{
	debugfs_remove_recursive(eth_debug.root);
}
353
/* Create the "mtketh" debugfs directory and its entries; the switch
 * files are only created when an MT7530 is present.
 *
 * NOTE(review): on recent kernels debugfs_create_dir() returns an
 * ERR_PTR(), not NULL, on failure, so the NULL check below may never
 * fire — confirm against the target kernel version.
 */
int mtketh_debugfs_init(struct mtk_eth *eth)
{
	int ret = 0;

	eth_debug.root = debugfs_create_dir("mtketh", NULL);
	if (!eth_debug.root) {
		dev_notice(eth->dev, "%s:err at %d\n", __func__, __LINE__);
		ret = -ENOMEM;
	}

	debugfs_create_file("phy_regs", S_IRUGO,
			    eth_debug.root, eth, &mtketh_debug_fops);
	debugfs_create_file("phy_reg_w", S_IFREG | S_IWUSR,
			    eth_debug.root, eth, &fops_reg_w);
	debugfs_create_file("reset", S_IFREG | S_IWUSR,
			    eth_debug.root, eth, &fops_eth_reset);
	if (mt7530_exist(eth)) {
		debugfs_create_file("mt7530sw_regs", S_IRUGO,
				    eth_debug.root, eth,
				    &mtketh_debug_mt7530sw_fops);
		debugfs_create_file("mt7530sw_reg_w", S_IFREG | S_IWUSR,
				    eth_debug.root, eth,
				    &fops_mt7530sw_reg_w);
	}
	return ret;
}
380
381void mii_mgr_read_combine(struct mtk_eth *eth, u32 phy_addr, u32 phy_register,
382 u32 *read_data)
383{
384 if (mt7530_exist(eth) && phy_addr == 31)
385 *read_data = mt7530_mdio_r32(eth, phy_register);
386
387 else
388 *read_data = _mtk_mdio_read(eth, phy_addr, phy_register);
389}
390
developer3957a912021-05-13 16:44:31 +0800391void mii_mgr_write_combine(struct mtk_eth *eth, u16 phy_addr, u16 phy_register,
developerfd40db22021-04-29 10:08:25 +0800392 u32 write_data)
393{
394 if (mt7530_exist(eth) && phy_addr == 31)
395 mt7530_mdio_w32(eth, phy_register, write_data);
396
397 else
398 _mtk_mdio_write(eth, phy_addr, phy_register, write_data);
399}
400
/* Clause-45 MDIO read: encode (devad, reg) with mdiobus_c45_addr()
 * and issue it through the shared MDIO read path. */
static void mii_mgr_read_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 *data)
{
	*data = _mtk_mdio_read(eth, port, mdiobus_c45_addr(devad, reg));
}

/* Clause-45 MDIO write, mirroring mii_mgr_read_cl45() above. */
static void mii_mgr_write_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 data)
{
	_mtk_mdio_write(eth, port, mdiobus_c45_addr(devad, reg), data);
}
410
/* Private ioctl handler: user-space access to MII registers
 * (clause 22 and clause 45) and, when an MT7530 is present, to the
 * switch register window.
 *
 * Returns 0 on success, -EFAULT on a failed user copy, -EINVAL on an
 * out-of-range switch offset, -EOPNOTSUPP for unknown commands or ESW
 * commands without a switch.
 */
int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_mii_ioctl_data mii;
	struct mtk_esw_reg reg;
	u16 val;

	switch (cmd) {
	case MTKETH_MII_READ:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		mii_mgr_read_combine(eth, mii.phy_id, mii.reg_num,
				     &mii.val_out);
		if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
			goto err_copy;

		return 0;
	case MTKETH_MII_WRITE:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		mii_mgr_write_combine(eth, mii.phy_id, mii.reg_num,
				      mii.val_in);
		return 0;
	case MTKETH_MII_READ_CL45:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		/* phy_id packs prtad and devad for clause-45 access */
		mii_mgr_read_cl45(eth,
				  mdio_phy_id_prtad(mii.phy_id),
				  mdio_phy_id_devad(mii.phy_id),
				  mii.reg_num,
				  &val);
		mii.val_out = val;
		if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
			goto err_copy;

		return 0;
	case MTKETH_MII_WRITE_CL45:
		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
			goto err_copy;
		val = mii.val_in;
		mii_mgr_write_cl45(eth,
				   mdio_phy_id_prtad(mii.phy_id),
				   mdio_phy_id_devad(mii.phy_id),
				   mii.reg_num,
				   val);
		return 0;
	case MTKETH_ESW_REG_READ:
		if (!mt7530_exist(eth))
			return -EOPNOTSUPP;
		if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg)))
			goto err_copy;
		if (reg.off > REG_ESW_MAX)
			return -EINVAL;
		reg.val = mtk_switch_r32(eth, reg.off);

		if (copy_to_user(ifr->ifr_data, &reg, sizeof(reg)))
			goto err_copy;

		return 0;
	case MTKETH_ESW_REG_WRITE:
		if (!mt7530_exist(eth))
			return -EOPNOTSUPP;
		if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg)))
			goto err_copy;
		if (reg.off > REG_ESW_MAX)
			return -EINVAL;
		mtk_switch_w32(eth, reg.val, reg.off);

		return 0;
	default:
		break;
	}

	return -EOPNOTSUPP;
err_copy:
	return -EFAULT;
}
489
/* /proc handler: dump the frame-engine GDMA1/GDMA2 MIB counters and,
 * when an MT7530 switch is present, its per-port MIB counters.
 * Reading the switch MIB ends with a write to 0x4fe0 which restarts
 * the switch counter block. */
int esw_cnt_read(struct seq_file *seq, void *v)
{
	unsigned int pkt_cnt = 0;
	int i = 0;
	struct mtk_eth *eth = g_eth;
	unsigned int mib_base = MTK_GDM1_TX_GBCNT;

	seq_puts(seq, "\n <<CPU>>\n");
	seq_puts(seq, " |\n");
	seq_puts(seq, "+-----------------------------------------------+\n");
	seq_puts(seq, "| <<PSE>> |\n");
	seq_puts(seq, "+-----------------------------------------------+\n");
	seq_puts(seq, " |\n");
	seq_puts(seq, "+-----------------------------------------------+\n");
	seq_puts(seq, "| <<GDMA>> |\n");
	/* GDMA1 counters live at fixed offsets from mib_base */
	seq_printf(seq, "| GDMA1_RX_GBCNT : %010u (Rx Good Bytes) |\n",
		   mtk_r32(eth, mib_base));
	seq_printf(seq, "| GDMA1_RX_GPCNT : %010u (Rx Good Pkts) |\n",
		   mtk_r32(eth, mib_base+0x08));
	seq_printf(seq, "| GDMA1_RX_OERCNT : %010u (overflow error) |\n",
		   mtk_r32(eth, mib_base+0x10));
	seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error) |\n",
		   mtk_r32(eth, mib_base+0x14));
	seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short) |\n",
		   mtk_r32(eth, mib_base+0x18));
	seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long) |\n",
		   mtk_r32(eth, mib_base+0x1C));
	seq_printf(seq, "| GDMA1_RX_CERCNT : %010u (checksum error) |\n",
		   mtk_r32(eth, mib_base+0x20));
	seq_printf(seq, "| GDMA1_RX_FCCNT : %010u (flow control) |\n",
		   mtk_r32(eth, mib_base+0x24));
	seq_printf(seq, "| GDMA1_TX_SKIPCNT: %010u (about count) |\n",
		   mtk_r32(eth, mib_base+0x28));
	seq_printf(seq, "| GDMA1_TX_COLCNT : %010u (collision count) |\n",
		   mtk_r32(eth, mib_base+0x2C));
	seq_printf(seq, "| GDMA1_TX_GBCNT : %010u (Tx Good Bytes) |\n",
		   mtk_r32(eth, mib_base+0x30));
	seq_printf(seq, "| GDMA1_TX_GPCNT : %010u (Tx Good Pkts) |\n",
		   mtk_r32(eth, mib_base+0x38));
	seq_puts(seq, "| |\n");
	/* GDMA2 counters start at mib_base + 0x40 */
	seq_printf(seq, "| GDMA2_RX_GBCNT : %010u (Rx Good Bytes) |\n",
		   mtk_r32(eth, mib_base+0x40));
	seq_printf(seq, "| GDMA2_RX_GPCNT : %010u (Rx Good Pkts) |\n",
		   mtk_r32(eth, mib_base+0x48));
	seq_printf(seq, "| GDMA2_RX_OERCNT : %010u (overflow error) |\n",
		   mtk_r32(eth, mib_base+0x50));
	seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error) |\n",
		   mtk_r32(eth, mib_base+0x54));
	seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short) |\n",
		   mtk_r32(eth, mib_base+0x58));
	seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long) |\n",
		   mtk_r32(eth, mib_base+0x5C));
	seq_printf(seq, "| GDMA2_RX_CERCNT : %010u (checksum error) |\n",
		   mtk_r32(eth, mib_base+0x60));
	seq_printf(seq, "| GDMA2_RX_FCCNT : %010u (flow control) |\n",
		   mtk_r32(eth, mib_base+0x64));
	seq_printf(seq, "| GDMA2_TX_SKIPCNT: %010u (skip) |\n",
		   mtk_r32(eth, mib_base+0x68));
	seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision) |\n",
		   mtk_r32(eth, mib_base+0x6C));
	seq_printf(seq, "| GDMA2_TX_GBCNT : %010u (Tx Good Bytes) |\n",
		   mtk_r32(eth, mib_base+0x70));
	seq_printf(seq, "| GDMA2_TX_GPCNT : %010u (Tx Good Pkts) |\n",
		   mtk_r32(eth, mib_base+0x78));
	seq_puts(seq, "+-----------------------------------------------+\n");

	if (!mt7530_exist(eth))
		return 0;

/* Print one switch MIB counter for each of the 7 ports; per-port
 * register blocks are 0x100 apart. */
#define DUMP_EACH_PORT(base)					\
	do {							\
		for (i = 0; i < 7; i++) {			\
			pkt_cnt = mt7530_mdio_r32(eth, (base) + (i * 0x100));\
			seq_printf(seq, "%8u ", pkt_cnt);	\
		}						\
		seq_puts(seq, "\n");				\
	} while (0)

	seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n",
		   "Port0", "Port1", "Port2", "Port3", "Port4", "Port5",
		   "Port6");
	seq_puts(seq, "Tx Drop Packet :");
	DUMP_EACH_PORT(0x4000);
	seq_puts(seq, "Tx CRC Error :");
	DUMP_EACH_PORT(0x4004);
	seq_puts(seq, "Tx Unicast Packet :");
	DUMP_EACH_PORT(0x4008);
	seq_puts(seq, "Tx Multicast Packet :");
	DUMP_EACH_PORT(0x400C);
	seq_puts(seq, "Tx Broadcast Packet :");
	DUMP_EACH_PORT(0x4010);
	seq_puts(seq, "Tx Collision Event :");
	DUMP_EACH_PORT(0x4014);
	seq_puts(seq, "Tx Pause Packet :");
	DUMP_EACH_PORT(0x402C);
	seq_puts(seq, "Rx Drop Packet :");
	DUMP_EACH_PORT(0x4060);
	seq_puts(seq, "Rx Filtering Packet :");
	DUMP_EACH_PORT(0x4064);
	seq_puts(seq, "Rx Unicast Packet :");
	DUMP_EACH_PORT(0x4068);
	seq_puts(seq, "Rx Multicast Packet :");
	DUMP_EACH_PORT(0x406C);
	seq_puts(seq, "Rx Broadcast Packet :");
	DUMP_EACH_PORT(0x4070);
	seq_puts(seq, "Rx Alignment Error :");
	DUMP_EACH_PORT(0x4074);
	seq_puts(seq, "Rx CRC Error :");
	DUMP_EACH_PORT(0x4078);
	seq_puts(seq, "Rx Undersize Error :");
	DUMP_EACH_PORT(0x407C);
	seq_puts(seq, "Rx Fragment Error :");
	DUMP_EACH_PORT(0x4080);
	seq_puts(seq, "Rx Oversize Error :");
	DUMP_EACH_PORT(0x4084);
	seq_puts(seq, "Rx Jabber Error :");
	DUMP_EACH_PORT(0x4088);
	seq_puts(seq, "Rx Pause Packet :");
	DUMP_EACH_PORT(0x408C);
	/* restart the switch MIB counters after dumping */
	mt7530_mdio_w32(eth, 0x4fe0, 0xf0);
	mt7530_mdio_w32(eth, 0x4fe0, 0x800000f0);

	seq_puts(seq, "\n");

	return 0;
}
616
/* Open hook for the switch-counter /proc file. */
static int switch_count_open(struct inode *inode, struct file *file)
{
	return single_open(file, esw_cnt_read, 0);
}

/* File operations for the switch-counter /proc entry. */
static const struct file_operations switch_count_fops = {
	.owner = THIS_MODULE,
	.open = switch_count_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
629
/* /proc entries for the TX / HW-TX / RX descriptor-ring dump files. */
static struct proc_dir_entry *proc_tx_ring, *proc_hwtx_ring, *proc_rx_ring;
developerfd40db22021-04-29 10:08:25 +0800631
632int tx_ring_read(struct seq_file *seq, void *v)
633{
634 struct mtk_tx_ring *ring = &g_eth->tx_ring;
635 struct mtk_tx_dma *tx_ring;
636 int i = 0;
637
638 tx_ring =
639 kmalloc(sizeof(struct mtk_tx_dma) * MTK_DMA_SIZE, GFP_KERNEL);
640 if (!tx_ring) {
641 seq_puts(seq, " allocate temp tx_ring fail.\n");
642 return 0;
643 }
644
645 for (i = 0; i < MTK_DMA_SIZE; i++)
646 tx_ring[i] = ring->dma[i];
647
648 seq_printf(seq, "free count = %d\n", (int)atomic_read(&ring->free_count));
649 seq_printf(seq, "cpu next free: %d\n", (int)(ring->next_free - ring->dma));
650 seq_printf(seq, "cpu last free: %d\n", (int)(ring->last_free - ring->dma));
651 for (i = 0; i < MTK_DMA_SIZE; i++) {
652 dma_addr_t tmp = ring->phys + i * sizeof(*tx_ring);
653
654 seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &tmp,
655 *(int *)&tx_ring[i].txd1, *(int *)&tx_ring[i].txd2,
656 *(int *)&tx_ring[i].txd3, *(int *)&tx_ring[i].txd4);
657#if defined(CONFIG_MEDIATEK_NETSYS_V2)
658 seq_printf(seq, " %08x %08x %08x %08x",
659 *(int *)&tx_ring[i].txd5, *(int *)&tx_ring[i].txd6,
660 *(int *)&tx_ring[i].txd7, *(int *)&tx_ring[i].txd8);
661#endif
662 seq_printf(seq, "\n");
663 }
664
665 kfree(tx_ring);
666 return 0;
667}
668
/* Open hook for the TX-ring /proc dump. */
static int tx_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, tx_ring_read, NULL);
}

/* File operations for the TX-ring /proc entry. */
static const struct file_operations tx_ring_fops = {
	.owner = THIS_MODULE,
	.open = tx_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
681
developer8051e042022-04-08 13:26:36 +0800682int hwtx_ring_read(struct seq_file *seq, void *v)
683{
684 struct mtk_eth *eth = g_eth;
685 struct mtk_tx_dma *hwtx_ring;
686 int i = 0;
687
688 hwtx_ring =
689 kmalloc(sizeof(struct mtk_tx_dma) * MTK_DMA_SIZE, GFP_KERNEL);
690 if (!hwtx_ring) {
691 seq_puts(seq, " allocate temp hwtx_ring fail.\n");
692 return 0;
693 }
694
695 for (i = 0; i < MTK_DMA_SIZE; i++)
696 hwtx_ring[i] = eth->scratch_ring[i];
697
698 for (i = 0; i < MTK_DMA_SIZE; i++) {
699 dma_addr_t addr = eth->phy_scratch_ring + i * sizeof(*hwtx_ring);
700
701 seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &addr,
702 *(int *)&hwtx_ring[i].txd1, *(int *)&hwtx_ring[i].txd2,
703 *(int *)&hwtx_ring[i].txd3, *(int *)&hwtx_ring[i].txd4);
704#if defined(CONFIG_MEDIATEK_NETSYS_V2)
705 seq_printf(seq, " %08x %08x %08x %08x",
706 *(int *)&hwtx_ring[i].txd5, *(int *)&hwtx_ring[i].txd6,
707 *(int *)&hwtx_ring[i].txd7, *(int *)&hwtx_ring[i].txd8);
708#endif
709 seq_printf(seq, "\n");
710 }
711
712 kfree(hwtx_ring);
713 return 0;
714}
715
/* Open hook for the HW-TX-ring /proc dump. */
static int hwtx_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, hwtx_ring_read, NULL);
}

/* File operations for the HW-TX-ring /proc entry. */
static const struct file_operations hwtx_ring_fops = {
	.owner = THIS_MODULE,
	.open = hwtx_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
728
developerfd40db22021-04-29 10:08:25 +0800729int rx_ring_read(struct seq_file *seq, void *v)
730{
731 struct mtk_rx_ring *ring = &g_eth->rx_ring[0];
732 struct mtk_rx_dma *rx_ring;
733
734 int i = 0;
735
736 rx_ring =
737 kmalloc(sizeof(struct mtk_rx_dma) * MTK_DMA_SIZE, GFP_KERNEL);
738 if (!rx_ring) {
739 seq_puts(seq, " allocate temp rx_ring fail.\n");
740 return 0;
741 }
742
743 for (i = 0; i < MTK_DMA_SIZE; i++)
744 rx_ring[i] = ring->dma[i];
745
746 seq_printf(seq, "next to read: %d\n",
747 NEXT_DESP_IDX(ring->calc_idx, MTK_DMA_SIZE));
748 for (i = 0; i < MTK_DMA_SIZE; i++) {
749 seq_printf(seq, "%d: %08x %08x %08x %08x", i,
750 *(int *)&rx_ring[i].rxd1, *(int *)&rx_ring[i].rxd2,
751 *(int *)&rx_ring[i].rxd3, *(int *)&rx_ring[i].rxd4);
developera2bdbd52021-05-31 19:10:17 +0800752#if defined(CONFIG_MEDIATEK_NETSYS_V2)
developerfd40db22021-04-29 10:08:25 +0800753 seq_printf(seq, " %08x %08x %08x %08x",
754 *(int *)&rx_ring[i].rxd5, *(int *)&rx_ring[i].rxd6,
755 *(int *)&rx_ring[i].rxd7, *(int *)&rx_ring[i].rxd8);
756#endif
757 seq_printf(seq, "\n");
758 }
759
760 kfree(rx_ring);
761 return 0;
762}
763
/* Open hook for the RX-ring /proc dump. */
static int rx_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, rx_ring_read, NULL);
}

/* File operations for the RX-ring /proc entry. */
static const struct file_operations rx_ring_fops = {
	.owner = THIS_MODULE,
	.open = rx_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
776
developer77f3fd42021-10-05 15:16:05 +0800777static inline u32 mtk_dbg_r32(u32 reg)
778{
779 void __iomem *virt_reg;
780 u32 val;
781
782 virt_reg = ioremap(reg, 32);
783 val = __raw_readl(virt_reg);
784 iounmap(virt_reg);
785
786 return val;
787}
788
/* /proc handler: dump frame-engine interrupt/PSE/DMA/MAC debug
 * registers, then clear the FE interrupt status registers so the next
 * read shows fresh events.  NETSYS v2 SoCs get extra queues, FSM and
 * SGMII/WED registers. */
int dbg_regs_read(struct seq_file *seq, void *v)
{
	struct mtk_eth *eth = g_eth;

	seq_puts(seq, " <<DEBUG REG DUMP>>\n");

	seq_printf(seq, "| FE_INT_STA : %08x |\n",
		   mtk_r32(eth, MTK_FE_INT_STATUS));
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		seq_printf(seq, "| FE_INT_STA2 : %08x |\n",
			   mtk_r32(eth, MTK_FE_INT_STATUS2));

	seq_printf(seq, "| PSE_FQFC_CFG : %08x |\n",
		   mtk_r32(eth, MTK_PSE_FQFC_CFG));
	seq_printf(seq, "| PSE_IQ_STA1 : %08x |\n",
		   mtk_r32(eth, MTK_PSE_IQ_STA(0)));
	seq_printf(seq, "| PSE_IQ_STA2 : %08x |\n",
		   mtk_r32(eth, MTK_PSE_IQ_STA(1)));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		seq_printf(seq, "| PSE_IQ_STA3 : %08x |\n",
			   mtk_r32(eth, MTK_PSE_IQ_STA(2)));
		seq_printf(seq, "| PSE_IQ_STA4 : %08x |\n",
			   mtk_r32(eth, MTK_PSE_IQ_STA(3)));
		seq_printf(seq, "| PSE_IQ_STA5 : %08x |\n",
			   mtk_r32(eth, MTK_PSE_IQ_STA(4)));
	}

	seq_printf(seq, "| PSE_OQ_STA1 : %08x |\n",
		   mtk_r32(eth, MTK_PSE_OQ_STA(0)));
	seq_printf(seq, "| PSE_OQ_STA2 : %08x |\n",
		   mtk_r32(eth, MTK_PSE_OQ_STA(1)));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		seq_printf(seq, "| PSE_OQ_STA3 : %08x |\n",
			   mtk_r32(eth, MTK_PSE_OQ_STA(2)));
		seq_printf(seq, "| PSE_OQ_STA4 : %08x |\n",
			   mtk_r32(eth, MTK_PSE_OQ_STA(3)));
		seq_printf(seq, "| PSE_OQ_STA5 : %08x |\n",
			   mtk_r32(eth, MTK_PSE_OQ_STA(4)));
	}

	seq_printf(seq, "| PDMA_CRX_IDX : %08x |\n",
		   mtk_r32(eth, MTK_PRX_CRX_IDX0));
	seq_printf(seq, "| PDMA_DRX_IDX : %08x |\n",
		   mtk_r32(eth, MTK_PRX_DRX_IDX0));
	seq_printf(seq, "| QDMA_CTX_IDX : %08x |\n",
		   mtk_r32(eth, MTK_QTX_CTX_PTR));
	seq_printf(seq, "| QDMA_DTX_IDX : %08x |\n",
		   mtk_r32(eth, MTK_QTX_DTX_PTR));
	seq_printf(seq, "| QDMA_FQ_CNT : %08x |\n",
		   mtk_r32(eth, MTK_QDMA_FQ_CNT));
	seq_printf(seq, "| FE_PSE_FREE : %08x |\n",
		   mtk_r32(eth, MTK_FE_PSE_FREE));
	seq_printf(seq, "| FE_DROP_FQ : %08x |\n",
		   mtk_r32(eth, MTK_FE_DROP_FQ));
	seq_printf(seq, "| FE_DROP_FC : %08x |\n",
		   mtk_r32(eth, MTK_FE_DROP_FC));
	seq_printf(seq, "| FE_DROP_PPE : %08x |\n",
		   mtk_r32(eth, MTK_FE_DROP_PPE));
	seq_printf(seq, "| GDM1_IG_CTRL : %08x |\n",
		   mtk_r32(eth, MTK_GDMA_FWD_CFG(0)));
	seq_printf(seq, "| GDM2_IG_CTRL : %08x |\n",
		   mtk_r32(eth, MTK_GDMA_FWD_CFG(1)));
	seq_printf(seq, "| MAC_P1_MCR : %08x |\n",
		   mtk_r32(eth, MTK_MAC_MCR(0)));
	seq_printf(seq, "| MAC_P2_MCR : %08x |\n",
		   mtk_r32(eth, MTK_MAC_MCR(1)));
	seq_printf(seq, "| MAC_P1_FSM : %08x |\n",
		   mtk_r32(eth, MTK_MAC_FSM(0)));
	seq_printf(seq, "| MAC_P2_FSM : %08x |\n",
		   mtk_r32(eth, MTK_MAC_FSM(1)));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		seq_printf(seq, "| FE_CDM1_FSM : %08x |\n",
			   mtk_r32(eth, MTK_FE_CDM1_FSM));
		seq_printf(seq, "| FE_CDM2_FSM : %08x |\n",
			   mtk_r32(eth, MTK_FE_CDM2_FSM));
		seq_printf(seq, "| FE_CDM3_FSM : %08x |\n",
			   mtk_r32(eth, MTK_FE_CDM3_FSM));
		seq_printf(seq, "| FE_CDM4_FSM : %08x |\n",
			   mtk_r32(eth, MTK_FE_CDM4_FSM));
		seq_printf(seq, "| FE_GDM1_FSM : %08x |\n",
			   mtk_r32(eth, MTK_FE_GDM1_FSM));
		seq_printf(seq, "| FE_GDM2_FSM : %08x |\n",
			   mtk_r32(eth, MTK_FE_GDM2_FSM));
		/* these live outside the eth register base, hence
		 * the temporary-map helper mtk_dbg_r32() */
		seq_printf(seq, "| SGMII_EFUSE : %08x |\n",
			   mtk_dbg_r32(MTK_SGMII_EFUSE));
		seq_printf(seq, "| SGMII0_RX_CNT : %08x |\n",
			   mtk_dbg_r32(MTK_SGMII_FALSE_CARRIER_CNT(0)));
		seq_printf(seq, "| SGMII1_RX_CNT : %08x |\n",
			   mtk_dbg_r32(MTK_SGMII_FALSE_CARRIER_CNT(1)));
		seq_printf(seq, "| WED_RTQM_GLO : %08x |\n",
			   mtk_dbg_r32(MTK_WED_RTQM_GLO_CFG));
	}

	/* write-1-to-clear the FE interrupt status after dumping */
	mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS2);

	return 0;
}
891
/* Open hook for the debug-register /proc dump. */
static int dbg_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dbg_regs_read, 0);
}

/* File operations for the debug-register /proc entry. */
static const struct file_operations dbg_regs_fops = {
	.owner = THIS_MODULE,
	.open = dbg_regs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
904
/* Account one HW LRO flush: bucket the aggregated packet count and
 * size for the flushing ring.
 *
 * NOTE(review): idx is derived from ring_no without a bounds check —
 * assumes the caller only passes valid LRO ring numbers (>= 4 on
 * NETSYS v2, >= 1 otherwise); confirm at the call sites. */
void hw_lro_stats_update(u32 ring_no, struct mtk_rx_dma *rxd)
{
	u32 idx, agg_cnt, agg_size;

#if defined(CONFIG_MEDIATEK_NETSYS_V2)
	/* LRO rings start at ring 4 on NETSYS v2 */
	idx = ring_no - 4;
	agg_cnt = RX_DMA_GET_AGG_CNT_V2(rxd->rxd6);
#else
	idx = ring_no - 1;
	agg_cnt = RX_DMA_GET_AGG_CNT(rxd->rxd2);
#endif

	agg_size = RX_DMA_GET_PLEN0(rxd->rxd2);

	/* size histogram uses 5000-byte buckets */
	hw_lro_agg_size_cnt[idx][agg_size / 5000]++;
	hw_lro_agg_num_cnt[idx][agg_cnt]++;
	hw_lro_tot_flush_cnt[idx]++;
	hw_lro_tot_agg_cnt[idx] += agg_cnt;
}
924
925void hw_lro_flush_stats_update(u32 ring_no, struct mtk_rx_dma *rxd)
926{
927 u32 idx, flush_reason;
928
929#if defined(CONFIG_MEDIATEK_NETSYS_V2)
930 idx = ring_no - 4;
931 flush_reason = RX_DMA_GET_FLUSH_RSN_V2(rxd->rxd6);
932#else
933 idx = ring_no - 1;
934 flush_reason = RX_DMA_GET_REV(rxd->rxd2);
935#endif
936
937 if ((flush_reason & 0x7) == MTK_HW_LRO_AGG_FLUSH)
938 hw_lro_agg_flush_cnt[idx]++;
939 else if ((flush_reason & 0x7) == MTK_HW_LRO_AGE_FLUSH)
940 hw_lro_age_flush_cnt[idx]++;
941 else if ((flush_reason & 0x7) == MTK_HW_LRO_NOT_IN_SEQ_FLUSH)
942 hw_lro_seq_flush_cnt[idx]++;
943 else if ((flush_reason & 0x7) == MTK_HW_LRO_TIMESTAMP_FLUSH)
944 hw_lro_timestamp_flush_cnt[idx]++;
945 else if ((flush_reason & 0x7) == MTK_HW_LRO_NON_RULE_FLUSH)
946 hw_lro_norule_flush_cnt[idx]++;
947}
948
/* /proc write handler: any write clears every HW LRO statistics
 * table; the written data itself is ignored. */
ssize_t hw_lro_stats_write(struct file *file, const char __user *buffer,
			   size_t count, loff_t *data)
{
	memset(hw_lro_agg_num_cnt, 0, sizeof(hw_lro_agg_num_cnt));
	memset(hw_lro_agg_size_cnt, 0, sizeof(hw_lro_agg_size_cnt));
	memset(hw_lro_tot_agg_cnt, 0, sizeof(hw_lro_tot_agg_cnt));
	memset(hw_lro_tot_flush_cnt, 0, sizeof(hw_lro_tot_flush_cnt));
	memset(hw_lro_agg_flush_cnt, 0, sizeof(hw_lro_agg_flush_cnt));
	memset(hw_lro_age_flush_cnt, 0, sizeof(hw_lro_age_flush_cnt));
	memset(hw_lro_seq_flush_cnt, 0, sizeof(hw_lro_seq_flush_cnt));
	memset(hw_lro_timestamp_flush_cnt, 0,
	       sizeof(hw_lro_timestamp_flush_cnt));
	memset(hw_lro_norule_flush_cnt, 0, sizeof(hw_lro_norule_flush_cnt));

	pr_info("clear hw lro cnt table\n");

	return count;
}
967
968int hw_lro_stats_read_v1(struct seq_file *seq, void *v)
969{
970 int i;
971
972 seq_puts(seq, "HW LRO statistic dump:\n");
973
974 /* Agg number count */
975 seq_puts(seq, "Cnt: RING1 | RING2 | RING3 | Total\n");
976 for (i = 0; i <= MTK_HW_LRO_MAX_AGG_CNT; i++) {
977 seq_printf(seq, " %d : %d %d %d %d\n",
978 i, hw_lro_agg_num_cnt[0][i],
979 hw_lro_agg_num_cnt[1][i], hw_lro_agg_num_cnt[2][i],
980 hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] +
981 hw_lro_agg_num_cnt[2][i]);
982 }
983
984 /* Total agg count */
985 seq_puts(seq, "Total agg: RING1 | RING2 | RING3 | Total\n");
986 seq_printf(seq, " %d %d %d %d\n",
987 hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1],
988 hw_lro_tot_agg_cnt[2],
989 hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
990 hw_lro_tot_agg_cnt[2]);
991
992 /* Total flush count */
993 seq_puts(seq, "Total flush: RING1 | RING2 | RING3 | Total\n");
994 seq_printf(seq, " %d %d %d %d\n",
995 hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1],
996 hw_lro_tot_flush_cnt[2],
997 hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
998 hw_lro_tot_flush_cnt[2]);
999
1000 /* Avg agg count */
1001 seq_puts(seq, "Avg agg: RING1 | RING2 | RING3 | Total\n");
1002 seq_printf(seq, " %d %d %d %d\n",
1003 (hw_lro_tot_flush_cnt[0]) ?
1004 hw_lro_tot_agg_cnt[0] / hw_lro_tot_flush_cnt[0] : 0,
1005 (hw_lro_tot_flush_cnt[1]) ?
1006 hw_lro_tot_agg_cnt[1] / hw_lro_tot_flush_cnt[1] : 0,
1007 (hw_lro_tot_flush_cnt[2]) ?
1008 hw_lro_tot_agg_cnt[2] / hw_lro_tot_flush_cnt[2] : 0,
1009 (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
1010 hw_lro_tot_flush_cnt[2]) ?
1011 ((hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
1012 hw_lro_tot_agg_cnt[2]) / (hw_lro_tot_flush_cnt[0] +
1013 hw_lro_tot_flush_cnt[1] + hw_lro_tot_flush_cnt[2])) : 0);
1014
1015 /* Statistics of aggregation size counts */
1016 seq_puts(seq, "HW LRO flush pkt len:\n");
1017 seq_puts(seq, " Length | RING1 | RING2 | RING3 | Total\n");
1018 for (i = 0; i < 15; i++) {
1019 seq_printf(seq, "%d~%d: %d %d %d %d\n", i * 5000,
1020 (i + 1) * 5000, hw_lro_agg_size_cnt[0][i],
1021 hw_lro_agg_size_cnt[1][i], hw_lro_agg_size_cnt[2][i],
1022 hw_lro_agg_size_cnt[0][i] +
1023 hw_lro_agg_size_cnt[1][i] +
1024 hw_lro_agg_size_cnt[2][i]);
1025 }
1026
1027 seq_puts(seq, "Flush reason: RING1 | RING2 | RING3 | Total\n");
1028 seq_printf(seq, "AGG timeout: %d %d %d %d\n",
1029 hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1],
1030 hw_lro_agg_flush_cnt[2],
1031 (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] +
1032 hw_lro_agg_flush_cnt[2]));
1033
1034 seq_printf(seq, "AGE timeout: %d %d %d %d\n",
1035 hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1],
1036 hw_lro_age_flush_cnt[2],
1037 (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] +
1038 hw_lro_age_flush_cnt[2]));
1039
1040 seq_printf(seq, "Not in-sequence: %d %d %d %d\n",
1041 hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1],
1042 hw_lro_seq_flush_cnt[2],
1043 (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] +
1044 hw_lro_seq_flush_cnt[2]));
1045
1046 seq_printf(seq, "Timestamp: %d %d %d %d\n",
1047 hw_lro_timestamp_flush_cnt[0],
1048 hw_lro_timestamp_flush_cnt[1],
1049 hw_lro_timestamp_flush_cnt[2],
1050 (hw_lro_timestamp_flush_cnt[0] +
1051 hw_lro_timestamp_flush_cnt[1] +
1052 hw_lro_timestamp_flush_cnt[2]));
1053
1054 seq_printf(seq, "No LRO rule: %d %d %d %d\n",
1055 hw_lro_norule_flush_cnt[0],
1056 hw_lro_norule_flush_cnt[1],
1057 hw_lro_norule_flush_cnt[2],
1058 (hw_lro_norule_flush_cnt[0] +
1059 hw_lro_norule_flush_cnt[1] +
1060 hw_lro_norule_flush_cnt[2]));
1061
1062 return 0;
1063}
1064
1065int hw_lro_stats_read_v2(struct seq_file *seq, void *v)
1066{
1067 int i;
1068
1069 seq_puts(seq, "HW LRO statistic dump:\n");
1070
1071 /* Agg number count */
1072 seq_puts(seq, "Cnt: RING4 | RING5 | RING6 | RING7 Total\n");
1073 for (i = 0; i <= MTK_HW_LRO_MAX_AGG_CNT; i++) {
1074 seq_printf(seq,
1075 " %d : %d %d %d %d %d\n",
1076 i, hw_lro_agg_num_cnt[0][i], hw_lro_agg_num_cnt[1][i],
1077 hw_lro_agg_num_cnt[2][i], hw_lro_agg_num_cnt[3][i],
1078 hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] +
1079 hw_lro_agg_num_cnt[2][i] + hw_lro_agg_num_cnt[3][i]);
1080 }
1081
1082 /* Total agg count */
1083 seq_puts(seq, "Total agg: RING4 | RING5 | RING6 | RING7 Total\n");
1084 seq_printf(seq, " %d %d %d %d %d\n",
1085 hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1],
1086 hw_lro_tot_agg_cnt[2], hw_lro_tot_agg_cnt[3],
1087 hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
1088 hw_lro_tot_agg_cnt[2] + hw_lro_tot_agg_cnt[3]);
1089
1090 /* Total flush count */
1091 seq_puts(seq, "Total flush: RING4 | RING5 | RING6 | RING7 Total\n");
1092 seq_printf(seq, " %d %d %d %d %d\n",
1093 hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1],
1094 hw_lro_tot_flush_cnt[2], hw_lro_tot_flush_cnt[3],
1095 hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
1096 hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3]);
1097
1098 /* Avg agg count */
1099 seq_puts(seq, "Avg agg: RING4 | RING5 | RING6 | RING7 Total\n");
1100 seq_printf(seq, " %d %d %d %d %d\n",
1101 (hw_lro_tot_flush_cnt[0]) ?
1102 hw_lro_tot_agg_cnt[0] / hw_lro_tot_flush_cnt[0] : 0,
1103 (hw_lro_tot_flush_cnt[1]) ?
1104 hw_lro_tot_agg_cnt[1] / hw_lro_tot_flush_cnt[1] : 0,
1105 (hw_lro_tot_flush_cnt[2]) ?
1106 hw_lro_tot_agg_cnt[2] / hw_lro_tot_flush_cnt[2] : 0,
1107 (hw_lro_tot_flush_cnt[3]) ?
1108 hw_lro_tot_agg_cnt[3] / hw_lro_tot_flush_cnt[3] : 0,
1109 (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
1110 hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3]) ?
1111 ((hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
1112 hw_lro_tot_agg_cnt[2] + hw_lro_tot_agg_cnt[3]) /
1113 (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
1114 hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3])) : 0);
1115
1116 /* Statistics of aggregation size counts */
1117 seq_puts(seq, "HW LRO flush pkt len:\n");
1118 seq_puts(seq, " Length | RING4 | RING5 | RING6 | RING7 Total\n");
1119 for (i = 0; i < 15; i++) {
1120 seq_printf(seq, "%d~%d: %d %d %d %d %d\n",
1121 i * 5000, (i + 1) * 5000,
1122 hw_lro_agg_size_cnt[0][i], hw_lro_agg_size_cnt[1][i],
1123 hw_lro_agg_size_cnt[2][i], hw_lro_agg_size_cnt[3][i],
1124 hw_lro_agg_size_cnt[0][i] +
1125 hw_lro_agg_size_cnt[1][i] +
1126 hw_lro_agg_size_cnt[2][i] +
1127 hw_lro_agg_size_cnt[3][i]);
1128 }
1129
1130 seq_puts(seq, "Flush reason: RING4 | RING5 | RING6 | RING7 Total\n");
1131 seq_printf(seq, "AGG timeout: %d %d %d %d %d\n",
1132 hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1],
1133 hw_lro_agg_flush_cnt[2], hw_lro_agg_flush_cnt[3],
1134 (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] +
1135 hw_lro_agg_flush_cnt[2] + hw_lro_agg_flush_cnt[3]));
1136
1137 seq_printf(seq, "AGE timeout: %d %d %d %d %d\n",
1138 hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1],
1139 hw_lro_age_flush_cnt[2], hw_lro_age_flush_cnt[3],
1140 (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] +
1141 hw_lro_age_flush_cnt[2] + hw_lro_age_flush_cnt[3]));
1142
1143 seq_printf(seq, "Not in-sequence: %d %d %d %d %d\n",
1144 hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1],
1145 hw_lro_seq_flush_cnt[2], hw_lro_seq_flush_cnt[3],
1146 (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] +
1147 hw_lro_seq_flush_cnt[2] + hw_lro_seq_flush_cnt[3]));
1148
1149 seq_printf(seq, "Timestamp: %d %d %d %d %d\n",
1150 hw_lro_timestamp_flush_cnt[0],
1151 hw_lro_timestamp_flush_cnt[1],
1152 hw_lro_timestamp_flush_cnt[2],
1153 hw_lro_timestamp_flush_cnt[3],
1154 (hw_lro_timestamp_flush_cnt[0] +
1155 hw_lro_timestamp_flush_cnt[1] +
1156 hw_lro_timestamp_flush_cnt[2] +
1157 hw_lro_timestamp_flush_cnt[3]));
1158
1159 seq_printf(seq, "No LRO rule: %d %d %d %d %d\n",
1160 hw_lro_norule_flush_cnt[0],
1161 hw_lro_norule_flush_cnt[1],
1162 hw_lro_norule_flush_cnt[2],
1163 hw_lro_norule_flush_cnt[3],
1164 (hw_lro_norule_flush_cnt[0] +
1165 hw_lro_norule_flush_cnt[1] +
1166 hw_lro_norule_flush_cnt[2] +
1167 hw_lro_norule_flush_cnt[3]));
1168
1169 return 0;
1170}
1171
1172int hw_lro_stats_read_wrapper(struct seq_file *seq, void *v)
1173{
1174 struct mtk_eth *eth = g_eth;
1175
1176 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1177 hw_lro_stats_read_v2(seq, v);
1178 else
1179 hw_lro_stats_read_v1(seq, v);
1180
1181 return 0;
1182}
1183
/* proc open hook: bind the SoC-aware stats dumper as a single-shot
 * seq_file show; no private data is needed.
 */
static int hw_lro_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, hw_lro_stats_read_wrapper, NULL);
}
1188
/* File operations for /proc/mtketh/hw_lro_stats: reads dump the counter
 * tables, any write clears them.
 */
static const struct file_operations hw_lro_stats_fops = {
	.owner = THIS_MODULE,
	.open = hw_lro_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = hw_lro_stats_write,
	.release = single_release
};
1197
developer77d03a72021-06-06 00:06:00 +08001198int hwlro_agg_cnt_ctrl(int cnt)
1199{
1200 int i;
1201
1202 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1203 SET_PDMA_RXRING_MAX_AGG_CNT(g_eth, i, cnt);
1204
1205 return 0;
1206}
1207
1208int hwlro_agg_time_ctrl(int time)
1209{
1210 int i;
1211
1212 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1213 SET_PDMA_RXRING_AGG_TIME(g_eth, i, time);
1214
1215 return 0;
1216}
1217
1218int hwlro_age_time_ctrl(int time)
1219{
1220 int i;
1221
1222 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1223 SET_PDMA_RXRING_AGE_TIME(g_eth, i, time);
1224
1225 return 0;
1226}
1227
/* Program the global PDMA LRO bandwidth threshold.
 * Always returns 0 (dispatch-table convention).
 */
int hwlro_threshold_ctrl(int bandwidth)
{
	SET_PDMA_LRO_BW_THRESHOLD(g_eth, bandwidth);

	return 0;
}
1234
1235int hwlro_ring_enable_ctrl(int enable)
1236{
1237 int i;
1238
1239 pr_info("[%s] %s HW LRO rings\n", __func__, (enable) ? "Enable" : "Disable");
1240
1241 for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++)
1242 SET_PDMA_RXRING_VALID(g_eth, i, enable);
1243
1244 return 0;
1245}
1246
/* Toggle software collection of HW LRO statistics (checked by the RX
 * path via mtk_hwlro_stats_ebl) and log the action.
 * Always returns 0 (dispatch-table convention).
 */
int hwlro_stats_enable_ctrl(int enable)
{
	pr_info("[%s] %s HW LRO statistics\n", __func__, (enable) ? "Enable" : "Disable");
	mtk_hwlro_stats_ebl = enable;

	return 0;
}
1254
/* Dispatch table for writes to /proc/mtketh/hw_lro_auto_tlb: the first
 * number written selects the entry, the second number is its argument.
 */
static const mtk_lro_dbg_func lro_dbg_func[] = {
	[0] = hwlro_agg_cnt_ctrl,
	[1] = hwlro_agg_time_ctrl,
	[2] = hwlro_age_time_ctrl,
	[3] = hwlro_threshold_ctrl,
	[4] = hwlro_ring_enable_ctrl,
	[5] = hwlro_stats_enable_ctrl,
};
1263
1264ssize_t hw_lro_auto_tlb_write(struct file *file, const char __user *buffer,
1265 size_t count, loff_t *data)
1266{
1267 char buf[32];
1268 char *p_buf;
1269 char *p_token = NULL;
1270 char *p_delimiter = " \t";
1271 long x = 0, y = 0;
developer4c32b7a2021-11-13 16:46:43 +08001272 u32 len = count;
developer77d03a72021-06-06 00:06:00 +08001273 int ret;
1274
1275 if (len >= sizeof(buf)) {
1276 pr_info("Input handling fail!\n");
developer77d03a72021-06-06 00:06:00 +08001277 return -1;
1278 }
1279
1280 if (copy_from_user(buf, buffer, len))
1281 return -EFAULT;
1282
1283 buf[len] = '\0';
1284
1285 p_buf = buf;
1286 p_token = strsep(&p_buf, p_delimiter);
1287 if (!p_token)
1288 x = 0;
1289 else
1290 ret = kstrtol(p_token, 10, &x);
1291
1292 p_token = strsep(&p_buf, "\t\n ");
1293 if (p_token)
1294 ret = kstrtol(p_token, 10, &y);
1295
1296 if (lro_dbg_func[x] && (ARRAY_SIZE(lro_dbg_func) > x))
1297 (*lro_dbg_func[x]) (y);
1298
1299 return count;
1300}
1301
/* Dump one entry of the NETSYS v1 HW LRO auto-learn table to a seq_file.
 *
 * @seq: output seq_file
 * @index: auto-learn table entry number (caller passes bit positions of
 *         the valid mask read from MTK_FE_ALT_SEQ_CFC)
 *
 * The entry is fetched by writing its word address to MTK_FE_ALT_CF8 and
 * then reading nine consecutive words from MTK_FE_ALT_SEQ_CFC, which are
 * reinterpreted as struct mtk_lro_alt_v1.
 * NOTE(review): the "index > 4 → index - 1" rebase and the ">= 4 (Act)"
 * split mirror the hardware table layout — confirm against the NETSYS v1
 * programming guide before changing.
 */
void hw_lro_auto_tlb_dump_v1(struct seq_file *seq, u32 index)
{
	int i;
	struct mtk_lro_alt_v1 alt;
	__be32 addr;
	u32 tlb_info[9];
	u32 dw_len, cnt, priority;
	u32 entry;

	/* rebase the entry number to its word address in the table */
	if (index > 4)
		index = index - 1;
	entry = (index * 9) + 1;

	/* read valid entries of the auto-learn table */
	mtk_w32(g_eth, entry, MTK_FE_ALT_CF8);

	for (i = 0; i < 9; i++)
		tlb_info[i] = mtk_r32(g_eth, MTK_FE_ALT_SEQ_CFC);

	memcpy(&alt, tlb_info, sizeof(struct mtk_lro_alt_v1));

	dw_len = alt.alt_info7.dw_len;
	cnt = alt.alt_info6.cnt;

	/* the flow's priority metric depends on the configured count mode */
	if (mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW0) & MTK_LRO_ALT_PKT_CNT_MODE)
		priority = cnt; /* packet count */
	else
		priority = dw_len; /* byte count */

	/* dump valid entries of the auto-learn table */
	if (index >= 4)
		seq_printf(seq, "\n===== TABLE Entry: %d (Act) =====\n", index);
	else
		seq_printf(seq, "\n===== TABLE Entry: %d (LRU) =====\n", index);

	/* source IP: one word for IPv4, four words for IPv6 */
	if (alt.alt_info8.ipv4) {
		addr = htonl(alt.alt_info1.sip0);
		seq_printf(seq, "SIP = %pI4 (IPv4)\n", &addr);
	} else {
		seq_printf(seq, "SIP = %08X:%08X:%08X:%08X (IPv6)\n",
			   alt.alt_info4.sip3, alt.alt_info3.sip2,
			   alt.alt_info2.sip1, alt.alt_info1.sip0);
	}

	seq_printf(seq, "DIP_ID = %d\n", alt.alt_info8.dip_id);
	seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
		   alt.alt_info0.stp, alt.alt_info0.dtp);
	seq_printf(seq, "VLAN_VID_VLD = %d\n", alt.alt_info6.vlan_vid_vld);
	/* four 12-bit VLAN IDs packed across vlan_vid0/vlan_vid1 */
	seq_printf(seq, "VLAN1 = %d | VLAN2 = %d | VLAN3 = %d | VLAN4 =%d\n",
		   (alt.alt_info5.vlan_vid0 & 0xfff),
		   ((alt.alt_info5.vlan_vid0 >> 12) & 0xfff),
		   ((alt.alt_info6.vlan_vid1 << 8) |
		    ((alt.alt_info5.vlan_vid0 >> 24) & 0xfff)),
		   ((alt.alt_info6.vlan_vid1 >> 4) & 0xfff));
	seq_printf(seq, "TPUT = %d | FREQ = %d\n", dw_len, cnt);
	seq_printf(seq, "PRIORITY = %d\n", priority);
}
1359
/* Dump one entry of the NETSYS v2 HW LRO auto-learn table to a seq_file.
 *
 * @seq: output seq_file
 * @index: auto-learn table entry number (1..8 from the caller); entries
 *         below 5 are printed as "onging" (ongoing), the rest as
 *         candidates
 *
 * The entry is selected via MTK_LRO_ALT_DBG and read word-by-word from
 * MTK_LRO_ALT_DBG_DATA, then reinterpreted as struct mtk_lro_alt_v2.
 * NOTE(review): tlb_info has 12 slots but only 11 words are read —
 * confirm sizeof(struct mtk_lro_alt_v2) fits in 11 words, otherwise the
 * memcpy consumes an uninitialized word.
 * NOTE(review): `score` is computed from the count mode but never used.
 */
void hw_lro_auto_tlb_dump_v2(struct seq_file *seq, u32 index)
{
	int i;
	struct mtk_lro_alt_v2 alt;
	u32 score = 0, ipv4 = 0;
	u32 ipv6[4] = { 0 };
	u32 tlb_info[12];

	/* read valid entries of the auto-learn table */
	mtk_w32(g_eth, index << MTK_LRO_ALT_INDEX_OFFSET, MTK_LRO_ALT_DBG);

	for (i = 0; i < 11; i++)
		tlb_info[i] = mtk_r32(g_eth, MTK_LRO_ALT_DBG_DATA);

	memcpy(&alt, tlb_info, sizeof(struct mtk_lro_alt_v2));

	if (mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW0) & MTK_LRO_ALT_PKT_CNT_MODE)
		score = 1; /* packet count */
	else
		score = 0; /* byte count */

	/* dump valid entries of the auto-learn table */
	if (alt.alt_info0.valid) {
		if (index < 5)
			seq_printf(seq,
				   "\n===== TABLE Entry: %d (onging) =====\n",
				   index);
		else
			seq_printf(seq,
				   "\n===== TABLE Entry: %d (candidate) =====\n",
				   index);

		/* addresses are split across high/low bitfields; reassemble
		 * with the shifts the hardware layout dictates
		 */
		if (alt.alt_info1.v4_valid) {
			ipv4 = (alt.alt_info4.sip0_h << 23) |
				alt.alt_info5.sip0_l;
			seq_printf(seq, "SIP = 0x%x: (IPv4)\n", ipv4);

			ipv4 = (alt.alt_info8.dip0_h << 23) |
				alt.alt_info9.dip0_l;
			seq_printf(seq, "DIP = 0x%x: (IPv4)\n", ipv4);
		} else if (alt.alt_info1.v6_valid) {
			ipv6[3] = (alt.alt_info1.sip3_h << 23) |
				  (alt.alt_info2.sip3_l << 9);
			ipv6[2] = (alt.alt_info2.sip2_h << 23) |
				  (alt.alt_info3.sip2_l << 9);
			ipv6[1] = (alt.alt_info3.sip1_h << 23) |
				  (alt.alt_info4.sip1_l << 9);
			ipv6[0] = (alt.alt_info4.sip0_h << 23) |
				  (alt.alt_info5.sip0_l << 9);
			seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
				   ipv6[3], ipv6[2], ipv6[1], ipv6[0]);

			ipv6[3] = (alt.alt_info5.dip3_h << 23) |
				  (alt.alt_info6.dip3_l << 9);
			ipv6[2] = (alt.alt_info6.dip2_h << 23) |
				  (alt.alt_info7.dip2_l << 9);
			ipv6[1] = (alt.alt_info7.dip1_h << 23) |
				  (alt.alt_info8.dip1_l << 9);
			ipv6[0] = (alt.alt_info8.dip0_h << 23) |
				  (alt.alt_info9.dip0_l << 9);
			seq_printf(seq, "DIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
				   ipv6[3], ipv6[2], ipv6[1], ipv6[0]);
		}

		seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
			   (alt.alt_info9.sp_h << 7) | (alt.alt_info10.sp_l),
			   alt.alt_info10.dp);
	}
}
1429
/* seq_file show handler for /proc/mtketh/hw_lro_auto_tlb: print a usage
 * banner, dump the auto-learn table entries for the detected SoC
 * generation, then decode and print each LRO ring's aggregation
 * settings from the PDMA registers.  Always returns 0.
 */
int hw_lro_auto_tlb_read(struct seq_file *seq, void *v)
{
	int i;
	u32 reg_val;
	u32 reg_op1, reg_op2, reg_op3, reg_op4;
	u32 agg_cnt, agg_time, age_time;

	seq_puts(seq, "Usage of /proc/mtketh/hw_lro_auto_tlb:\n");
	seq_puts(seq, "echo [function] [setting] > /proc/mtketh/hw_lro_auto_tlb\n");
	seq_puts(seq, "Functions:\n");
	seq_puts(seq, "[0] = hwlro_agg_cnt_ctrl\n");
	seq_puts(seq, "[1] = hwlro_agg_time_ctrl\n");
	seq_puts(seq, "[2] = hwlro_age_time_ctrl\n");
	seq_puts(seq, "[3] = hwlro_threshold_ctrl\n");
	seq_puts(seq, "[4] = hwlro_ring_enable_ctrl\n");
	seq_puts(seq, "[5] = hwlro_stats_enable_ctrl\n\n");

	if (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V2)) {
		/* v2 hardware: entries 1..8, validity checked per entry */
		for (i = 1; i <= 8; i++)
			hw_lro_auto_tlb_dump_v2(seq, i);
	} else {
		/* Read valid entries of the auto-learn table */
		mtk_w32(g_eth, 0, MTK_FE_ALT_CF8);
		reg_val = mtk_r32(g_eth, MTK_FE_ALT_SEQ_CFC);

		seq_printf(seq,
			   "HW LRO Auto-learn Table: (MTK_FE_ALT_SEQ_CFC=0x%x)\n",
			   reg_val);

		/* v1 hardware: reg_val is a valid-entry bitmask */
		for (i = 7; i >= 0; i--) {
			if (reg_val & (1 << i))
				hw_lro_auto_tlb_dump_v1(seq, i);
		}
	}

	/* Read the agg_time/age_time/agg_cnt of LRO rings */
	seq_puts(seq, "\nHW LRO Ring Settings\n");

	for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) {
		reg_op1 = mtk_r32(g_eth, MTK_LRO_CTRL_DW1_CFG(i));
		reg_op2 = mtk_r32(g_eth, MTK_LRO_CTRL_DW2_CFG(i));
		reg_op3 = mtk_r32(g_eth, MTK_LRO_CTRL_DW3_CFG(i));
		reg_op4 = mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW2);

		/* fields straddle register boundaries: agg_cnt is 8 bits
		 * (2 high in DW3, 6 low in DW2), age_time is 16 bits
		 * (6 high in DW2, 10 low in DW1)
		 */
		agg_cnt =
		    ((reg_op3 & 0x3) << 6) |
		    ((reg_op2 >> MTK_LRO_RING_AGG_CNT_L_OFFSET) & 0x3f);
		agg_time = (reg_op2 >> MTK_LRO_RING_AGG_TIME_OFFSET) & 0xffff;
		age_time =
		    ((reg_op2 & 0x3f) << 10) |
		    ((reg_op1 >> MTK_LRO_RING_AGE_TIME_L_OFFSET) & 0x3ff);
		/* v2 reports rings 4..7, v1 reports rings 1..3 */
		seq_printf(seq,
			   "Ring[%d]: MAX_AGG_CNT=%d, AGG_TIME=%d, AGE_TIME=%d, Threshold=%d\n",
			   (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V2))? i+3 : i,
			   agg_cnt, agg_time, age_time, reg_op4);
	}

	seq_puts(seq, "\n");

	return 0;
}
1491
/* proc open hook: bind the auto-learn table dumper as a single-shot
 * seq_file show; no private data is needed.
 */
static int hw_lro_auto_tlb_open(struct inode *inode, struct file *file)
{
	return single_open(file, hw_lro_auto_tlb_read, NULL);
}
1496
/* File operations for /proc/mtketh/hw_lro_auto_tlb: reads dump the
 * auto-learn table and ring settings, writes dispatch to lro_dbg_func[].
 */
static const struct file_operations hw_lro_auto_tlb_fops = {
	.owner = THIS_MODULE,
	.open = hw_lro_auto_tlb_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = hw_lro_auto_tlb_write,
	.release = single_release
};
developerfd40db22021-04-29 10:08:25 +08001505
developer8051e042022-04-08 13:26:36 +08001506int reset_event_read(struct seq_file *seq, void *v)
1507{
1508 struct mtk_eth *eth = g_eth;
1509 struct mtk_reset_event reset_event = eth->reset_event;
1510
1511 seq_printf(seq, "[Event] [Count]\n");
1512 seq_printf(seq, " FQ Empty: %d\n",
1513 reset_event.count[MTK_EVENT_FQ_EMPTY]);
1514 seq_printf(seq, " TSO Fail: %d\n",
1515 reset_event.count[MTK_EVENT_TSO_FAIL]);
1516 seq_printf(seq, " TSO Illegal: %d\n",
1517 reset_event.count[MTK_EVENT_TSO_ILLEGAL]);
1518 seq_printf(seq, " TSO Align: %d\n",
1519 reset_event.count[MTK_EVENT_TSO_ALIGN]);
1520 seq_printf(seq, " RFIFO OV: %d\n",
1521 reset_event.count[MTK_EVENT_RFIFO_OV]);
1522 seq_printf(seq, " RFIFO UF: %d\n",
1523 reset_event.count[MTK_EVENT_RFIFO_UF]);
1524 seq_printf(seq, " Force: %d\n",
1525 reset_event.count[MTK_EVENT_FORCE]);
1526 seq_printf(seq, "----------------------------\n");
1527 seq_printf(seq, " Warm Cnt: %d\n",
1528 reset_event.count[MTK_EVENT_WARM_CNT]);
1529 seq_printf(seq, " Cold Cnt: %d\n",
1530 reset_event.count[MTK_EVENT_COLD_CNT]);
1531 seq_printf(seq, " Total Cnt: %d\n",
1532 reset_event.count[MTK_EVENT_TOTAL_CNT]);
1533
1534 return 0;
1535}
1536
1537static int reset_event_open(struct inode *inode, struct file *file)
1538{
1539 return single_open(file, reset_event_read, 0);
1540}
1541
1542ssize_t reset_event_write(struct file *file, const char __user *buffer,
1543 size_t count, loff_t *data)
1544{
1545 struct mtk_eth *eth = g_eth;
1546 struct mtk_reset_event *reset_event = &eth->reset_event;
1547
1548 memset(reset_event, 0, sizeof(struct mtk_reset_event));
1549 pr_info("MTK reset event counter is cleared !\n");
1550
1551 return count;
1552}
1553
/* File operations for the reset_event proc file: reads dump the event
 * counters, any write clears them.
 */
static const struct file_operations reset_event_fops = {
	.owner = THIS_MODULE,
	.open = reset_event_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = reset_event_write,
	.release = single_release
};
1562
1563
developerfd40db22021-04-29 10:08:25 +08001564struct proc_dir_entry *proc_reg_dir;
developer8051e042022-04-08 13:26:36 +08001565static struct proc_dir_entry *proc_esw_cnt, *proc_dbg_regs, *proc_reset_event;
developerfd40db22021-04-29 10:08:25 +08001566
1567int debug_proc_init(struct mtk_eth *eth)
1568{
1569 g_eth = eth;
1570
1571 if (!proc_reg_dir)
1572 proc_reg_dir = proc_mkdir(PROCREG_DIR, NULL);
1573
1574 proc_tx_ring =
1575 proc_create(PROCREG_TXRING, 0, proc_reg_dir, &tx_ring_fops);
1576 if (!proc_tx_ring)
1577 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_TXRING);
1578
developer8051e042022-04-08 13:26:36 +08001579 proc_hwtx_ring =
1580 proc_create(PROCREG_HWTXRING, 0, proc_reg_dir, &hwtx_ring_fops);
1581 if (!proc_hwtx_ring)
1582 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_HWTXRING);
1583
developerfd40db22021-04-29 10:08:25 +08001584 proc_rx_ring =
1585 proc_create(PROCREG_RXRING, 0, proc_reg_dir, &rx_ring_fops);
1586 if (!proc_rx_ring)
1587 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RXRING);
1588
1589 proc_esw_cnt =
1590 proc_create(PROCREG_ESW_CNT, 0, proc_reg_dir, &switch_count_fops);
1591 if (!proc_esw_cnt)
1592 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_ESW_CNT);
1593
1594 proc_dbg_regs =
1595 proc_create(PROCREG_DBG_REGS, 0, proc_reg_dir, &dbg_regs_fops);
1596 if (!proc_dbg_regs)
1597 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_DBG_REGS);
1598
developer77d03a72021-06-06 00:06:00 +08001599 if (g_eth->hwlro) {
1600 proc_hw_lro_stats =
1601 proc_create(PROCREG_HW_LRO_STATS, 0, proc_reg_dir,
1602 &hw_lro_stats_fops);
1603 if (!proc_hw_lro_stats)
1604 pr_info("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_STATS);
1605
1606 proc_hw_lro_auto_tlb =
1607 proc_create(PROCREG_HW_LRO_AUTO_TLB, 0, proc_reg_dir,
1608 &hw_lro_auto_tlb_fops);
1609 if (!proc_hw_lro_auto_tlb)
1610 pr_info("!! FAIL to create %s PROC !!\n",
1611 PROCREG_HW_LRO_AUTO_TLB);
1612 }
1613
developer8051e042022-04-08 13:26:36 +08001614 proc_reset_event =
1615 proc_create(PROCREG_RESET_EVENT, 0, proc_reg_dir, &reset_event_fops);
1616 if (!proc_reset_event)
1617 pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RESET_EVENT);
1618
developerfd40db22021-04-29 10:08:25 +08001619 return 0;
1620}
1621
1622void debug_proc_exit(void)
1623{
1624 if (proc_tx_ring)
1625 remove_proc_entry(PROCREG_TXRING, proc_reg_dir);
developer8051e042022-04-08 13:26:36 +08001626 if (proc_hwtx_ring)
1627 remove_proc_entry(PROCREG_HWTXRING, proc_reg_dir);
developerfd40db22021-04-29 10:08:25 +08001628 if (proc_rx_ring)
1629 remove_proc_entry(PROCREG_RXRING, proc_reg_dir);
1630
1631 if (proc_esw_cnt)
1632 remove_proc_entry(PROCREG_ESW_CNT, proc_reg_dir);
1633
1634 if (proc_reg_dir)
1635 remove_proc_entry(PROCREG_DIR, 0);
1636
1637 if (proc_dbg_regs)
1638 remove_proc_entry(PROCREG_DBG_REGS, proc_reg_dir);
developer77d03a72021-06-06 00:06:00 +08001639
1640 if (g_eth->hwlro) {
1641 if (proc_hw_lro_stats)
1642 remove_proc_entry(PROCREG_HW_LRO_STATS, proc_reg_dir);
1643
1644 if (proc_hw_lro_auto_tlb)
1645 remove_proc_entry(PROCREG_HW_LRO_AUTO_TLB, proc_reg_dir);
1646 }
developer8051e042022-04-08 13:26:36 +08001647
1648 if (proc_reset_event)
1649 remove_proc_entry(PROCREG_RESET_EVENT, proc_reg_dir);
developerfd40db22021-04-29 10:08:25 +08001650}
1651