diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index 0c724a5..93cd55f 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -5,7 +5,7 @@
 
 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
 mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_usxgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o \
-	     mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
+	     mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o mtk_qdma_debugfs.o
 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
 ifdef CONFIG_DEBUG_FS
 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index ca76047..809c735 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -4787,6 +4787,8 @@ static int mtk_probe(struct platform_device *pdev)
 		}
 
 		mtk_ppe_debugfs_init(eth);
+
+		mtk_qdma_debugfs_init(eth);
 	}
 
 	for (i = 0; i < MTK_MAX_DEVS; i++) {
@@ -4901,6 +4903,7 @@ static const struct mtk_soc_data mt2701_data = {
 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+		.qdma_tx_sch = 2,
 	},
 };
 
@@ -4920,6 +4923,7 @@ static const struct mtk_soc_data mt7621_data = {
 		.rxd_size = sizeof(struct mtk_rx_dma),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+		.qdma_tx_sch = 2,
 	},
 };
 
@@ -4940,6 +4944,7 @@ static const struct mtk_soc_data mt7622_data = {
 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+		.qdma_tx_sch = 2,
 	},
 };
 
@@ -4959,6 +4964,7 @@ static const struct mtk_soc_data mt7623_data = {
 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+		.qdma_tx_sch = 2,
 	},
 };
 
@@ -4997,6 +5003,7 @@ static const struct mtk_soc_data mt7986_data = {
 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
+		.qdma_tx_sch = 4,
 	},
 };
 
@@ -5017,6 +5024,7 @@ static const struct mtk_soc_data mt7981_data = {
 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
+		.qdma_tx_sch = 4,
 	},
 };
 
@@ -5034,6 +5042,7 @@ static const struct mtk_soc_data mt7988_data = {
 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
+		.qdma_tx_sch = 4,
 	},
 };
 
@@ -5051,6 +5060,7 @@ static const struct mtk_soc_data rt5350_data = {
 		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+		.qdma_tx_sch = 4,
 	},
 };
 
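
The per-SoC .qdma_tx_sch field records how many QDMA TX schedulers the silicon exposes (two or four; see the values above). On four-scheduler parts each 32-bit register at MTK_QDMA_TX_4SCH_BASE(x) carries a pair of schedulers, while two-scheduler parts pack both halves into the single MTK_QDMA_TX_2SCH_BASE register. A minimal read helper, sketched from the accesses mtk_qdma_debugfs.c makes below (not itself part of the patch):

    /* Sketch: fetch the 16-bit rate-control word of TX scheduler "id". */
    static u32 mtk_tx_sch_word(struct mtk_eth *eth, int id)
    {
        u32 val;

        if (eth->soc->txrx.qdma_tx_sch == 4)
            val = readl(eth->base + MTK_QDMA_TX_4SCH_BASE(id));
        else
            val = readl(eth->base + MTK_QDMA_TX_2SCH_BASE);

        if (id & 0x1)    /* odd-numbered schedulers sit in the high half */
            val >>= 16;

        return val & MTK_QDMA_TX_SCH_MASK;
    }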
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index c6afff5..bd73c27 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -385,10 +385,21 @@
 
 /* QDMA TX Queue Configuration Registers */
 #define MTK_QTX_CFG(x)			(QDMA_BASE + (x * 0x10))
+#define MTK_QTX_CFG_HW_RESV_CNT_OFFSET	GENMASK(15, 8)
+#define MTK_QTX_CFG_SW_RESV_CNT_OFFSET	GENMASK(7, 0)
 #define QDMA_RES_THRES			4
 
 /* QDMA TX Queue Scheduler Registers */
 #define MTK_QTX_SCH(x)			(QDMA_BASE + 4 + (x * 0x10))
+#define MTK_QTX_SCH_TX_SCH_SEL		BIT(31)
+#define MTK_QTX_SCH_TX_SCH_SEL_V2	GENMASK(31, 30)
+#define MTK_QTX_SCH_MIN_RATE_EN		BIT(27)
+#define MTK_QTX_SCH_MIN_RATE_MAN	GENMASK(26, 20)
+#define MTK_QTX_SCH_MIN_RATE_EXP	GENMASK(19, 16)
+#define MTK_QTX_SCH_MAX_RATE_WGHT	GENMASK(15, 12)
+#define MTK_QTX_SCH_MAX_RATE_EN		BIT(11)
+#define MTK_QTX_SCH_MAX_RATE_MAN	GENMASK(10, 4)
+#define MTK_QTX_SCH_MAX_RATE_EXP	GENMASK(3, 0)
 
 /* QDMA RX Base Pointer Register */
 #define MTK_QRX_BASE_PTR0		(QDMA_BASE + 0x100)
@@ -406,7 +417,9 @@
 #define MTK_QRX_DRX_IDX0		(QDMA_BASE + 0x10c)
 
 /* QDMA Page Configuration Register */
-#define MTK_QDMA_PAGE			(QDMA_BASE + 0x1f0)
+#define MTK_QDMA_PAGE			(QDMA_BASE + 0x1f0)
+#define MTK_QTX_CFG_PAGE		GENMASK(3, 0)
+#define MTK_QTX_PER_PAGE		(16)
 
 /* QDMA Global Configuration Register */
 #define MTK_QDMA_GLO_CFG		(QDMA_BASE + 0x204)
@@ -443,6 +456,9 @@
 #define FC_THRES_DROP_EN		(7 << 16)
 #define FC_THRES_MIN			0x4444
 
+/* QDMA TX 2-Scheduler Rate Control Register */
+#define MTK_QDMA_TX_2SCH_BASE		(QDMA_BASE + 0x214)
+
 /* QDMA Interrupt Status Register */
 #define MTK_QDMA_INT_STATUS		(QDMA_BASE + 0x218)
 #if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
@@ -478,6 +494,11 @@
 /* QDMA Interrupt Mask Register */
 #define MTK_QDMA_HRED2			(QDMA_BASE + 0x244)
 
+/* QDMA TX Queue MIB Interface Register */
+#define MTK_QTX_MIB_IF			(QDMA_BASE + 0x2bc)
+#define MTK_MIB_ON_QTX_CFG		BIT(31)
+#define MTK_VQTX_MIB_EN			BIT(28)
+
 /* QDMA TX Forward CPU Pointer Register */
 #define MTK_QTX_CTX_PTR			(QDMA_BASE +0x300)
 
@@ -505,6 +526,14 @@
 /* QDMA FQ Free Page Buffer Length Register */
 #define MTK_QDMA_FQ_BLEN		(QDMA_BASE +0x32c)
 
+/* QDMA TX 4-Scheduler Rate Control Registers */
+#define MTK_QDMA_TX_4SCH_BASE(x)	(QDMA_BASE + 0x398 + (((x) >> 1) * 0x4))
+#define MTK_QDMA_TX_SCH_MASK		GENMASK(15, 0)
+#define MTK_QDMA_TX_SCH_MAX_WFQ		BIT(15)
+#define MTK_QDMA_TX_SCH_RATE_EN		BIT(11)
+#define MTK_QDMA_TX_SCH_RATE_MAN	GENMASK(10, 4)
+#define MTK_QDMA_TX_SCH_RATE_EXP	GENMASK(3, 0)
+
 /* WDMA Registers */
 #define MTK_WDMA_CTX_PTR(x)		(WDMA_BASE(x) + 0x8)
 #define MTK_WDMA_DTX_PTR(x)		(WDMA_BASE(x) + 0xC)
@@ -1596,6 +1625,7 @@ struct mtk_soc_data {
 		u32 rx_dma_l4_valid;
 		u32 dma_max_len;
 		u32 dma_len_offset;
+		u32 qdma_tx_sch;
 	} txrx;
 };
 
@@ -1736,6 +1766,7 @@ struct mtk_eth {
 	spinlock_t syscfg0_lock;
 	struct timer_list mtk_dma_monitor_timer;
 
+	u8 qos_toggle;
 	u8 ppe_num;
 	struct mtk_ppe *ppe[MTK_MAX_PPE_NUM];
 	struct rhashtable flow_table;
@@ -1815,4 +1846,6 @@ int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
 
 int mtk_ppe_debugfs_init(struct mtk_eth *eth);
+
+int mtk_qdma_debugfs_init(struct mtk_eth *eth);
 #endif /* MTK_ETH_H */
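
All of the MIN_RATE/MAX_RATE fields above share one encoding: a 7-bit mantissa plus a 4-bit base-10 exponent, i.e. rate = mantissa * 10^exponent (the patch does not name the unit). A standalone sketch of the conversion both debugfs write handlers below perform:

    /* Sketch: pack "rate" into the MAX_RATE mantissa/exponent fields. */
    static u32 mtk_qtx_sch_encode_max_rate(u32 rate)
    {
        u32 exp = 0;

        while (rate > 127) {    /* MTK_QTX_SCH_MAX_RATE_MAN is 7 bits wide */
            rate /= 10;
            exp++;
        }

        return FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, rate) |
               FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, exp);
    }

For example, 2500000 becomes mantissa 25 with exponent 5 (25 * 10^5), which is exactly the default maximum that mtk_qdma_qos_shaper_ebl() programs in the new debugfs file.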
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index a49275f..1767823 100755
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -406,6 +406,16 @@ int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
 	return 0;
 }
 
+int mtk_foe_entry_set_qid(struct mtk_foe_entry *entry, int qid)
+{
+	u32 *ib2 = mtk_foe_entry_ib2(entry);
+
+	*ib2 &= ~MTK_FOE_IB2_QID;
+	*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, qid);
+	*ib2 |= MTK_FOE_IB2_PSE_QOS;
+
+	return 0;
+}
 static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
 {
 	return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
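
mtk_foe_entry_set_qid() rewrites the QID field of the entry's IB2 word and sets MTK_FOE_IB2_PSE_QOS so the PSE honors that queue assignment. For illustration only, the inverse accessor would look like this (a sketch, not in the patch; it assumes MTK_FOE_IB2_QID is a contiguous field mask, as the FIELD_PREP use above implies):

    /* Sketch: read back the QDMA TX queue a FOE entry was pinned to. */
    static int mtk_foe_entry_get_qid(struct mtk_foe_entry *entry)
    {
        u32 *ib2 = mtk_foe_entry_ib2(entry);

        if (!(*ib2 & MTK_FOE_IB2_PSE_QOS))
            return -1;    /* no QoS steering requested for this entry */

        return FIELD_GET(MTK_FOE_IB2_QID, *ib2);
    }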
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 8076e5d..c46c4d9 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -356,6 +356,7 @@ int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
 int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
 int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
 			   int bss, int wcid);
+int mtk_foe_entry_set_qid(struct mtk_foe_entry *entry, int qid);
 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index f258539..3b17819 100755
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -9,6 +9,8 @@
 #include <linux/ipv6.h>
 #include <net/flow_offload.h>
 #include <net/pkt_cls.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_flow_table.h>
 #include <net/dsa.h>
 #include "mtk_eth_soc.h"
 #include "mtk_wed.h"
@@ -183,7 +185,7 @@ mtk_flow_get_dsa_port(struct net_device **dev)
 
 static int
 mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
-			   struct net_device *dev, const u8 *dest_mac,
+			   struct net_device *dev, struct nf_conn *ct, const u8 *dest_mac,
 			   int *wed_index)
 {
 	struct mtk_wdma_info info = {};
@@ -209,6 +211,9 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
 	if (dsa_port >= 0)
 		mtk_foe_entry_set_dsa(foe, dsa_port);
 
+	if (eth->qos_toggle == 2 && dsa_port >= 0)
+		mtk_foe_entry_set_qid(foe, dsa_port & MTK_QDMA_TX_MASK);
+
 	if (dev == eth->netdev[0])
 		pse_port = PSE_GDM1_PORT;
 	else if (dev == eth->netdev[1])
@@ -217,6 +222,23 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
 		return -EOPNOTSUPP;
 
 out:
+	if (eth->qos_toggle == 1 || (ct->mark & MTK_QDMA_TX_MASK) >= 6) {
+		u8 qos_ul_toggle;
+
+		if (eth->qos_toggle == 2)
+			qos_ul_toggle = ((ct->mark >> 16) & MTK_QDMA_TX_MASK) >= 6 ? 1 : 0;
+		else
+			qos_ul_toggle = ((ct->mark >> 16) & MTK_QDMA_TX_MASK) >= 1 ? 1 : 0;
+
+		if (qos_ul_toggle == 1) {
+			if (dev == eth->netdev[1])
+				mtk_foe_entry_set_qid(foe, (ct->mark >> 16) & MTK_QDMA_TX_MASK);
+			else
+				mtk_foe_entry_set_qid(foe, ct->mark & MTK_QDMA_TX_MASK);
+		} else
+			mtk_foe_entry_set_qid(foe, ct->mark & MTK_QDMA_TX_MASK);
+	}
+
 	mtk_foe_entry_set_pse_port(foe, pse_port);
 
 	return 0;
@@ -432,7 +455,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 	if (data.pppoe.num == 1)
 		mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
 
-	err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
+	err = mtk_flow_set_output_device(eth, &foe, odev, f->flow->ct, data.eth.h_dest,
 					 &wed_index);
 	if (err)
 		return err;
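
The block at the "out:" label turns the flow's conntrack mark into a queue assignment: the low 16 bits select the queue toward eth->netdev[0] and the high 16 bits the queue toward eth->netdev[1] (typically the WAN GMAC), with values of 6 and up reserved for manual steering once PPPQ occupies qid 0~5. A decoding sketch, assuming MTK_QDMA_TX_MASK (defined elsewhere in the driver) isolates the queue-id bits:

    /* Sketch: pick the QDMA TX queue for one direction of a flow. */
    static u32 mtk_flow_mark_to_qid(const struct nf_conn *ct, bool uplink)
    {
        u32 mark = READ_ONCE(ct->mark);

        return (uplink ? mark >> 16 : mark) & MTK_QDMA_TX_MASK;
    }

The mark itself comes from userspace, e.g. via a (hypothetical) CONNMARK rule such as "iptables -t mangle -A FORWARD -j CONNMARK --set-mark 0x00070006", which requests queue 7 upstream and queue 6 downstream for every flow it matches.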
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 59b8736..7261b6d 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -365,6 +378,7 @@ struct flow_cls_offload {
 	struct flow_cls_common_offload common;
 	enum flow_cls_command command;
 	unsigned long cookie;
+	struct flow_offload *flow;
 	struct flow_rule *rule;
 	struct flow_stats stats;
 	u32 classid;
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index d94c6fb..886ced5 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -810,11 +810,13 @@ static int nf_flow_offload_alloc(const struct flow_offload_work *offload,
 }
 
 static void nf_flow_offload_init(struct flow_cls_offload *cls_flow,
+				 struct flow_offload *flow,
 				 __be16 proto, int priority,
 				 enum flow_cls_command cmd,
 				 const struct flow_offload_tuple *tuple,
 				 struct netlink_ext_ack *extack)
 {
+	cls_flow->flow = flow;
 	cls_flow->common.protocol = proto;
 	cls_flow->common.prio = priority;
 	cls_flow->common.extack = extack;
@@ -836,7 +838,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
 	__be16 proto = ETH_P_ALL;
 	int err, i = 0;
 
-	nf_flow_offload_init(&cls_flow, proto, priority, cmd,
+	nf_flow_offload_init(&cls_flow, flow, proto, priority, cmd,
 			     &flow->tuplehash[dir].tuple, &extack);
 	if (cmd == FLOW_CLS_REPLACE)
 		cls_flow.rule = flow_rule->rule;
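
The two netfilter hunks above are plumbing only: nf_flow_offload_tuple() already holds the struct flow_offload, so stashing it in the flow_cls_offload is what lets mtk_flow_offload_replace() hand f->flow->ct down to mtk_flow_set_output_device(), where the conntrack mark picks the queue.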
diff --git a/drivers/net/ethernet/mediatek/mtk_qdma_debugfs.c b/drivers/net/ethernet/mediatek/mtk_qdma_debugfs.c
new file mode 100644
index 0000000..198b924
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_qdma_debugfs.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Henry Yen <henry.yen@mediatek.com>
+ *         Bo-Cun Chen <bc-bocun.chen@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include "mtk_eth_soc.h"
+
+#define MAX_PPPQ_PORT_NUM	6	/* PPPQ mode maps ports to qid 0~5 */
+
+static struct mtk_eth *_eth;	/* saved for the debugfs handlers */
+
+static void mtk_qdma_qos_shaper_ebl(struct mtk_eth *eth, u32 id, u32 enable)
+{
+	u32 val;
+
+	if (enable) {
+		val = MTK_QTX_SCH_MIN_RATE_EN | MTK_QTX_SCH_MAX_RATE_EN;
+		val |= FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
+		       FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
+		       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 25) |
+		       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
+		       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WGHT, 4);
+
+		writel(val, eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+	} else {
+		writel(0, eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+	}
+}
+
+static void mtk_qdma_qos_disable(struct mtk_eth *eth)
+{
+	u32 id, val;
+
+	for (id = 0; id < MAX_PPPQ_PORT_NUM; id++) {
+		mtk_qdma_qos_shaper_ebl(eth, id, 0);
+
+		writel(FIELD_PREP(MTK_QTX_CFG_HW_RESV_CNT_OFFSET, 4) |
+		       FIELD_PREP(MTK_QTX_CFG_SW_RESV_CNT_OFFSET, 4),
+		       eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+	}
+
+	val = (MTK_QDMA_TX_SCH_MAX_WFQ) | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
+	for (id = 0; id < eth->soc->txrx.qdma_tx_sch; id += 2) {
+		if (eth->soc->txrx.qdma_tx_sch == 4)
+			writel(val, eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+		else
+			writel(val, eth->base + MTK_QDMA_TX_2SCH_BASE);
+	}
+}
+
+static void mtk_qdma_qos_pppq_enable(struct mtk_eth *eth)
+{
+	u32 id, val;
+
+	for (id = 0; id < MAX_PPPQ_PORT_NUM; id++) {
+		mtk_qdma_qos_shaper_ebl(eth, id, 1);
+
+		writel(FIELD_PREP(MTK_QTX_CFG_HW_RESV_CNT_OFFSET, 4) |
+		       FIELD_PREP(MTK_QTX_CFG_SW_RESV_CNT_OFFSET, 4),
+		       eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+	}
+
+	val = (MTK_QDMA_TX_SCH_MAX_WFQ) | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
+	for (id = 0; id < eth->soc->txrx.qdma_tx_sch; id += 2) {
+		if (eth->soc->txrx.qdma_tx_sch == 4)
+			writel(val, eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+		else
+			writel(val, eth->base + MTK_QDMA_TX_2SCH_BASE);
+	}
+}
+
+static ssize_t mtk_qdma_debugfs_write_qos(struct file *file, const char __user *buffer,
+					  size_t count, loff_t *data)
+{
+	struct seq_file *m = file->private_data;
+	struct mtk_eth *eth = m->private;
+	char buf[8];
+	int len = count;
+
+	if (len > sizeof(buf) || copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	if (buf[0] == '0') {
+		pr_info("HQoS is going to be disabled!\n");
+		eth->qos_toggle = 0;
+		mtk_qdma_qos_disable(eth);
+	} else if (buf[0] == '1') {
+		pr_info("HQoS mode is going to be enabled!\n");
+		eth->qos_toggle = 1;
+	} else if (buf[0] == '2') {
+		pr_info("Per-port-per-queue mode is going to be enabled!\n");
+		pr_info("PPPQ uses qid 0~5 (scheduler 0).\n");
+		eth->qos_toggle = 2;
+		mtk_qdma_qos_pppq_enable(eth);
+	}
+
+	return len;
+}
+
+static int mtk_qdma_debugfs_read_qos(struct seq_file *m, void *private)
+{
+	struct mtk_eth *eth = m->private;
+
+	seq_printf(m, "value=%d, HQoS is %s now!\n",
+		   eth->qos_toggle, (eth->qos_toggle) ? "enabled" : "disabled");
+
+	return 0;
+}
+
+static int mtk_qdma_debugfs_open_qos(struct inode *inode, struct file *file)
+{
+	return single_open(file, mtk_qdma_debugfs_read_qos,
+			   inode->i_private);
+}
+
+static ssize_t mtk_qdma_debugfs_read_qos_sched(struct file *file, char __user *user_buf,
+					       size_t count, loff_t *ppos)
+{
+	struct mtk_eth *eth = _eth;
+	long id = (long)file->private_data;
+	char *buf;
+	unsigned int len = 0, buf_len = 1500;
+	int enable, scheduling, max_rate, exp, scheduler, i;
+	ssize_t ret_cnt;
+	u32 val;
+
+	buf = kzalloc(buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (eth->soc->txrx.qdma_tx_sch == 4)
+		val = readl(eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+	else
+		val = readl(eth->base + MTK_QDMA_TX_2SCH_BASE);
+
+	if (id & 0x1)
+		val >>= 16;
+
+	val &= MTK_QDMA_TX_SCH_MASK;
+	enable = FIELD_GET(MTK_QDMA_TX_SCH_RATE_EN, val);
+	scheduling = FIELD_GET(MTK_QDMA_TX_SCH_MAX_WFQ, val);
+	max_rate = FIELD_GET(MTK_QDMA_TX_SCH_RATE_MAN, val);
+	exp = FIELD_GET(MTK_QDMA_TX_SCH_RATE_EXP, val);
+	while (exp--)
+		max_rate *= 10;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "EN\tScheduling\tMAX\tQueue#\n%d\t%s%16d\t", enable,
+			 (scheduling == 1) ? "WRR" : "SP", max_rate);
+
+	for (i = 0; i < MTK_QDMA_TX_NUM; i++) {
+		val = readl(eth->base + MTK_QDMA_PAGE) & ~MTK_QTX_CFG_PAGE;
+		val |= FIELD_PREP(MTK_QTX_CFG_PAGE, i / MTK_QTX_PER_PAGE);
+		writel(val, eth->base + MTK_QDMA_PAGE);
+
+		val = readl(eth->base + MTK_QTX_SCH(i % MTK_QTX_PER_PAGE));
+		if (eth->soc->txrx.qdma_tx_sch == 4)
+			scheduler = FIELD_GET(MTK_QTX_SCH_TX_SCH_SEL_V2, val);
+		else
+			scheduler = FIELD_GET(MTK_QTX_SCH_TX_SCH_SEL, val);
+		if (id == scheduler)
+			len += scnprintf(buf + len, buf_len - len, "%d  ", i);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	if (len > buf_len)
+		len = buf_len;
+
+	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+	kfree(buf);
+	return ret_cnt;
+}
+
+static ssize_t mtk_qdma_debugfs_write_qos_sched(struct file *file, const char __user *buf,
+						size_t length, loff_t *offset)
+{
+	struct mtk_eth *eth = _eth;
+	long id = (long)file->private_data;
+	char line[64] = {0}, scheduling[32];
+	int enable, rate, exp = 0, shift = 0;
+	size_t size;
+	u32 sch, val = 0;
+
+	if (length >= sizeof(line))
+		return -EINVAL;
+
+	if (copy_from_user(line, buf, length))
+		return -EFAULT;
+
+	line[length] = '\0';
+
+	if (sscanf(line, "%d %31s %d", &enable, scheduling, &rate) != 3)
+		return -EFAULT;
+
+	while (rate > 127) {
+		rate /= 10;
+		exp++;
+	}
+
+	if (enable)
+		val |= FIELD_PREP(MTK_QDMA_TX_SCH_RATE_EN, 1);
+	if (strcmp(scheduling, "sp") != 0)
+		val |= FIELD_PREP(MTK_QDMA_TX_SCH_MAX_WFQ, 1);
+	val |= FIELD_PREP(MTK_QDMA_TX_SCH_RATE_MAN, rate);
+	val |= FIELD_PREP(MTK_QDMA_TX_SCH_RATE_EXP, exp);
+
+	if (id & 0x1)
+		shift = 16;
+
+	if (eth->soc->txrx.qdma_tx_sch == 4)
+		sch = readl(eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+	else
+		sch = readl(eth->base + MTK_QDMA_TX_2SCH_BASE);
+
+	sch &= ~(MTK_QDMA_TX_SCH_MASK << shift);
+	sch |= val << shift;
+	if (eth->soc->txrx.qdma_tx_sch == 4)
+		writel(sch, eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+	else
+		writel(sch, eth->base + MTK_QDMA_TX_2SCH_BASE);
+
+	size = strlen(line);
+	*offset += size;
+
+	return length;
+}
+
+static ssize_t mtk_qdma_debugfs_read_qos_queue(struct file *file, char __user *user_buf,
+					       size_t count, loff_t *ppos)
+{
+	struct mtk_eth *eth = _eth;
+	long id = (long)file->private_data;
+	char *buf;
+	unsigned int len = 0, buf_len = 1500;
+	int min_rate_en, min_rate, min_rate_exp;
+	int max_rate_en, max_weight, max_rate, max_rate_exp;
+	u32 qtx_sch, qtx_cfg, scheduler, val;
+	ssize_t ret_cnt;
+
+	buf = kzalloc(buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	val = readl(eth->base + MTK_QDMA_PAGE) & ~MTK_QTX_CFG_PAGE;
+	val |= FIELD_PREP(MTK_QTX_CFG_PAGE, id / MTK_QTX_PER_PAGE);
+	writel(val, eth->base + MTK_QDMA_PAGE);
+
+	qtx_cfg = readl(eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+	qtx_sch = readl(eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+	if (eth->soc->txrx.qdma_tx_sch == 4)
+		scheduler = FIELD_GET(MTK_QTX_SCH_TX_SCH_SEL_V2, qtx_sch);
+	else
+		scheduler = FIELD_GET(MTK_QTX_SCH_TX_SCH_SEL, qtx_sch);
+
+	min_rate_en = FIELD_GET(MTK_QTX_SCH_MIN_RATE_EN, qtx_sch);
+	min_rate = FIELD_GET(MTK_QTX_SCH_MIN_RATE_MAN, qtx_sch);
+	min_rate_exp = FIELD_GET(MTK_QTX_SCH_MIN_RATE_EXP, qtx_sch);
+	max_rate_en = FIELD_GET(MTK_QTX_SCH_MAX_RATE_EN, qtx_sch);
+	max_weight = FIELD_GET(MTK_QTX_SCH_MAX_RATE_WGHT, qtx_sch);
+	max_rate = FIELD_GET(MTK_QTX_SCH_MAX_RATE_MAN, qtx_sch);
+	max_rate_exp = FIELD_GET(MTK_QTX_SCH_MAX_RATE_EXP, qtx_sch);
+	while (min_rate_exp--)
+		min_rate *= 10;
+
+	while (max_rate_exp--)
+		max_rate *= 10;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "scheduler: %d\nhw resv: %d\nsw resv: %d\n", scheduler,
+			 (qtx_cfg >> 8) & 0xff, qtx_cfg & 0xff);
+
+	/* Switch to debug mode */
+	val = readl(eth->base + MTK_QTX_MIB_IF) & ~MTK_MIB_ON_QTX_CFG;
+	val |= MTK_MIB_ON_QTX_CFG;
+	writel(val, eth->base + MTK_QTX_MIB_IF);
+
+	val = readl(eth->base + MTK_QTX_MIB_IF) & ~MTK_VQTX_MIB_EN;
+	val |= MTK_VQTX_MIB_EN;
+	writel(val, eth->base + MTK_QTX_MIB_IF);
+
+	qtx_cfg = readl(eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+	qtx_sch = readl(eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "packet count: %u\n", qtx_cfg);
+	len += scnprintf(buf + len, buf_len - len,
+			 "packet drop: %u\n\n", qtx_sch);
+
+	/* Recover to normal mode */
+	val = readl(eth->base + MTK_QTX_MIB_IF);
+	val &= ~MTK_MIB_ON_QTX_CFG;
+	writel(val, eth->base + MTK_QTX_MIB_IF);
+
+	val = readl(eth->base + MTK_QTX_MIB_IF);
+	val &= ~MTK_VQTX_MIB_EN;
+	writel(val, eth->base + MTK_QTX_MIB_IF);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "      EN     RATE   WEIGHT\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "----------------------------\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "max%5d%9d%9d\n", max_rate_en, max_rate, max_weight);
+	len += scnprintf(buf + len, buf_len - len,
+			 "min%5d%9d        -\n", min_rate_en, min_rate);
+
+	if (len > buf_len)
+		len = buf_len;
+
+	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+	kfree(buf);
+
+	return ret_cnt;
+}
+
+static ssize_t mtk_qdma_debugfs_write_qos_queue(struct file *file, const char __user *buf,
+						size_t length, loff_t *offset)
+{
+	struct mtk_eth *eth = _eth;
+	long id = (long)file->private_data;
+	char line[64] = {0};
+	int max_enable, max_rate, max_exp = 0;
+	int min_enable, min_rate, min_exp = 0;
+	int scheduler, weight, resv;
+	size_t size;
+	u32 val;
+
+	if (length >= sizeof(line))
+		return -EINVAL;
+
+	if (copy_from_user(line, buf, length))
+		return -EFAULT;
+
+	line[length] = '\0';
+
+	if (sscanf(line, "%d %d %d %d %d %d %d", &scheduler, &min_enable, &min_rate,
+		   &max_enable, &max_rate, &weight, &resv) != 7)
+		return -EFAULT;
+
+	while (max_rate > 127) {
+		max_rate /= 10;
+		max_exp++;
+	}
+
+	while (min_rate > 127) {
+		min_rate /= 10;
+		min_exp++;
+	}
+
+	val = readl(eth->base + MTK_QDMA_PAGE) & ~MTK_QTX_CFG_PAGE;
+	val |= FIELD_PREP(MTK_QTX_CFG_PAGE, id / MTK_QTX_PER_PAGE);
+	writel(val, eth->base + MTK_QDMA_PAGE);
+
+	if (eth->soc->txrx.qdma_tx_sch == 4)
+		val = FIELD_PREP(MTK_QTX_SCH_TX_SCH_SEL_V2, scheduler);
+	else
+		val = FIELD_PREP(MTK_QTX_SCH_TX_SCH_SEL, scheduler);
+	if (min_enable)
+		val |= MTK_QTX_SCH_MIN_RATE_EN;
+	val |= FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, min_rate);
+	val |= FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, min_exp);
+	if (max_enable)
+		val |= MTK_QTX_SCH_MAX_RATE_EN;
+	val |= FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WGHT, weight);
+	val |= FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, max_rate);
+	val |= FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, max_exp);
+	writel(val, eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+
+	val = readl(eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+	val &= ~(MTK_QTX_CFG_HW_RESV_CNT_OFFSET | MTK_QTX_CFG_SW_RESV_CNT_OFFSET);
+	val |= FIELD_PREP(MTK_QTX_CFG_HW_RESV_CNT_OFFSET, resv) | FIELD_PREP(MTK_QTX_CFG_SW_RESV_CNT_OFFSET, resv);
+	writel(val, eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+
+	size = strlen(line);
+	*offset += size;
+
+	return length;
+}
+
+int mtk_qdma_debugfs_init(struct mtk_eth *eth)
+{
+	static const struct file_operations fops_qos = {
+		.open = mtk_qdma_debugfs_open_qos,
+		.read = seq_read,
+		.llseek = seq_lseek,
+		.write = mtk_qdma_debugfs_write_qos,
+		.release = single_release,
+	};
+
+	static const struct file_operations fops_qos_sched = {
+		.open = simple_open,
+		.read = mtk_qdma_debugfs_read_qos_sched,
+		.write = mtk_qdma_debugfs_write_qos_sched,
+		.llseek = default_llseek,
+	};
+
+	static const struct file_operations fops_qos_queue = {
+		.open = simple_open,
+		.read = mtk_qdma_debugfs_read_qos_queue,
+		.write = mtk_qdma_debugfs_write_qos_queue,
+		.llseek = default_llseek,
+	};
+
+	struct dentry *root;
+	long i;
+	char name[16];
+
+	_eth = eth;
+
+	root = debugfs_lookup("mtk_ppe", NULL);
+	if (!root)
+		return -ENOMEM;
+
+	debugfs_create_file("qos_toggle", 0644, root, eth, &fops_qos);
+
+	for (i = 0; i < eth->soc->txrx.qdma_tx_sch; i++) {
+		snprintf(name, sizeof(name), "qdma_sch%ld", i);
+		debugfs_create_file(name, 0644, root, (void *)i,
+				    &fops_qos_sched);
+	}
+
+	for (i = 0; i < MTK_QDMA_TX_NUM; i++) {
+		snprintf(name, sizeof(name), "qdma_txq%ld", i);
+		debugfs_create_file(name, 0644, root, (void *)i,
+				    &fops_qos_queue);
+	}
+
+	return 0;
+}
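
Usage sketch for the new debugfs files (assuming debugfs is mounted at /sys/kernel/debug; they appear under the existing mtk_ppe directory): writing "0", "1" or "2" to qos_toggle selects disabled, HQoS or PPPQ mode, e.g. "echo 2 > /sys/kernel/debug/mtk_ppe/qos_toggle". Each qdma_schX file takes "<enable> <sp|wrr> <rate>", so "echo 1 sp 1000000 > /sys/kernel/debug/mtk_ppe/qdma_sch0" enables a rate-limited strict-priority scheduler 0, with the rate stored as mantissa 100 and exponent 4. Each qdma_txqX file takes the seven sscanf fields in order, "<scheduler> <min_enable> <min_rate> <max_enable> <max_rate> <weight> <resv>", so "echo 0 1 10000 1 100000 4 4 > /sys/kernel/debug/mtk_ppe/qdma_txq3" binds queue 3 to scheduler 0 with a minimum rate of 10000, a maximum rate of 100000, weight 4 and 4 reserved hw/sw buffers. Reading any of these files dumps the current configuration (and, for queues, the MIB packet and drop counters).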