From 9e21d6fa97e93cb2ba9b923810666ddaf7a981ee Mon Sep 17 00:00:00 2001
From: Bc-bocun Chen <bc-bocun.chen@mediatek.com>
Date: Mon, 18 Sep 2023 11:11:03 +0800
Subject: [PATCH 13/22] flow-offload-add-mtkhnat-qdma-qos

---
 drivers/net/ethernet/mediatek/Makefile | 2 +-
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 10 +
 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 63 ++-
 drivers/net/ethernet/mediatek/mtk_ppe.c | 48 +-
 drivers/net/ethernet/mediatek/mtk_ppe.h | 4 +
 .../net/ethernet/mediatek/mtk_ppe_offload.c | 28 +-
 .../net/ethernet/mediatek/mtk_qdma_debugfs.c | 439 ++++++++++++++++++
 include/net/flow_offload.h | 1 +
 net/netfilter/nf_flow_table_offload.c | 4 +-
 9 files changed, 593 insertions(+), 6 deletions(-)
 create mode 100644 drivers/net/ethernet/mediatek/mtk_qdma_debugfs.c

diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index fdbb90f..c7d2296 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -5,7 +5,7 @@
 
 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
 mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_usxgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o \
- mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
+ mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o mtk_qdma_debugfs.o
 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
 ifdef CONFIG_DEBUG_FS
 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 9d8ce07..0f6613b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -5266,6 +5266,8 @@ static int mtk_probe(struct platform_device *pdev)
 }
 
 mtk_ppe_debugfs_init(eth);
+
+ mtk_qdma_debugfs_init(eth);
 }
 
 for (i = 0; i < MTK_MAX_DEVS; i++) {
@@ -5371,6 +5373,7 @@ static const struct mtk_soc_data mt2701_data = {
 .rx_dma_l4_valid = RX_DMA_L4_VALID,
 .dma_max_len = MTK_TX_DMA_BUF_LEN,
 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+ .qdma_tx_sch = 2,
 },
 };
 
@@ -5391,6 +5394,7 @@ static const struct mtk_soc_data mt7621_data = {
 .rxd_size = sizeof(struct mtk_rx_dma),
 .dma_max_len = MTK_TX_DMA_BUF_LEN,
 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+ .qdma_tx_sch = 2,
 },
 };
 
@@ -5412,6 +5416,7 @@ static const struct mtk_soc_data mt7622_data = {
 .rx_dma_l4_valid = RX_DMA_L4_VALID,
 .dma_max_len = MTK_TX_DMA_BUF_LEN,
 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+ .qdma_tx_sch = 2,
 },
 };
 
@@ -5432,6 +5437,7 @@ static const struct mtk_soc_data mt7623_data = {
 .rx_dma_l4_valid = RX_DMA_L4_VALID,
 .dma_max_len = MTK_TX_DMA_BUF_LEN,
 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+ .qdma_tx_sch = 2,
 },
 };
 
@@ -5472,6 +5478,7 @@ static const struct mtk_soc_data mt7986_data = {
 .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
+ .qdma_tx_sch = 4,
 },
 };
 
@@ -5493,6 +5500,7 @@ static const struct mtk_soc_data mt7981_data = {
 .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
+ .qdma_tx_sch = 4,
 },
 };
 
@@ -5511,6 +5519,7 @@ static const struct mtk_soc_data mt7988_data = {
 .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
+ .qdma_tx_sch = 4,
 },
 };
 
@@ -5529,6 +5538,7 @@ static const struct mtk_soc_data rt5350_data = {
 .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
 .dma_max_len = MTK_TX_DMA_BUF_LEN,
 .dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+ .qdma_tx_sch = 4,
 },
 };
 
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 101c233..7ea380e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -400,10 +400,21 @@
 
 /* QDMA TX Queue Configuration Registers */
 #define MTK_QTX_CFG(x) (QDMA_BASE + (x * 0x10))
+#define MTK_QTX_CFG_HW_RESV_CNT_OFFSET GENMASK(15, 8)
+#define MTK_QTX_CFG_SW_RESV_CNT_OFFSET GENMASK(7, 0)
 #define QDMA_RES_THRES 4
 
 /* QDMA TX Queue Scheduler Registers */
 #define MTK_QTX_SCH(x) (QDMA_BASE + 4 + (x * 0x10))
+#define MTK_QTX_SCH_TX_SCH_SEL BIT(31)
+#define MTK_QTX_SCH_TX_SCH_SEL_V2 GENMASK(31, 30)
+#define MTK_QTX_SCH_MIN_RATE_EN BIT(27)
+#define MTK_QTX_SCH_MIN_RATE_MAN GENMASK(26, 20)
+#define MTK_QTX_SCH_MIN_RATE_EXP GENMASK(19, 16)
+#define MTK_QTX_SCH_MAX_RATE_WGHT GENMASK(15, 12)
+#define MTK_QTX_SCH_MAX_RATE_EN BIT(11)
+#define MTK_QTX_SCH_MAX_RATE_MAN GENMASK(10, 4)
+#define MTK_QTX_SCH_MAX_RATE_EXP GENMASK(3, 0)
 
 /* QDMA RX Base Pointer Register */
 #define MTK_QRX_BASE_PTR0 (QDMA_BASE + 0x100)
@@ -421,7 +432,9 @@
 #define MTK_QRX_DRX_IDX0 (QDMA_BASE + 0x10c)
 
 /* QDMA Page Configuration Register */
-#define MTK_QDMA_PAGE (QDMA_BASE + 0x1f0)
+#define MTK_QDMA_PAGE (QDMA_BASE + 0x1f0)
+#define MTK_QTX_CFG_PAGE GENMASK(3, 0)
+#define MTK_QTX_PER_PAGE (16)
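+/* QTX_CFG/QTX_SCH are windowed: MTK_QTX_CFG_PAGE selects a page of
+ * MTK_QTX_PER_PAGE (16) queues, which are then indexed by
+ * (qid % MTK_QTX_PER_PAGE).
+ */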
143
144 /* QDMA Global Configuration Register */
145 #define MTK_QDMA_GLO_CFG (QDMA_BASE + 0x204)
developer58aa0682023-09-18 14:02:26 +0800146@@ -458,6 +471,9 @@
developer73cb4d52022-09-06 15:15:57 +0800147 #define FC_THRES_DROP_EN (7 << 16)
148 #define FC_THRES_MIN 0x4444
149
150+/* QDMA TX Scheduler Rate Control Register */
151+#define MTK_QDMA_TX_2SCH_BASE (QDMA_BASE + 0x214)
152+
153 /* QDMA Interrupt Status Register */
154 #define MTK_QDMA_INT_STATUS (QDMA_BASE + 0x218)
developer0aaf79d2023-08-21 14:10:16 +0800155 #if defined(CONFIG_MEDIATEK_NETSYS_RX_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
developer58aa0682023-09-18 14:02:26 +0800156@@ -494,6 +510,11 @@
developer73cb4d52022-09-06 15:15:57 +0800157 /* QDMA Interrupt Mask Register */
158 #define MTK_QDMA_HRED2 (QDMA_BASE + 0x244)
159
160+/* QDMA TX Queue MIB Interface Register */
161+#define MTK_QTX_MIB_IF (QDMA_BASE + 0x2bc)
162+#define MTK_MIB_ON_QTX_CFG BIT(31)
163+#define MTK_VQTX_MIB_EN BIT(28)
164+
165 /* QDMA TX Forward CPU Pointer Register */
166 #define MTK_QTX_CTX_PTR (QDMA_BASE +0x300)
167
developer58aa0682023-09-18 14:02:26 +0800168@@ -521,6 +542,14 @@
developer73cb4d52022-09-06 15:15:57 +0800169 /* QDMA FQ Free Page Buffer Length Register */
170 #define MTK_QDMA_FQ_BLEN (QDMA_BASE +0x32c)
171
172+/* QDMA TX Scheduler Rate Control Register */
173+#define MTK_QDMA_TX_4SCH_BASE(x) (QDMA_BASE + 0x398 + (((x) >> 1) * 0x4))
174+#define MTK_QDMA_TX_SCH_MASK GENMASK(15, 0)
175+#define MTK_QDMA_TX_SCH_MAX_WFQ BIT(15)
176+#define MTK_QDMA_TX_SCH_RATE_EN BIT(11)
177+#define MTK_QDMA_TX_SCH_RATE_MAN GENMASK(10, 4)
178+#define MTK_QDMA_TX_SCH_RATE_EXP GENMASK(3, 0)
179+
180 /* WDMA Registers */
developer1fb19c92023-03-07 23:45:23 +0800181 #define MTK_WDMA_CTX_PTR(x) (WDMA_BASE(x) + 0x8)
developer73cb4d52022-09-06 15:15:57 +0800182 #define MTK_WDMA_DTX_PTR(x) (WDMA_BASE(x) + 0xC)
developer58aa0682023-09-18 14:02:26 +0800183@@ -1690,6 +1719,7 @@ struct mtk_soc_data {
developer1fb19c92023-03-07 23:45:23 +0800184 u32 rx_dma_l4_valid;
developer73cb4d52022-09-06 15:15:57 +0800185 u32 dma_max_len;
186 u32 dma_len_offset;
187+ u32 qdma_tx_sch;
188 } txrx;
189 };
190
developer58aa0682023-09-18 14:02:26 +0800191@@ -1879,6 +1909,7 @@ struct mtk_eth {
developer73cb4d52022-09-06 15:15:57 +0800192 spinlock_t syscfg0_lock;
193 struct timer_list mtk_dma_monitor_timer;
194
developer0a320142022-09-21 23:18:01 +0800195+ u8 qos_toggle;
developeree39bcf2023-06-16 08:03:30 +0800196 u8 ppe_num;
197 struct mtk_ppe *ppe[MTK_MAX_PPE_NUM];
developer73cb4d52022-09-06 15:15:57 +0800198 struct rhashtable flow_table;
developerc1d06e12023-10-26 21:52:08 +0800199@@ -1936,6 +1967,36 @@ extern const struct of_device_id of_mtk_match[];
developer0aaf79d2023-08-21 14:10:16 +0800200 extern u32 mtk_hwlro_stats_ebl;
201 extern u32 dbg_show_level;
202
developer64b431b2023-08-26 01:04:45 +0800203+static inline void mtk_set_ib1_sp(struct mtk_eth *eth, struct mtk_foe_entry *foe, u32 val)
204+{
developerc1d06e12023-10-26 21:52:08 +0800205+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
206+ foe->ib1 |= FIELD_PREP(MTK_FOE_IB1_UNBIND_SRC_PORT, val);
207+#endif
developer64b431b2023-08-26 01:04:45 +0800208+}
209+
210+static inline u32 mtk_get_ib1_sp(struct mtk_eth *eth, struct mtk_foe_entry *foe)
211+{
developerc1d06e12023-10-26 21:52:08 +0800212+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
213+ return FIELD_GET(MTK_FOE_IB1_UNBIND_SRC_PORT, foe->ib1);
214+#else
developer64b431b2023-08-26 01:04:45 +0800215+ return 0;
developerc1d06e12023-10-26 21:52:08 +0800216+#endif
developer64b431b2023-08-26 01:04:45 +0800217+}
218+
+static inline int
+mtk_ppe_check_pppq_path(struct mtk_eth *eth, struct mtk_foe_entry *foe, int dsa_port)
+{
+ u32 sp = mtk_get_ib1_sp(eth, foe);
+
+ if ((dsa_port >= 0 && dsa_port <= 4) ||
+ (dsa_port == 5 && (sp == PSE_WDMA0_PORT ||
+ sp == PSE_WDMA1_PORT ||
+ sp == PSE_WDMA2_PORT)))
+ return 1;
+
+ return 0;
+}
+
 /* read the hardware status register */
 void mtk_stats_update_mac(struct mtk_mac *mac);
 
@@ -1969,4 +2028,6 @@ void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
 u32 mtk_rss_indr_table(struct mtk_rss_params *rss_params, int index);
 
 int mtk_ppe_debugfs_init(struct mtk_eth *eth);
+
+int mtk_qdma_debugfs_init(struct mtk_eth *eth);
 #endif /* MTK_ETH_H */
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index f55a95c..6d6c1e4 100755
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -128,7 +128,7 @@ static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
 enable * MTK_PPE_CACHE_CTL_EN);
 }
 
-static u32 mtk_ppe_hash_entry(struct mtk_ppe *ppe, struct mtk_foe_entry *e)
+u32 mtk_ppe_hash_entry(struct mtk_ppe *ppe, struct mtk_foe_entry *e)
 {
 u32 hv1, hv2, hv3;
 u32 hash;
@@ -420,12 +420,58 @@ int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
 return 0;
 }
 
+int mtk_foe_entry_set_qid(struct mtk_foe_entry *entry, int qid)
+{
+ u32 *ib2 = mtk_foe_entry_ib2(entry);
+
+ *ib2 &= ~MTK_FOE_IB2_QID;
+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, qid);
+ *ib2 |= MTK_FOE_IB2_PSE_QOS;
+
+ return 0;
+}
 static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
 {
 return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
 FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
 }
 
+bool mtk_foe_entry_match(struct mtk_foe_entry *entry, struct mtk_foe_entry *data)
+{
+ int type, len;
+
+ if ((data->ib1 ^ entry->ib1) & MTK_FOE_IB1_UDP)
+ return false;
+
+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+ len = offsetof(struct mtk_foe_entry, ipv6._rsv);
+ else
+ len = offsetof(struct mtk_foe_entry, ipv4.ib2);
+
+ return !memcmp(&entry->data, &data->data, len - 4);
+}
+
+int mtk_foe_entry_set_sp(struct mtk_ppe *ppe, struct mtk_foe_entry *entry)
+{
+ struct mtk_foe_entry *hwe;
+ u32 hash, sp = 0;
+ int i;
+
+ hash = mtk_ppe_hash_entry(ppe, entry);
+ for (i = 0; i < ppe->way; i++) {
+ hwe = &ppe->foe_table[hash + i];
+ if (mtk_foe_entry_match(hwe, entry)) {
+ sp = mtk_get_ib1_sp(ppe->eth, hwe);
+ break;
+ }
+ }
+
+ mtk_set_ib1_sp(ppe->eth, entry, sp);
+
+ return 0;
+}
+
 static bool
 mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
 {
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 86288b0..5ab864f 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -403,9 +403,13 @@ int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
 int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
 int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
 int bss, int wcid);
+int mtk_foe_entry_set_qid(struct mtk_foe_entry *entry, int qid);
+bool mtk_foe_entry_match(struct mtk_foe_entry *entry, struct mtk_foe_entry *data);
+int mtk_foe_entry_set_sp(struct mtk_ppe *ppe, struct mtk_foe_entry *entry);
 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index, struct mtk_foe_accounting *diff);
+u32 mtk_ppe_hash_entry(struct mtk_ppe *ppe, struct mtk_foe_entry *e);
 
 #endif
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index b80f72d..3bc50a4 100755
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -9,6 +9,8 @@
 #include <linux/ipv6.h>
 #include <net/flow_offload.h>
 #include <net/pkt_cls.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_flow_table.h>
 #include <net/dsa.h>
 #include "mtk_eth_soc.h"
 #include "mtk_wed.h"
@@ -183,7 +185,7 @@ mtk_flow_get_dsa_port(struct net_device **dev)
 
 static int
 mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
- struct net_device *dev, const u8 *dest_mac,
+ struct net_device *dev, struct nf_conn *ct, const u8 *dest_mac,
 int *wed_index)
 {
 struct mtk_wdma_info info = {};
@@ -209,6 +211,9 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
 if (dsa_port >= 0)
 mtk_foe_entry_set_dsa(foe, dsa_port);
 
+ if (eth->qos_toggle == 2 && mtk_ppe_check_pppq_path(eth, foe, dsa_port))
+ mtk_foe_entry_set_qid(foe, dsa_port & MTK_QDMA_TX_MASK);
+
 if (dev == eth->netdev[0])
 pse_port = PSE_GDM1_PORT;
 else if (dev == eth->netdev[1])
@@ -217,6 +222,23 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
 return -EOPNOTSUPP;
 
 out:
+ if (eth->qos_toggle == 1 || (ct->mark & MTK_QDMA_TX_MASK) >= 6) {
+ u8 qos_ul_toggle;
+
+ if (eth->qos_toggle == 2)
+ qos_ul_toggle = ((ct->mark >> 16) & MTK_QDMA_TX_MASK) >= 6 ? 1 : 0;
+ else
+ qos_ul_toggle = ((ct->mark >> 16) & MTK_QDMA_TX_MASK) >= 1 ? 1 : 0;
+
+ if (qos_ul_toggle == 1) {
+ if (dev == eth->netdev[1])
+ mtk_foe_entry_set_qid(foe, (ct->mark >> 16) & MTK_QDMA_TX_MASK);
+ else
+ mtk_foe_entry_set_qid(foe, ct->mark & MTK_QDMA_TX_MASK);
+ } else
+ mtk_foe_entry_set_qid(foe, ct->mark & MTK_QDMA_TX_MASK);
+ }
+
 mtk_foe_entry_set_pse_port(foe, pse_port);
 
 return 0;
@@ -447,7 +469,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 if (data.pppoe.num == 1)
 mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
 
- err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
+ mtk_foe_entry_set_sp(eth->ppe[ppe_index], &foe);
+
+ err = mtk_flow_set_output_device(eth, &foe, odev, f->flow->ct, data.eth.h_dest,
 &wed_index);
 if (err)
 return err;
diff --git a/drivers/net/ethernet/mediatek/mtk_qdma_debugfs.c b/drivers/net/ethernet/mediatek/mtk_qdma_debugfs.c
new file mode 100644
index 0000000..3a7c585
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_qdma_debugfs.c
@@ -0,0 +1,439 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Henry Yen <henry.yen@mediatek.com>
+ *         Bo-Cun Chen <bc-bocun.chen@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include "mtk_eth_soc.h"
+
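+/* PPPQ (per-port-per-queue) maps DSA ports 0-5 one-to-one onto QDMA
+ * queues 0-5 on scheduler 0.
+ */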
+#define MAX_PPPQ_PORT_NUM 6
+
+static struct mtk_eth *_eth;
+
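+/* Shaper rates use a 7-bit mantissa and a base-10 exponent:
+ * rate = man * 10^exp (Kbps, assuming the usual QDMA rate unit), so the
+ * defaults below give a 10 Mbps floor and a 2.5 Gbps ceiling per queue.
+ */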
+static void mtk_qdma_qos_shaper_ebl(struct mtk_eth *eth, u32 id, u32 enable)
+{
+ u32 val;
+
+ if (enable) {
+ val = MTK_QTX_SCH_MIN_RATE_EN | MTK_QTX_SCH_MAX_RATE_EN;
+ val |= FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 25) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WGHT, 4);
+
+ writel(val, eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+ } else {
+ writel(0, eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+ }
+}
+
+static void mtk_qdma_qos_disable(struct mtk_eth *eth)
+{
+ u32 id, val;
+
+ for (id = 0; id < MAX_PPPQ_PORT_NUM; id++) {
+ mtk_qdma_qos_shaper_ebl(eth, id, 0);
+
+ writel(FIELD_PREP(MTK_QTX_CFG_HW_RESV_CNT_OFFSET, 4) |
+ FIELD_PREP(MTK_QTX_CFG_SW_RESV_CNT_OFFSET, 4),
+ eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+ }
+
+ val = (MTK_QDMA_TX_SCH_MAX_WFQ) | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
+ for (id = 0; id < eth->soc->txrx.qdma_tx_sch; id += 2) {
+ if (eth->soc->txrx.qdma_tx_sch == 4)
+ writel(val, eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+ else
+ writel(val, eth->base + MTK_QDMA_TX_2SCH_BASE);
+ }
+}
+
+static void mtk_qdma_qos_pppq_enable(struct mtk_eth *eth)
+{
+ u32 id, val;
+
+ for (id = 0; id < MAX_PPPQ_PORT_NUM; id++) {
+ mtk_qdma_qos_shaper_ebl(eth, id, 1);
+
+ writel(FIELD_PREP(MTK_QTX_CFG_HW_RESV_CNT_OFFSET, 4) |
+ FIELD_PREP(MTK_QTX_CFG_SW_RESV_CNT_OFFSET, 4),
+ eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+ }
+
+ val = (MTK_QDMA_TX_SCH_MAX_WFQ) | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
+ for (id = 0; id < eth->soc->txrx.qdma_tx_sch; id += 2) {
+ if (eth->soc->txrx.qdma_tx_sch == 4)
+ writel(val, eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+ else
+ writel(val, eth->base + MTK_QDMA_TX_2SCH_BASE);
+ }
+}
+
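+/* qos_toggle: write '0' to disable hardware QoS, '1' to enable HQoS
+ * mode, or '2' to enable per-port-per-queue (PPPQ) mode.
+ */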
+static ssize_t mtk_qmda_debugfs_write_qos(struct file *file, const char __user *buffer,
+ size_t count, loff_t *data)
+{
+ struct seq_file *m = file->private_data;
+ struct mtk_eth *eth = m->private;
+ char buf[8];
+ int len = count;
+
+ if ((len > 8) || copy_from_user(buf, buffer, len))
+ return -EFAULT;
+
+ if (buf[0] == '0') {
+ pr_info("HQoS is going to be disabled!\n");
+ eth->qos_toggle = 0;
+ mtk_qdma_qos_disable(eth);
+ } else if (buf[0] == '1') {
+ pr_info("HQoS mode is going to be enabled!\n");
+ eth->qos_toggle = 1;
+ } else if (buf[0] == '2') {
+ pr_info("Per-port-per-queue mode is going to be enabled!\n");
+ pr_info("PPPQ uses qid 0~5 (scheduler 0).\n");
+ eth->qos_toggle = 2;
+ mtk_qdma_qos_pppq_enable(eth);
+ }
+
+ return len;
+}
+
+static int mtk_qmda_debugfs_read_qos(struct seq_file *m, void *private)
+{
+ struct mtk_eth *eth = m->private;
+
+ if (eth->qos_toggle == 0)
+ pr_info("HQoS is disabled now!\n");
+ else if (eth->qos_toggle == 1)
+ pr_info("HQoS is enabled now!\n");
+ else if (eth->qos_toggle == 2)
+ pr_info("Per-port-per-queue mode is enabled!\n");
+
+ return 0;
+}
+
+static int mtk_qmda_debugfs_open_qos(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtk_qmda_debugfs_read_qos,
+ inode->i_private);
+}
+
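+/* Each scheduler rate-control word packs two schedulers: even IDs in
+ * bits 15:0, odd IDs in bits 31:16; MTK_QDMA_TX_4SCH_BASE(id) picks the
+ * word for a scheduler pair, and odd IDs shift by 16 below.
+ */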
+static ssize_t mtk_qmda_debugfs_read_qos_sched(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct mtk_eth *eth = _eth;
+ long id = (long)file->private_data;
+ char *buf;
+ unsigned int len = 0, buf_len = 1500;
+ int enable, scheduling, max_rate, exp, scheduler, i;
+ ssize_t ret_cnt;
+ u32 val;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (eth->soc->txrx.qdma_tx_sch == 4)
+ val = readl(eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+ else
+ val = readl(eth->base + MTK_QDMA_TX_2SCH_BASE);
+
+ if (id & 0x1)
+ val >>= 16;
+
+ val &= MTK_QDMA_TX_SCH_MASK;
+ enable = FIELD_GET(MTK_QDMA_TX_SCH_RATE_EN, val);
+ scheduling = FIELD_GET(MTK_QDMA_TX_SCH_MAX_WFQ, val);
+ max_rate = FIELD_GET(MTK_QDMA_TX_SCH_RATE_MAN, val);
+ exp = FIELD_GET(MTK_QDMA_TX_SCH_RATE_EXP, val);
+ while (exp--)
+ max_rate *= 10;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "EN\tScheduling\tMAX\tQueue#\n%d\t%s%16d\t", enable,
+ (scheduling == 1) ? "WRR" : "SP", max_rate);
+
+ for (i = 0; i < MTK_QDMA_TX_NUM; i++) {
+ val = readl(eth->base + MTK_QDMA_PAGE) & ~MTK_QTX_CFG_PAGE;
+ val |= FIELD_PREP(MTK_QTX_CFG_PAGE, i / MTK_QTX_PER_PAGE);
+ writel(val, eth->base + MTK_QDMA_PAGE);
+
+ val = readl(eth->base + MTK_QTX_SCH(i % MTK_QTX_PER_PAGE));
+ if (eth->soc->txrx.qdma_tx_sch == 4)
+ scheduler = FIELD_GET(MTK_QTX_SCH_TX_SCH_SEL_V2, val);
+ else
+ scheduler = FIELD_GET(MTK_QTX_SCH_TX_SCH_SEL, val);
+ if (id == scheduler)
+ len += scnprintf(buf + len, buf_len - len, "%d ", i);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
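+/* Write format: "<enable> <sp|wrr> <max_rate>", e.g. "1 sp 1000000";
+ * the rate is normalized below to the 7-bit mantissa plus base-10
+ * exponent form the hardware expects.
+ */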
+static ssize_t mtk_qmda_debugfs_write_qos_sched(struct file *file, const char __user *buf,
+ size_t length, loff_t *offset)
+{
+ struct mtk_eth *eth = _eth;
+ long id = (long)file->private_data;
+ char line[64] = {0}, scheduling[32];
+ int enable, rate, exp = 0, shift = 0;
+ size_t size;
+ u32 sch, val = 0;
+
+ if (length >= sizeof(line))
+ return -EINVAL;
+
+ if (copy_from_user(line, buf, length))
+ return -EFAULT;
+
+ if (sscanf(line, "%d %s %d", &enable, scheduling, &rate) != 3)
+ return -EFAULT;
+
+ while (rate > 127) {
+ rate /= 10;
+ exp++;
+ }
+
+ line[length] = '\0';
+
+ if (enable)
+ val |= FIELD_PREP(MTK_QDMA_TX_SCH_RATE_EN, 1);
+ if (strcmp(scheduling, "sp") != 0)
+ val |= FIELD_PREP(MTK_QDMA_TX_SCH_MAX_WFQ, 1);
+ val |= FIELD_PREP(MTK_QDMA_TX_SCH_RATE_MAN, rate);
+ val |= FIELD_PREP(MTK_QDMA_TX_SCH_RATE_EXP, exp);
+
+ if (id & 0x1)
+ shift = 16;
+
+ if (eth->soc->txrx.qdma_tx_sch == 4)
+ sch = readl(eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+ else
+ sch = readl(eth->base + MTK_QDMA_TX_2SCH_BASE);
+
+ sch &= ~(MTK_QDMA_TX_SCH_MASK << shift);
+ sch |= val << shift;
+ if (eth->soc->txrx.qdma_tx_sch == 4)
+ writel(sch, eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+ else
+ writel(sch, eth->base + MTK_QDMA_TX_2SCH_BASE);
+
+ size = strlen(line);
+ *offset += size;
+
+ return length;
+}
+
+static ssize_t mtk_qmda_debugfs_read_qos_queue(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct mtk_eth *eth = _eth;
+ long id = (long)file->private_data;
+ char *buf;
+ unsigned int len = 0, buf_len = 1500;
+ int min_rate_en, min_rate, min_rate_exp;
+ int max_rate_en, max_weight, max_rate, max_rate_exp;
+ u32 qtx_sch, qtx_cfg, scheduler, val;
+ ssize_t ret_cnt;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ val = readl(eth->base + MTK_QDMA_PAGE) & ~MTK_QTX_CFG_PAGE;
+ val |= FIELD_PREP(MTK_QTX_CFG_PAGE, id / MTK_QTX_PER_PAGE);
+ writel(val, eth->base + MTK_QDMA_PAGE);
+
+ qtx_cfg = readl(eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+ qtx_sch = readl(eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+ if (eth->soc->txrx.qdma_tx_sch == 4)
+ scheduler = FIELD_GET(MTK_QTX_SCH_TX_SCH_SEL_V2, qtx_sch);
+ else
+ scheduler = FIELD_GET(MTK_QTX_SCH_TX_SCH_SEL, qtx_sch);
+
+ min_rate_en = FIELD_GET(MTK_QTX_SCH_MIN_RATE_EN, qtx_sch);
+ min_rate = FIELD_GET(MTK_QTX_SCH_MIN_RATE_MAN, qtx_sch);
+ min_rate_exp = FIELD_GET(MTK_QTX_SCH_MIN_RATE_EXP, qtx_sch);
+ max_rate_en = FIELD_GET(MTK_QTX_SCH_MAX_RATE_EN, qtx_sch);
+ max_weight = FIELD_GET(MTK_QTX_SCH_MAX_RATE_WGHT, qtx_sch);
+ max_rate = FIELD_GET(MTK_QTX_SCH_MAX_RATE_MAN, qtx_sch);
+ max_rate_exp = FIELD_GET(MTK_QTX_SCH_MAX_RATE_EXP, qtx_sch);
+ while (min_rate_exp--)
+ min_rate *= 10;
+
+ while (max_rate_exp--)
+ max_rate *= 10;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "scheduler: %d\nhw resv: %d\nsw resv: %d\n", scheduler,
+ (qtx_cfg >> 8) & 0xff, qtx_cfg & 0xff);
+
+ /* Switch to debug mode */
+ val = readl(eth->base + MTK_QTX_MIB_IF) & ~MTK_MIB_ON_QTX_CFG;
+ val |= MTK_MIB_ON_QTX_CFG;
+ writel(val, eth->base + MTK_QTX_MIB_IF);
+
+ val = readl(eth->base + MTK_QTX_MIB_IF) & ~MTK_VQTX_MIB_EN;
+ val |= MTK_VQTX_MIB_EN;
+ writel(val, eth->base + MTK_QTX_MIB_IF);
+
+ qtx_cfg = readl(eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+ qtx_sch = readl(eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+
+ len += scnprintf(buf + len, buf_len - len,
+ "packet count: %u\n", qtx_cfg);
+ len += scnprintf(buf + len, buf_len - len,
+ "packet drop: %u\n\n", qtx_sch);
+
+ /* Recover to normal mode */
+ val = readl(eth->base + MTK_QTX_MIB_IF);
+ val &= ~MTK_MIB_ON_QTX_CFG;
+ writel(val, eth->base + MTK_QTX_MIB_IF);
+
+ val = readl(eth->base + MTK_QTX_MIB_IF);
+ val &= ~MTK_VQTX_MIB_EN;
+ writel(val, eth->base + MTK_QTX_MIB_IF);
+
+ len += scnprintf(buf + len, buf_len - len,
+ " EN RATE WEIGHT\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "----------------------------\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "max%5d%9d%9d\n", max_rate_en, max_rate, max_weight);
+ len += scnprintf(buf + len, buf_len - len,
+ "min%5d%9d -\n", min_rate_en, min_rate);
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+
+ return ret_cnt;
+}
+
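+/* Write format: "<scheduler> <min_enable> <min_rate> <max_enable>
+ * <max_rate> <weight> <resv>", matching the sscanf() below; rates are
+ * normalized to mantissa/exponent form as for the scheduler knob.
+ */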
+static ssize_t mtk_qmda_debugfs_write_qos_queue(struct file *file, const char __user *buf,
+ size_t length, loff_t *offset)
+{
+ struct mtk_eth *eth = _eth;
+ long id = (long)file->private_data;
+ char line[64] = {0};
+ int max_enable, max_rate, max_exp = 0;
+ int min_enable, min_rate, min_exp = 0;
+ int scheduler, weight, resv;
+ size_t size;
+ u32 val;
+
+ if (length >= sizeof(line))
+ return -EINVAL;
+
+ if (copy_from_user(line, buf, length))
+ return -EFAULT;
+
+ if (sscanf(line, "%d %d %d %d %d %d %d", &scheduler, &min_enable, &min_rate,
+ &max_enable, &max_rate, &weight, &resv) != 7)
+ return -EFAULT;
+
+ line[length] = '\0';
+
+ while (max_rate > 127) {
+ max_rate /= 10;
+ max_exp++;
+ }
+
+ while (min_rate > 127) {
+ min_rate /= 10;
+ min_exp++;
+ }
+
+ val = readl(eth->base + MTK_QDMA_PAGE) & ~MTK_QTX_CFG_PAGE;
+ val |= FIELD_PREP(MTK_QTX_CFG_PAGE, id / MTK_QTX_PER_PAGE);
+ writel(val, eth->base + MTK_QDMA_PAGE);
+
+ if (eth->soc->txrx.qdma_tx_sch == 4)
+ val = FIELD_PREP(MTK_QTX_SCH_TX_SCH_SEL_V2, scheduler);
+ else
+ val = FIELD_PREP(MTK_QTX_SCH_TX_SCH_SEL, scheduler);
+ if (min_enable)
+ val |= MTK_QTX_SCH_MIN_RATE_EN;
+ val |= FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, min_rate);
+ val |= FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, min_exp);
+ if (max_enable)
+ val |= MTK_QTX_SCH_MAX_RATE_EN;
+ val |= FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WGHT, weight);
+ val |= FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, max_rate);
+ val |= FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, max_exp);
+ writel(val, eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+
+ val = readl(eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+ val |= FIELD_PREP(MTK_QTX_CFG_HW_RESV_CNT_OFFSET, resv);
+ val |= FIELD_PREP(MTK_QTX_CFG_SW_RESV_CNT_OFFSET, resv);
+ writel(val, eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+
+ size = strlen(line);
+ *offset += size;
+
+ return length;
+}
+
+int mtk_qdma_debugfs_init(struct mtk_eth *eth)
+{
+ static const struct file_operations fops_qos = {
+ .open = mtk_qmda_debugfs_open_qos,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = mtk_qmda_debugfs_write_qos,
+ .release = single_release,
+ };
+
+ static const struct file_operations fops_qos_sched = {
+ .open = simple_open,
+ .read = mtk_qmda_debugfs_read_qos_sched,
+ .write = mtk_qmda_debugfs_write_qos_sched,
+ .llseek = default_llseek,
+ };
+
+ static const struct file_operations fops_qos_queue = {
+ .open = simple_open,
+ .read = mtk_qmda_debugfs_read_qos_queue,
+ .write = mtk_qmda_debugfs_write_qos_queue,
+ .llseek = default_llseek,
+ };
+
+ struct dentry *root;
+ long i;
+ char name[16];
+
+ _eth = eth;
+
+ root = debugfs_lookup("mtk_ppe", NULL);
+ if (!root)
+ return -ENOMEM;
+
+ debugfs_create_file("qos_toggle", S_IRUGO, root, eth, &fops_qos);
+
+ for (i = 0; i < eth->soc->txrx.qdma_tx_sch; i++) {
+ snprintf(name, sizeof(name), "qdma_sch%ld", i);
+ debugfs_create_file(name, S_IRUGO, root, (void *)i,
+ &fops_qos_sched);
+ }
+
+ for (i = 0; i < MTK_QDMA_TX_NUM; i++) {
+ snprintf(name, sizeof(name), "qdma_txq%ld", i);
+ debugfs_create_file(name, S_IRUGO, root, (void *)i,
+ &fops_qos_queue);
+ }
+
+ return 0;
+}
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 59b8736..c4eb45c 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -365,6 +365,7 @@ struct flow_cls_offload {
 struct flow_cls_common_offload common;
 enum flow_cls_command command;
 unsigned long cookie;
+ struct flow_offload *flow;
 struct flow_rule *rule;
 struct flow_stats stats;
 u32 classid;
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index 50f2f2e..ba34572 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -810,11 +810,13 @@ static int nf_flow_offload_alloc(const struct flow_offload_work *offload,
 }
 
 static void nf_flow_offload_init(struct flow_cls_offload *cls_flow,
+ struct flow_offload *flow,
 __be16 proto, int priority,
 enum flow_cls_command cmd,
 const struct flow_offload_tuple *tuple,
 struct netlink_ext_ack *extack)
 {
+ cls_flow->flow = flow;
 cls_flow->common.protocol = proto;
 cls_flow->common.prio = priority;
 cls_flow->common.extack = extack;
@@ -836,7 +838,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
 __be16 proto = ETH_P_ALL;
 int err, i = 0;
 
- nf_flow_offload_init(&cls_flow, proto, priority, cmd,
+ nf_flow_offload_init(&cls_flow, flow, proto, priority, cmd,
 &flow->tuplehash[dir].tuple, &extack);
 if (cmd == FLOW_CLS_REPLACE)
 cls_flow.rule = flow_rule->rule;
-- 
2.18.0
