From 9e21d6fa97e93cb2ba9b923810666ddaf7a981ee Mon Sep 17 00:00:00 2001
From: Bc-bocun Chen <bc-bocun.chen@mediatek.com>
Date: Mon, 18 Sep 2023 11:11:03 +0800
Subject: [PATCH 13/22] flow-offload-add-mtkhnat-qdma-qos

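Add QDMA TX queue scheduling and shaping support for MediaTek HNAT
flow offload: remember the source port of an unbound FOE entry in IB1,
let offloaded flows pick a QDMA TX queue (qid) either per DSA port
(per-port-per-queue mode) or from the conntrack mark, and expose the
QDMA TX scheduler/queue configuration through debugfs.

The knobs live under the existing mtk_ppe debugfs directory. A usage
sketch, derived from the handlers added below (assuming debugfs is
mounted at /sys/kernel/debug; rates are stored as a mantissa scaled by
a power-of-ten exponent that the driver computes from the value
written):

  # 0: disable HQoS, 1: HQoS mode, 2: per-port-per-queue (PPPQ) mode,
  # where DSA ports 0~5 map to qid 0~5 on scheduler 0
  echo 2 > /sys/kernel/debug/mtk_ppe/qos_toggle

  # scheduler N: <rate limiting enable> <sp|wrr> <max rate>
  echo "1 sp 1000000" > /sys/kernel/debug/mtk_ppe/qdma_sch0

  # queue N: <scheduler> <min enable> <min rate> <max enable>
  #          <max rate> <weight> <resv>
  echo "0 1 10000 1 1000000 4 4" > /sys/kernel/debug/mtk_ppe/qdma_txq3

  # reading a file back dumps the current configuration
  cat /sys/kernel/debug/mtk_ppe/qdma_txq3

In HQoS mode the qid is taken from the conntrack mark (low 16 bits for
the downlink queue, high 16 bits for flows leaving the second GMAC),
so per-connection queues can be selected with, e.g., iptables CONNMARK.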
---
 drivers/net/ethernet/mediatek/Makefile       |   2 +-
 drivers/net/ethernet/mediatek/mtk_eth_soc.c  |  10 +
 drivers/net/ethernet/mediatek/mtk_eth_soc.h  |  48 ++-
 drivers/net/ethernet/mediatek/mtk_ppe.c      |  48 +-
 drivers/net/ethernet/mediatek/mtk_ppe.h      |   4 +
 .../net/ethernet/mediatek/mtk_ppe_offload.c  |  28 +-
 .../net/ethernet/mediatek/mtk_qdma_debugfs.c | 439 ++++++++++++++++++
 include/net/flow_offload.h                   |   1 +
 net/netfilter/nf_flow_table_offload.c        |   4 +-
 9 files changed, 593 insertions(+), 6 deletions(-)
 create mode 100644 drivers/net/ethernet/mediatek/mtk_qdma_debugfs.c

diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index fdbb90f..c7d2296 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -5,7 +5,7 @@
 
 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
 mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_usxgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o \
-	     mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
+	     mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o mtk_qdma_debugfs.o
 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
 ifdef CONFIG_DEBUG_FS
 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 9d8ce07..0f6613b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -5266,6 +5266,8 @@ static int mtk_probe(struct platform_device *pdev)
 		}
 
 		mtk_ppe_debugfs_init(eth);
+
+		mtk_qdma_debugfs_init(eth);
 	}
 
 	for (i = 0; i < MTK_MAX_DEVS; i++) {
@@ -5371,6 +5373,7 @@ static const struct mtk_soc_data mt2701_data = {
 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+		.qdma_tx_sch = 2,
 	},
 };
 
@@ -5391,6 +5394,7 @@ static const struct mtk_soc_data mt7621_data = {
 		.rxd_size = sizeof(struct mtk_rx_dma),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+		.qdma_tx_sch = 2,
 	},
 };
 
@@ -5412,6 +5416,7 @@ static const struct mtk_soc_data mt7622_data = {
 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+		.qdma_tx_sch = 2,
 	},
 };
 
@@ -5432,6 +5437,7 @@ static const struct mtk_soc_data mt7623_data = {
 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+		.qdma_tx_sch = 2,
 	},
 };
 
@@ -5472,6 +5478,7 @@ static const struct mtk_soc_data mt7986_data = {
 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
+		.qdma_tx_sch = 4,
 	},
 };
 
@@ -5493,6 +5500,7 @@ static const struct mtk_soc_data mt7981_data = {
 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
+		.qdma_tx_sch = 4,
 	},
 };
 
@@ -5511,6 +5519,7 @@ static const struct mtk_soc_data mt7988_data = {
 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
+		.qdma_tx_sch = 4,
 	},
 };
 
@@ -5529,6 +5538,7 @@ static const struct mtk_soc_data rt5350_data = {
 		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+		.qdma_tx_sch = 4,
 	},
 };
 
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 101c233..7ea380e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -458,6 +471,9 @@
 #define FC_THRES_DROP_EN	(7 << 16)
 #define FC_THRES_MIN		0x4444
 
+/* QDMA TX Scheduler Rate Control Register */
+#define MTK_QDMA_TX_2SCH_BASE	(QDMA_BASE + 0x214)
+
 /* QDMA Interrupt Status Register */
 #define MTK_QDMA_INT_STATUS	(QDMA_BASE + 0x218)
 #if defined(CONFIG_MEDIATEK_NETSYS_RX_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
@@ -494,6 +510,11 @@
 /* QDMA Interrupt Mask Register */
 #define MTK_QDMA_HRED2		(QDMA_BASE + 0x244)
 
+/* QDMA TX Queue MIB Interface Register */
+#define MTK_QTX_MIB_IF		(QDMA_BASE + 0x2bc)
+#define MTK_MIB_ON_QTX_CFG	BIT(31)
+#define MTK_VQTX_MIB_EN		BIT(28)
+
 /* QDMA TX Forward CPU Pointer Register */
 #define MTK_QTX_CTX_PTR		(QDMA_BASE +0x300)
 
@@ -521,6 +542,14 @@
 /* QDMA FQ Free Page Buffer Length Register */
 #define MTK_QDMA_FQ_BLEN	(QDMA_BASE +0x32c)
 
+/* QDMA TX Scheduler Rate Control Register */
+#define MTK_QDMA_TX_4SCH_BASE(x)	(QDMA_BASE + 0x398 + (((x) >> 1) * 0x4))
+#define MTK_QDMA_TX_SCH_MASK		GENMASK(15, 0)
+#define MTK_QDMA_TX_SCH_MAX_WFQ		BIT(15)
+#define MTK_QDMA_TX_SCH_RATE_EN		BIT(11)
+#define MTK_QDMA_TX_SCH_RATE_MAN	GENMASK(10, 4)
+#define MTK_QDMA_TX_SCH_RATE_EXP	GENMASK(3, 0)
+
 /* WDMA Registers */
 #define MTK_WDMA_CTX_PTR(x)	(WDMA_BASE(x) + 0x8)
 #define MTK_WDMA_DTX_PTR(x)	(WDMA_BASE(x) + 0xC)
@@ -1690,6 +1719,7 @@ struct mtk_soc_data {
 		u32	rx_dma_l4_valid;
 		u32	dma_max_len;
 		u32	dma_len_offset;
+		u32	qdma_tx_sch;
 	} txrx;
 };
 
@@ -1879,6 +1909,7 @@ struct mtk_eth {
 	spinlock_t			syscfg0_lock;
 	struct timer_list		mtk_dma_monitor_timer;
 
+	u8				qos_toggle;
 	u8				ppe_num;
 	struct mtk_ppe			*ppe[MTK_MAX_PPE_NUM];
 	struct rhashtable		flow_table;
@@ -1936,6 +1967,36 @@ extern const struct of_device_id of_mtk_match[];
 extern u32 mtk_hwlro_stats_ebl;
 extern u32 dbg_show_level;
 
+static inline void mtk_set_ib1_sp(struct mtk_eth *eth, struct mtk_foe_entry *foe, u32 val)
+{
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+	foe->ib1 |= FIELD_PREP(MTK_FOE_IB1_UNBIND_SRC_PORT, val);
+#endif
+}
+
+static inline u32 mtk_get_ib1_sp(struct mtk_eth *eth, struct mtk_foe_entry *foe)
+{
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+	return FIELD_GET(MTK_FOE_IB1_UNBIND_SRC_PORT, foe->ib1);
+#else
+	return 0;
+#endif
+}
+
+static inline int
+mtk_ppe_check_pppq_path(struct mtk_eth *eth, struct mtk_foe_entry *foe, int dsa_port)
+{
+	u32 sp = mtk_get_ib1_sp(eth, foe);
+
+	if ((dsa_port >= 0 && dsa_port <= 4) ||
+	    (dsa_port == 5 && (sp == PSE_WDMA0_PORT ||
+			       sp == PSE_WDMA1_PORT ||
+			       sp == PSE_WDMA2_PORT)))
+		return 1;
+
+	return 0;
+}
+
 /* read the hardware status register */
 void mtk_stats_update_mac(struct mtk_mac *mac);
 
@@ -1969,4 +2028,6 @@ void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
 u32 mtk_rss_indr_table(struct mtk_rss_params *rss_params, int index);
 
 int mtk_ppe_debugfs_init(struct mtk_eth *eth);
+
+int mtk_qdma_debugfs_init(struct mtk_eth *eth);
 #endif /* MTK_ETH_H */
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index f55a95c..6d6c1e4 100755
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -128,7 +128,7 @@ static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
 		enable * MTK_PPE_CACHE_CTL_EN);
 }
 
-static u32 mtk_ppe_hash_entry(struct mtk_ppe *ppe, struct mtk_foe_entry *e)
+u32 mtk_ppe_hash_entry(struct mtk_ppe *ppe, struct mtk_foe_entry *e)
 {
 	u32 hv1, hv2, hv3;
 	u32 hash;
@@ -420,12 +420,58 @@ int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
 	return 0;
 }
 
+int mtk_foe_entry_set_qid(struct mtk_foe_entry *entry, int qid)
+{
+	u32 *ib2 = mtk_foe_entry_ib2(entry);
+
+	*ib2 &= ~MTK_FOE_IB2_QID;
+	*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, qid);
+	*ib2 |= MTK_FOE_IB2_PSE_QOS;
+
+	return 0;
+}
 static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
 {
 	return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
 	       FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
 }
 
+bool mtk_foe_entry_match(struct mtk_foe_entry *entry, struct mtk_foe_entry *data)
+{
+	int type, len;
+
+	if ((data->ib1 ^ entry->ib1) & MTK_FOE_IB1_UDP)
+		return false;
+
+	type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
+	else
+		len = offsetof(struct mtk_foe_entry, ipv4.ib2);
+
+	return !memcmp(&entry->data, &data->data, len - 4);
+}
+
+int mtk_foe_entry_set_sp(struct mtk_ppe *ppe, struct mtk_foe_entry *entry)
+{
+	struct mtk_foe_entry *hwe;
+	u32 hash, sp = 0;
+	int i;
+
+	hash = mtk_ppe_hash_entry(ppe, entry);
+	for (i = 0; i < ppe->way; i++) {
+		hwe = &ppe->foe_table[hash + i];
+		if (mtk_foe_entry_match(hwe, entry)) {
+			sp = mtk_get_ib1_sp(ppe->eth, hwe);
+			break;
+		}
+	}
+
+	mtk_set_ib1_sp(ppe->eth, entry, sp);
+
+	return 0;
+}
+
 static bool
 mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
 {
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 86288b0..5ab864f 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -403,9 +403,13 @@ int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
 int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
 int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
			   int bss, int wcid);
+int mtk_foe_entry_set_qid(struct mtk_foe_entry *entry, int qid);
+bool mtk_foe_entry_match(struct mtk_foe_entry *entry, struct mtk_foe_entry *data);
+int mtk_foe_entry_set_sp(struct mtk_ppe *ppe, struct mtk_foe_entry *entry);
 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index, struct mtk_foe_accounting *diff);
+u32 mtk_ppe_hash_entry(struct mtk_ppe *ppe, struct mtk_foe_entry *e);
 
 #endif
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index b80f72d..3bc50a4 100755
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -9,6 +9,8 @@
 #include <linux/ipv6.h>
 #include <net/flow_offload.h>
 #include <net/pkt_cls.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_flow_table.h>
 #include <net/dsa.h>
 #include "mtk_eth_soc.h"
 #include "mtk_wed.h"
@@ -183,7 +185,7 @@ mtk_flow_get_dsa_port(struct net_device **dev)
 
 static int
 mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
-			   struct net_device *dev, const u8 *dest_mac,
+			   struct net_device *dev, struct nf_conn *ct, const u8 *dest_mac,
			   int *wed_index)
 {
	struct mtk_wdma_info info = {};
@@ -209,6 +211,9 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
	if (dsa_port >= 0)
		mtk_foe_entry_set_dsa(foe, dsa_port);
 
+	if (eth->qos_toggle == 2 && mtk_ppe_check_pppq_path(eth, foe, dsa_port))
+		mtk_foe_entry_set_qid(foe, dsa_port & MTK_QDMA_TX_MASK);
+
	if (dev == eth->netdev[0])
		pse_port = PSE_GDM1_PORT;
	else if (dev == eth->netdev[1])
@@ -217,6 +222,23 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
		return -EOPNOTSUPP;
 
 out:
+	if (eth->qos_toggle == 1 || (ct->mark & MTK_QDMA_TX_MASK) >= 6) {
+		u8 qos_ul_toggle;
+
+		if (eth->qos_toggle == 2)
+			qos_ul_toggle = ((ct->mark >> 16) & MTK_QDMA_TX_MASK) >= 6 ? 1 : 0;
+		else
+			qos_ul_toggle = ((ct->mark >> 16) & MTK_QDMA_TX_MASK) >= 1 ? 1 : 0;
+
+		if (qos_ul_toggle == 1) {
+			if (dev == eth->netdev[1])
+				mtk_foe_entry_set_qid(foe, (ct->mark >> 16) & MTK_QDMA_TX_MASK);
+			else
+				mtk_foe_entry_set_qid(foe, ct->mark & MTK_QDMA_TX_MASK);
+		} else
+			mtk_foe_entry_set_qid(foe, ct->mark & MTK_QDMA_TX_MASK);
+	}
+
	mtk_foe_entry_set_pse_port(foe, pse_port);
 
	return 0;
@@ -447,7 +469,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
	if (data.pppoe.num == 1)
		mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
 
-	err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
+	mtk_foe_entry_set_sp(eth->ppe[ppe_index], &foe);
+
+	err = mtk_flow_set_output_device(eth, &foe, odev, f->flow->ct, data.eth.h_dest,
					 &wed_index);
	if (err)
		return err;
diff --git a/drivers/net/ethernet/mediatek/mtk_qdma_debugfs.c b/drivers/net/ethernet/mediatek/mtk_qdma_debugfs.c
new file mode 100644
index 0000000..3a7c585
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_qdma_debugfs.c
@@ -0,0 +1,439 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Henry Yen <henry.yen@mediatek.com>
+ *         Bo-Cun Chen <bc-bocun.chen@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include "mtk_eth_soc.h"
+
+#define MAX_PPPQ_PORT_NUM	6
+
+static struct mtk_eth *_eth;
+
+static void mtk_qdma_qos_shaper_ebl(struct mtk_eth *eth, u32 id, u32 enable)
+{
+	u32 val;
+
+	if (enable) {
+		val = MTK_QTX_SCH_MIN_RATE_EN | MTK_QTX_SCH_MAX_RATE_EN;
+		val |= FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
+		       FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
+		       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 25) |
+		       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
+		       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 4);
+
+		writel(val, eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+	} else {
+		writel(0, eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+	}
+}
+
+static void mtk_qdma_qos_disable(struct mtk_eth *eth)
+{
+	u32 id, val;
+
+	for (id = 0; id < MAX_PPPQ_PORT_NUM; id++) {
+		mtk_qdma_qos_shaper_ebl(eth, id, 0);
+
+		writel(FIELD_PREP(MTK_QTX_CFG_HW_RESV_CNT_OFFSET, 4) |
+		       FIELD_PREP(MTK_QTX_CFG_SW_RESV_CNT_OFFSET, 4),
+		       eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+	}
+
+	val = (MTK_QDMA_TX_SCH_MAX_WFQ) | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
+	for (id = 0; id < eth->soc->txrx.qdma_tx_sch; id += 2) {
+		if (eth->soc->txrx.qdma_tx_sch == 4)
+			writel(val, eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+		else
+			writel(val, eth->base + MTK_QDMA_TX_2SCH_BASE);
+	}
+}
+
+static void mtk_qdma_qos_pppq_enable(struct mtk_eth *eth)
+{
+	u32 id, val;
+
+	for (id = 0; id < MAX_PPPQ_PORT_NUM; id++) {
+		mtk_qdma_qos_shaper_ebl(eth, id, 1);
+
+		writel(FIELD_PREP(MTK_QTX_CFG_HW_RESV_CNT_OFFSET, 4) |
+		       FIELD_PREP(MTK_QTX_CFG_SW_RESV_CNT_OFFSET, 4),
+		       eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+	}
+
+	val = (MTK_QDMA_TX_SCH_MAX_WFQ) | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
+	for (id = 0; id < eth->soc->txrx.qdma_tx_sch; id += 2) {
+		if (eth->soc->txrx.qdma_tx_sch == 4)
+			writel(val, eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+		else
+			writel(val, eth->base + MTK_QDMA_TX_2SCH_BASE);
+	}
+}
+
+static ssize_t mtk_qdma_debugfs_write_qos(struct file *file, const char __user *buffer,
+					  size_t count, loff_t *data)
+{
+	struct seq_file *m = file->private_data;
+	struct mtk_eth *eth = m->private;
+	char buf[8];
+	int len = count;
+
+	if ((len > 8) || copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	if (buf[0] == '0') {
+		pr_info("HQoS is going to be disabled!\n");
+		eth->qos_toggle = 0;
+		mtk_qdma_qos_disable(eth);
+	} else if (buf[0] == '1') {
+		pr_info("HQoS mode is going to be enabled!\n");
+		eth->qos_toggle = 1;
+	} else if (buf[0] == '2') {
+		pr_info("Per-port-per-queue mode is going to be enabled!\n");
+		pr_info("PPPQ uses qid 0~5 (scheduler 0).\n");
+		eth->qos_toggle = 2;
+		mtk_qdma_qos_pppq_enable(eth);
+	}
+
+	return len;
+}
+
+static int mtk_qdma_debugfs_read_qos(struct seq_file *m, void *private)
+{
+	struct mtk_eth *eth = m->private;
+
+	if (eth->qos_toggle == 0)
+		pr_info("HQoS is disabled now!\n");
+	else if (eth->qos_toggle == 1)
+		pr_info("HQoS is enabled now!\n");
+	else if (eth->qos_toggle == 2)
+		pr_info("Per-port-per-queue mode is enabled!\n");
+
+	return 0;
+}
+
+static int mtk_qdma_debugfs_open_qos(struct inode *inode, struct file *file)
+{
+	return single_open(file, mtk_qdma_debugfs_read_qos,
+			   inode->i_private);
+}
+
+static ssize_t mtk_qdma_debugfs_read_qos_sched(struct file *file, char __user *user_buf,
+					       size_t count, loff_t *ppos)
+{
+	struct mtk_eth *eth = _eth;
+	long id = (long)file->private_data;
+	char *buf;
+	unsigned int len = 0, buf_len = 1500;
+	int enable, scheduling, max_rate, exp, scheduler, i;
+	ssize_t ret_cnt;
+	u32 val;
+
+	buf = kzalloc(buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (eth->soc->txrx.qdma_tx_sch == 4)
+		val = readl(eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+	else
+		val = readl(eth->base + MTK_QDMA_TX_2SCH_BASE);
+
+	if (id & 0x1)
+		val >>= 16;
+
+	val &= MTK_QDMA_TX_SCH_MASK;
+	enable = FIELD_GET(MTK_QDMA_TX_SCH_RATE_EN, val);
+	scheduling = FIELD_GET(MTK_QDMA_TX_SCH_MAX_WFQ, val);
+	max_rate = FIELD_GET(MTK_QDMA_TX_SCH_RATE_MAN, val);
+	exp = FIELD_GET(MTK_QDMA_TX_SCH_RATE_EXP, val);
+	while (exp--)
+		max_rate *= 10;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "EN\tScheduling\tMAX\tQueue#\n%d\t%s%16d\t", enable,
+			 (scheduling == 1) ? "WRR" : "SP", max_rate);
+
+	for (i = 0; i < MTK_QDMA_TX_NUM; i++) {
+		val = readl(eth->base + MTK_QDMA_PAGE) & ~MTK_QTX_CFG_PAGE;
+		val |= FIELD_PREP(MTK_QTX_CFG_PAGE, i / MTK_QTX_PER_PAGE);
+		writel(val, eth->base + MTK_QDMA_PAGE);
+
+		val = readl(eth->base + MTK_QTX_SCH(i % MTK_QTX_PER_PAGE));
+		if (eth->soc->txrx.qdma_tx_sch == 4)
+			scheduler = FIELD_GET(MTK_QTX_SCH_TX_SEL_V2, val);
+		else
+			scheduler = FIELD_GET(MTK_QTX_SCH_TX_SEL, val);
+		if (id == scheduler)
+			len += scnprintf(buf + len, buf_len - len, "%d ", i);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	if (len > buf_len)
+		len = buf_len;
+
+	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+	kfree(buf);
+	return ret_cnt;
+}
+
+static ssize_t mtk_qdma_debugfs_write_qos_sched(struct file *file, const char __user *buf,
+						size_t length, loff_t *offset)
+{
+	struct mtk_eth *eth = _eth;
+	long id = (long)file->private_data;
+	char line[64] = {0}, scheduling[32];
+	int enable, rate, exp = 0, shift = 0;
+	size_t size;
+	u32 sch, val = 0;
+
+	if (length >= sizeof(line))
+		return -EINVAL;
+
+	if (copy_from_user(line, buf, length))
+		return -EFAULT;
+
+	if (sscanf(line, "%d %31s %d", &enable, scheduling, &rate) != 3)
+		return -EFAULT;
+
+	while (rate > 127) {
+		rate /= 10;
+		exp++;
+	}
+
+	line[length] = '\0';
+
+	if (enable)
+		val |= FIELD_PREP(MTK_QDMA_TX_SCH_RATE_EN, 1);
+	if (strcmp(scheduling, "sp") != 0)
+		val |= FIELD_PREP(MTK_QDMA_TX_SCH_MAX_WFQ, 1);
+	val |= FIELD_PREP(MTK_QDMA_TX_SCH_RATE_MAN, rate);
+	val |= FIELD_PREP(MTK_QDMA_TX_SCH_RATE_EXP, exp);
+
+	if (id & 0x1)
+		shift = 16;
+
+	if (eth->soc->txrx.qdma_tx_sch == 4)
+		sch = readl(eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+	else
+		sch = readl(eth->base + MTK_QDMA_TX_2SCH_BASE);
+
+	sch &= ~(MTK_QDMA_TX_SCH_MASK << shift);
+	sch |= val << shift;
+	if (eth->soc->txrx.qdma_tx_sch == 4)
+		writel(sch, eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+	else
+		writel(sch, eth->base + MTK_QDMA_TX_2SCH_BASE);
+
+	size = strlen(line);
+	*offset += size;
+
+	return length;
+}
+
+static ssize_t mtk_qdma_debugfs_read_qos_queue(struct file *file, char __user *user_buf,
+					       size_t count, loff_t *ppos)
+{
+	struct mtk_eth *eth = _eth;
+	long id = (long)file->private_data;
+	char *buf;
+	unsigned int len = 0, buf_len = 1500;
+	int min_rate_en, min_rate, min_rate_exp;
+	int max_rate_en, max_weight, max_rate, max_rate_exp;
+	u32 qtx_sch, qtx_cfg, scheduler, val;
+	ssize_t ret_cnt;
+
+	buf = kzalloc(buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	val = readl(eth->base + MTK_QDMA_PAGE) & ~MTK_QTX_CFG_PAGE;
+	val |= FIELD_PREP(MTK_QTX_CFG_PAGE, id / MTK_QTX_PER_PAGE);
+	writel(val, eth->base + MTK_QDMA_PAGE);
+
+	qtx_cfg = readl(eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+	qtx_sch = readl(eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+	if (eth->soc->txrx.qdma_tx_sch == 4)
+		scheduler = FIELD_GET(MTK_QTX_SCH_TX_SEL_V2, qtx_sch);
+	else
+		scheduler = FIELD_GET(MTK_QTX_SCH_TX_SEL, qtx_sch);
+
+	min_rate_en = FIELD_GET(MTK_QTX_SCH_MIN_RATE_EN, qtx_sch);
+	min_rate = FIELD_GET(MTK_QTX_SCH_MIN_RATE_MAN, qtx_sch);
+	min_rate_exp = FIELD_GET(MTK_QTX_SCH_MIN_RATE_EXP, qtx_sch);
+	max_rate_en = FIELD_GET(MTK_QTX_SCH_MAX_RATE_EN, qtx_sch);
+	max_weight = FIELD_GET(MTK_QTX_SCH_MAX_RATE_WEIGHT, qtx_sch);
+	max_rate = FIELD_GET(MTK_QTX_SCH_MAX_RATE_MAN, qtx_sch);
+	max_rate_exp = FIELD_GET(MTK_QTX_SCH_MAX_RATE_EXP, qtx_sch);
+	while (min_rate_exp--)
+		min_rate *= 10;
+
+	while (max_rate_exp--)
+		max_rate *= 10;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "scheduler: %d\nhw resv: %d\nsw resv: %d\n", scheduler,
+			 (qtx_cfg >> 8) & 0xff, qtx_cfg & 0xff);
+
+	/* Switch to debug mode */
+	val = readl(eth->base + MTK_QTX_MIB_IF) & ~MTK_MIB_ON_QTX_CFG;
+	val |= MTK_MIB_ON_QTX_CFG;
+	writel(val, eth->base + MTK_QTX_MIB_IF);
+
+	val = readl(eth->base + MTK_QTX_MIB_IF) & ~MTK_VQTX_MIB_EN;
+	val |= MTK_VQTX_MIB_EN;
+	writel(val, eth->base + MTK_QTX_MIB_IF);
+
+	qtx_cfg = readl(eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+	qtx_sch = readl(eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "packet count: %u\n", qtx_cfg);
+	len += scnprintf(buf + len, buf_len - len,
+			 "packet drop: %u\n\n", qtx_sch);
+
+	/* Recover to normal mode */
+	val = readl(eth->base + MTK_QTX_MIB_IF);
+	val &= ~MTK_MIB_ON_QTX_CFG;
+	writel(val, eth->base + MTK_QTX_MIB_IF);
+
+	val = readl(eth->base + MTK_QTX_MIB_IF);
+	val &= ~MTK_VQTX_MIB_EN;
+	writel(val, eth->base + MTK_QTX_MIB_IF);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "      EN     RATE   WEIGHT\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "----------------------------\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "max%5d%9d%9d\n", max_rate_en, max_rate, max_weight);
+	len += scnprintf(buf + len, buf_len - len,
+			 "min%5d%9d        -\n", min_rate_en, min_rate);
+
+	if (len > buf_len)
+		len = buf_len;
+
+	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+	kfree(buf);
+
+	return ret_cnt;
+}
+
+static ssize_t mtk_qdma_debugfs_write_qos_queue(struct file *file, const char __user *buf,
+						size_t length, loff_t *offset)
+{
+	struct mtk_eth *eth = _eth;
+	long id = (long)file->private_data;
+	char line[64] = {0};
+	int max_enable, max_rate, max_exp = 0;
+	int min_enable, min_rate, min_exp = 0;
+	int scheduler, weight, resv;
+	size_t size;
+	u32 val;
+
+	if (length >= sizeof(line))
+		return -EINVAL;
+
+	if (copy_from_user(line, buf, length))
+		return -EFAULT;
+
+	if (sscanf(line, "%d %d %d %d %d %d %d", &scheduler, &min_enable, &min_rate,
+		   &max_enable, &max_rate, &weight, &resv) != 7)
+		return -EFAULT;
+
+	line[length] = '\0';
+
+	while (max_rate > 127) {
+		max_rate /= 10;
+		max_exp++;
+	}
+
+	while (min_rate > 127) {
+		min_rate /= 10;
+		min_exp++;
+	}
+
+	val = readl(eth->base + MTK_QDMA_PAGE) & ~MTK_QTX_CFG_PAGE;
+	val |= FIELD_PREP(MTK_QTX_CFG_PAGE, id / MTK_QTX_PER_PAGE);
+	writel(val, eth->base + MTK_QDMA_PAGE);
+
+	if (eth->soc->txrx.qdma_tx_sch == 4)
+		val = FIELD_PREP(MTK_QTX_SCH_TX_SEL_V2, scheduler);
+	else
+		val = FIELD_PREP(MTK_QTX_SCH_TX_SEL, scheduler);
+	if (min_enable)
+		val |= MTK_QTX_SCH_MIN_RATE_EN;
+	val |= FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, min_rate);
+	val |= FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, min_exp);
+	if (max_enable)
+		val |= MTK_QTX_SCH_MAX_RATE_EN;
+	val |= FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, weight);
+	val |= FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, max_rate);
+	val |= FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, max_exp);
+	writel(val, eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+
+	val = readl(eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+	val &= ~(MTK_QTX_CFG_HW_RESV_CNT_OFFSET | MTK_QTX_CFG_SW_RESV_CNT_OFFSET);
+	val |= FIELD_PREP(MTK_QTX_CFG_HW_RESV_CNT_OFFSET, resv) | FIELD_PREP(MTK_QTX_CFG_SW_RESV_CNT_OFFSET, resv);
+	writel(val, eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+
+	size = strlen(line);
+	*offset += size;
+
+	return length;
+}
+
+int mtk_qdma_debugfs_init(struct mtk_eth *eth)
+{
+	static const struct file_operations fops_qos = {
+		.open = mtk_qdma_debugfs_open_qos,
+		.read = seq_read,
+		.llseek = seq_lseek,
+		.write = mtk_qdma_debugfs_write_qos,
+		.release = single_release,
+	};
+
+	static const struct file_operations fops_qos_sched = {
+		.open = simple_open,
+		.read = mtk_qdma_debugfs_read_qos_sched,
+		.write = mtk_qdma_debugfs_write_qos_sched,
+		.llseek = default_llseek,
+	};
+
+	static const struct file_operations fops_qos_queue = {
+		.open = simple_open,
+		.read = mtk_qdma_debugfs_read_qos_queue,
+		.write = mtk_qdma_debugfs_write_qos_queue,
+		.llseek = default_llseek,
+	};
+
+	struct dentry *root;
+	long i;
+	char name[16];
+
+	_eth = eth;
+
+	root = debugfs_lookup("mtk_ppe", NULL);
+	if (!root)
+		return -ENOMEM;
+
+	debugfs_create_file("qos_toggle", S_IRUGO, root, eth, &fops_qos);
+
+	for (i = 0; i < eth->soc->txrx.qdma_tx_sch; i++) {
+		snprintf(name, sizeof(name), "qdma_sch%ld", i);
+		debugfs_create_file(name, S_IRUGO, root, (void *)i,
+				    &fops_qos_sched);
+	}
+
+	for (i = 0; i < MTK_QDMA_TX_NUM; i++) {
+		snprintf(name, sizeof(name), "qdma_txq%ld", i);
+		debugfs_create_file(name, S_IRUGO, root, (void *)i,
+				    &fops_qos_queue);
+	}
+
+	return 0;
+}
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 59b8736..c4eb45c 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -365,6 +365,7 @@ struct flow_cls_offload {
 	struct flow_cls_common_offload common;
 	enum flow_cls_command command;
 	unsigned long cookie;
+	struct flow_offload *flow;
 	struct flow_rule *rule;
 	struct flow_stats stats;
 	u32 classid;
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index 50f2f2e..ba34572 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -810,11 +810,13 @@ static int nf_flow_offload_alloc(const struct flow_offload_work *offload,
 }
 
 static void nf_flow_offload_init(struct flow_cls_offload *cls_flow,
+				 struct flow_offload *flow,
				 __be16 proto, int priority,
				 enum flow_cls_command cmd,
				 const struct flow_offload_tuple *tuple,
				 struct netlink_ext_ack *extack)
 {
+	cls_flow->flow = flow;
 	cls_flow->common.protocol = proto;
 	cls_flow->common.prio = priority;
 	cls_flow->common.extack = extack;
@@ -836,7 +838,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
 	__be16 proto = ETH_P_ALL;
 	int err, i = 0;
 
-	nf_flow_offload_init(&cls_flow, proto, priority, cmd,
+	nf_flow_offload_init(&cls_flow, flow, proto, priority, cmd,
			     &flow->tuplehash[dir].tuple, &extack);
 	if (cmd == FLOW_CLS_REPLACE)
 		cls_flow.rule = flow_rule->rule;
-- 
2.18.0