--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -246,6 +246,9 @@ static const char * const mtk_clks_sourc
 	"top_netsys_warp_sel",
 };
 
+struct net_device *(*mtk_get_tnl_dev)(int tnl_idx) = NULL;
+EXPORT_SYMBOL(mtk_get_tnl_dev);
+
 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
 {
 	__raw_writel(val, eth->base + reg);
@@ -2172,6 +2175,7 @@ static int mtk_poll_rx(struct napi_struc
 	u64 addr64 = 0;
 	u8 *data, *new_data;
 	struct mtk_rx_dma_v2 *rxd, trxd;
+	int tnl_idx = 0;
 	int done = 0;
 
 	if (unlikely(!ring))
@@ -2215,11 +2219,20 @@ static int mtk_poll_rx(struct napi_struc
 				0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
 		}
 
-		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
-			     !eth->netdev[mac]))
-			goto release_desc;
+		tnl_idx = RX_DMA_GET_TOPS_CRSN(trxd.rxd6);
+		if (mtk_get_tnl_dev && tnl_idx) {
+			netdev = mtk_get_tnl_dev(tnl_idx);
+			if (unlikely(IS_ERR(netdev)))
+				netdev = NULL;
+		}
 
-		netdev = eth->netdev[mac];
+		if (!netdev) {
+			if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
+				     !eth->netdev[mac]))
+				goto release_desc;
+
+			netdev = eth->netdev[mac];
+		}
 
 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
 			goto release_desc;
@@ -2304,6 +2317,8 @@ static int mtk_poll_rx(struct napi_struc
 		skb_hnat_alg(skb) = 0;
 		skb_hnat_filled(skb) = 0;
 		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
+		skb_hnat_set_tops(skb, 0);
+		skb_hnat_set_is_decap(skb, 0);
 
 		if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
 			trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
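
The hunks above only dereference mtk_get_tnl_dev; the pointer itself is meant to be installed by an external tunnel-offload (TOPS) module, which is not part of this patch. A minimal sketch of such a module follows — my_tnl_netdev, MY_TNL_MAX and the function names are hypothetical. Returning an ERR_PTR on failure is safe because mtk_poll_rx() falls back to the GMAC netdev on IS_ERR():

#include <linux/err.h>
#include <linux/module.h>
#include <linux/netdevice.h>

extern struct net_device *(*mtk_get_tnl_dev)(int tnl_idx);

#define MY_TNL_MAX 32			/* hypothetical table size */
static struct net_device *my_tnl_netdev[MY_TNL_MAX];

static struct net_device *my_get_tnl_dev(int tnl_idx)
{
	/* tnl_idx comes from RX_DMA_GET_TOPS_CRSN(); 0 means "no tunnel" */
	if (tnl_idx <= 0 || tnl_idx >= MY_TNL_MAX || !my_tnl_netdev[tnl_idx])
		return ERR_PTR(-ENODEV);

	return my_tnl_netdev[tnl_idx];
}

static int __init my_tnl_init(void)
{
	mtk_get_tnl_dev = my_get_tnl_dev;
	return 0;
}

static void __exit my_tnl_exit(void)
{
	mtk_get_tnl_dev = NULL;
}

module_init(my_tnl_init);
module_exit(my_tnl_exit);
MODULE_LICENSE("GPL");
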
--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
@@ -43,6 +43,12 @@ void (*ppe_dev_register_hook)(struct net
 EXPORT_SYMBOL(ppe_dev_register_hook);
 void (*ppe_dev_unregister_hook)(struct net_device *dev) = NULL;
 EXPORT_SYMBOL(ppe_dev_unregister_hook);
+int (*mtk_tnl_encap_offload)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(mtk_tnl_encap_offload);
+int (*mtk_tnl_decap_offload)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(mtk_tnl_decap_offload);
+bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(mtk_tnl_decap_offloadable);
 
 static void hnat_sma_build_entry(struct timer_list *t)
 {
@@ -53,6 +59,16 @@ static void hnat_sma_build_entry(struct
 			SMA, SMA_FWD_CPU_BUILD_ENTRY);
 }
 
+struct foe_entry *hnat_get_foe_entry(u32 ppe_id, u32 index)
+{
+	if (index == 0x7fff || index >= hnat_priv->foe_etry_num
+	    || ppe_id >= CFG_PPE_NUM)
+		return ERR_PTR(-EINVAL);
+
+	return &hnat_priv->foe_table_cpu[ppe_id][index];
+}
+EXPORT_SYMBOL(hnat_get_foe_entry);
+
 void hnat_cache_ebl(int enable)
 {
 	int i;
@@ -63,6 +79,7 @@ void hnat_cache_ebl(int enable)
 		cr_set_field(hnat_priv->ppe_base[i] + PPE_CAH_CTRL, CAH_EN, enable);
 	}
 }
+EXPORT_SYMBOL(hnat_cache_ebl);
 
 static void hnat_reset_timestamp(struct timer_list *t)
 {
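
Note that hnat_get_foe_entry() reports failure with ERR_PTR(-EINVAL) rather than NULL, so callers must test with IS_ERR(). A usage sketch (the caller is hypothetical; foe_entry comes from hnat.h):

#include <linux/err.h>

static int my_read_entry_state(u32 ppe_id, u32 index)
{
	struct foe_entry *entry = hnat_get_foe_entry(ppe_id, index);

	if (IS_ERR(entry))		/* 0x7fff, or index/ppe_id out of range */
		return PTR_ERR(entry);

	return entry->bfib1.state;	/* e.g. compare against BIND */
}
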
--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
@@ -1087,6 +1087,8 @@ enum FoeIpAct {
 #define NR_WDMA1_PORT 9
 #define NR_WDMA2_PORT 13
 #define NR_GMAC3_PORT 15
+#define NR_TDMA_TPORT 4
+#define NR_TDMA_QDMA_TPORT 5
 #define LAN_DEV_NAME hnat_priv->lan
 #define LAN2_DEV_NAME hnat_priv->lan2
 #define IS_WAN(dev) \
@@ -1210,6 +1212,8 @@ static inline bool hnat_dsa_is_enable(st
 }
 #endif
 
+struct foe_entry *hnat_get_foe_entry(u32 ppe_id, u32 index);
+
 void hnat_deinit_debugfs(struct mtk_hnat *h);
 int hnat_init_debugfs(struct mtk_hnat *h);
 int hnat_register_nf_hooks(void);
@@ -1226,6 +1230,9 @@ extern int qos_ul_toggle;
 extern int hook_toggle;
 extern int mape_toggle;
 extern int qos_toggle;
+extern int (*mtk_tnl_encap_offload)(struct sk_buff *skb);
+extern int (*mtk_tnl_decap_offload)(struct sk_buff *skb);
+extern bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb);
 
 int ext_if_add(struct extdev_entry *ext_entry);
 int ext_if_del(struct extdev_entry *ext_entry);
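
The three hook pointers declared above are consumed by hnat_nf_hook.c below and provided by a tunnel module. As a shape reference only, a decap-offloadable check that accepts IPv4 GRE might look like this; the real TOPS classifier is not part of this patch:

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

static bool my_tnl_decap_offloadable(struct sk_buff *skb)
{
	/* called from is_ppe_support_type() with an ETH_P_IP skb */
	return ip_hdr(skb)->protocol == IPPROTO_GRE;
}

/* installed by plain assignment, like the other hooks:
 *	mtk_tnl_decap_offloadable = my_tnl_decap_offloadable;
 */
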
--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
@@ -726,10 +726,14 @@ static unsigned int is_ppe_support_type(
 	case ETH_P_IP:
 		iph = ip_hdr(skb);
 
-		/* do not accelerate non tcp/udp traffic */
-		if ((iph->protocol == IPPROTO_TCP) ||
+		if (mtk_tnl_decap_offloadable && mtk_tnl_decap_offloadable(skb)) {
+			/* tunnel protocol is offloadable */
+			skb_hnat_set_is_decap(skb, 1);
+			return 1;
+		} else if ((iph->protocol == IPPROTO_TCP) ||
 		    (iph->protocol == IPPROTO_UDP) ||
 		    (iph->protocol == IPPROTO_IPV6)) {
+			/* do not accelerate non tcp/udp traffic */
 			return 1;
 		}
 
@@ -846,6 +850,13 @@ mtk_hnat_ipv4_nf_pre_routing(void *priv,
 
 	hnat_set_head_frags(state, skb, -1, hnat_set_iif);
 
+	if (skb_hnat_tops(skb) && skb_hnat_is_decap(skb)
+	    && is_magic_tag_valid(skb)
+	    && skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL
+	    && mtk_tnl_decap_offload && mtk_tnl_decap_offload(skb)) {
+		return NF_ACCEPT;
+	}
+
 	/*
 	 * Avoid mistakenly binding of outer IP, ports in SW L2TP decap flow.
 	 * In pre-routing, if dev is virtual iface, TOPS module is not loaded,
@@ -922,6 +933,13 @@ mtk_hnat_br_nf_local_in(void *priv, stru
 
 	hnat_set_head_frags(state, skb, -1, hnat_set_iif);
 
+	if (skb_hnat_tops(skb) && skb_hnat_is_decap(skb)
+	    && is_magic_tag_valid(skb)
+	    && skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL
+	    && mtk_tnl_decap_offload && mtk_tnl_decap_offload(skb)) {
+		return NF_ACCEPT;
+	}
+
 	pre_routing_print(skb, state->in, state->out, __func__);
 
 	if (unlikely(debug_level >= 7)) {
@@ -1074,9 +1092,22 @@ static unsigned int hnat_ipv4_get_nextho
 		return -1;
 	}
 
+	/*
+	 * if this packet is a tunnel packet and is about to construct
+	 * outer header, we must update its outer mac header pointer
+	 * before filling outer mac or it may screw up inner mac
+	 */
+	if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) {
+		skb_push(skb, sizeof(struct ethhdr));
+		skb_reset_mac_header(skb);
+	}
+
 	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
 	memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);
 
+	if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
+		skb_pull(skb, sizeof(struct ethhdr));
+
 	rcu_read_unlock_bh();
 
 	return 0;
@@ -1202,6 +1233,72 @@ static struct ethhdr *get_ipv6_ipip_ethh
 	return eth;
 }
 
+static inline void hnat_get_filled_unbind_entry(struct sk_buff *skb,
+						struct foe_entry *entry)
+{
+	if (unlikely(!skb || !entry))
+		return;
+
+	memcpy(entry,
+	       &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
+	       sizeof(*entry));
+}
+
+static inline void hnat_qos_tnl(u32 id, const struct net_device *dev)
+{
+	u32 cfg;
+	u32 max_man = 0;
+	u32 max_exp = 0;
+	const struct mtk_mac *mac;
+
+	if (!dev)
+		return;
+	mac = netdev_priv(dev);
+
+	switch (mac->speed) {
+	case SPEED_100:
+	case SPEED_1000:
+	case SPEED_2500:
+	case SPEED_5000:
+	case SPEED_10000:
+		max_man = mac->speed / SPEED_100;
+		max_exp = 5;
+		break;
+	default:
+		return;
+	}
+
+	cfg = QTX_SCH_MIN_RATE_EN | QTX_SCH_MAX_RATE_EN;
+	cfg |= (1 << QTX_SCH_MIN_RATE_MAN_OFFSET) |
+	       (4 << QTX_SCH_MIN_RATE_EXP_OFFSET) |
+	       (max_man << QTX_SCH_MAX_RATE_MAN_OFFSET) |
+	       (max_exp << QTX_SCH_MAX_RATE_EXP_OFFSET) |
+	       (4 << QTX_SCH_MAX_RATE_WGHT_OFFSET);
+	writel(cfg, hnat_priv->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
+}
+
+static inline void hnat_fill_offload_engine_entry(struct sk_buff *skb,
+						  struct foe_entry *entry,
+						  const struct net_device *dev)
+{
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+	if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) {
+		/*
+		 * if skb_hnat_tops(skb) is setup for encapsulation,
+		 * we fill in hnat tport and tops_entry for tunnel encapsulation
+		 * offloading
+		 */
+		entry->ipv4_hnapt.tport_id = NR_TDMA_QDMA_TPORT;
+		entry->ipv4_hnapt.tops_entry = skb_hnat_tops(skb);
+	} else {
+		return;
+	}
+
+	entry->ipv4_hnapt.iblk2.qid = 12; /* offload engine use QID 12 */
+	hnat_qos_tnl(12, dev); /* set rate limit to line rate */
+#endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
+}
+
 static unsigned int skb_to_hnat_info(struct sk_buff *skb,
 				     const struct net_device *dev,
 				     struct foe_entry *foe,
@@ -1238,6 +1335,11 @@ static unsigned int skb_to_hnat_info(str
 	if (whnat && is_hnat_pre_filled(foe))
 		return 0;
 
+	if (skb_hnat_tops(skb) && !(hw_path->flags & FLOW_OFFLOAD_PATH_TNL)) {
+		hnat_get_filled_unbind_entry(skb, &entry);
+		goto hnat_entry_bind;
+	}
+
 	entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
 	entry.bfib1.state = foe->udib1.state;
 
@@ -1248,6 +1350,7 @@ static unsigned int skb_to_hnat_info(str
 	switch (ntohs(eth->h_proto)) {
 	case ETH_P_IP:
 		iph = ip_hdr(skb);
+
 		switch (iph->protocol) {
 		case IPPROTO_UDP:
 			udp = 1;
@@ -1629,6 +1732,10 @@ static unsigned int skb_to_hnat_info(str
 	/* Fill Layer2 Info.*/
 	entry = ppe_fill_L2_info(eth, entry, hw_path);
 
+	if (skb_hnat_tops(skb) && hw_path->flags & FLOW_OFFLOAD_PATH_TNL)
+		goto hnat_entry_skip_bind;
+
+hnat_entry_bind:
 	/* Fill Info Blk*/
 	entry = ppe_fill_info_blk(eth, entry, hw_path);
 
@@ -1827,7 +1934,20 @@ static unsigned int skb_to_hnat_info(str
 		entry.ipv6_5t_route.act_dp |= UDF_HNAT_PRE_FILLED;
 	}
 
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+	hnat_fill_offload_engine_entry(skb, &entry, dev);
+#endif
+
+hnat_entry_skip_bind:
 	wmb();
+
+	/*
+	 * final check before we write BIND info.
+	 * If this entry is already bound, we should not modify it right now
+	 */
+	if (entry_hnat_is_bound(foe))
+		return 0;
+
 	memcpy(foe, &entry, sizeof(entry));
 	/*reset statistic for this entry*/
 	if (hnat_priv->data->per_flow_accounting &&
@@ -1880,6 +2000,7 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
 		return NF_ACCEPT;
 
 	eth = eth_hdr(skb);
+
 	memcpy(&bfib1_tx, &entry->bfib1, sizeof(entry->bfib1));
 
 	/*not bind multicast if PPE mcast not enable*/
@@ -1899,6 +2020,12 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
 	switch ((int)bfib1_tx.pkt_type) {
 	case IPV4_HNAPT:
 	case IPV4_HNAT:
+		/*
+		 * skip if packet is an encap tnl packet or it may
+		 * screw up inner mac header
+		 */
+		if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
+			break;
 		entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
 		entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
 		break;
@@ -2060,6 +2187,10 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
 		entry->ipv6_5t_route.iblk2.dp = gmac_no;
 	}
 
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+	hnat_fill_offload_engine_entry(skb, entry, NULL);
+#endif
+
 	bfib1_tx.ttl = 1;
 	bfib1_tx.state = BIND;
 	wmb();
@@ -2081,6 +2212,7 @@ int mtk_sw_nat_hook_rx(struct sk_buff *s
 	}
 
 	skb_hnat_alg(skb) = 0;
+	skb_hnat_set_tops(skb, 0);
 	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
 
 	if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
@@ -2529,6 +2661,7 @@ static unsigned int mtk_hnat_nf_post_rou
 	struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
 						.virt_dev = (struct net_device*)out };
 	const struct net_device *arp_dev = out;
+	bool is_virt_dev = false;
 
 	if (xlat_toggle && !mtk_464xlat_post_process(skb, out))
 		return 0;
@@ -2549,10 +2682,18 @@ static unsigned int mtk_hnat_nf_post_rou
 
 	if (out->netdev_ops->ndo_flow_offload_check) {
 		out->netdev_ops->ndo_flow_offload_check(&hw_path);
+
 		out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
+		if (hw_path.flags & FLOW_OFFLOAD_PATH_TNL && mtk_tnl_encap_offload)
+			skb_hnat_set_tops(skb, hw_path.tnl_type + 1);
 	}
 
 	if (!IS_LAN_GRP(out) && !IS_WAN(out) && !IS_EXT(out))
+		is_virt_dev = true;
+
+	if (is_virt_dev
+	    && !(skb_hnat_tops(skb) && skb_hnat_is_encap(skb)
+		 && (hw_path.flags & FLOW_OFFLOAD_PATH_TNL)))
 		return 0;
 
 	trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
@@ -2572,9 +2713,18 @@ static unsigned int mtk_hnat_nf_post_rou
 		if (fn && !mtk_hnat_accel_type(skb))
 			break;
 
-		if (fn && fn(skb, arp_dev, &hw_path))
+		if (!is_virt_dev && fn && fn(skb, arp_dev, &hw_path))
 			break;
 
+		/* skb_hnat_tops(skb) is updated in mtk_tnl_offload() */
+		if (skb_hnat_tops(skb)) {
+			if (skb_hnat_is_encap(skb) && !is_virt_dev
+			    && mtk_tnl_encap_offload && mtk_tnl_encap_offload(skb))
+				break;
+			if (skb_hnat_is_decap(skb))
+				break;
+		}
+
 		skb_to_hnat_info(skb, out, entry, &hw_path);
 		break;
 	case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
@@ -2845,7 +2995,7 @@ mtk_hnat_ipv4_nf_local_out(void *priv, s
 	if (iph->protocol == IPPROTO_IPV6) {
 		entry->udib1.pkt_type = IPV6_6RD;
 		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
-	} else {
+	} else if (!skb_hnat_tops(skb)) {
 		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
 	}
 
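
A note on the hnat_qos_tnl() helper added above: with max_man = mac->speed / SPEED_100 and max_exp = 5, the programmed maximum rate matches the link speed, assuming the QDMA shaper interprets the mantissa/exponent pair as man × 10^exp kbit/s (that interpretation is inferred from the values, not stated in the patch). A decoding sketch:

/* sketch: decode the QTX_SCH max-rate pair written by hnat_qos_tnl() */
static unsigned long qtx_sch_rate_kbps(unsigned int man, unsigned int exp)
{
	unsigned long rate = man;

	while (exp--)
		rate *= 10;

	return rate;	/* SPEED_2500: man=25, exp=5 -> 2,500,000 kbit/s */
}
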
--- a/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
@@ -44,7 +44,9 @@ struct hnat_desc {
 	u32 is_sp : 1;
 	u32 hf : 1;
 	u32 amsdu : 1;
-	u32 resv3 : 19;
+	u32 tops : 6;
+	u32 is_decap : 1;
+	u32 resv3 : 12;
 	u32 magic_tag_protect : 16;
 } __packed;
 #elif defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
@@ -91,6 +93,19 @@ struct hnat_desc {
 	((((skb_headroom(skb) >= FOE_INFO_LEN) ? 1 : 0)))
 
 #define skb_hnat_info(skb) ((struct hnat_desc *)(skb->head))
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+#define skb_hnat_tops(skb) (((struct hnat_desc *)((skb)->head))->tops)
+#define skb_hnat_is_decap(skb) (((struct hnat_desc *)((skb)->head))->is_decap)
+#define skb_hnat_is_encap(skb) (!skb_hnat_is_decap(skb))
+#define skb_hnat_set_tops(skb, tops) ((skb_hnat_tops(skb)) = (tops))
+#define skb_hnat_set_is_decap(skb, is_decap) ((skb_hnat_is_decap(skb)) = (is_decap))
+#else /* !defined(CONFIG_MEDIATEK_NETSYS_V3) */
+#define skb_hnat_tops(skb) (0)
+#define skb_hnat_is_decap(skb) (0)
+#define skb_hnat_is_encap(skb) (0)
+#define skb_hnat_set_tops(skb, tops)
+#define skb_hnat_set_is_decap(skb, is_decap)
+#endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
 #define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic)
 #define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn)
 #define skb_hnat_entry(skb) (((struct hnat_desc *)(skb->head))->entry)
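
Two properties of the new fields are worth noting. First, tops stores tnl_type + 1, so 0 means "not a TOPS flow" and the 6-bit field bounds the tunnel-type space at 63; the old 19-bit resv3 is re-sliced as 6 + 1 + 12, leaving the descriptor layout unchanged. Second, skb_hnat_is_encap() is just the negation of the decap bit, so it is only meaningful once skb_hnat_tops() is nonzero — which is why every call site guards on skb_hnat_tops() first. On non-V3 builds the macros collapse to constants and empty statements, so the new call sites compile away entirely; a guard sketch (handle_decap() is hypothetical):

if (skb_hnat_tops(skb) && skb_hnat_is_decap(skb))
	handle_decap(skb);	/* folds to "if (0)" on non-V3 builds */
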
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -98,10 +98,22 @@ struct flow_offload {
 #define FLOW_OFFLOAD_PATH_6RD BIT(5)
 #define FLOW_OFFLOAD_PATH_TNL BIT(6)
 
+enum flow_offload_tnl {
+	FLOW_OFFLOAD_TNL_GRETAP,
+	FLOW_OFFLOAD_TNL_PPTP,
+	FLOW_OFFLOAD_TNL_IP_L2TP,
+	FLOW_OFFLOAD_TNL_UDP_L2TP_CTRL,
+	FLOW_OFFLOAD_TNL_UDP_L2TP_DATA,
+	FLOW_OFFLOAD_VXLAN,
+	FLOW_OFFLOAD_NATT,
+	__FLOW_OFFLOAD_MAX,
+};
+
 struct flow_offload_hw_path {
 	struct net_device *dev;
 	struct net_device *virt_dev;
 	u32 flags;
+	u32 tnl_type;
 
 	u8 eth_src[ETH_ALEN];
 	u8 eth_dest[ETH_ALEN];
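
On the transmit side, tnl_type is filled by a tunnel netdev's ->ndo_flow_offload_check(); mtk_hnat_nf_post_routing() above then stores tnl_type + 1 into skb_hnat_tops(). A sketch of the driver side for a gretap device, assuming the int-returning ndo_flow_offload_check() signature used in this tree (illustrative only; this patch does not modify any tunnel driver):

#include <net/netfilter/nf_flow_table.h>

static int my_gretap_flow_offload_check(struct flow_offload_hw_path *path)
{
	/* mark the path as a tunnel and report which enum flow_offload_tnl */
	path->flags |= FLOW_OFFLOAD_PATH_TNL;
	path->tnl_type = FLOW_OFFLOAD_TNL_GRETAP;
	return 0;
}
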
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1874,6 +1874,9 @@ extern const struct of_device_id of_mtk_
 extern u32 mtk_hwlro_stats_ebl;
 extern u32 dbg_show_level;
 
+/* tunnel offload related */
+extern struct net_device *(*mtk_get_tnl_dev)(int tnl_idx);
+
 /* read the hardware status register */
 void mtk_stats_update_mac(struct mtk_mac *mac);
485