--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -245,6 +245,9 @@ static const char * const mtk_clks_sourc
 	"top_netsys_warp_sel",
 };
 
+struct net_device *(*mtk_get_tnl_dev)(int tnl_idx) = NULL;
+EXPORT_SYMBOL(mtk_get_tnl_dev);
+
 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
 {
 	__raw_writel(val, eth->base + reg);
@@ -2168,6 +2171,7 @@ static int mtk_poll_rx(struct napi_struc
 	u64 addr64 = 0;
 	u8 *data, *new_data;
 	struct mtk_rx_dma_v2 *rxd, trxd;
+	int tnl_idx = 0;
 	int done = 0;
 
 	if (unlikely(!ring))
@@ -2205,11 +2209,20 @@ static int mtk_poll_rx(struct napi_struc
 			       0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
 		}
 
-		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
-			     !eth->netdev[mac]))
-			goto release_desc;
+		tnl_idx = RX_DMA_GET_TOPS_CRSN(trxd.rxd6);
+		if (mtk_get_tnl_dev && tnl_idx) {
+			netdev = mtk_get_tnl_dev(tnl_idx);
+			if (unlikely(IS_ERR(netdev)))
+				netdev = NULL;
+		}
 
-		netdev = eth->netdev[mac];
+		if (!netdev) {
+			if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
+				     !eth->netdev[mac]))
+				goto release_desc;
+
+			netdev = eth->netdev[mac];
+		}
 
 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
 			goto release_desc;
@@ -2294,6 +2307,8 @@ static int mtk_poll_rx(struct napi_struc
 		skb_hnat_alg(skb) = 0;
 		skb_hnat_filled(skb) = 0;
 		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
+		skb_hnat_set_tops(skb, 0);
+		skb_hnat_set_is_decap(skb, 0);
 
 		if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
 			trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
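
The mtk_get_tnl_dev hook above stays NULL until an external tunnel-offload (TOPS) module fills it in; mtk_poll_rx() then uses it to map the TOPS CRSN index recovered from rxd6 to the tunnel net_device. A minimal sketch of such a provider follows; my_tops_get_tnl_dev, tnl_netdevs and MAX_TNL_NUM are hypothetical names, not part of this patch:

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/err.h>

#define MAX_TNL_NUM 32					/* assumed table size */

static struct net_device *tnl_netdevs[MAX_TNL_NUM];	/* hypothetical lookup table */

/* mtk_get_tnl_dev itself is declared in mtk_eth_soc.h */
static struct net_device *my_tops_get_tnl_dev(int tnl_idx)
{
	/* mtk_poll_rx() checks IS_ERR() and falls back to eth->netdev[mac] */
	if (tnl_idx <= 0 || tnl_idx >= MAX_TNL_NUM || !tnl_netdevs[tnl_idx])
		return ERR_PTR(-ENODEV);

	return tnl_netdevs[tnl_idx];
}

static int __init my_tops_init(void)
{
	mtk_get_tnl_dev = my_tops_get_tnl_dev;
	return 0;
}
module_init(my_tops_init);

static void __exit my_tops_exit(void)
{
	mtk_get_tnl_dev = NULL;
}
module_exit(my_tops_exit);

MODULE_LICENSE("GPL");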
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1915,6 +1915,9 @@ extern const struct of_device_id of_mtk_
 extern u32 mtk_hwlro_stats_ebl;
 extern u32 dbg_show_level;
 
+/* tunnel offload related */
+extern struct net_device *(*mtk_get_tnl_dev)(int tnl_idx);
+
 /* read the hardware status register */
 void mtk_stats_update_mac(struct mtk_mac *mac);
 
--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
@@ -43,6 +43,12 @@ void (*ppe_dev_register_hook)(struct net
 EXPORT_SYMBOL(ppe_dev_register_hook);
 void (*ppe_dev_unregister_hook)(struct net_device *dev) = NULL;
 EXPORT_SYMBOL(ppe_dev_unregister_hook);
+int (*mtk_tnl_encap_offload)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(mtk_tnl_encap_offload);
+int (*mtk_tnl_decap_offload)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(mtk_tnl_decap_offload);
+bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(mtk_tnl_decap_offloadable);
 
 static void hnat_sma_build_entry(struct timer_list *t)
 {
@@ -53,6 +59,16 @@ static void hnat_sma_build_entry(struct
 			SMA, SMA_FWD_CPU_BUILD_ENTRY);
 }
 
+struct foe_entry *hnat_get_foe_entry(u32 ppe_id, u32 index)
+{
+	if (index == 0x7fff || index >= hnat_priv->foe_etry_num
+	    || ppe_id >= CFG_PPE_NUM)
+		return ERR_PTR(-EINVAL);
+
+	return &hnat_priv->foe_table_cpu[ppe_id][index];
+}
+EXPORT_SYMBOL(hnat_get_foe_entry);
+
 void hnat_cache_ebl(int enable)
 {
 	int i;
@@ -63,6 +79,7 @@ void hnat_cache_ebl(int enable)
 		cr_set_field(hnat_priv->ppe_base[i] + PPE_CAH_CTRL, CAH_EN, enable);
 	}
 }
+EXPORT_SYMBOL(hnat_cache_ebl);
 
 static void hnat_reset_timestamp(struct timer_list *t)
 {
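
hnat_get_foe_entry() gives such a module bounds-checked access to the FOE table (0x7fff is the invalid-entry sentinel carried in the RX descriptor), returning ERR_PTR on a bad index. A hedged usage sketch; the caller is hypothetical:

/* hypothetical caller: read the FOE state of the flow tagged on an skb */
static int my_foe_state(struct sk_buff *skb)
{
	struct foe_entry *entry;

	entry = hnat_get_foe_entry(skb_hnat_ppe(skb), skb_hnat_entry(skb));
	if (IS_ERR(entry))
		return PTR_ERR(entry);	/* invalid ppe_id or entry index */

	return entry->bfib1.state;
}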
--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
@@ -1140,6 +1140,8 @@ enum FoeIpAct {
 #define NR_WDMA1_PORT 9
 #define NR_WDMA2_PORT 13
 #define NR_GMAC3_PORT 15
+#define NR_TDMA_TPORT 4
+#define NR_TDMA_QDMA_TPORT 5
 #define LAN_DEV_NAME hnat_priv->lan
 #define LAN2_DEV_NAME hnat_priv->lan2
 #define IS_WAN(dev) \
@@ -1269,6 +1271,8 @@ static inline bool hnat_dsa_is_enable(st
 }
 #endif
 
+struct foe_entry *hnat_get_foe_entry(u32 ppe_id, u32 index);
+
 void hnat_deinit_debugfs(struct mtk_hnat *h);
 int hnat_init_debugfs(struct mtk_hnat *h);
 int hnat_register_nf_hooks(void);
@@ -1285,6 +1289,9 @@ extern int qos_ul_toggle;
 extern int hook_toggle;
 extern int mape_toggle;
 extern int qos_toggle;
+extern int (*mtk_tnl_encap_offload)(struct sk_buff *skb);
+extern int (*mtk_tnl_decap_offload)(struct sk_buff *skb);
+extern bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb);
 
 int ext_if_add(struct extdev_entry *ext_entry);
 int ext_if_del(struct extdev_entry *ext_entry);
--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
@@ -728,10 +728,14 @@ static unsigned int is_ppe_support_type(
	case ETH_P_IP:
		iph = ip_hdr(skb);

-		/* do not accelerate non tcp/udp traffic */
-		if ((iph->protocol == IPPROTO_TCP) ||
+		if (mtk_tnl_decap_offloadable && mtk_tnl_decap_offloadable(skb)) {
+			/* tunnel protocol is offloadable */
+			skb_hnat_set_is_decap(skb, 1);
+			return 1;
+		} else if ((iph->protocol == IPPROTO_TCP) ||
		    (iph->protocol == IPPROTO_UDP) ||
		    (iph->protocol == IPPROTO_IPV6)) {
+			/* only accelerate tcp/udp/ipv6-in-ipv4 traffic */
			return 1;
		}

@@ -848,6 +852,13 @@ mtk_hnat_ipv4_nf_pre_routing(void *priv,
 
 	hnat_set_head_frags(state, skb, -1, hnat_set_iif);
 
+	if (skb_hnat_tops(skb) && skb_hnat_is_decap(skb)
+	    && is_magic_tag_valid(skb)
+	    && skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL
+	    && mtk_tnl_decap_offload && mtk_tnl_decap_offload(skb)) {
+		return NF_ACCEPT;
+	}
+
 	/*
 	 * Avoid mistakenly binding of outer IP, ports in SW L2TP decap flow.
 	 * In pre-routing, if dev is virtual iface, TOPS module is not loaded,
@@ -923,6 +934,13 @@ mtk_hnat_br_nf_local_in(void *priv, stru
 
 	hnat_set_head_frags(state, skb, -1, hnat_set_iif);
 
+	if (skb_hnat_tops(skb) && skb_hnat_is_decap(skb)
+	    && is_magic_tag_valid(skb)
+	    && skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL
+	    && mtk_tnl_decap_offload && mtk_tnl_decap_offload(skb)) {
+		return NF_ACCEPT;
+	}
+
 	pre_routing_print(skb, state->in, state->out, __func__);
 
 	if (unlikely(debug_level >= 7)) {
@@ -1075,8 +1093,22 @@ static unsigned int hnat_ipv4_get_nextho
 		return -1;
 	}
 
+	/*
+	 * if this packet is a tunnel packet about to have its outer
+	 * header constructed, update the outer MAC header pointer
+	 * first, or filling the outer MAC may corrupt the inner MAC
+	 */
+	if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) {
+		skb_push(skb, sizeof(struct ethhdr));
+		skb_reset_mac_header(skb);
+	}
+
 	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
 	memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);
+	eth_hdr(skb)->h_proto = htons(ETH_P_IP);
+
+	if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
+		skb_pull(skb, sizeof(struct ethhdr));
 
 	rcu_read_unlock_bh();
 
@@ -1203,6 +1235,81 @@ static struct ethhdr *get_ipv6_ipip_ethh
 	return eth;
 }
 
+static inline void hnat_get_filled_unbind_entry(struct sk_buff *skb,
+						struct foe_entry *entry)
+{
+	if (unlikely(!skb || !entry))
+		return;
+
+	memcpy(entry,
+	       &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
+	       sizeof(*entry));
+
+#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
+	entry->bfib1.mc = 0;
+#endif /* defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3) */
+	entry->bfib1.ka = 0;
+	entry->bfib1.vlan_layer = 0;
+	entry->bfib1.psn = 0;
+	entry->bfib1.vpm = 0;
+	entry->bfib1.ps = 0;
+}
+
+static inline void hnat_qos_tnl(u32 id, const struct net_device *dev)
+{
+	u32 cfg;
+	u32 max_man = 0;
+	u32 max_exp = 0;
+	const struct mtk_mac *mac;
+
+	if (!dev)
+		return;
+	mac = netdev_priv(dev);
+
+	switch (mac->speed) {
+	case SPEED_100:
+	case SPEED_1000:
+	case SPEED_2500:
+	case SPEED_5000:
+	case SPEED_10000:
+		max_man = mac->speed / SPEED_100;
+		max_exp = 5;
+		break;
+	default:
+		return;
+	}
+
+	cfg = QTX_SCH_MIN_RATE_EN | QTX_SCH_MAX_RATE_EN;
+	cfg |= (1 << QTX_SCH_MIN_RATE_MAN_OFFSET) |
+	       (4 << QTX_SCH_MIN_RATE_EXP_OFFSET) |
+	       (max_man << QTX_SCH_MAX_RATE_MAN_OFFSET) |
+	       (max_exp << QTX_SCH_MAX_RATE_EXP_OFFSET) |
+	       (4 << QTX_SCH_MAX_RATE_WGHT_OFFSET);
+	writel(cfg, hnat_priv->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
+}
+
+static inline void hnat_fill_offload_engine_entry(struct sk_buff *skb,
+						  struct foe_entry *entry,
+						  const struct net_device *dev)
+{
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+	if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) {
+		/*
+		 * if skb_hnat_tops(skb) is set up for encapsulation,
+		 * fill in the hnat tport and tops_entry for tunnel
+		 * encapsulation offloading
+		 */
+		entry->ipv4_hnapt.tport_id = NR_TDMA_QDMA_TPORT;
+		entry->ipv4_hnapt.tops_entry = skb_hnat_tops(skb);
+	} else {
+		return;
+	}
+
+	entry->ipv4_hnapt.iblk2.qid = 12; /* offload engine uses QID 12 */
+	hnat_qos_tnl(12, dev); /* set rate limit to line rate */
+#endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
+}
+
 static unsigned int skb_to_hnat_info(struct sk_buff *skb,
 				     const struct net_device *dev,
 				     struct foe_entry *foe,
@@ -1240,6 +1347,11 @@ static unsigned int skb_to_hnat_info(str
 	if (whnat && is_hnat_pre_filled(foe))
 		return 0;
 
+	if (skb_hnat_tops(skb) && !(hw_path->flags & FLOW_OFFLOAD_PATH_TNL)) {
+		hnat_get_filled_unbind_entry(skb, &entry);
+		goto hnat_entry_bind;
+	}
+
 	entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packet type state */
 	entry.bfib1.state = foe->udib1.state;
 
@@ -1683,6 +1795,10 @@ static unsigned int skb_to_hnat_info(str
 	/* Fill Layer2 Info.*/
 	entry = ppe_fill_L2_info(eth, entry, hw_path);
 
+	if (skb_hnat_tops(skb) && hw_path->flags & FLOW_OFFLOAD_PATH_TNL)
+		goto hnat_entry_skip_bind;
+
+hnat_entry_bind:
 	/* Fill Info Blk*/
 	entry = ppe_fill_info_blk(eth, entry, hw_path);
 
@@ -1881,7 +1997,20 @@ static unsigned int skb_to_hnat_info(str
 		entry.ipv6_5t_route.act_dp |= UDF_HNAT_PRE_FILLED;
 	}
 
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+	hnat_fill_offload_engine_entry(skb, &entry, dev);
+#endif
+
+hnat_entry_skip_bind:
 	wmb();
+
+	/*
+	 * final check before we write BIND info.
+	 * If this entry is already bound, we should not modify it right now
+	 */
+	if (entry_hnat_is_bound(foe))
+		return 0;
+
 	memcpy(foe, &entry, sizeof(entry));
 	/*reset statistic for this entry*/
 	if (hnat_priv->data->per_flow_accounting &&
@@ -1953,6 +2082,12 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
 	switch ((int)entry.bfib1.pkt_type) {
 	case IPV4_HNAPT:
 	case IPV4_HNAT:
+		/*
+		 * skip if the packet is an encap tunnel packet, or we
+		 * may corrupt the inner MAC header
+		 */
+		if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
+			break;
 		entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
 		entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
 		break;
@@ -2144,6 +2279,10 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
 		entry.ipv6_5t_route.iblk2.dp = gmac_no;
 	}
 
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+	hnat_fill_offload_engine_entry(skb, &entry, NULL);
+#endif
+
 	entry.bfib1.ttl = 1;
 	entry.bfib1.state = BIND;
 	if (IS_IPV4_GRP(&entry))
@@ -2219,6 +2358,7 @@ int mtk_sw_nat_hook_rx(struct sk_buff *s
 	}
 
 	skb_hnat_alg(skb) = 0;
+	skb_hnat_set_tops(skb, 0);
 	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
 
 	if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
@@ -2672,6 +2812,7 @@ static unsigned int mtk_hnat_nf_post_rou
 	struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
 						.virt_dev = (struct net_device*)out };
 	const struct net_device *arp_dev = out;
+	bool is_virt_dev = false;
 
 	if (xlat_toggle && !mtk_464xlat_post_process(skb, out))
 		return 0;
@@ -2691,10 +2832,29 @@ static unsigned int mtk_hnat_nf_post_rou
 
 	if (out->netdev_ops->ndo_flow_offload_check) {
 		out->netdev_ops->ndo_flow_offload_check(&hw_path);
+
 		out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
+		if (hw_path.flags & FLOW_OFFLOAD_PATH_TNL && mtk_tnl_encap_offload) {
+			if (ntohs(skb->protocol) == ETH_P_IP
+			    && ip_hdr(skb)->protocol == IPPROTO_TCP) {
+				skb_hnat_set_tops(skb, hw_path.tnl_type + 1);
+			} else {
+				/*
+				 * we do not support protocols other than
+				 * IPv4 TCP for tunnel offload yet
+				 */
+				skb_hnat_alg(skb) = 1;
+				return 0;
+			}
+		}
 	}
 
 	if (!IS_LAN_GRP(out) && !IS_WAN(out) && !IS_EXT(out))
+		is_virt_dev = true;
+
+	if (is_virt_dev
+	    && !(skb_hnat_tops(skb) && skb_hnat_is_encap(skb)
+		 && (hw_path.flags & FLOW_OFFLOAD_PATH_TNL)))
 		return 0;
 
 	trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
@@ -2714,9 +2874,18 @@ static unsigned int mtk_hnat_nf_post_rou
 		if (fn && !mtk_hnat_accel_type(skb))
 			break;
 
-		if (fn && fn(skb, arp_dev, &hw_path))
+		if (!is_virt_dev && fn && fn(skb, arp_dev, &hw_path))
 			break;
 
+		/* skb_hnat_tops(skb) is updated in mtk_tnl_offload() */
+		if (skb_hnat_tops(skb)) {
+			if (skb_hnat_is_encap(skb) && !is_virt_dev
+			    && mtk_tnl_encap_offload && mtk_tnl_encap_offload(skb))
+				break;
+			if (skb_hnat_is_decap(skb))
+				break;
+		}
+
 		spin_lock(&hnat_priv->entry_lock);
 		skb_to_hnat_info(skb, out, entry, &hw_path);
 		spin_unlock(&hnat_priv->entry_lock);
@@ -2989,7 +3158,7 @@ mtk_hnat_ipv4_nf_local_out(void *priv, s
 	if (iph->protocol == IPPROTO_IPV6) {
 		entry->udib1.pkt_type = IPV6_6RD;
 		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
-	} else {
+	} else if (!skb_hnat_tops(skb)) {
 		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
 	}
 
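Every hnat_nf_hook.c change above only consults the three mtk_tnl_* function pointers; the tunnel parsing and offload-engine programming live in whichever module installs them. A minimal skeleton under assumed names (my_tnl_*), with placeholder bodies, following the return convention visible above (non-zero from a hook means the skb was taken over and further HNAT processing is skipped):

/* hypothetical tunnel-offload module wiring up the three hooks */
static bool my_tnl_decap_offloadable(struct sk_buff *skb)
{
	/* e.g. parse the outer header and decide whether HW can decap it */
	return false;
}

static int my_tnl_decap_offload(struct sk_buff *skb)
{
	/* set up decap state for this flow; non-zero: skb handled */
	return 0;
}

static int my_tnl_encap_offload(struct sk_buff *skb)
{
	/* program encap for this flow; non-zero: skip SW entry binding */
	return 0;
}

static int __init my_tnl_init(void)
{
	mtk_tnl_decap_offloadable = my_tnl_decap_offloadable;
	mtk_tnl_decap_offload = my_tnl_decap_offload;
	mtk_tnl_encap_offload = my_tnl_encap_offload;
	return 0;
}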
--- a/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
@@ -44,7 +44,9 @@ struct hnat_desc {
 	u32 is_sp : 1;
 	u32 hf : 1;
 	u32 amsdu : 1;
-	u32 resv3 : 19;
+	u32 tops : 6;
+	u32 is_decap : 1;
+	u32 resv3 : 12;
 	u32 magic_tag_protect : 16;
 } __packed;
 #elif defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
@@ -91,6 +93,19 @@ struct hnat_desc {
 	((((skb_headroom(skb) >= FOE_INFO_LEN) ? 1 : 0)))
 
 #define skb_hnat_info(skb) ((struct hnat_desc *)(skb->head))
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+#define skb_hnat_tops(skb) (((struct hnat_desc *)((skb)->head))->tops)
+#define skb_hnat_is_decap(skb) (((struct hnat_desc *)((skb)->head))->is_decap)
+#define skb_hnat_is_encap(skb) (!skb_hnat_is_decap(skb))
+#define skb_hnat_set_tops(skb, tops) ((skb_hnat_tops(skb)) = (tops))
+#define skb_hnat_set_is_decap(skb, is_decap) ((skb_hnat_is_decap(skb)) = (is_decap))
+#else /* !defined(CONFIG_MEDIATEK_NETSYS_V3) */
+#define skb_hnat_tops(skb) (0)
+#define skb_hnat_is_decap(skb) (0)
+#define skb_hnat_is_encap(skb) (0)
+#define skb_hnat_set_tops(skb, tops)
+#define skb_hnat_set_is_decap(skb, is_decap)
+#endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
 #define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic)
 #define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn)
 #define skb_hnat_entry(skb) (((struct hnat_desc *)(skb->head))->entry)
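
On non-V3 builds the accessor macros above evaluate to constant 0 and the setters expand to nothing, so callers may touch the TOPS fields unconditionally and the compiler discards the dead branches; for illustration (call-site sketch, not from the patch; handle_tnl_encap is a hypothetical helper):

	/* no #ifdef needed at the call site */
	skb_hnat_set_tops(skb, 0);	/* no-op unless NETSYS_V3 */
	if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
		handle_tnl_encap(skb);	/* dead code unless NETSYS_V3 */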
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -98,10 +98,21 @@ struct flow_offload {
 #define FLOW_OFFLOAD_PATH_6RD	BIT(5)
 #define FLOW_OFFLOAD_PATH_TNL	BIT(6)
 
+enum flow_offload_tnl {
+	FLOW_OFFLOAD_TNL_GRETAP,
+	FLOW_OFFLOAD_TNL_PPTP,
+	FLOW_OFFLOAD_TNL_L2TP_V2,
+	FLOW_OFFLOAD_TNL_L2TP_V3,
+	FLOW_OFFLOAD_VXLAN,
+	FLOW_OFFLOAD_NATT,
+	__FLOW_OFFLOAD_MAX,
+};
+
 struct flow_offload_hw_path {
	struct net_device *dev;
	struct net_device *virt_dev;
 	u32 flags;
+	u32 tnl_type;
 
 	u8 eth_src[ETH_ALEN];
 	u8 eth_dest[ETH_ALEN];
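
A tunnel device's ndo_flow_offload_check() is expected to set FLOW_OFFLOAD_PATH_TNL together with the matching tnl_type; mtk_hnat_nf_post_routing() then records tnl_type + 1 in the skb's TOPS field. A hedged sketch for a gretap-style device (hypothetical driver code, not from this patch):

/* hypothetical gretap driver: advertise hardware tunnel offloadability */
static int my_gretap_flow_offload_check(struct flow_offload_hw_path *path)
{
	path->flags |= FLOW_OFFLOAD_PATH_TNL;
	path->tnl_type = FLOW_OFFLOAD_TNL_GRETAP;
	return 0;
}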