blob: 95e5e3c55437f59b309e20a4de205a27d2e06f6b [file] [log] [blame]
developerfd40db22021-04-29 10:08:25 +08001/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
11 * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
12 */
13
14#include <linux/netfilter_bridge.h>
15#include <linux/netfilter_ipv6.h>
16
17#include <net/arp.h>
18#include <net/neighbour.h>
19#include <net/netfilter/nf_conntrack_helper.h>
20#include <net/netfilter/nf_flow_table.h>
21#include <net/ipv6.h>
22#include <net/ip6_route.h>
23#include <net/ip.h>
24#include <net/tcp.h>
25#include <net/udp.h>
developer30a47682021-11-02 17:06:14 +080026#include <net/netfilter/nf_conntrack.h>
27#include <net/netfilter/nf_conntrack_acct.h>
developerfd40db22021-04-29 10:08:25 +080028
29#include "nf_hnat_mtk.h"
30#include "hnat.h"
31
32#include "../mtk_eth_soc.h"
developer8051e042022-04-08 13:26:36 +080033#include "../mtk_eth_reset.h"
developerfd40db22021-04-29 10:08:25 +080034
/* True when @skb arrived on a GE/PPD interface, has already been hashed by
 * the PPE, and carries the HIT_BIND_FORCE_TO_CPU reason — i.e. a bound flow
 * whose egress is an external (non-GMAC) device, handled in software.
 */
#define do_ge2ext_fast(dev, skb)                                               \
	((IS_LAN_GRP(dev) || IS_WAN(dev) || IS_PPD(dev)) &&                    \
	 skb_hnat_is_hashed(skb) &&                                            \
	 skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU)
/* True when @skb is on the PPD device, came from a PDMA/QDMA source port,
 * and its VLAN tag encodes the ifindex of a known external or WAN device
 * (the ext->GE pingpong path stores the source ifindex in vlan_tci).
 */
#define do_ext2ge_fast_learn(dev, skb)                                         \
	(IS_PPD(dev) &&                                                        \
	 (skb_hnat_sport(skb) == NR_PDMA_PORT ||                               \
	  skb_hnat_sport(skb) == NR_QDMA_PORT) &&                              \
	 ((get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK)) ||               \
	  get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK)))
/* MAP-E WAN->LAN candidate: MAP-E enabled, packet ingressing on WAN and not
 * already marked as a MAP-E pingpong packet.
 */
#define do_mape_w2l_fast(dev, skb)                                             \
	(mape_toggle && IS_WAN(dev) && (!is_from_mape(skb)))

/* Cached IPv6 headers used to re-add the encapsulation on the MAP-E
 * LAN->WAN (l2w) and WAN->LAN (w2l) fast paths.
 */
static struct ipv6hdr mape_l2w_v6h;
static struct ipv6hdr mape_w2l_v6h;
50static inline uint8_t get_wifi_hook_if_index_from_dev(const struct net_device *dev)
51{
52 int i;
53
54 for (i = 1; i < MAX_IF_NUM; i++) {
55 if (hnat_priv->wifi_hook_if[i] == dev)
56 return i;
57 }
58
59 return 0;
60}
61
62static inline int get_ext_device_number(void)
63{
64 int i, number = 0;
65
66 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++)
67 number += 1;
68 return number;
69}
70
71static inline int find_extif_from_devname(const char *name)
72{
73 int i;
74 struct extdev_entry *ext_entry;
75
76 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
77 ext_entry = hnat_priv->ext_if[i];
78 if (!strcmp(name, ext_entry->name))
79 return 1;
80 }
81 return 0;
82}
83
84static inline int get_index_from_dev(const struct net_device *dev)
85{
86 int i;
87 struct extdev_entry *ext_entry;
88
89 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
90 ext_entry = hnat_priv->ext_if[i];
91 if (dev == ext_entry->dev)
92 return ext_entry->dev->ifindex;
93 }
94 return 0;
95}
96
97static inline struct net_device *get_dev_from_index(int index)
98{
99 int i;
100 struct extdev_entry *ext_entry;
101 struct net_device *dev = 0;
102
103 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
104 ext_entry = hnat_priv->ext_if[i];
105 if (ext_entry->dev && index == ext_entry->dev->ifindex) {
106 dev = ext_entry->dev;
107 break;
108 }
109 }
110 return dev;
111}
112
113static inline struct net_device *get_wandev_from_index(int index)
114{
developer8c9c0d02021-06-18 16:15:37 +0800115 if (!hnat_priv->g_wandev)
116 hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);
developerfd40db22021-04-29 10:08:25 +0800117
developer8c9c0d02021-06-18 16:15:37 +0800118 if (hnat_priv->g_wandev && hnat_priv->g_wandev->ifindex == index)
119 return hnat_priv->g_wandev;
developerfd40db22021-04-29 10:08:25 +0800120 return NULL;
121}
122
123static inline int extif_set_dev(struct net_device *dev)
124{
125 int i;
126 struct extdev_entry *ext_entry;
127
128 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
129 ext_entry = hnat_priv->ext_if[i];
130 if (!strcmp(dev->name, ext_entry->name) && !ext_entry->dev) {
131 dev_hold(dev);
132 ext_entry->dev = dev;
133 pr_info("%s(%s)\n", __func__, dev->name);
134
135 return ext_entry->dev->ifindex;
136 }
137 }
138
139 return -1;
140}
141
142static inline int extif_put_dev(struct net_device *dev)
143{
144 int i;
145 struct extdev_entry *ext_entry;
146
147 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
148 ext_entry = hnat_priv->ext_if[i];
149 if (ext_entry->dev == dev) {
150 ext_entry->dev = NULL;
151 dev_put(dev);
152 pr_info("%s(%s)\n", __func__, dev->name);
153
developerbc53e5f2021-05-21 10:07:17 +0800154 return 0;
developerfd40db22021-04-29 10:08:25 +0800155 }
156 }
157
158 return -1;
159}
160
/* Append @ext_entry to the first free slot of hnat_priv->ext_if[].
 *
 * Returns the number of registered entries after the attempted insert.
 * NOTE(review): when the table is full the entry is silently dropped and
 * MAX_EXT_DEVS is returned — the same value as a successful insert into
 * the last slot, so callers cannot distinguish the two cases; confirm
 * callers tolerate this before changing the contract.
 */
int ext_if_add(struct extdev_entry *ext_entry)
{
	int len = get_ext_device_number();

	if (len < MAX_EXT_DEVS)
		hnat_priv->ext_if[len++] = ext_entry;

	return len;
}
170
171int ext_if_del(struct extdev_entry *ext_entry)
172{
173 int i, j;
174
175 for (i = 0; i < MAX_EXT_DEVS; i++) {
176 if (hnat_priv->ext_if[i] == ext_entry) {
177 for (j = i; hnat_priv->ext_if[j] && j < MAX_EXT_DEVS - 1; j++)
178 hnat_priv->ext_if[j] = hnat_priv->ext_if[j + 1];
179 hnat_priv->ext_if[j] = NULL;
180 break;
181 }
182 }
183
184 return i;
185}
186
/* Invalidate every bound FOE entry on all PPEs when @dev (a LAN/WAN/ext/
 * offload-capable interface) changes state, forcing flows to be re-learned.
 *
 * Only acts for devices HNAT actually handles; others return immediately.
 */
void foe_clear_all_bind_entries(struct net_device *dev)
{
	int i, hash_index;
	struct foe_entry *entry;

	if (!IS_LAN_GRP(dev) && !IS_WAN(dev) &&
	    !find_extif_from_devname(dev->name) &&
	    !dev->netdev_ops->ndo_flow_offload_check)
		return;

	for (i = 0; i < CFG_PPE_NUM; i++) {
		/* Route new packets of semi-bound flows to CPU only while
		 * the table is being torn down.
		 */
		cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
			     SMA, SMA_ONLY_FWD_CPU);

		for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
			entry = hnat_priv->foe_table_cpu[i] + hash_index;
			if (entry->bfib1.state == BIND) {
				entry->ipv4_hnapt.udib1.state = INVALID;
				/* Refresh the timestamp from the FE counter
				 * register so the invalid entry ages out.
				 */
				entry->ipv4_hnapt.udib1.time_stamp =
					readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
			}
		}
	}

	/* clear HWNAT cache */
	hnat_cache_ebl(1);

	/* Restore the SMA binding mode later via the deferred timer. */
	mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ);
}
216
217static void gmac_ppe_fwd_enable(struct net_device *dev)
218{
219 if (IS_LAN(dev) || IS_GMAC1_MODE)
developerd35bbcc2022-09-28 22:46:01 +0800220 set_gmac_ppe_fwd(NR_GMAC1_PORT, 1);
developerfd40db22021-04-29 10:08:25 +0800221 else if (IS_WAN(dev))
developerd35bbcc2022-09-28 22:46:01 +0800222 set_gmac_ppe_fwd(NR_GMAC2_PORT, 1);
223 else if (IS_LAN2(dev))
224 set_gmac_ppe_fwd(NR_GMAC3_PORT, 1);
developerfd40db22021-04-29 10:08:25 +0800225}
226
/* netdevice notifier callback: keeps HNAT state in sync with interface
 * lifecycle events (up/down/register/unregister) and triggers warm init
 * after a frame-engine reset.
 */
int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event,
			    void *ptr)
{
	struct net_device *dev;

	dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		/* Enable PPE forwarding on the backing GMAC and bind the
		 * device to its extdev slot (if its name is registered).
		 */
		gmac_ppe_fwd_enable(dev);

		extif_set_dev(dev);

		break;
	case NETDEV_GOING_DOWN:
		/* Keep the extdev binding if the device is still serving as
		 * a WiFi hook interface; always flush its bound entries.
		 */
		if (!get_wifi_hook_if_index_from_dev(dev))
			extif_put_dev(dev);

		foe_clear_all_bind_entries(dev);

		break;
	case NETDEV_UNREGISTER:
		/* Drop the cached PPD/WAN references taken with
		 * dev_get_by_name() in the NETDEV_REGISTER case below.
		 */
		if (hnat_priv->g_ppdev == dev) {
			hnat_priv->g_ppdev = NULL;
			dev_put(dev);
		}
		if (hnat_priv->g_wandev == dev) {
			hnat_priv->g_wandev = NULL;
			dev_put(dev);
		}

		break;
	case NETDEV_REGISTER:
		if (IS_PPD(dev) && !hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
		if (IS_WAN(dev) && !hnat_priv->g_wandev)
			hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);

		break;
	case MTK_FE_RESET_NAT_DONE:
		/* Frame engine finished resetting: rebuild HNAT state. */
		pr_info("[%s] HNAT driver starts to do warm init !\n", __func__);
		hnat_warm_init();
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}
276
277void foe_clear_entry(struct neighbour *neigh)
278{
279 u32 *daddr = (u32 *)neigh->primary_key;
280 unsigned char h_dest[ETH_ALEN];
281 struct foe_entry *entry;
developer471f6562021-05-10 20:48:34 +0800282 int i, hash_index;
developerfd40db22021-04-29 10:08:25 +0800283 u32 dip;
284
285 dip = (u32)(*daddr);
286
developer471f6562021-05-10 20:48:34 +0800287 for (i = 0; i < CFG_PPE_NUM; i++) {
developer8051e042022-04-08 13:26:36 +0800288 if (!hnat_priv->foe_table_cpu[i])
289 continue;
290
developer471f6562021-05-10 20:48:34 +0800291 for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
292 entry = hnat_priv->foe_table_cpu[i] + hash_index;
293 if (entry->bfib1.state == BIND &&
developerf6872252023-05-23 18:21:08 +0800294 entry->ipv4_hnapt.new_dip == ntohl(dip) &&
295 IS_IPV4_HNAPT(entry)) {
developer471f6562021-05-10 20:48:34 +0800296 *((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
297 *((u16 *)&h_dest[4]) =
298 swab16(entry->ipv4_hnapt.dmac_lo);
299 if (strncmp(h_dest, neigh->ha, ETH_ALEN) != 0) {
300 pr_info("%s: state=%d\n", __func__,
301 neigh->nud_state);
302 cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
303 SMA, SMA_ONLY_FWD_CPU);
developerfd40db22021-04-29 10:08:25 +0800304
developer471f6562021-05-10 20:48:34 +0800305 entry->ipv4_hnapt.udib1.state = INVALID;
306 entry->ipv4_hnapt.udib1.time_stamp =
307 readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
developerfd40db22021-04-29 10:08:25 +0800308
developer471f6562021-05-10 20:48:34 +0800309 /* clear HWNAT cache */
310 hnat_cache_ebl(1);
developerfd40db22021-04-29 10:08:25 +0800311
developer471f6562021-05-10 20:48:34 +0800312 mod_timer(&hnat_priv->hnat_sma_build_entry_timer,
313 jiffies + 3 * HZ);
developerfd40db22021-04-29 10:08:25 +0800314
developer471f6562021-05-10 20:48:34 +0800315 pr_info("Delete old entry: dip =%pI4\n", &dip);
316 pr_info("Old mac= %pM\n", h_dest);
317 pr_info("New mac= %pM\n", neigh->ha);
318 }
developerfd40db22021-04-29 10:08:25 +0800319 }
320 }
321 }
322}
323
324int nf_hnat_netevent_handler(struct notifier_block *unused, unsigned long event,
325 void *ptr)
326{
327 struct net_device *dev = NULL;
328 struct neighbour *neigh = NULL;
329
330 switch (event) {
331 case NETEVENT_NEIGH_UPDATE:
332 neigh = ptr;
333 dev = neigh->dev;
334 if (dev)
335 foe_clear_entry(neigh);
336 break;
337 }
338
339 return NOTIFY_DONE;
340}
341
/* Re-insert an IPv6 encapsulation header (@mape_ip6h) in front of the IPv4
 * packet in @skb for the MAP-E path, rewriting the Ethernet header and
 * protocol fields accordingly.
 *
 * Returns 0 on success, (unsigned)-1 when the skb lacks IPV6_HDR_LEN of
 * headroom or is shared/cloned and not writable.
 */
unsigned int mape_add_ipv6_hdr(struct sk_buff *skb, struct ipv6hdr mape_ip6h)
{
	struct ethhdr *eth = NULL;
	struct ipv6hdr *ip6h = NULL;
	struct iphdr *iph = NULL;

	if (skb_headroom(skb) < IPV6_HDR_LEN || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		return -1;
	}

	/* point to L3 */
	/* Move the MAC header IPV6_HDR_LEN bytes further forward, then lay
	 * the IPv6 header between it and the original IPv4 header.  After
	 * both operations skb->data points at the new IPv6 header.
	 */
	memcpy(skb->data - IPV6_HDR_LEN - ETH_HLEN, skb_push(skb, ETH_HLEN), ETH_HLEN);
	memcpy(skb_push(skb, IPV6_HDR_LEN - ETH_HLEN), &mape_ip6h, IPV6_HDR_LEN);

	eth = (struct ethhdr *)(skb->data - ETH_HLEN);
	eth->h_proto = htons(ETH_P_IPV6);
	skb->protocol = htons(ETH_P_IPV6);

	iph = (struct iphdr *)(skb->data + IPV6_HDR_LEN);
	ip6h = (struct ipv6hdr *)(skb->data);
	/* NOTE(review): payload_len is copied from the inner IPv4 tot_len
	 * without adjustment — "maybe different with ipv4" per the original
	 * author; confirm the PPE/peer tolerates this.
	 */
	ip6h->payload_len = iph->tot_len; /* maybe different with ipv4 */

	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, iph->ihl * 4 + IPV6_HDR_LEN);
	return 0;
}
369
370static void fix_skb_packet_type(struct sk_buff *skb, struct net_device *dev,
371 struct ethhdr *eth)
372{
373 skb->pkt_type = PACKET_HOST;
374 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
375 if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
376 skb->pkt_type = PACKET_BROADCAST;
377 else
378 skb->pkt_type = PACKET_MULTICAST;
379 }
380}
381
/* Pingpong a packet received on an external device (@in) through the PPD
 * interface so the PPE can learn/accelerate the flow.  The source ifindex
 * is stashed in the VLAN tag so do_hnat_ext_to_ge2() can recover it.
 *
 * Returns 0 when the skb was consumed (transmitted), (unsigned)-1 when the
 * PPD device is absent or down, or tag insertion failed.
 */
unsigned int do_hnat_ext_to_ge(struct sk_buff *skb, const struct net_device *in,
			       const char *func)
{
	if (hnat_priv->g_ppdev && hnat_priv->g_ppdev->flags & IFF_UP) {
		u16 vlan_id = 0;
		skb_set_network_header(skb, 0);
		skb_push(skb, ETH_HLEN);
		set_to_ppe(skb);

		/* Preserve an existing VLAN tag in-band before the tci
		 * field is reused to carry the ingress ifindex below.
		 */
		vlan_id = skb_vlan_tag_get_id(skb);
		if (vlan_id) {
			skb = vlan_insert_tag(skb, skb->vlan_proto, skb->vlan_tci);
			if (!skb)
				return -1;
		}

		/*set where we come from*/
		skb->vlan_proto = htons(ETH_P_8021Q);
		skb->vlan_tci =
			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));
		trace_printk(
			"%s: vlan_prot=0x%x, vlan_tci=%x, in->name=%s, skb->dev->name=%s\n",
			__func__, ntohs(skb->vlan_proto), skb->vlan_tci,
			in->name, hnat_priv->g_ppdev->name);
		skb->dev = hnat_priv->g_ppdev;
		dev_queue_xmit(skb);
		trace_printk("%s: called from %s successfully\n", __func__, func);
		return 0;
	}

	trace_printk("%s: called from %s fail\n", __func__, func);
	return -1;
}
415
/* Second leg of the external->GE pingpong: the packet comes back from the
 * PPD with the original ingress ifindex encoded in vlan_tci.  Restore it
 * to that device and re-inject via netif_rx(); otherwise try the MAP-E
 * WAN->LAN pingpong.  Returns 0 when consumed, (unsigned)-1 on failure.
 */
unsigned int do_hnat_ext_to_ge2(struct sk_buff *skb, const char *func)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct net_device *dev;
	struct foe_entry *entry;

	trace_printk("%s: vlan_prot=0x%x, vlan_tci=%x\n", __func__,
		     ntohs(skb->vlan_proto), skb->vlan_tci);

	/* Bounds-check the hw-provided entry/ppe indices before use. */
	if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
	    skb_hnat_ppe(skb) >= CFG_PPE_NUM)
		return -1;

	dev = get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK);

	if (dev) {
		/*set where we to go*/
		skb->dev = dev;
		skb->vlan_proto = 0;
		skb->vlan_tci = 0;

		/* An in-band 802.1Q tag was preserved by
		 * do_hnat_ext_to_ge(); strip it again.
		 */
		if (ntohs(eth->h_proto) == ETH_P_8021Q) {
			skb = skb_vlan_untag(skb);
			if (unlikely(!skb))
				return -1;
		}

		/* In bond mode, derive the RX hash from the FOE entry index
		 * (unless it is the per-version "no entry" sentinel).
		 */
		if (IS_BOND_MODE &&
		    (((hnat_priv->data->version == MTK_HNAT_V2 ||
		       hnat_priv->data->version == MTK_HNAT_V3) &&
		      (skb_hnat_entry(skb) != 0x7fff)) ||
		     ((hnat_priv->data->version != MTK_HNAT_V2 &&
		       hnat_priv->data->version != MTK_HNAT_V3) &&
		      (skb_hnat_entry(skb) != 0x3fff))))
			skb_set_hash(skb, skb_hnat_entry(skb) >> 1, PKT_HASH_TYPE_L4);

		set_from_extge(skb);
		fix_skb_packet_type(skb, skb->dev, eth);
		netif_rx(skb);
		trace_printk("%s: called from %s successfully\n", __func__,
			     func);
		return 0;
	} else {
		/* MapE WAN --> LAN/WLAN PingPong. */
		dev = get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK);
		if (mape_toggle && dev) {
			if (!mape_add_ipv6_hdr(skb, mape_w2l_v6h)) {
				skb_set_mac_header(skb, -ETH_HLEN);
				skb->dev = dev;
				set_from_mape(skb);
				skb->vlan_proto = 0;
				skb->vlan_tci = 0;
				fix_skb_packet_type(skb, skb->dev, eth_hdr(skb));
				entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
				entry->bfib1.pkt_type = IPV4_HNAPT;
				netif_rx(skb);
				return 0;
			}
		}
		trace_printk("%s: called from %s fail\n", __func__, func);
		return -1;
	}
}
479
/* Forward a PPE-bound packet from a GE interface out through the external
 * device recorded in the FOE entry's act_dp field.  Returns 0 when the skb
 * was consumed, (unsigned)-1 on failure (and invalidates the entry when
 * the external device is gone).
 */
unsigned int do_hnat_ge_to_ext(struct sk_buff *skb, const char *func)
{
	/*set where we to go*/
	u8 index;
	struct foe_entry *entry;
	struct net_device *dev;

	/* Bounds-check the hw-provided entry/ppe indices before use. */
	if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
	    skb_hnat_ppe(skb) >= CFG_PPE_NUM)
		return -1;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];

	/* act_dp holds the destination external-device index the flow was
	 * bound to; the field lives in a per-type union view.
	 */
	if (IS_IPV4_GRP(entry))
		index = entry->ipv4_hnapt.act_dp;
	else
		index = entry->ipv6_5t_route.act_dp;

	dev = get_dev_from_index(index);
	if (!dev) {
		trace_printk("%s: called from %s. Get wifi interface fail\n",
			     __func__, func);
		return 0;
	}

	skb->dev = dev;

	/* Strip the in-band HQoS magic VLAN tag, restoring the MAC header. */
	if (IS_HQOS_MODE && eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb)
			return NF_ACCEPT;

		if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
			return NF_ACCEPT;

		skb_pull_rcsum(skb, VLAN_HLEN);

		memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - VLAN_HLEN,
			2 * ETH_ALEN);
	}

	/* NOTE(review): skb->dev was assigned a non-NULL dev above, so this
	 * branch always appears to be taken and the MAP-E else-path below
	 * looks unreachable since get_dev_from_index() gained the early
	 * return — confirm against the MAP-E configuration before removing.
	 */
	if (skb->dev) {
		skb_set_network_header(skb, 0);
		skb_push(skb, ETH_HLEN);
		dev_queue_xmit(skb);
		trace_printk("%s: called from %s successfully\n", __func__,
			     func);
		return 0;
	} else {
		if (mape_toggle) {
			/* Add ipv6 header mape for lan/wlan -->wan */
			dev = get_wandev_from_index(index);
			if (dev) {
				if (!mape_add_ipv6_hdr(skb, mape_l2w_v6h)) {
					skb_set_network_header(skb, 0);
					skb_push(skb, ETH_HLEN);
					skb_set_mac_header(skb, 0);
					skb->dev = dev;
					dev_queue_xmit(skb);
					return 0;
				}
				trace_printk("%s: called from %s fail[MapE]\n", __func__,
					     func);
				return -1;
			}
		}
	}
	/*if external devices is down, invalidate related ppe entry*/
	if (entry_hnat_is_bound(entry)) {
		entry->bfib1.state = INVALID;
		if (IS_IPV4_GRP(entry))
			entry->ipv4_hnapt.act_dp = 0;
		else
			entry->ipv6_5t_route.act_dp = 0;

		/* clear HWNAT cache */
		hnat_cache_ebl(1);
	}
	trace_printk("%s: called from %s fail, index=%x\n", __func__,
		     func, index);
	return -1;
}
562
/* Trace helper: dump the HNAT skb metadata at the pre-routing hook. */
static void pre_routing_print(struct sk_buff *skb, const struct net_device *in,
			      const struct net_device *out, const char *func)
{
	trace_printk(
		"[%s]: %s(iif=0x%x CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
		__func__, in->name, skb_hnat_iface(skb),
		HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
		skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
		func);
}
573
/* Trace helper: dump the HNAT skb metadata at the post-routing hook. */
static void post_routing_print(struct sk_buff *skb, const struct net_device *in,
			       const struct net_device *out, const char *func)
{
	trace_printk(
		"[%s]: %s(iif=0x%x, CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
		__func__, in->name, skb_hnat_iface(skb),
		HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
		skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
		func);
}
584
/* Record the ingress-interface class in the skb's HNAT headroom.  The
 * order of the tests defines precedence (WED > LAN > LAN2 > PPD > EXT >
 * WAN > virtual).  @val is unused here; the parameter exists so this
 * function matches the callback signature hnat_set_head_frags() expects.
 */
static inline void hnat_set_iif(const struct nf_hook_state *state,
				struct sk_buff *skb, int val)
{
	/* Packets already tagged by WED keep their iif untouched. */
	if (IS_WHNAT(state->in) && FROM_WED(skb)) {
		return;
	} else if (IS_LAN(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN;
	} else if (IS_LAN2(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN2;
	} else if (IS_PPD(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_PPD;
	} else if (IS_EXT(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_EXT;
	} else if (IS_WAN(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_WAN;
	} else if (!IS_BR(state->in)) {
		if (state->in->netdev_ops->ndo_flow_offload_check) {
			skb_hnat_iface(skb) = FOE_MAGIC_GE_VIRTUAL;
		} else {
			/* Unknown device: mark invalid and scrub any stale
			 * HNAT metadata left in the headroom.
			 */
			skb_hnat_iface(skb) = FOE_INVALID;

			if (is_magic_tag_valid(skb) &&
			    IS_SPACE_AVAILABLE_HEAD(skb))
				memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
		}
	}
}
612
/* Store @val in the skb's HNAT ALG field; callback-shaped for
 * hnat_set_head_frags().
 */
static inline void hnat_set_alg(const struct nf_hook_state *state,
				struct sk_buff *skb, int val)
{
	skb_hnat_alg(skb) = val;
}
618
619static inline void hnat_set_head_frags(const struct nf_hook_state *state,
620 struct sk_buff *head_skb, int val,
621 void (*fn)(const struct nf_hook_state *state,
622 struct sk_buff *skb, int val))
623{
624 struct sk_buff *segs = skb_shinfo(head_skb)->frag_list;
625
626 fn(state, head_skb, val);
627 while (segs) {
628 fn(state, segs, val);
629 segs = segs->next;
630 }
631}
632
developer25fc8c02022-05-06 16:24:02 +0800633static void ppe_fill_flow_lbl(struct foe_entry *entry, struct ipv6hdr *ip6h)
634{
635 entry->ipv4_dslite.flow_lbl[0] = ip6h->flow_lbl[2];
636 entry->ipv4_dslite.flow_lbl[1] = ip6h->flow_lbl[1];
637 entry->ipv4_dslite.flow_lbl[2] = ip6h->flow_lbl[0];
638}
639
/* MAP-E WAN->LAN fast path: strip the outer IPv6 header from an
 * IPv6-in-IPIP packet carrying TCP/UDP, cache that header for the reverse
 * direction, and pingpong the inner IPv4 packet through the PPD interface
 * (ingress ifindex stashed in vlan_tci, as in do_hnat_ext_to_ge()).
 *
 * Returns 0 when the skb was consumed, (unsigned)-1 otherwise.
 */
unsigned int do_hnat_mape_w2l_fast(struct sk_buff *skb, const struct net_device *in,
				   const char *func)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct iphdr _iphdr;
	struct iphdr *iph;
	struct ethhdr *eth;

	/* WAN -> LAN/WLAN MapE. */
	if (mape_toggle && (ip6h->nexthdr == NEXTHDR_IPIP)) {
		iph = skb_header_pointer(skb, IPV6_HDR_LEN, sizeof(_iphdr), &_iphdr);
		if (unlikely(!iph))
			return -1;

		/* Only TCP/UDP inner payloads are eligible. */
		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_TCP:
			break;
		default:
			return -1;
		}
		/* Cache the outer header so the LAN->WAN path can re-add it. */
		mape_w2l_v6h = *ip6h;

		/* Remove ipv6 header. */
		memcpy(skb->data + IPV6_HDR_LEN - ETH_HLEN,
		       skb->data - ETH_HLEN, ETH_HLEN);
		skb_pull(skb, IPV6_HDR_LEN - ETH_HLEN);
		skb_set_mac_header(skb, 0);
		skb_set_network_header(skb, ETH_HLEN);
		skb_set_transport_header(skb, ETH_HLEN + sizeof(_iphdr));

		eth = eth_hdr(skb);
		eth->h_proto = htons(ETH_P_IP);
		set_to_ppe(skb);

		/* Encode the ingress ifindex for the pingpong return leg. */
		skb->vlan_proto = htons(ETH_P_8021Q);
		skb->vlan_tci =
			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));

		if (!hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);

		skb->dev = hnat_priv->g_ppdev;
		skb->protocol = htons(ETH_P_IP);

		dev_queue_xmit(skb);

		return 0;
	}
	return -1;
}
691
/* 464XLAT support: snapshot the skb headroom (HNAT descriptor) for
 * unbound flows that hit the rate threshold, so it can be replayed during
 * binding; clear the snapshot once the entry is bound.
 */
void mtk_464xlat_pre_process(struct sk_buff *skb)
{
	struct foe_entry *foe;

	/* Bounds-check the hw-provided entry/ppe indices before use. */
	if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
	    skb_hnat_ppe(skb) >= CFG_PPE_NUM)
		return;

	foe = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
	if (foe->bfib1.state != BIND &&
	    skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH)
		memcpy(&headroom[skb_hnat_entry(skb)], skb->head,
		       sizeof(struct hnat_desc));

	if (foe->bfib1.state == BIND)
		memset(&headroom[skb_hnat_entry(skb)], 0,
		       sizeof(struct hnat_desc));
}
developer25fc8c02022-05-06 16:24:02 +0800710
developerfd40db22021-04-29 10:08:25 +0800711static unsigned int is_ppe_support_type(struct sk_buff *skb)
712{
713 struct ethhdr *eth = NULL;
714 struct iphdr *iph = NULL;
715 struct ipv6hdr *ip6h = NULL;
716 struct iphdr _iphdr;
717
718 eth = eth_hdr(skb);
developerfd2d7422021-06-09 17:09:39 +0800719 if (!is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb) ||
developerb254f762022-01-20 20:06:25 +0800720 is_broadcast_ether_addr(eth->h_dest))
developerfd40db22021-04-29 10:08:25 +0800721 return 0;
722
723 switch (ntohs(skb->protocol)) {
724 case ETH_P_IP:
725 iph = ip_hdr(skb);
726
727 /* do not accelerate non tcp/udp traffic */
728 if ((iph->protocol == IPPROTO_TCP) ||
729 (iph->protocol == IPPROTO_UDP) ||
730 (iph->protocol == IPPROTO_IPV6)) {
731 return 1;
732 }
733
734 break;
735 case ETH_P_IPV6:
736 ip6h = ipv6_hdr(skb);
737
738 if ((ip6h->nexthdr == NEXTHDR_TCP) ||
739 (ip6h->nexthdr == NEXTHDR_UDP)) {
740 return 1;
741 } else if (ip6h->nexthdr == NEXTHDR_IPIP) {
742 iph = skb_header_pointer(skb, IPV6_HDR_LEN,
743 sizeof(_iphdr), &_iphdr);
developer4c32b7a2021-11-13 16:46:43 +0800744 if (unlikely(!iph))
745 return 0;
developerfd40db22021-04-29 10:08:25 +0800746
747 if ((iph->protocol == IPPROTO_TCP) ||
748 (iph->protocol == IPPROTO_UDP)) {
749 return 1;
750 }
751
752 }
753
754 break;
755 case ETH_P_8021Q:
756 return 1;
757 }
758
759 return 0;
760}
761
/* Netfilter IPv6 pre-routing hook: tags HNAT metadata on eligible packets
 * and dispatches the ext<->GE pingpong and MAP-E fast paths.  Returns
 * NF_STOLEN when a fast path consumed the skb, NF_ACCEPT to continue the
 * normal stack, NF_DROP on fast-path failure.
 */
static unsigned int
mtk_hnat_ipv6_nf_pre_routing(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	if (!skb)
		goto drop;

	/* Non-acceleratable traffic: set ALG=1 so the PPE skips it. */
	if (!is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
	if (do_ext2ge_fast_try(state->in, skb)) {
		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		return NF_ACCEPT;
	}

	/* packets form ge -> external device
	 * For standalone wan interface
	 */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}


#if !(defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3))
	/* MapE need remove ipv6 header and pingpong. */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}

	if (is_from_mape(skb))
		clr_from_extge(skb);
#endif
	if (xlat_toggle)
		mtk_464xlat_pre_process(skb);

	return NF_ACCEPT;
drop:
	if (skb)
		printk_ratelimited(KERN_WARNING
				   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
				   "sport=0x%x, reason=0x%x, alg=0x%x)\n",
				   __func__, state->in->name, skb_hnat_iface(skb),
				   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
				   skb_hnat_sport(skb), skb_hnat_reason(skb),
				   skb_hnat_alg(skb));

	return NF_DROP;
}
823
/* Netfilter IPv4 pre-routing hook: IPv4 counterpart of
 * mtk_hnat_ipv6_nf_pre_routing() (no MAP-E handling here).  Returns
 * NF_STOLEN when a fast path consumed the skb, NF_ACCEPT to continue the
 * normal stack, NF_DROP on fast-path failure.
 */
static unsigned int
mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	if (!skb)
		goto drop;

	/* Non-acceleratable traffic: set ALG=1 so the PPE skips it. */
	if (!is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
	if (do_ext2ge_fast_try(state->in, skb)) {
		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		return NF_ACCEPT;
	}

	/* packets form ge -> external device
	 * For standalone wan interface
	 */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}
	if (xlat_toggle)
		mtk_464xlat_pre_process(skb);

	return NF_ACCEPT;
drop:
	if (skb)
		printk_ratelimited(KERN_WARNING
				   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
				   "sport=0x%x, reason=0x%x, alg=0x%x)\n",
				   __func__, state->in->name, skb_hnat_iface(skb),
				   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
				   skb_hnat_sport(skb), skb_hnat_reason(skb),
				   skb_hnat_alg(skb));

	return NF_DROP;
}
871
/* Bridge local-in hook: handles HQoS in-band tags, tags HNAT metadata and
 * runs the ext<->GE pingpong / MAP-E fast paths for bridged traffic.
 * Returns NF_STOLEN when consumed, NF_ACCEPT otherwise, NF_DROP on
 * fast-path failure.
 */
static unsigned int
mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct vlan_ethhdr *veth;

	if (!skb)
		goto drop;

	/* HQoS mode encodes the FOE entry index in a magic VLAN tag;
	 * recover it and force the bound-to-CPU reason.
	 */
	if (IS_HQOS_MODE && hnat_priv->data->whnat) {
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);

		if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
			skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
			skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
		}
	}

	if (!HAS_HQOS_MAGIC_TAG(skb) && !is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* Debug aid: count CPU reasons and dump packets matching the
	 * configured reason at high verbosity.
	 */
	if (unlikely(debug_level >= 7)) {
		hnat_cpu_reason_cnt(skb);
		if (skb_hnat_reason(skb) == dbg_cpu_reason)
			foe_dump_pkt(skb);
	}

	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
	if ((skb_hnat_iface(skb) == FOE_MAGIC_EXT) && !is_from_extge(skb) &&
	    !is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
		if (!hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);

		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		return NF_ACCEPT;
	}

	if (hnat_priv->data->whnat) {
		if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
			clr_from_extge(skb);

		/* packets from external devices -> xxx ,step 2, learning stage */
		if (do_ext2ge_fast_learn(state->in, skb) && (!qos_toggle ||
		    (qos_toggle && eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG))) {
			if (!do_hnat_ext_to_ge2(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}

		/* packets form ge -> external device */
		if (do_ge2ext_fast(state->in, skb)) {
			if (!do_hnat_ge_to_ext(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}
	}

#if !(defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3))
	/* MapE need remove ipv6 header and pingpong. (bridge mode) */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}
#endif
	return NF_ACCEPT;
drop:
	if (skb)
		printk_ratelimited(KERN_WARNING
				   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
				   "sport=0x%x, reason=0x%x, alg=0x%x)\n",
				   __func__, state->in->name, skb_hnat_iface(skb),
				   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
				   skb_hnat_sport(skb), skb_hnat_reason(skb),
				   skb_hnat_alg(skb));

	return NF_DROP;
}
958
/* Resolve the L2 next hop for an IPv6 flow and rewrite the Ethernet
 * source/destination addresses accordingly (PPPoE paths use the addresses
 * cached in @hw_path instead).  Returns 0 on success, (unsigned)-1 when
 * no neighbour is found or its MAC is invalid (all zero).
 */
static unsigned int hnat_ipv6_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	const struct in6_addr *ipv6_nexthop;
	struct neighbour *neigh = NULL;
	struct dst_entry *dst = skb_dst(skb);
	struct ethhdr *eth;

	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	/* Neighbour table lookup must run under RCU-BH. */
	rcu_read_lock_bh();
	ipv6_nexthop =
		rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, ipv6_nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI6)\n", __func__,
			   &ipv6_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get all zero ethernet address ? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP) {
		/*copy ether type for DS-Lite and MapE */
		/* The Ethernet header sits ETH_HLEN before skb->data on
		 * this tunnel path, not at skb_mac_header().
		 */
		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
		eth->h_proto = skb->protocol;
	} else {
		eth = eth_hdr(skb);
	}

	ether_addr_copy(eth->h_dest, neigh->ha);
	ether_addr_copy(eth->h_source, out->dev_addr);

	rcu_read_unlock_bh();

	return 0;
}
1006
1007static unsigned int hnat_ipv4_get_nexthop(struct sk_buff *skb,
1008 const struct net_device *out,
1009 struct flow_offload_hw_path *hw_path)
1010{
1011 u32 nexthop;
1012 struct neighbour *neigh;
1013 struct dst_entry *dst = skb_dst(skb);
1014 struct rtable *rt = (struct rtable *)dst;
1015 struct net_device *dev = (__force struct net_device *)out;
1016
1017 if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
1018 memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
1019 memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
1020 return 0;
1021 }
1022
1023 rcu_read_lock_bh();
1024 nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
1025 neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
1026 if (unlikely(!neigh)) {
1027 dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI4)\n", __func__,
1028 &ip_hdr(skb)->daddr);
1029 rcu_read_unlock_bh();
1030 return -1;
1031 }
1032
1033 /* why do we get all zero ethernet address ? */
1034 if (!is_valid_ether_addr(neigh->ha)) {
1035 rcu_read_unlock_bh();
1036 return -1;
1037 }
1038
1039 memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
1040 memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);
1041
1042 rcu_read_unlock_bh();
1043
1044 return 0;
1045}
1046
/* Compute the IPv4 header-checksum "base" the PPE hardware uses when it
 * rewrites the tot_len and id fields of an encapsulated header (6RD).
 *
 * The base is the 16-bit one's-complement sum of the bitwise inverses of
 * the current checksum, total-length and identification fields, with the
 * carries folded back in.  At most two carries can accumulate from three
 * 16-bit addends, hence the 0x7 mask on the carry word.
 *
 * @iph: IPv4 header whose fields are read (network byte order)
 *
 * Returns the checksum base in host byte order.
 */
static u16 ppe_get_chkbase(struct iphdr *iph)
{
	u32 sum;

	/* invert each field as a 16-bit quantity, then accumulate in 32 bits */
	sum = (u16)~ntohs(iph->check);
	sum += (u16)~ntohs(iph->tot_len);
	sum += (u16)~ntohs(iph->id);

	/* fold the carry bits back into the low 16 bits, twice */
	sum = ((sum >> 16) & 0x7) + (sum & 0xFFFF);
	sum = ((sum >> 16) & 0x7) + (sum & 0xFFFF);

	return (u16)(sum & 0xFFFF);
}
1066
/* Copy the ethernet MAC pair and PPPoE session id of @eth/@hw_path into
 * the PPE FOE entry, using the struct view that matches the entry's
 * packet type (IPv4 views and IPv6 views place these fields at different
 * offsets inside the foe_entry union).
 *
 * The hardware stores each MAC as a byte-swapped hi(4-byte)/lo(2-byte)
 * pair, hence the swab32/swab16 on raw loads from the header.
 * NOTE(review): the u32/u16 casts assume eth header fields are readable
 * with possibly-unaligned loads — fine on the ARM targets this driver
 * supports, but worth confirming for new platforms.
 *
 * @entry is taken and returned by value; the caller assigns the result
 * back (see skb_to_hnat_info()).  Entries whose pkt_type matches no case
 * are returned unmodified.
 */
struct foe_entry ppe_fill_L2_info(struct ethhdr *eth, struct foe_entry entry,
				  struct flow_offload_hw_path *hw_path)
{
	switch ((int)entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		/* IPv4 view of the union */
		entry.ipv4_hnapt.dmac_hi = swab32(*((u32 *)eth->h_dest));
		entry.ipv4_hnapt.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
		entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
		entry.ipv4_hnapt.pppoe_id = hw_path->pppoe_sid;
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
	case IPV6_HNAPT:
	case IPV6_HNAT:
		/* IPv6/tunnel view of the union */
		entry.ipv6_5t_route.dmac_hi = swab32(*((u32 *)eth->h_dest));
		entry.ipv6_5t_route.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
		entry.ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv6_5t_route.smac_lo =
			swab16(*((u16 *)&eth->h_source[4]));
		entry.ipv6_5t_route.pppoe_id = hw_path->pppoe_sid;
		break;
	}
	return entry;
}
1096
/* Populate the info-block (bfib1/iblk2) control bits of a FOE entry:
 * PPPoE insertion flag, VLAN layer count, cache-allowed bit, the
 * hardware timestamp snapshot, multicast flag and port aging mask.
 *
 * Field widths are version dependent: on HNAT v2/v3 the FE timestamp
 * register (@fe_base + 0x0010) is truncated to 8 bits and the port-aging
 * group mask is 0xf; older versions use 15-bit timestamps and 0x3f.
 *
 * @entry is taken and returned by value; the caller assigns it back.
 */
struct foe_entry ppe_fill_info_blk(struct ethhdr *eth, struct foe_entry entry,
				   struct flow_offload_hw_path *hw_path)
{
	/* psn = insert PPPoE header; vpm set whenever any VLAN layer exists */
	entry.bfib1.psn = (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) ? 1 : 0;
	entry.bfib1.vlan_layer += (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN) ? 1 : 0;
	entry.bfib1.vpm = (entry.bfib1.vlan_layer) ? 1 : 0;
	entry.bfib1.cah = 1;
	entry.bfib1.time_stamp = (hnat_priv->data->version == MTK_HNAT_V2 ||
				  hnat_priv->data->version == MTK_HNAT_V3) ?
		readl(hnat_priv->fe_base + 0x0010) & (0xFF) :
		readl(hnat_priv->fe_base + 0x0010) & (0x7FFF);

	switch ((int)entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		if (hnat_priv->data->mcast &&
		    is_multicast_ether_addr(&eth->h_dest[0])) {
			entry.ipv4_hnapt.iblk2.mcast = 1;
			if (hnat_priv->data->version == MTK_HNAT_V1_3) {
				/* v1.3 multicast entries age by m_timestamp */
				entry.bfib1.sta = 1;
				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
			}
		} else {
			entry.ipv4_hnapt.iblk2.mcast = 0;
		}

		entry.ipv4_hnapt.iblk2.port_ag =
			(hnat_priv->data->version == MTK_HNAT_V2 ||
			 hnat_priv->data->version == MTK_HNAT_V3) ? 0xf : 0x3f;
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
	case IPV6_HNAPT:
	case IPV6_HNAT:
		if (hnat_priv->data->mcast &&
		    is_multicast_ether_addr(&eth->h_dest[0])) {
			entry.ipv6_5t_route.iblk2.mcast = 1;
			if (hnat_priv->data->version == MTK_HNAT_V1_3) {
				entry.bfib1.sta = 1;
				/* NOTE(review): writes the ipv4_hnapt view of
				 * the union in the IPv6 branch — presumably
				 * m_timestamp aliases the same word in every
				 * view; confirm against the foe_entry layout.
				 */
				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
			}
		} else {
			entry.ipv6_5t_route.iblk2.mcast = 0;
		}

		entry.ipv6_5t_route.iblk2.port_ag =
			(hnat_priv->data->version == MTK_HNAT_V2 ||
			 hnat_priv->data->version == MTK_HNAT_V3) ? 0xf : 0x3f;
		break;
	}
	return entry;
}
1152
/* Build a complete PPE FOE entry from a to-be-bound @skb and write it
 * over the hardware table slot @foe.
 *
 * Works on a stack copy of the entry, filling it in three stages:
 *   1. L3/L4 stage — keyed on eth->h_proto and the FOE pkt_type: plain
 *      IPv4 HNAT/HNAPT, DS-Lite / MAP-E (both directions), IPv6 3T/5T
 *      routes, 6RD, and (NETSYS v3) IPv6 source/dest NAT.
 *   2. L2 + info block via ppe_fill_L2_info()/ppe_fill_info_blk().
 *   3. Destination stage — picks the egress GMAC/PDMA port and QoS
 *      queue from the egress device class (LAN/LAN2/WAN/EXT).
 *
 * For non-WiFi flows (!IS_WHNAT(dev)) the entry is moved to BIND state
 * here; WiFi-offload flows stay unbound until mtk_sw_nat_hook_tx()
 * finishes them.
 *
 * Returns 0 on success or when the packet is deliberately not bound
 * (e.g. multicast with PPE mcast disabled, unknown egress device);
 * -1 (non-zero through the unsigned return type) on unsupported
 * protocols or truncated headers.
 */
static unsigned int skb_to_hnat_info(struct sk_buff *skb,
				     const struct net_device *dev,
				     struct foe_entry *foe,
				     struct flow_offload_hw_path *hw_path)
{
	struct foe_entry entry = { 0 };
	int whnat = IS_WHNAT(dev);	/* WiFi warp-HNAT egress? */
	struct ethhdr *eth;
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct tcpudphdr _ports;
	const struct tcpudphdr *pptr;
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	u32 gmac = NR_DISCARD;
	int udp = 0;
	u32 qid = 0;
	u32 port_id = 0;
	int mape = 0;		/* set for the MAP-E LAN->WAN special case */

	ct = nf_ct_get(skb, &ctinfo);

	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP)
		/* point to ethernet header for DS-Lite and MapE */
		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
	else
		eth = eth_hdr(skb);

	/*do not bind multicast if PPE mcast not enable*/
	if (!hnat_priv->data->mcast && is_multicast_ether_addr(eth->h_dest))
		return 0;

	entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
	entry.bfib1.state = foe->udib1.state;

#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
	entry.bfib1.sp = foe->udib1.sp;
#endif

	/* ---- stage 1: L3/L4 tuple, keyed on the outer ethertype ---- */
	switch (ntohs(eth->h_proto)) {
	case ETH_P_IP:
		iph = ip_hdr(skb);
		switch (iph->protocol) {
		case IPPROTO_UDP:
			udp = 1;
			/* fallthrough */
		case IPPROTO_TCP:
			entry.ipv4_hnapt.etype = htons(ETH_P_IP);

			/* DS-Lite WAN->LAN */
			if (entry.ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE ||
			    entry.ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E) {
				/* inner 4-tuple comes from the learned entry */
				entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
				entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
				entry.ipv4_dslite.sport =
					foe->ipv4_dslite.sport;
				entry.ipv4_dslite.dport =
					foe->ipv4_dslite.dport;

#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
				if (entry.bfib1.pkt_type == IPV4_MAP_E) {
					/* MAP-E also records the translated
					 * (post-NAT) inner 4-tuple
					 */
					pptr = skb_header_pointer(skb,
								  iph->ihl * 4,
								  sizeof(_ports),
								  &_ports);
					if (unlikely(!pptr))
						return -1;

					entry.ipv4_mape.new_sip =
						ntohl(iph->saddr);
					entry.ipv4_mape.new_dip =
						ntohl(iph->daddr);
					entry.ipv4_mape.new_sport =
						ntohs(pptr->src);
					entry.ipv4_mape.new_dport =
						ntohs(pptr->dst);
				}
#endif

				/* IPv6 tunnel endpoints from the learned entry */
				entry.ipv4_dslite.tunnel_sipv6_0 =
					foe->ipv4_dslite.tunnel_sipv6_0;
				entry.ipv4_dslite.tunnel_sipv6_1 =
					foe->ipv4_dslite.tunnel_sipv6_1;
				entry.ipv4_dslite.tunnel_sipv6_2 =
					foe->ipv4_dslite.tunnel_sipv6_2;
				entry.ipv4_dslite.tunnel_sipv6_3 =
					foe->ipv4_dslite.tunnel_sipv6_3;

				entry.ipv4_dslite.tunnel_dipv6_0 =
					foe->ipv4_dslite.tunnel_dipv6_0;
				entry.ipv4_dslite.tunnel_dipv6_1 =
					foe->ipv4_dslite.tunnel_dipv6_1;
				entry.ipv4_dslite.tunnel_dipv6_2 =
					foe->ipv4_dslite.tunnel_dipv6_2;
				entry.ipv4_dslite.tunnel_dipv6_3 =
					foe->ipv4_dslite.tunnel_dipv6_3;

				entry.ipv4_dslite.bfib1.rmt = 1; /* remove tunnel hdr */
				entry.ipv4_dslite.iblk2.dscp = iph->tos;
				entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_dslite.iblk2.mibf = 1;

			} else {
				/* plain IPv4 HNAT/HNAPT */
				entry.ipv4_hnapt.iblk2.dscp = iph->tos;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_hnapt.iblk2.mibf = 1;

				entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;

				if (skb_vlan_tag_present(skb)) {
					entry.bfib1.vlan_layer += 1;

					/* outer path VLAN wins slot 1 */
					if (entry.ipv4_hnapt.vlan1)
						entry.ipv4_hnapt.vlan2 =
							skb->vlan_tci;
					else
						entry.ipv4_hnapt.vlan1 =
							skb->vlan_tci;
				}

				/* pre-NAT tuple from the learned entry,
				 * post-NAT addresses from this (NATed) skb
				 */
				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;

				entry.ipv4_hnapt.new_sip = ntohl(iph->saddr);
				entry.ipv4_hnapt.new_dip = ntohl(iph->daddr);
			}

			entry.ipv4_hnapt.bfib1.udp = udp;
			if (IS_IPV4_HNAPT(foe)) {
				pptr = skb_header_pointer(skb, iph->ihl * 4,
							  sizeof(_ports),
							  &_ports);
				if (unlikely(!pptr))
					return -1;

				entry.ipv4_hnapt.new_sport = ntohs(pptr->src);
				entry.ipv4_hnapt.new_dport = ntohs(pptr->dst);
			}

			break;

		default:
			return -1;
		}
		trace_printk(
			"[%s]skb->head=%p, skb->data=%p,ip_hdr=%p, skb->len=%d, skb->data_len=%d\n",
			__func__, skb->head, skb->data, iph, skb->len,
			skb->data_len);
		break;

	case ETH_P_IPV6:
		ip6h = ipv6_hdr(skb);
		switch (ip6h->nexthdr) {
		case NEXTHDR_UDP:
			udp = 1;
			/* fallthrough */
		case NEXTHDR_TCP: /* IPv6-5T or IPv6-3T */
			entry.ipv6_5t_route.etype = htons(ETH_P_IPV6);

			entry.ipv6_5t_route.vlan1 = hw_path->vlan_id;

			if (skb_vlan_tag_present(skb)) {
				entry.bfib1.vlan_layer += 1;

				if (entry.ipv6_5t_route.vlan1)
					entry.ipv6_5t_route.vlan2 =
						skb->vlan_tci;
				else
					entry.ipv6_5t_route.vlan1 =
						skb->vlan_tci;
			}

			if (hnat_priv->data->per_flow_accounting)
				entry.ipv6_5t_route.iblk2.mibf = 1;
			entry.ipv6_5t_route.bfib1.udp = udp;

			if (IS_IPV6_6RD(foe)) {
				/* 6RD WAN->LAN: strip the IPv4 tunnel */
				entry.ipv6_5t_route.bfib1.rmt = 1;
				entry.ipv6_6rd.tunnel_sipv4 =
					foe->ipv6_6rd.tunnel_sipv4;
				entry.ipv6_6rd.tunnel_dipv4 =
					foe->ipv6_6rd.tunnel_dipv4;
			}

			entry.ipv6_3t_route.ipv6_sip0 =
				foe->ipv6_3t_route.ipv6_sip0;
			entry.ipv6_3t_route.ipv6_sip1 =
				foe->ipv6_3t_route.ipv6_sip1;
			entry.ipv6_3t_route.ipv6_sip2 =
				foe->ipv6_3t_route.ipv6_sip2;
			entry.ipv6_3t_route.ipv6_sip3 =
				foe->ipv6_3t_route.ipv6_sip3;

			entry.ipv6_3t_route.ipv6_dip0 =
				foe->ipv6_3t_route.ipv6_dip0;
			entry.ipv6_3t_route.ipv6_dip1 =
				foe->ipv6_3t_route.ipv6_dip1;
			entry.ipv6_3t_route.ipv6_dip2 =
				foe->ipv6_3t_route.ipv6_dip2;
			entry.ipv6_3t_route.ipv6_dip3 =
				foe->ipv6_3t_route.ipv6_dip3;

			if (IS_IPV6_3T_ROUTE(foe)) {
				entry.ipv6_3t_route.prot =
					foe->ipv6_3t_route.prot;
				entry.ipv6_3t_route.hph =
					foe->ipv6_3t_route.hph;
			}

			if (IS_IPV6_5T_ROUTE(foe) || IS_IPV6_6RD(foe)) {
				entry.ipv6_5t_route.sport =
					foe->ipv6_5t_route.sport;
				entry.ipv6_5t_route.dport =
					foe->ipv6_5t_route.dport;
			}

			/* conntrack says this IPv6 flow is NATed */
			if (ct && (ct->status & IPS_SRC_NAT)) {
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
				/* only NETSYS v3 hardware can do IPv6 NAT */
				entry.bfib1.pkt_type = IPV6_HNAPT;

				/* egress towards WAN => post-SNAT source,
				 * otherwise post-DNAT destination
				 */
				if (IS_WAN(dev) || IS_DSA_WAN(dev)) {
					entry.ipv6_hnapt.eg_ipv6_dir =
						IPV6_SNAT;
					entry.ipv6_hnapt.new_ipv6_ip0 =
						ntohl(ip6h->saddr.s6_addr32[0]);
					entry.ipv6_hnapt.new_ipv6_ip1 =
						ntohl(ip6h->saddr.s6_addr32[1]);
					entry.ipv6_hnapt.new_ipv6_ip2 =
						ntohl(ip6h->saddr.s6_addr32[2]);
					entry.ipv6_hnapt.new_ipv6_ip3 =
						ntohl(ip6h->saddr.s6_addr32[3]);
				} else {
					entry.ipv6_hnapt.eg_ipv6_dir =
						IPV6_DNAT;
					entry.ipv6_hnapt.new_ipv6_ip0 =
						ntohl(ip6h->daddr.s6_addr32[0]);
					entry.ipv6_hnapt.new_ipv6_ip1 =
						ntohl(ip6h->daddr.s6_addr32[1]);
					entry.ipv6_hnapt.new_ipv6_ip2 =
						ntohl(ip6h->daddr.s6_addr32[2]);
					entry.ipv6_hnapt.new_ipv6_ip3 =
						ntohl(ip6h->daddr.s6_addr32[3]);
				}

				pptr = skb_header_pointer(skb, IPV6_HDR_LEN,
							  sizeof(_ports),
							  &_ports);
				if (unlikely(!pptr))
					return -1;

				entry.ipv6_hnapt.new_sport = ntohs(pptr->src);
				entry.ipv6_hnapt.new_dport = ntohs(pptr->dst);
#else
				return -1;
#endif
			}

			/* traffic class spans priority and flow_lbl[0] */
			entry.ipv6_5t_route.iblk2.dscp =
				(ip6h->priority << 4 |
				 (ip6h->flow_lbl[0] >> 4));
			break;

		case NEXTHDR_IPIP:
			if ((!mape_toggle &&
			     entry.bfib1.pkt_type == IPV4_DSLITE) ||
			    (mape_toggle &&
			     entry.bfib1.pkt_type == IPV4_MAP_E)) {
				/* DS-Lite LAN->WAN */
				entry.ipv4_dslite.bfib1.udp =
					foe->ipv4_dslite.bfib1.udp;
				entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
				entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
				entry.ipv4_dslite.sport =
					foe->ipv4_dslite.sport;
				entry.ipv4_dslite.dport =
					foe->ipv4_dslite.dport;

				/* tunnel endpoints from this packet's
				 * outer IPv6 header
				 */
				entry.ipv4_dslite.tunnel_sipv6_0 =
					ntohl(ip6h->saddr.s6_addr32[0]);
				entry.ipv4_dslite.tunnel_sipv6_1 =
					ntohl(ip6h->saddr.s6_addr32[1]);
				entry.ipv4_dslite.tunnel_sipv6_2 =
					ntohl(ip6h->saddr.s6_addr32[2]);
				entry.ipv4_dslite.tunnel_sipv6_3 =
					ntohl(ip6h->saddr.s6_addr32[3]);

				entry.ipv4_dslite.tunnel_dipv6_0 =
					ntohl(ip6h->daddr.s6_addr32[0]);
				entry.ipv4_dslite.tunnel_dipv6_1 =
					ntohl(ip6h->daddr.s6_addr32[1]);
				entry.ipv4_dslite.tunnel_dipv6_2 =
					ntohl(ip6h->daddr.s6_addr32[2]);
				entry.ipv4_dslite.tunnel_dipv6_3 =
					ntohl(ip6h->daddr.s6_addr32[3]);

				ppe_fill_flow_lbl(&entry, ip6h);

				entry.ipv4_dslite.priority = ip6h->priority;
				entry.ipv4_dslite.hop_limit = ip6h->hop_limit;
				entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_dslite.iblk2.mibf = 1;
				/* Map-E LAN->WAN record inner IPv4 header info. */
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
				if (mape_toggle) {
					entry.ipv4_dslite.iblk2.dscp = foe->ipv4_dslite.iblk2.dscp;
					entry.ipv4_mape.new_sip = foe->ipv4_mape.new_sip;
					entry.ipv4_mape.new_dip = foe->ipv4_mape.new_dip;
					entry.ipv4_mape.new_sport = foe->ipv4_mape.new_sport;
					entry.ipv4_mape.new_dport = foe->ipv4_mape.new_dport;
				}
#endif
			} else if (mape_toggle &&
				   entry.bfib1.pkt_type == IPV4_HNAPT) {
				/* MapE LAN -> WAN: hardware only NATs the
				 * inner IPv4; the IPv6 encap is done in
				 * software, so the flow is punted to the CPU
				 * (gmac = NR_PDMA_PORT below) with the outer
				 * header cached in mape_l2w_v6h
				 */
				mape = 1;
				entry.ipv4_hnapt.iblk2.dscp =
					foe->ipv4_hnapt.iblk2.dscp;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_hnapt.iblk2.mibf = 1;

				if (IS_GMAC1_MODE)
					entry.ipv4_hnapt.vlan1 = 1;
				else
					entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;

				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;

				entry.ipv4_hnapt.new_sip =
					foe->ipv4_hnapt.new_sip;
				entry.ipv4_hnapt.new_dip =
					foe->ipv4_hnapt.new_dip;
				entry.ipv4_hnapt.etype = htons(ETH_P_IP);

				if (IS_HQOS_MODE) {
					/* queue id width: 7 bits on v2/v3,
					 * 4 bits earlier
					 */
					entry.ipv4_hnapt.iblk2.qid =
						(hnat_priv->data->version ==
						 MTK_HNAT_V2 ||
						 hnat_priv->data->version ==
						 MTK_HNAT_V3) ?
						skb->mark & 0x7f : skb->mark & 0xf;
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
					if ((IS_HQOS_UL_MODE && IS_WAN(dev)) ||
					    (IS_HQOS_DL_MODE &&
					     IS_LAN_GRP(dev)) ||
					    (IS_PPPQ_MODE &&
					     IS_PPPQ_PATH(dev, skb)))
						entry.ipv4_hnapt.tport_id = 1;
					else
						entry.ipv4_hnapt.tport_id = 0;
#else
					entry.ipv4_hnapt.iblk2.fqos = 1;
#endif
				}

				entry.ipv4_hnapt.bfib1.udp =
					foe->ipv4_hnapt.bfib1.udp;

				entry.ipv4_hnapt.new_sport =
					foe->ipv4_hnapt.new_sport;
				entry.ipv4_hnapt.new_dport =
					foe->ipv4_hnapt.new_dport;
				mape_l2w_v6h = *ip6h;
			}
			break;

		default:
			return -1;
		}

		trace_printk(
			"[%s]skb->head=%p, skb->data=%p,ipv6_hdr=%p, skb->len=%d, skb->data_len=%d\n",
			__func__, skb->head, skb->data, ip6h, skb->len,
			skb->data_len);
		break;

	default:
		/* not IP/IPv6 by ethertype: only the 6RD LAN->WAN case,
		 * where the FOE entry was learned as IPV6_6RD, is bindable
		 */
		iph = ip_hdr(skb);
		switch (entry.bfib1.pkt_type) {
		case IPV6_6RD: /* 6RD LAN->WAN */
			entry.ipv6_6rd.ipv6_sip0 = foe->ipv6_6rd.ipv6_sip0;
			entry.ipv6_6rd.ipv6_sip1 = foe->ipv6_6rd.ipv6_sip1;
			entry.ipv6_6rd.ipv6_sip2 = foe->ipv6_6rd.ipv6_sip2;
			entry.ipv6_6rd.ipv6_sip3 = foe->ipv6_6rd.ipv6_sip3;

			entry.ipv6_6rd.ipv6_dip0 = foe->ipv6_6rd.ipv6_dip0;
			entry.ipv6_6rd.ipv6_dip1 = foe->ipv6_6rd.ipv6_dip1;
			entry.ipv6_6rd.ipv6_dip2 = foe->ipv6_6rd.ipv6_dip2;
			entry.ipv6_6rd.ipv6_dip3 = foe->ipv6_6rd.ipv6_dip3;

			entry.ipv6_6rd.sport = foe->ipv6_6rd.sport;
			entry.ipv6_6rd.dport = foe->ipv6_6rd.dport;
			entry.ipv6_6rd.tunnel_sipv4 = ntohl(iph->saddr);
			entry.ipv6_6rd.tunnel_dipv4 = ntohl(iph->daddr);
			/* checksum base lets hardware patch the outer
			 * IPv4 header when rewriting tot_len/id
			 */
			entry.ipv6_6rd.hdr_chksum = ppe_get_chkbase(iph);
			entry.ipv6_6rd.flag = (ntohs(iph->frag_off) >> 13);
			entry.ipv6_6rd.ttl = iph->ttl;
			entry.ipv6_6rd.dscp = iph->tos;
			entry.ipv6_6rd.per_flow_6rd_id = 1;
			entry.ipv6_6rd.vlan1 = hw_path->vlan_id;
			if (hnat_priv->data->per_flow_accounting)
				entry.ipv6_6rd.iblk2.mibf = 1;
			break;

		default:
			return -1;
		}
	}

	/* ---- stage 2: L2 and info-block fields ---- */
	/* Fill Layer2 Info.*/
	entry = ppe_fill_L2_info(eth, entry, hw_path);

	/* Fill Info Blk*/
	entry = ppe_fill_info_blk(eth, entry, hw_path);

	/* ---- stage 3: egress port (gmac) selection by device class ---- */
	if (IS_LAN(dev)) {
		if (IS_DSA_LAN(dev))
			port_id = hnat_dsa_fill_stag(dev, &entry, hw_path,
						     ntohs(eth->h_proto),
						     mape);

		if (IS_BOND_MODE)
			/* spread flows across GMACs by hash-entry parity */
			gmac = ((skb_hnat_entry(skb) >> 1) % hnat_priv->gmac_num) ?
				 NR_GMAC2_PORT : NR_GMAC1_PORT;
		else
			gmac = NR_GMAC1_PORT;
	} else if (IS_LAN2(dev)) {
		gmac = NR_GMAC3_PORT;
	} else if (IS_WAN(dev)) {
		if (IS_DSA_WAN(dev))
			port_id = hnat_dsa_fill_stag(dev,&entry, hw_path,
						     ntohs(eth->h_proto),
						     mape);
		if (mape_toggle && mape == 1) {
			/* MAP-E LAN->WAN goes back to the CPU for the
			 * software IPv6 encapsulation
			 */
			gmac = NR_PDMA_PORT;
			/* Set act_dp = wan_dev */
			entry.ipv4_hnapt.act_dp = dev->ifindex;
		} else {
			gmac = (IS_GMAC1_MODE) ? NR_GMAC1_PORT : NR_GMAC2_PORT;
		}
	} else if (IS_EXT(dev) && (FROM_GE_PPD(skb) || FROM_GE_LAN_GRP(skb) ||
		   FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb) || FROM_WED(skb))) {
		/* external (e.g. WiFi) device without hardware path: tag the
		 * entry so the fast path can redirect via the CPU
		 */
		if (!hnat_priv->data->whnat && IS_GMAC1_MODE) {
			entry.bfib1.vpm = 1;
			entry.bfib1.vlan_layer = 1;

			if (FROM_GE_LAN(skb))
				entry.ipv4_hnapt.vlan1 = 1;
			else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
				entry.ipv4_hnapt.vlan1 = 2;
		}

		trace_printk("learn of lan or wan(iif=%x) --> %s(ext)\n",
			     skb_hnat_iface(skb), dev->name);
		/* To CPU then stolen by pre-routing hant hook of LAN/WAN
		 * Current setting is PDMA RX.
		 */
		gmac = NR_PDMA_PORT;
		if (IS_IPV4_GRP(foe))
			entry.ipv4_hnapt.act_dp = dev->ifindex;
		else
			entry.ipv6_5t_route.act_dp = dev->ifindex;
	} else {
		printk_ratelimited(KERN_WARNING
					"Unknown case of dp, iif=%x --> %s\n",
					skb_hnat_iface(skb), dev->name);

		return 0;
	}

	/* QoS queue selection: HQoS maps skb->mark, per-port-queue (PPPQ)
	 * maps the DSA port id, otherwise queue 0
	 */
	if (IS_HQOS_MODE || skb->mark >= MAX_PPPQ_PORT_NUM)
		qid = skb->mark & (MTK_QDMA_TX_MASK);
	else if (IS_PPPQ_MODE && IS_PPPQ_PATH(dev, skb))
		qid = port_id & MTK_QDMA_TX_MASK;
	else
		qid = 0;

	if (IS_IPV4_GRP(foe)) {
		entry.ipv4_hnapt.iblk2.dp = gmac;
		entry.ipv4_hnapt.iblk2.port_mg =
			(hnat_priv->data->version == MTK_HNAT_V1_1) ? 0x3f : 0;

		if (qos_toggle) {
			if (hnat_priv->data->version == MTK_HNAT_V2 ||
			    hnat_priv->data->version == MTK_HNAT_V3) {
				entry.ipv4_hnapt.iblk2.qid = qid & 0x7f;
			} else {
				/* qid[5:0]= port_mg[1:0]+ qid[3:0] */
				entry.ipv4_hnapt.iblk2.qid = qid & 0xf;
				if (hnat_priv->data->version != MTK_HNAT_V1_1)
					entry.ipv4_hnapt.iblk2.port_mg |=
						((qid >> 4) & 0x3);

				/* legacy HQoS: smuggle the hash index to the
				 * QDMA scheduler via a magic ethertype + VLAN
				 */
				if (((IS_EXT(dev) && (FROM_GE_LAN_GRP(skb) ||
				      FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) ||
				     ((mape_toggle && mape == 1) && !FROM_EXT(skb))) &&
				    (!whnat)) {
					entry.ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
					entry.ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
					entry.bfib1.vlan_layer = 1;
				}
			}

			if (FROM_EXT(skb) || skb_hnat_sport(skb) == NR_QDMA_PORT ||
			    (IS_PPPQ_MODE && !IS_DSA_LAN(dev) && !IS_DSA_WAN(dev)))
				entry.ipv4_hnapt.iblk2.fqos = 0;
			else
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
				if ((IS_HQOS_UL_MODE && IS_WAN(dev)) ||
				    (IS_HQOS_DL_MODE && IS_LAN_GRP(dev)) ||
				    (IS_PPPQ_MODE &&
				     IS_PPPQ_PATH(dev, skb)))
					entry.ipv4_hnapt.tport_id = 1;
				else
					entry.ipv4_hnapt.tport_id = 0;
#else
				entry.ipv4_hnapt.iblk2.fqos =
					(!IS_PPPQ_MODE ||
					 (IS_PPPQ_MODE &&
					  IS_PPPQ_PATH(dev, skb)));
#endif
		} else {
			entry.ipv4_hnapt.iblk2.fqos = 0;
		}
	} else {
		/* same dp/qid/fqos programming through the IPv6 view */
		entry.ipv6_5t_route.iblk2.dp = gmac;
		entry.ipv6_5t_route.iblk2.port_mg =
			(hnat_priv->data->version == MTK_HNAT_V1_1) ? 0x3f : 0;

		if (qos_toggle) {
			if (hnat_priv->data->version == MTK_HNAT_V2 ||
			    hnat_priv->data->version == MTK_HNAT_V3) {
				entry.ipv6_5t_route.iblk2.qid = qid & 0x7f;
			} else {
				/* qid[5:0]= port_mg[1:0]+ qid[3:0] */
				entry.ipv6_5t_route.iblk2.qid = qid & 0xf;
				if (hnat_priv->data->version != MTK_HNAT_V1_1)
					entry.ipv6_5t_route.iblk2.port_mg |=
						((qid >> 4) & 0x3);

				if (IS_EXT(dev) && (FROM_GE_LAN_GRP(skb) ||
				    FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) &&
				    (!whnat)) {
					entry.ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
					entry.ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
					entry.bfib1.vlan_layer = 1;
				}
			}

			if (FROM_EXT(skb) ||
			    (IS_PPPQ_MODE && !IS_DSA_LAN(dev) && !IS_DSA_WAN(dev)))
				entry.ipv6_5t_route.iblk2.fqos = 0;
			else
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
				if ((IS_HQOS_UL_MODE && IS_WAN(dev)) ||
				    (IS_HQOS_DL_MODE && IS_LAN_GRP(dev)) ||
				    (IS_PPPQ_MODE &&
				     IS_PPPQ_PATH(dev, skb)))
					entry.ipv6_5t_route.tport_id = 1;
				else
					entry.ipv6_5t_route.tport_id = 0;
#else
				entry.ipv6_5t_route.iblk2.fqos =
					(!IS_PPPQ_MODE ||
					 (IS_PPPQ_MODE &&
					  IS_PPPQ_PATH(dev, skb)));
#endif
		} else {
			entry.ipv6_5t_route.iblk2.fqos = 0;
		}
	}

	/* The INFO2.port_mg and 2nd VLAN ID fields of PPE entry are redefined
	 * by Wi-Fi whnat engine. These data and INFO2.dp will be updated and
	 * the entry is set to BIND state in mtk_sw_nat_hook_tx().
	 */
	if (!whnat) {
		entry.bfib1.ttl = 1;
		entry.bfib1.state = BIND;
	}

	/* make all field stores visible before the entry hits the table */
	wmb();
	memcpy(foe, &entry, sizeof(entry));
	/*reset statistic for this entry*/
	if (hnat_priv->data->per_flow_accounting &&
	    skb_hnat_entry(skb) < hnat_priv->foe_etry_num &&
	    skb_hnat_ppe(skb) < CFG_PPE_NUM)
		memset(&hnat_priv->acct[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
		       0, sizeof(struct mib_entry));

	skb_hnat_filled(skb) = HNAT_INFO_FILLED;

	return 0;
}
1753
/* TX-side bind hook for Wi-Fi warp/HNAT offload.
 *
 * Called on the transmit path once the WLAN driver has resolved the final
 * egress (WDMA port).  Completes the PPE FOE entry that the RX path left in
 * UNBIND state and flips it to BIND, so subsequent packets of the flow are
 * forwarded in hardware.
 *
 * @skb:     packet whose headroom carries the HNAT descriptor (FOE info).
 * @gmac_no: destination port id; only WDMA0/WDMA1/WHNAT ports are bound here.
 *
 * Returns NF_ACCEPT in every case: this hook never consumes the packet, it
 * only opportunistically programs the PPE.
 */
int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
{
	struct foe_entry *entry;
	struct ethhdr *eth;
	struct hnat_bind_info_blk bfib1_tx;

	/* Bail out unless the RX path fully populated the FOE blob. */
	if (skb_hnat_alg(skb) || !is_hnat_info_filled(skb) ||
	    !is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb))
		return NF_ACCEPT;

	trace_printk(
		"[%s]entry=%x reason=%x gmac_no=%x wdmaid=%x rxid=%x wcid=%x bssid=%x\n",
		__func__, skb_hnat_entry(skb), skb_hnat_reason(skb), gmac_no,
		skb_hnat_wdma_id(skb), skb_hnat_bss_id(skb),
		skb_hnat_wc_id(skb), skb_hnat_rx_id(skb));

	/* Only bind flows whose egress is a WDMA (wireless) port. */
	if ((gmac_no != NR_WDMA0_PORT) && (gmac_no != NR_WDMA1_PORT) &&
	    (gmac_no != NR_WHNAT_WDMA_PORT))
		return NF_ACCEPT;

	if (unlikely(!skb_mac_header_was_set(skb)))
		return NF_ACCEPT;

	if (!skb_hnat_is_hashed(skb))
		return NF_ACCEPT;

	/* Bounds-check the PPE/entry indices taken from skb headroom. */
	if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
	    skb_hnat_ppe(skb) >= CFG_PPE_NUM)
		return NF_ACCEPT;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
	if (entry_hnat_is_bound(entry))
		return NF_ACCEPT;

	/* Only the "unbind rate reached" reason means the PPE wants a bind. */
	if (skb_hnat_reason(skb) != HIT_UNBIND_RATE_REACH)
		return NF_ACCEPT;

	eth = eth_hdr(skb);
	/* Work on a local copy of info-block 1; it is written back last so the
	 * entry does not transition to BIND before all fields are in place.
	 */
	memcpy(&bfib1_tx, &entry->bfib1, sizeof(entry->bfib1));

	/*not bind multicast if PPE mcast not enable*/
	if (!hnat_priv->data->mcast) {
		if (is_multicast_ether_addr(eth->h_dest))
			return NF_ACCEPT;

		if (IS_IPV4_GRP(entry))
			entry->ipv4_hnapt.iblk2.mcast = 0;
		else
			entry->ipv6_5t_route.iblk2.mcast = 0;
	}

	/* Some mt_wifi virtual interfaces, such as apcli,
	 * will change the smac for specail purpose.
	 */
	switch ((int)bfib1_tx.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		/* PPE stores the MAC byte-swapped in two registers. */
		entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
		entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
	case IPV6_HNAPT:
	case IPV6_HNAT:
		entry->ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
		entry->ipv6_5t_route.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
		break;
	}

	/* Mirror the skb's VLAN tag (if any) into the entry. */
	if (skb->vlan_tci) {
		bfib1_tx.vlan_layer = 1;
		bfib1_tx.vpm = 1;
		if (IS_IPV4_GRP(entry)) {
			entry->ipv4_hnapt.etype = htons(ETH_P_8021Q);
			entry->ipv4_hnapt.vlan1 = skb->vlan_tci;
		} else if (IS_IPV6_GRP(entry)) {
			entry->ipv6_5t_route.etype = htons(ETH_P_8021Q);
			entry->ipv6_5t_route.vlan1 = skb->vlan_tci;
		}
	} else {
		bfib1_tx.vpm = 0;
		bfib1_tx.vlan_layer = 0;
	}

	/* MT7622 wifi hw_nat not support QoS */
	if (IS_IPV4_GRP(entry)) {
		entry->ipv4_hnapt.iblk2.fqos = 0;
		/* Wi-Fi offload info (WDMA/bss/wcid) is laid out differently
		 * per NETSYS generation, hence the #if ladder below.
		 */
		if ((hnat_priv->data->version == MTK_HNAT_V1_2 &&
		     gmac_no == NR_WHNAT_WDMA_PORT) ||
		    ((hnat_priv->data->version == MTK_HNAT_V2 ||
		      hnat_priv->data->version == MTK_HNAT_V3) &&
		     (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
			entry->ipv4_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
			entry->ipv4_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
			entry->ipv4_hnapt.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
			entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
			entry->ipv4_hnapt.iblk2.winfoi = 1;
			entry->ipv4_hnapt.winfo_pao.usr_info =
				skb_hnat_usr_info(skb);
			entry->ipv4_hnapt.winfo_pao.tid = skb_hnat_tid(skb);
			entry->ipv4_hnapt.winfo_pao.is_fixedrate =
				skb_hnat_is_fixedrate(skb);
			entry->ipv4_hnapt.winfo_pao.is_prior =
				skb_hnat_is_prior(skb);
			entry->ipv4_hnapt.winfo_pao.is_sp = skb_hnat_is_sp(skb);
			entry->ipv4_hnapt.winfo_pao.hf = skb_hnat_hf(skb);
			entry->ipv4_hnapt.winfo_pao.amsdu = skb_hnat_amsdu(skb);
#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
			entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
			entry->ipv4_hnapt.iblk2.winfoi = 1;
#else
			entry->ipv4_hnapt.winfo.rxid = skb_hnat_rx_id(skb);
			entry->ipv4_hnapt.iblk2w.winfoi = 1;
			entry->ipv4_hnapt.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
#endif
		} else {
			/* Non-WDMA egress: tag with the GE special VLAN. */
			if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
				bfib1_tx.vpm = 1;
				bfib1_tx.vlan_layer = 1;

				if (FROM_GE_LAN_GRP(skb))
					entry->ipv4_hnapt.vlan1 = 1;
				else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
					entry->ipv4_hnapt.vlan1 = 2;
			}

			/* HQoS: abuse VLAN1 to carry the FOE index to the QDMA. */
			if (IS_HQOS_MODE &&
			    (FROM_GE_LAN_GRP(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
				bfib1_tx.vpm = 0;
				bfib1_tx.vlan_layer = 1;
				entry->ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
				entry->ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
				entry->ipv4_hnapt.iblk2.fqos = 1;
			}
		}
		entry->ipv4_hnapt.iblk2.dp = gmac_no;
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
	} else if (IS_IPV6_HNAPT(entry) || IS_IPV6_HNAT(entry)) {
		/* NETSYS v3-only IPv6 NAT entry layouts. */
		entry->ipv6_hnapt.iblk2.dp = gmac_no;
		entry->ipv6_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
		entry->ipv6_hnapt.iblk2.winfoi = 1;

		entry->ipv6_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
		entry->ipv6_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
		entry->ipv6_hnapt.winfo_pao.usr_info = skb_hnat_usr_info(skb);
		entry->ipv6_hnapt.winfo_pao.tid = skb_hnat_tid(skb);
		entry->ipv6_hnapt.winfo_pao.is_fixedrate =
			skb_hnat_is_fixedrate(skb);
		entry->ipv6_hnapt.winfo_pao.is_prior = skb_hnat_is_prior(skb);
		entry->ipv6_hnapt.winfo_pao.is_sp = skb_hnat_is_sp(skb);
		entry->ipv6_hnapt.winfo_pao.hf = skb_hnat_hf(skb);
		entry->ipv6_hnapt.winfo_pao.amsdu = skb_hnat_amsdu(skb);
		entry->ipv6_hnapt.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
#endif
	} else {
		/* IPv6 routed entries; mirror of the IPv4 branch above. */
		entry->ipv6_5t_route.iblk2.fqos = 0;
		if ((hnat_priv->data->version == MTK_HNAT_V1_2 &&
		     gmac_no == NR_WHNAT_WDMA_PORT) ||
		    ((hnat_priv->data->version == MTK_HNAT_V2 ||
		      hnat_priv->data->version == MTK_HNAT_V3) &&
		     (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
			entry->ipv6_5t_route.winfo.bssid = skb_hnat_bss_id(skb);
			entry->ipv6_5t_route.winfo.wcid = skb_hnat_wc_id(skb);
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
			entry->ipv6_5t_route.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
			entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
			entry->ipv6_5t_route.iblk2.winfoi = 1;
			entry->ipv6_5t_route.winfo_pao.usr_info =
				skb_hnat_usr_info(skb);
			entry->ipv6_5t_route.winfo_pao.tid =
				skb_hnat_tid(skb);
			entry->ipv6_5t_route.winfo_pao.is_fixedrate =
				skb_hnat_is_fixedrate(skb);
			entry->ipv6_5t_route.winfo_pao.is_prior =
				skb_hnat_is_prior(skb);
			entry->ipv6_5t_route.winfo_pao.is_sp =
				skb_hnat_is_sp(skb);
			entry->ipv6_5t_route.winfo_pao.hf =
				skb_hnat_hf(skb);
			entry->ipv6_5t_route.winfo_pao.amsdu =
				skb_hnat_amsdu(skb);
#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
			entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
			entry->ipv6_5t_route.iblk2.winfoi = 1;
#else
			entry->ipv6_5t_route.winfo.rxid = skb_hnat_rx_id(skb);
			entry->ipv6_5t_route.iblk2w.winfoi = 1;
			entry->ipv6_5t_route.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
#endif
		} else {
			if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
				bfib1_tx.vpm = 1;
				bfib1_tx.vlan_layer = 1;

				if (FROM_GE_LAN_GRP(skb))
					entry->ipv6_5t_route.vlan1 = 1;
				else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
					entry->ipv6_5t_route.vlan1 = 2;
			}

			if (IS_HQOS_MODE &&
			    (FROM_GE_LAN_GRP(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
				bfib1_tx.vpm = 0;
				bfib1_tx.vlan_layer = 1;
				entry->ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
				entry->ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
				entry->ipv6_5t_route.iblk2.fqos = 1;
			}
		}
		entry->ipv6_5t_route.iblk2.dp = gmac_no;
	}

	bfib1_tx.ttl = 1;
	bfib1_tx.state = BIND;
	/* Ensure all entry fields are visible before flipping state to BIND. */
	wmb();
	memcpy(&entry->bfib1, &bfib1_tx, sizeof(bfib1_tx));

	return NF_ACCEPT;
}
1977
1978int mtk_sw_nat_hook_rx(struct sk_buff *skb)
1979{
developer99506e52021-06-30 22:03:02 +08001980 if (!IS_SPACE_AVAILABLE_HEAD(skb) || !FROM_WED(skb)) {
1981 skb_hnat_magic_tag(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001982 return NF_ACCEPT;
developer99506e52021-06-30 22:03:02 +08001983 }
developerfd40db22021-04-29 10:08:25 +08001984
1985 skb_hnat_alg(skb) = 0;
developerfdfe1572021-09-13 16:56:33 +08001986 skb_hnat_filled(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001987 skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
1988
1989 if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
1990 skb_hnat_sport(skb) = NR_WDMA0_PORT;
1991 else if (skb_hnat_iface(skb) == FOE_MAGIC_WED1)
1992 skb_hnat_sport(skb) = NR_WDMA1_PORT;
1993
1994 return NF_ACCEPT;
1995}
1996
1997void mtk_ppe_dev_register_hook(struct net_device *dev)
1998{
1999 int i, number = 0;
2000 struct extdev_entry *ext_entry;
2001
developerfd40db22021-04-29 10:08:25 +08002002 for (i = 1; i < MAX_IF_NUM; i++) {
2003 if (hnat_priv->wifi_hook_if[i] == dev) {
2004 pr_info("%s : %s has been registered in wifi_hook_if table[%d]\n",
2005 __func__, dev->name, i);
2006 return;
2007 }
developera7e6c242022-12-05 13:52:40 +08002008 }
2009
2010 for (i = 1; i < MAX_IF_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002011 if (!hnat_priv->wifi_hook_if[i]) {
2012 if (find_extif_from_devname(dev->name)) {
2013 extif_set_dev(dev);
2014 goto add_wifi_hook_if;
2015 }
2016
2017 number = get_ext_device_number();
2018 if (number >= MAX_EXT_DEVS) {
2019 pr_info("%s : extdev array is full. %s is not registered\n",
2020 __func__, dev->name);
2021 return;
2022 }
2023
2024 ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL);
2025 if (!ext_entry)
2026 return;
2027
developer4c32b7a2021-11-13 16:46:43 +08002028 strncpy(ext_entry->name, dev->name, IFNAMSIZ - 1);
developerfd40db22021-04-29 10:08:25 +08002029 dev_hold(dev);
2030 ext_entry->dev = dev;
2031 ext_if_add(ext_entry);
2032
2033add_wifi_hook_if:
2034 dev_hold(dev);
2035 hnat_priv->wifi_hook_if[i] = dev;
2036
2037 break;
2038 }
2039 }
2040 pr_info("%s : ineterface %s register (%d)\n", __func__, dev->name, i);
2041}
2042
2043void mtk_ppe_dev_unregister_hook(struct net_device *dev)
2044{
2045 int i;
2046
2047 for (i = 1; i < MAX_IF_NUM; i++) {
2048 if (hnat_priv->wifi_hook_if[i] == dev) {
2049 hnat_priv->wifi_hook_if[i] = NULL;
2050 dev_put(dev);
2051
2052 break;
2053 }
2054 }
2055
2056 extif_put_dev(dev);
2057 pr_info("%s : ineterface %s set null (%d)\n", __func__, dev->name, i);
2058}
2059
/* Decide whether a packet's flow is eligible for hardware acceleration.
 *
 * Returns 1 (accelerate) or 0 (leave to software).  A flow is rejected if
 * it is carried by an xfrm (IPsec) dst, or if its conntrack entry has a
 * helper attached (ALG flows need per-packet software fixups).
 */
static unsigned int mtk_hnat_accel_type(struct sk_buff *skb)
{
	struct dst_entry *dst;
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	const struct nf_conn_help *help;

	/* Do not accelerate 1st round of xfrm flow, and 2nd round of xfrm flow
	 * is from local_out which is also filtered in sanity check.
	 */
	dst = skb_dst(skb);
	if (dst && dst_xfrm(dst))
		return 0;

	/* No conntrack at all: nothing forbids acceleration. */
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return 1;

	/* rcu_read_lock()ed by nf_hook_slow */
	help = nfct_help(ct);
	if (help && rcu_dereference(help->helper))
		return 0;

	return 1;
}
2085
/* Invalidate a bound FOE entry whose cached DSCP/TOS no longer matches the
 * live packet, so it is re-learned with the new QoS marking.
 *
 * Called on keepalive packets (HIT_BIND_KEEPALIVE_DUP_OLD_HDR).  On a
 * mismatch the entry is wiped and the PPE cache flushed; the next packet
 * re-binds with the current DSCP.
 */
static void mtk_hnat_dscp_update(struct sk_buff *skb, struct foe_entry *entry)
{
	struct iphdr *iph;
	struct ethhdr *eth;
	struct ipv6hdr *ip6h;
	bool flag = false;

	eth = eth_hdr(skb);
	switch (ntohs(eth->h_proto)) {
	case ETH_P_IP:
		iph = ip_hdr(skb);
		if (IS_IPV4_GRP(entry) && entry->ipv4_hnapt.iblk2.dscp != iph->tos)
			flag = true;
		break;
	case ETH_P_IPV6:
		ip6h = ipv6_hdr(skb);
		/* IPv6 traffic class is split across priority + flow_lbl[0]. */
		if ((IS_IPV6_3T_ROUTE(entry) || IS_IPV6_5T_ROUTE(entry)) &&
		    (entry->ipv6_5t_route.iblk2.dscp !=
		     (ip6h->priority << 4 | (ip6h->flow_lbl[0] >> 4))))
			flag = true;
		break;
	default:
		return;
	}

	if (flag) {
		if (debug_level >= 2)
			pr_info("Delete entry idx=%d.\n", skb_hnat_entry(skb));
		memset(entry, 0, sizeof(struct foe_entry));
		/* Flush PPE cache so the cleared entry takes effect. */
		hnat_cache_ebl(1);
	}
}
2118
developer30a47682021-11-02 17:06:14 +08002119static void mtk_hnat_nf_update(struct sk_buff *skb)
2120{
2121 struct nf_conn *ct;
2122 struct nf_conn_acct *acct;
2123 struct nf_conn_counter *counter;
2124 enum ip_conntrack_info ctinfo;
2125 struct hnat_accounting diff;
2126
2127 ct = nf_ct_get(skb, &ctinfo);
2128 if (ct) {
2129 if (!hnat_get_count(hnat_priv, skb_hnat_ppe(skb), skb_hnat_entry(skb), &diff))
2130 return;
2131
2132 acct = nf_conn_acct_find(ct);
2133 if (acct) {
2134 counter = acct->counter;
2135 atomic64_add(diff.packets, &counter[CTINFO2DIR(ctinfo)].packets);
2136 atomic64_add(diff.bytes, &counter[CTINFO2DIR(ctinfo)].bytes);
2137 }
2138 }
developere8b7dfa2023-04-20 10:16:44 +08002139}
2140
/* Fill the L2 source/destination MACs of a 464XLAT FOE entry.
 *
 * @l2w: true for the LAN(IPv6)->WAN direction (next hop looked up in the
 *       IPv6 neighbour table), false for WAN->LAN (IPv4 neighbour table).
 *
 * Returns 0 on success, -1 when no neighbour entry exists yet (the packet
 * then stays on the software path until ARP/NDP resolves).
 */
int mtk_464xlat_fill_mac(struct foe_entry *entry, struct sk_buff *skb,
			 const struct net_device *out, bool l2w)
{
	const struct in6_addr *ipv6_nexthop;
	struct dst_entry *dst = skb_dst(skb);
	struct neighbour *neigh = NULL;
	struct rtable *rt = (struct rtable *)dst;
	u32 nexthop;

	/* Neighbour lookups below are the _noref variants: valid only while
	 * the RCU-bh read side is held.
	 */
	rcu_read_lock_bh();
	if (l2w) {
		ipv6_nexthop = rt6_nexthop((struct rt6_info *)dst,
					   &ipv6_hdr(skb)->daddr);
		neigh = __ipv6_neigh_lookup_noref(dst->dev, ipv6_nexthop);
		if (unlikely(!neigh)) {
			dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI6)\n",
				   __func__, &ipv6_hdr(skb)->daddr);
			rcu_read_unlock_bh();
			return -1;
		}
	} else {
		nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
		neigh = __ipv4_neigh_lookup_noref(dst->dev, nexthop);
		if (unlikely(!neigh)) {
			dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI4)\n",
				   __func__, &ip_hdr(skb)->daddr);
			rcu_read_unlock_bh();
			return -1;
		}
	}
	rcu_read_unlock_bh();

	/* NOTE(review): neigh->ha is read after rcu_read_unlock_bh(); looks
	 * racy against neighbour updates — confirm against upstream usage.
	 */
	entry->ipv4_dslite.dmac_hi = swab32(*((u32 *)neigh->ha));
	entry->ipv4_dslite.dmac_lo = swab16(*((u16 *)&neigh->ha[4]));
	entry->ipv4_dslite.smac_hi = swab32(*((u32 *)out->dev_addr));
	entry->ipv4_dslite.smac_lo = swab16(*((u16 *)&out->dev_addr[4]));

	return 0;
}
2180
2181int mtk_464xlat_get_hash(struct sk_buff *skb, u32 *hash, bool l2w)
2182{
2183 struct in6_addr addr_v6, prefix;
2184 struct ipv6hdr *ip6h;
2185 struct iphdr *iph;
2186 struct tcpudphdr *pptr, _ports;
2187 struct foe_entry tmp;
2188 u32 addr, protoff;
2189
2190 if (l2w) {
2191 ip6h = ipv6_hdr(skb);
2192 if (mtk_ppe_get_xlat_v4_by_v6(&ip6h->daddr, &addr))
2193 return -1;
2194 protoff = IPV6_HDR_LEN;
2195
2196 tmp.bfib1.pkt_type = IPV4_HNAPT;
2197 tmp.ipv4_hnapt.sip = ntohl(ip6h->saddr.s6_addr32[3]);
2198 tmp.ipv4_hnapt.dip = ntohl(addr);
2199 } else {
2200 iph = ip_hdr(skb);
2201 if (mtk_ppe_get_xlat_v6_by_v4(&iph->saddr, &addr_v6, &prefix))
2202 return -1;
2203
2204 protoff = iph->ihl * 4;
2205
2206 tmp.bfib1.pkt_type = IPV6_5T_ROUTE;
2207 tmp.ipv6_5t_route.ipv6_sip0 = ntohl(addr_v6.s6_addr32[0]);
2208 tmp.ipv6_5t_route.ipv6_sip1 = ntohl(addr_v6.s6_addr32[1]);
2209 tmp.ipv6_5t_route.ipv6_sip2 = ntohl(addr_v6.s6_addr32[2]);
2210 tmp.ipv6_5t_route.ipv6_sip3 = ntohl(addr_v6.s6_addr32[3]);
2211 tmp.ipv6_5t_route.ipv6_dip0 = ntohl(prefix.s6_addr32[0]);
2212 tmp.ipv6_5t_route.ipv6_dip1 = ntohl(prefix.s6_addr32[1]);
2213 tmp.ipv6_5t_route.ipv6_dip2 = ntohl(prefix.s6_addr32[2]);
2214 tmp.ipv6_5t_route.ipv6_dip3 = ntohl(iph->daddr);
2215 }
2216
2217 pptr = skb_header_pointer(skb, protoff,
2218 sizeof(_ports), &_ports);
2219 if (unlikely(!pptr))
2220 return -1;
2221
2222 if (l2w) {
2223 tmp.ipv4_hnapt.sport = ntohs(pptr->src);
2224 tmp.ipv4_hnapt.dport = ntohs(pptr->dst);
2225 } else {
2226 tmp.ipv6_5t_route.sport = ntohs(pptr->src);
2227 tmp.ipv6_5t_route.dport = ntohs(pptr->dst);
2228 }
2229
2230 *hash = hnat_get_ppe_hash(&tmp);
2231
2232 return 0;
2233}
2234
2235void mtk_464xlat_fill_info1(struct foe_entry *entry,
2236 struct sk_buff *skb, bool l2w)
2237{
2238 entry->bfib1.cah = 1;
2239 entry->bfib1.ttl = 1;
2240 entry->bfib1.state = BIND;
2241 entry->bfib1.time_stamp = readl(hnat_priv->fe_base + 0x0010) & (0xFF);
2242 if (l2w) {
2243 entry->bfib1.pkt_type = IPV4_DSLITE;
2244 entry->bfib1.udp = ipv6_hdr(skb)->nexthdr ==
2245 IPPROTO_UDP ? 1 : 0;
2246 } else {
2247 entry->bfib1.pkt_type = IPV6_6RD;
2248 entry->bfib1.udp = ip_hdr(skb)->protocol ==
2249 IPPROTO_UDP ? 1 : 0;
2250 }
2251}
2252
/* Program info-block 2 of a 464XLAT entry: enable per-flow MIB counting,
 * disable port aggregation (0xF), and steer the packet to the egress GMAC
 * for its direction (LAN->WAN exits GMAC2, WAN->LAN exits GMAC1).
 */
void mtk_464xlat_fill_info2(struct foe_entry *entry, bool l2w)
{
	entry->ipv4_dslite.iblk2.mibf = 1;
	entry->ipv4_dslite.iblk2.port_ag = 0xF;

	if (l2w)
		entry->ipv4_dslite.iblk2.dp = NR_GMAC2_PORT;
	else
		entry->ipv6_6rd.iblk2.dp = NR_GMAC1_PORT;
}
2263
/* Fill the IPv4 half of a 464XLAT entry.
 *
 * LAN->WAN (l2w): copy the already-translated v4 5-tuple from the looked-up
 * FOE entry.  WAN->LAN: record the outer v4 tunnel endpoints and the header
 * fields (checksum base, TTL, DSCP, frag flags) needed to rebuild IPv4 on
 * egress, taking ports from the FOE entry.
 */
void mtk_464xlat_fill_ipv4(struct foe_entry *entry, struct sk_buff *skb,
			   struct foe_entry *foe, bool l2w)
{
	struct iphdr *iph;

	if (l2w) {
		entry->ipv4_dslite.sip = foe->ipv4_dslite.sip;
		entry->ipv4_dslite.dip = foe->ipv4_dslite.dip;
		entry->ipv4_dslite.sport = foe->ipv4_dslite.sport;
		entry->ipv4_dslite.dport = foe->ipv4_dslite.dport;
	} else {
		iph = ip_hdr(skb);
		entry->ipv6_6rd.tunnel_sipv4 = ntohl(iph->saddr);
		entry->ipv6_6rd.tunnel_dipv4 = ntohl(iph->daddr);
		entry->ipv6_6rd.sport = foe->ipv6_6rd.sport;
		entry->ipv6_6rd.dport = foe->ipv6_6rd.dport;
		entry->ipv6_6rd.hdr_chksum = ppe_get_chkbase(iph);
		entry->ipv6_6rd.ttl = iph->ttl;
		entry->ipv6_6rd.dscp = iph->tos;
		/* Top 3 bits of frag_off are the IPv4 flags. */
		entry->ipv6_6rd.flag = (ntohs(iph->frag_off) >> 13);
	}
}
2286
/* Fill the IPv6 half of a 464XLAT entry.
 *
 * LAN->WAN (l2w): record the outer IPv6 tunnel endpoints plus flow label,
 * priority and hop limit.  WAN->LAN: copy the translated inner IPv6
 * 5-tuple addresses from the looked-up FOE entry.
 *
 * Returns 0 on success, -1 if the v6<->v4 XLAT mappings cannot be resolved.
 */
int mtk_464xlat_fill_ipv6(struct foe_entry *entry, struct sk_buff *skb,
			  struct foe_entry *foe, bool l2w)
{
	struct ipv6hdr *ip6h;
	struct in6_addr addr_v6, prefix;
	u32 addr;

	if (l2w) {
		ip6h = ipv6_hdr(skb);

		/* Round-trip the daddr through the v4 mapping to obtain the
		 * translation prefix used below.
		 */
		if (mtk_ppe_get_xlat_v4_by_v6(&ip6h->daddr, &addr))
			return -1;

		if (mtk_ppe_get_xlat_v6_by_v4(&addr, &addr_v6, &prefix))
			return -1;

		/* NOTE(review): word 0 of the tunnel source is taken from the
		 * XLAT prefix while words 1-3 come from the packet saddr —
		 * presumably intentional prefix substitution; confirm.
		 */
		entry->ipv4_dslite.tunnel_sipv6_0 =
			ntohl(prefix.s6_addr32[0]);
		entry->ipv4_dslite.tunnel_sipv6_1 =
			ntohl(ip6h->saddr.s6_addr32[1]);
		entry->ipv4_dslite.tunnel_sipv6_2 =
			ntohl(ip6h->saddr.s6_addr32[2]);
		entry->ipv4_dslite.tunnel_sipv6_3 =
			ntohl(ip6h->saddr.s6_addr32[3]);
		entry->ipv4_dslite.tunnel_dipv6_0 =
			ntohl(ip6h->daddr.s6_addr32[0]);
		entry->ipv4_dslite.tunnel_dipv6_1 =
			ntohl(ip6h->daddr.s6_addr32[1]);
		entry->ipv4_dslite.tunnel_dipv6_2 =
			ntohl(ip6h->daddr.s6_addr32[2]);
		entry->ipv4_dslite.tunnel_dipv6_3 =
			ntohl(ip6h->daddr.s6_addr32[3]);

		ppe_fill_flow_lbl(entry, ip6h);
		entry->ipv4_dslite.priority = ip6h->priority;
		entry->ipv4_dslite.hop_limit = ip6h->hop_limit;

	} else {
		entry->ipv6_6rd.ipv6_sip0 = foe->ipv6_6rd.ipv6_sip0;
		entry->ipv6_6rd.ipv6_sip1 = foe->ipv6_6rd.ipv6_sip1;
		entry->ipv6_6rd.ipv6_sip2 = foe->ipv6_6rd.ipv6_sip2;
		entry->ipv6_6rd.ipv6_sip3 = foe->ipv6_6rd.ipv6_sip3;
		entry->ipv6_6rd.ipv6_dip0 = foe->ipv6_6rd.ipv6_dip0;
		entry->ipv6_6rd.ipv6_dip1 = foe->ipv6_6rd.ipv6_dip1;
		entry->ipv6_6rd.ipv6_dip2 = foe->ipv6_6rd.ipv6_dip2;
		entry->ipv6_6rd.ipv6_dip3 = foe->ipv6_6rd.ipv6_dip3;
	}

	return 0;
}
2337
2338int mtk_464xlat_fill_l2(struct foe_entry *entry, struct sk_buff *skb,
2339 const struct net_device *dev, bool l2w)
2340{
2341 const unsigned int *port_reg;
2342 int port_index;
2343 u16 sp_tag;
2344
2345 if (l2w)
2346 entry->ipv4_dslite.etype = ETH_P_IP;
2347 else {
2348 if (IS_DSA_LAN(dev)) {
2349 port_reg = of_get_property(dev->dev.of_node,
2350 "reg", NULL);
2351 if (unlikely(!port_reg))
2352 return -1;
2353
2354 port_index = be32_to_cpup(port_reg);
2355 sp_tag = BIT(port_index);
2356
2357 entry->bfib1.vlan_layer = 1;
2358 entry->bfib1.vpm = 0;
2359 entry->ipv6_6rd.etype = sp_tag;
2360 } else
2361 entry->ipv6_6rd.etype = ETH_P_IPV6;
2362 }
2363
2364 if (mtk_464xlat_fill_mac(entry, skb, dev, l2w))
2365 return -1;
2366
2367 return 0;
developer30a47682021-11-02 17:06:14 +08002368}
2369
developere8b7dfa2023-04-20 10:16:44 +08002370
2371int mtk_464xlat_fill_l3(struct foe_entry *entry, struct sk_buff *skb,
2372 struct foe_entry *foe, bool l2w)
2373{
2374 mtk_464xlat_fill_ipv4(entry, skb, foe, l2w);
2375
2376 if (mtk_464xlat_fill_ipv6(entry, skb, foe, l2w))
2377 return -1;
2378
2379 return 0;
2380}
2381
/* Post-routing handler for 464XLAT offload: build a complete BIND-state FOE
 * entry for a translated flow and commit it to the PPE table.
 *
 * Direction is inferred from the L3 protocol (IPv6 => LAN->WAN).  The target
 * slot is found by recomputing the hash of the translated flow; binding only
 * proceeds if that slot is currently in HIT_UNBIND_RATE_REACH state.
 *
 * Returns 0 when the entry was committed, -1 otherwise (caller falls back
 * to the normal path).
 */
int mtk_464xlat_post_process(struct sk_buff *skb, const struct net_device *out)
{
	struct foe_entry *foe, entry = {};
	u32 hash;
	bool l2w;

	if (skb->protocol == htons(ETH_P_IPV6))
		l2w = true;
	else if (skb->protocol == htons(ETH_P_IP))
		l2w = false;
	else
		return -1;

	if (mtk_464xlat_get_hash(skb, &hash, l2w))
		return -1;

	if (hash >= hnat_priv->foe_etry_num)
		return -1;

	/* Only bind slots the PPE has flagged as ready to bind. */
	if (headroom[hash].crsn != HIT_UNBIND_RATE_REACH)
		return -1;

	foe = &hnat_priv->foe_table_cpu[headroom_ppe(headroom[hash])][hash];

	mtk_464xlat_fill_info1(&entry, skb, l2w);

	if (mtk_464xlat_fill_l3(&entry, skb, foe, l2w))
		return -1;

	mtk_464xlat_fill_info2(&entry, l2w);

	if (mtk_464xlat_fill_l2(&entry, skb, out, l2w))
		return -1;

	/* We must ensure all info has been updated before set to hw */
	wmb();
	memcpy(foe, &entry, sizeof(struct foe_entry));

	return 0;
}
2422
/* Common post-routing worker shared by the IPv4/IPv6 netfilter hooks.
 *
 * Dispatches on the PPE "reason" (crsn) carried in the skb headroom:
 *  - HIT_UNBIND_RATE_REACH: learn the flow (via @fn nexthop resolver) and
 *    fill the FOE entry through skb_to_hnat_info().
 *  - HIT_BIND_KEEPALIVE_DUP_OLD_HDR: refresh conntrack counters/DSCP/mcast
 *    timestamp; returns -1 (drop duplicate) once the entry is bound.
 *  - HIT_BIND_MULTICAST_*: returns -1 when the PPE already forwarded the
 *    copy to a GMAC, to avoid duplicating it in software.
 *
 * Returns 0 to let the caller NF_ACCEPT, nonzero (-1) to drop.
 */
static unsigned int mtk_hnat_nf_post_routing(
	struct sk_buff *skb, const struct net_device *out,
	unsigned int (*fn)(struct sk_buff *, const struct net_device *,
			   struct flow_offload_hw_path *),
	const char *func)
{
	struct foe_entry *entry;
	struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
						.virt_dev = (struct net_device*)out };
	const struct net_device *arp_dev = out;

	/* 464XLAT flows are bound by their own dedicated path. */
	if (xlat_toggle && !mtk_464xlat_post_process(skb, out))
		return 0;

	if (skb_hnat_alg(skb) || unlikely(!is_magic_tag_valid(skb) ||
					  !IS_SPACE_AVAILABLE_HEAD(skb)))
		return 0;

	if (unlikely(!skb_mac_header_was_set(skb)))
		return 0;

	if (unlikely(!skb_hnat_is_hashed(skb)))
		return 0;

	/* Let the egress driver redirect to the real offload device. */
	if (out->netdev_ops->ndo_flow_offload_check) {
		out->netdev_ops->ndo_flow_offload_check(&hw_path);
		out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
	}

	if (!IS_LAN_GRP(out) && !IS_WAN(out) && !IS_EXT(out))
		return 0;

	trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
		     skb_hnat_iface(skb), out->name, skb_hnat_reason(skb));

	if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
	    skb_hnat_ppe(skb) >= CFG_PPE_NUM)
		return -1;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];

	switch (skb_hnat_reason(skb)) {
	case HIT_UNBIND_RATE_REACH:
		if (entry_hnat_is_bound(entry))
			break;

		if (fn && !mtk_hnat_accel_type(skb))
			break;

		/* fn resolves the nexthop; nonzero means "cannot offload". */
		if (fn && fn(skb, arp_dev, &hw_path))
			break;

		skb_to_hnat_info(skb, out, entry, &hw_path);
		break;
	case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
		/* update hnat count to nf_conntrack by keepalive */
		if (hnat_priv->data->per_flow_accounting && hnat_priv->nf_stat_en)
			mtk_hnat_nf_update(skb);

		if (fn && !mtk_hnat_accel_type(skb))
			break;

		/* update dscp for qos */
		mtk_hnat_dscp_update(skb, entry);

		/* update mcast timestamp*/
		if (hnat_priv->data->version == MTK_HNAT_V1_3 &&
		    hnat_priv->data->mcast && entry->bfib1.sta == 1)
			entry->ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);

		if (entry_hnat_is_bound(entry)) {
			memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);

			/* Bound: drop the software duplicate of the keepalive. */
			return -1;
		}
		break;
	case HIT_BIND_MULTICAST_TO_CPU:
	case HIT_BIND_MULTICAST_TO_GMAC_CPU:
		/*do not forward to gdma again,if ppe already done it*/
		if (IS_LAN_GRP(out) || IS_WAN(out))
			return -1;
		break;
	}

	return 0;
}
2509
/* LOCAL_OUT hook for IPv6: special-cases locally generated 4in6 tunnel
 * traffic (DS-Lite / MAP-E).
 *
 * When the unbound entry's packet carries NEXTHDR_IPIP, the inner IPv4
 * 5-tuple must be captured *before* the normal learn path runs: MAP-E
 * (mape_toggle set) records the inner header into the entry, otherwise the
 * entry is typed as plain DS-Lite.  Always returns NF_ACCEPT.
 */
static unsigned int
mtk_hnat_ipv6_nf_local_out(void *priv, struct sk_buff *skb,
			   const struct nf_hook_state *state)
{
	struct foe_entry *entry;
	struct ipv6hdr *ip6h;
	struct iphdr _iphdr;
	const struct iphdr *iph;
	struct tcpudphdr _ports;
	const struct tcpudphdr *pptr;
	int udp = 0;

	if (unlikely(!skb_hnat_is_hashed(skb)))
		return NF_ACCEPT;

	if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
	    skb_hnat_ppe(skb) >= CFG_PPE_NUM)
		return NF_ACCEPT;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
	if (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH) {
		ip6h = ipv6_hdr(skb);
		if (ip6h->nexthdr == NEXTHDR_IPIP) {
			/* Map-E LAN->WAN: need to record orig info before fn. */
			if (mape_toggle) {
				/* Inner IPv4 header sits right after the
				 * fixed IPv6 header.
				 */
				iph = skb_header_pointer(skb, IPV6_HDR_LEN,
							 sizeof(_iphdr), &_iphdr);
				if (unlikely(!iph))
					return NF_ACCEPT;

				switch (iph->protocol) {
				case IPPROTO_UDP:
					udp = 1;
					/* fallthrough: TCP and UDP share the
					 * port-extraction path below.
					 */
				case IPPROTO_TCP:
					break;

				default:
					return NF_ACCEPT;
				}

				pptr = skb_header_pointer(skb, IPV6_HDR_LEN + iph->ihl * 4,
							  sizeof(_ports), &_ports);
				if (unlikely(!pptr))
					return NF_ACCEPT;

				entry->bfib1.udp = udp;

				/* Map-E LAN->WAN record inner IPv4 header info. */
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
				entry->bfib1.pkt_type = IPV4_MAP_E;
				entry->ipv4_dslite.iblk2.dscp = iph->tos;
				entry->ipv4_mape.new_sip = ntohl(iph->saddr);
				entry->ipv4_mape.new_dip = ntohl(iph->daddr);
				entry->ipv4_mape.new_sport = ntohs(pptr->src);
				entry->ipv4_mape.new_dport = ntohs(pptr->dst);
#else
				entry->ipv4_hnapt.iblk2.dscp = iph->tos;
				entry->ipv4_hnapt.new_sip = ntohl(iph->saddr);
				entry->ipv4_hnapt.new_dip = ntohl(iph->daddr);
				entry->ipv4_hnapt.new_sport = ntohs(pptr->src);
				entry->ipv4_hnapt.new_dport = ntohs(pptr->dst);
#endif
			} else {
				entry->bfib1.pkt_type = IPV4_DSLITE;
			}
		}
	}
	return NF_ACCEPT;
}
2579
2580static unsigned int
2581mtk_hnat_ipv6_nf_post_routing(void *priv, struct sk_buff *skb,
2582 const struct nf_hook_state *state)
2583{
developer577ad2f2022-11-28 10:33:36 +08002584 if (!skb)
2585 goto drop;
2586
developerfd40db22021-04-29 10:08:25 +08002587 post_routing_print(skb, state->in, state->out, __func__);
2588
2589 if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv6_get_nexthop,
2590 __func__))
2591 return NF_ACCEPT;
2592
developer577ad2f2022-11-28 10:33:36 +08002593drop:
2594 if (skb)
2595 trace_printk(
2596 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x,\n"
2597 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2598 __func__, skb_hnat_iface(skb), state->out->name,
2599 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2600 skb_hnat_sport(skb), skb_hnat_reason(skb),
2601 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002602
2603 return NF_DROP;
2604}
2605
2606static unsigned int
2607mtk_hnat_ipv4_nf_post_routing(void *priv, struct sk_buff *skb,
2608 const struct nf_hook_state *state)
2609{
developer577ad2f2022-11-28 10:33:36 +08002610 if (!skb)
2611 goto drop;
2612
developerfd40db22021-04-29 10:08:25 +08002613 post_routing_print(skb, state->in, state->out, __func__);
2614
2615 if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv4_get_nexthop,
2616 __func__))
2617 return NF_ACCEPT;
2618
developer577ad2f2022-11-28 10:33:36 +08002619drop:
2620 if (skb)
2621 trace_printk(
2622 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x,\n"
2623 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2624 __func__, skb_hnat_iface(skb), state->out->name,
2625 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2626 skb_hnat_sport(skb), skb_hnat_reason(skb),
2627 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002628
2629 return NF_DROP;
2630}
2631
/* Bridge hook handling HQoS "pong" traffic and the external-interface
 * fast paths.
 *
 * When HQoS is active, packets re-injected with the HQOS_MAGIC_TAG
 * ethertype carry the FOE index in their VLAN TCI; restore it and mark the
 * packet force-to-CPU.  Then run the ext<->GE learning/fast-forward paths,
 * stealing the skb when one of them consumes it.
 */
static unsigned int
mtk_pong_hqos_handler(void *priv, struct sk_buff *skb,
		      const struct nf_hook_state *state)
{
	struct vlan_ethhdr *veth;

	if (!skb)
		goto drop;

	veth = (struct vlan_ethhdr *)skb_mac_header(skb);

	/* NOTE(review): h_proto compared against HQOS_MAGIC_TAG without
	 * htons() here, while the TX side writes htons(HQOS_MAGIC_TAG) —
	 * confirm the intended byte order of the tag.
	 */
	if (IS_HQOS_MODE && eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
		/* Low 14 bits of the VLAN TCI hold the FOE entry index. */
		skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
		skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
	}

	if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
		clr_from_extge(skb);

	/* packets from external devices -> xxx ,step 2, learning stage */
	if (do_ext2ge_fast_learn(state->in, skb) && (!qos_toggle ||
	    (qos_toggle && eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG))) {
		if (!do_hnat_ext_to_ge2(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

	/* packets form ge -> external device */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

	return NF_ACCEPT;

drop:
	if (skb)
		printk_ratelimited(KERN_WARNING
			"%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
			"sport=0x%x, reason=0x%x, alg=0x%x)\n",
			__func__, state->in->name, skb_hnat_iface(skb),
			HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
			skb_hnat_sport(skb), skb_hnat_reason(skb),
			skb_hnat_alg(skb));

	return NF_DROP;
}
2680
2681static unsigned int
2682mtk_hnat_br_nf_local_out(void *priv, struct sk_buff *skb,
2683 const struct nf_hook_state *state)
2684{
developer577ad2f2022-11-28 10:33:36 +08002685 if (!skb)
2686 goto drop;
2687
developerfd40db22021-04-29 10:08:25 +08002688 post_routing_print(skb, state->in, state->out, __func__);
2689
2690 if (!mtk_hnat_nf_post_routing(skb, state->out, 0, __func__))
2691 return NF_ACCEPT;
2692
developer577ad2f2022-11-28 10:33:36 +08002693drop:
2694 if (skb)
2695 trace_printk(
2696 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x,\n"
2697 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2698 __func__, skb_hnat_iface(skb), state->out->name,
2699 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2700 skb_hnat_sport(skb), skb_hnat_reason(skb),
2701 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002702
2703 return NF_DROP;
2704}
2705
2706static unsigned int
2707mtk_hnat_ipv4_nf_local_out(void *priv, struct sk_buff *skb,
2708 const struct nf_hook_state *state)
2709{
2710 struct sk_buff *new_skb;
2711 struct foe_entry *entry;
2712 struct iphdr *iph;
2713
2714 if (!skb_hnat_is_hashed(skb))
2715 return NF_ACCEPT;
2716
developer577ad2f2022-11-28 10:33:36 +08002717 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
2718 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
2719 return NF_ACCEPT;
2720
developer471f6562021-05-10 20:48:34 +08002721 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08002722
2723 if (unlikely(skb_headroom(skb) < FOE_INFO_LEN)) {
2724 new_skb = skb_realloc_headroom(skb, FOE_INFO_LEN);
2725 if (!new_skb) {
2726 dev_info(hnat_priv->dev, "%s:drop\n", __func__);
2727 return NF_DROP;
2728 }
2729 dev_kfree_skb(skb);
2730 skb = new_skb;
2731 }
2732
2733 /* Make the flow from local not be bound. */
2734 iph = ip_hdr(skb);
2735 if (iph->protocol == IPPROTO_IPV6) {
2736 entry->udib1.pkt_type = IPV6_6RD;
2737 hnat_set_head_frags(state, skb, 0, hnat_set_alg);
2738 } else {
2739 hnat_set_head_frags(state, skb, 1, hnat_set_alg);
2740 }
2741
2742 return NF_ACCEPT;
2743}
2744
2745static unsigned int mtk_hnat_br_nf_forward(void *priv,
2746 struct sk_buff *skb,
2747 const struct nf_hook_state *state)
2748{
developer4164cfe2022-12-01 11:27:41 +08002749 if ((hnat_priv->data->version == MTK_HNAT_V1_2) &&
developer99506e52021-06-30 22:03:02 +08002750 unlikely(IS_EXT(state->in) && IS_EXT(state->out)))
developerfd40db22021-04-29 10:08:25 +08002751 hnat_set_head_frags(state, skb, 1, hnat_set_alg);
2752
2753 return NF_ACCEPT;
2754}
2755
/* Netfilter hook table for HNAT. Registered verbatim by
 * hnat_register_nf_hooks(); whnat_adjust_nf_hooks() rewrites the bridge
 * entries in place for the warp/WHNAT configuration, so the pointers
 * compared there must match the .hook members below.
 */
static struct nf_hook_ops mtk_hnat_nf_ops[] __read_mostly = {
	{
		.hook = mtk_hnat_ipv4_nf_pre_routing,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_PRE_ROUTING,
		/* Run just after conntrack defrag, before most hooks. */
		.priority = NF_IP_PRI_FIRST + 1,
	},
	{
		.hook = mtk_hnat_ipv6_nf_pre_routing,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST + 1,
	},
	{
		.hook = mtk_hnat_ipv6_nf_post_routing,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_POST_ROUTING,
		/* Bind entries only after all other post-routing work. */
		.priority = NF_IP_PRI_LAST,
	},
	{
		.hook = mtk_hnat_ipv6_nf_local_out,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_LOCAL_OUT,
		.priority = NF_IP_PRI_LAST,
	},
	{
		.hook = mtk_hnat_ipv4_nf_post_routing,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_POST_ROUTING,
		.priority = NF_IP_PRI_LAST,
	},
	{
		.hook = mtk_hnat_ipv4_nf_local_out,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_LOCAL_OUT,
		.priority = NF_IP_PRI_LAST,
	},
	{
		.hook = mtk_hnat_br_nf_local_in,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_LOCAL_IN,
		.priority = NF_BR_PRI_FIRST,
	},
	{
		.hook = mtk_hnat_br_nf_local_out,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_LOCAL_OUT,
		.priority = NF_BR_PRI_LAST - 1,
	},
	{
		/* Replaced by mtk_hnat_br_nf_forward on NF_BR_FORWARD in
		 * whnat_adjust_nf_hooks() for the WHNAT configuration.
		 */
		.hook = mtk_pong_hqos_handler,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_PRE_ROUTING,
		.priority = NF_BR_PRI_FIRST + 1,
	},
};
2812
2813int hnat_register_nf_hooks(void)
2814{
2815 return nf_register_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
2816}
2817
2818void hnat_unregister_nf_hooks(void)
2819{
2820 nf_unregister_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
2821}
2822
2823int whnat_adjust_nf_hooks(void)
2824{
2825 struct nf_hook_ops *hook = mtk_hnat_nf_ops;
2826 unsigned int n = ARRAY_SIZE(mtk_hnat_nf_ops);
2827
developerfd40db22021-04-29 10:08:25 +08002828 while (n-- > 0) {
2829 if (hook[n].hook == mtk_hnat_br_nf_local_in) {
2830 hook[n].hooknum = NF_BR_PRE_ROUTING;
developer2b85f652021-08-19 16:09:50 +08002831 hook[n].priority = NF_BR_PRI_FIRST + 1;
developerfd40db22021-04-29 10:08:25 +08002832 } else if (hook[n].hook == mtk_hnat_br_nf_local_out) {
2833 hook[n].hooknum = NF_BR_POST_ROUTING;
2834 } else if (hook[n].hook == mtk_pong_hqos_handler) {
2835 hook[n].hook = mtk_hnat_br_nf_forward;
2836 hook[n].hooknum = NF_BR_FORWARD;
2837 hook[n].priority = NF_BR_PRI_LAST - 1;
2838 }
2839 }
2840
2841 return 0;
2842}
2843
developerfd40db22021-04-29 10:08:25 +08002844int mtk_hqos_ptype_cb(struct sk_buff *skb, struct net_device *dev,
2845 struct packet_type *pt, struct net_device *unused)
2846{
2847 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
2848
2849 skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
2850 skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
2851
developer659fdeb2022-12-01 23:03:07 +08002852 if (do_hnat_ge_to_ext(skb, __func__) == -1)
2853 return 1;
developerfd40db22021-04-29 10:08:25 +08002854
2855 return 0;
2856}
developerfd40db22021-04-29 10:08:25 +08002857