/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
 * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
 */

#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv6.h>

#include <net/arp.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_acct.h>

#include "nf_hnat_mtk.h"
#include "hnat.h"

#include "../mtk_eth_soc.h"
#include "../mtk_eth_reset.h"

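/* Fast-path classification helpers: decide, from the HNAT tag carried in the
 * skb headroom, whether a packet can take the GE<->EXT shortcut, needs the
 * second-stage EXT learning path, or is MAP-E WAN->LAN pingpong traffic.
 */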
#define do_ge2ext_fast(dev, skb) \
	((IS_LAN_GRP(dev) || IS_WAN(dev) || IS_PPD(dev)) && \
	 skb_hnat_is_hashed(skb) && \
	 skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU)
#define do_ext2ge_fast_learn(dev, skb) \
	(IS_PPD(dev) && \
	 (skb_hnat_sport(skb) == NR_PDMA_PORT || \
	  skb_hnat_sport(skb) == NR_QDMA_PORT) && \
	 ((get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK)) || \
	  get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK)))
#define do_mape_w2l_fast(dev, skb) \
	(mape_toggle && IS_WAN(dev) && (!is_from_mape(skb)))

static struct ipv6hdr mape_l2w_v6h;
static struct ipv6hdr mape_w2l_v6h;
static inline uint8_t get_wifi_hook_if_index_from_dev(const struct net_device *dev)
{
	int i;

	for (i = 1; i < MAX_IF_NUM; i++) {
		if (hnat_priv->wifi_hook_if[i] == dev)
			return i;
	}

	return 0;
}

static inline int get_ext_device_number(void)
{
	int i, number = 0;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++)
		number += 1;
	return number;
}

static inline int find_extif_from_devname(const char *name)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (!strcmp(name, ext_entry->name))
			return 1;
	}
	return 0;
}

static inline int get_index_from_dev(const struct net_device *dev)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (dev == ext_entry->dev)
			return ext_entry->dev->ifindex;
	}
	return 0;
}

static inline struct net_device *get_dev_from_index(int index)
{
	int i;
	struct extdev_entry *ext_entry;
	struct net_device *dev = 0;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (ext_entry->dev && index == ext_entry->dev->ifindex) {
			dev = ext_entry->dev;
			break;
		}
	}
	return dev;
}

static inline struct net_device *get_wandev_from_index(int index)
{
	if (!hnat_priv->g_wandev)
		hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);

	if (hnat_priv->g_wandev && hnat_priv->g_wandev->ifindex == index)
		return hnat_priv->g_wandev;
	return NULL;
}

static inline int extif_set_dev(struct net_device *dev)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (!strcmp(dev->name, ext_entry->name) && !ext_entry->dev) {
			dev_hold(dev);
			ext_entry->dev = dev;
			pr_info("%s(%s)\n", __func__, dev->name);

			return ext_entry->dev->ifindex;
		}
	}

	return -1;
}

static inline int extif_put_dev(struct net_device *dev)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (ext_entry->dev == dev) {
			ext_entry->dev = NULL;
			dev_put(dev);
			pr_info("%s(%s)\n", __func__, dev->name);

			return 0;
		}
	}

	return -1;
}

int ext_if_add(struct extdev_entry *ext_entry)
{
	int len = get_ext_device_number();

	if (len < MAX_EXT_DEVS)
		hnat_priv->ext_if[len++] = ext_entry;

	return len;
}

int ext_if_del(struct extdev_entry *ext_entry)
{
	int i, j;

	for (i = 0; i < MAX_EXT_DEVS; i++) {
		if (hnat_priv->ext_if[i] == ext_entry) {
			for (j = i; hnat_priv->ext_if[j] && j < MAX_EXT_DEVS - 1; j++)
				hnat_priv->ext_if[j] = hnat_priv->ext_if[j + 1];
			hnat_priv->ext_if[j] = NULL;
			break;
		}
	}

	return i;
}

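/* Invalidate every bound FOE entry when an HNAT-related interface goes down,
 * then flush the hardware NAT cache and re-arm the SMA rebuild timer.
 */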
void foe_clear_all_bind_entries(struct net_device *dev)
{
	int i, hash_index;
	struct foe_entry *entry;

	if (!IS_LAN_GRP(dev) && !IS_WAN(dev) &&
	    !find_extif_from_devname(dev->name) &&
	    !dev->netdev_ops->ndo_flow_offload_check)
		return;

	for (i = 0; i < CFG_PPE_NUM; i++) {
		cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
			     SMA, SMA_ONLY_FWD_CPU);

		for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
			entry = hnat_priv->foe_table_cpu[i] + hash_index;
			if (entry->bfib1.state == BIND) {
				entry->ipv4_hnapt.udib1.state = INVALID;
				entry->ipv4_hnapt.udib1.time_stamp =
					readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
			}
		}
	}

	/* clear HWNAT cache */
	hnat_cache_ebl(1);

	mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ);
}

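/* Enable PPE forwarding on the GMAC port that matches the netdevice role. */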
static void gmac_ppe_fwd_enable(struct net_device *dev)
{
	if (IS_LAN(dev) || IS_GMAC1_MODE)
		set_gmac_ppe_fwd(NR_GMAC1_PORT, 1);
	else if (IS_WAN(dev))
		set_gmac_ppe_fwd(NR_GMAC2_PORT, 1);
	else if (IS_LAN2(dev))
		set_gmac_ppe_fwd(NR_GMAC3_PORT, 1);
}

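/* Netdevice notifier: track external/PPD/WAN device lifetime, clear bound
 * entries on link-down, and trigger a warm re-init of the HNAT driver once
 * the frame engine reset completes.
 */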
int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event,
			    void *ptr)
{
	struct net_device *dev;

	dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		gmac_ppe_fwd_enable(dev);

		extif_set_dev(dev);

		break;
	case NETDEV_GOING_DOWN:
		if (!get_wifi_hook_if_index_from_dev(dev))
			extif_put_dev(dev);

		foe_clear_all_bind_entries(dev);

		break;
	case NETDEV_UNREGISTER:
		if (hnat_priv->g_ppdev == dev) {
			hnat_priv->g_ppdev = NULL;
			dev_put(dev);
		}
		if (hnat_priv->g_wandev == dev) {
			hnat_priv->g_wandev = NULL;
			dev_put(dev);
		}

		break;
	case NETDEV_REGISTER:
		if (IS_PPD(dev) && !hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
		if (IS_WAN(dev) && !hnat_priv->g_wandev)
			hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);

		break;
	case MTK_FE_RESET_NAT_DONE:
		pr_info("[%s] HNAT driver starts to do warm init !\n", __func__);
		hnat_warm_init();
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

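/* When a neighbour changes its MAC address, invalidate any bound FOE entries
 * that still carry the stale destination MAC for that IPv4 address.
 */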
void foe_clear_entry(struct neighbour *neigh)
{
	u32 *daddr = (u32 *)neigh->primary_key;
	unsigned char h_dest[ETH_ALEN];
	struct foe_entry *entry;
	int i, hash_index;
	u32 dip;

	dip = (u32)(*daddr);

	for (i = 0; i < CFG_PPE_NUM; i++) {
		if (!hnat_priv->foe_table_cpu[i])
			continue;

		for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
			entry = hnat_priv->foe_table_cpu[i] + hash_index;
			if (entry->bfib1.state == BIND &&
			    entry->ipv4_hnapt.new_dip == ntohl(dip)) {
				*((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
				*((u16 *)&h_dest[4]) =
					swab16(entry->ipv4_hnapt.dmac_lo);
				if (strncmp(h_dest, neigh->ha, ETH_ALEN) != 0) {
					pr_info("%s: state=%d\n", __func__,
						neigh->nud_state);
					cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
						     SMA, SMA_ONLY_FWD_CPU);

					entry->ipv4_hnapt.udib1.state = INVALID;
					entry->ipv4_hnapt.udib1.time_stamp =
						readl((hnat_priv->fe_base + 0x0010)) & 0xFF;

					/* clear HWNAT cache */
					hnat_cache_ebl(1);

					mod_timer(&hnat_priv->hnat_sma_build_entry_timer,
						  jiffies + 3 * HZ);

					pr_info("Delete old entry: dip =%pI4\n", &dip);
					pr_info("Old mac= %pM\n", h_dest);
					pr_info("New mac= %pM\n", neigh->ha);
				}
			}
		}
	}
}

int nf_hnat_netevent_handler(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = NULL;
	struct neighbour *neigh = NULL;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		neigh = ptr;
		dev = neigh->dev;
		if (dev)
			foe_clear_entry(neigh);
		break;
	}

	return NOTIFY_DONE;
}

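/* Re-insert the saved outer IPv6 header in front of an IPv4 packet for the
 * MAP-E pingpong path. Returns 0 on success, -1 if the skb has too little
 * headroom or cannot be written to.
 */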
unsigned int mape_add_ipv6_hdr(struct sk_buff *skb, struct ipv6hdr mape_ip6h)
{
	struct ethhdr *eth = NULL;
	struct ipv6hdr *ip6h = NULL;
	struct iphdr *iph = NULL;

	if (skb_headroom(skb) < IPV6_HDR_LEN || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		return -1;
	}

	/* point to L3 */
	memcpy(skb->data - IPV6_HDR_LEN - ETH_HLEN, skb_push(skb, ETH_HLEN), ETH_HLEN);
	memcpy(skb_push(skb, IPV6_HDR_LEN - ETH_HLEN), &mape_ip6h, IPV6_HDR_LEN);

	eth = (struct ethhdr *)(skb->data - ETH_HLEN);
	eth->h_proto = htons(ETH_P_IPV6);
	skb->protocol = htons(ETH_P_IPV6);

	iph = (struct iphdr *)(skb->data + IPV6_HDR_LEN);
	ip6h = (struct ipv6hdr *)(skb->data);
	ip6h->payload_len = iph->tot_len; /* maybe different with ipv4 */

	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, iph->ihl * 4 + IPV6_HDR_LEN);
	return 0;
}

static void fix_skb_packet_type(struct sk_buff *skb, struct net_device *dev,
				struct ethhdr *eth)
{
	skb->pkt_type = PACKET_HOST;
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	}
}

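/* Learning path for traffic received on an external (e.g. WiFi) interface:
 * record the ingress ifindex in a VLAN tag and bounce the packet through the
 * PPD device so the PPE can learn the flow.
 */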
unsigned int do_hnat_ext_to_ge(struct sk_buff *skb, const struct net_device *in,
			       const char *func)
{
	if (hnat_priv->g_ppdev && hnat_priv->g_ppdev->flags & IFF_UP) {
		u16 vlan_id = 0;
		skb_set_network_header(skb, 0);
		skb_push(skb, ETH_HLEN);
		set_to_ppe(skb);

		vlan_id = skb_vlan_tag_get_id(skb);
		if (vlan_id) {
			skb = vlan_insert_tag(skb, skb->vlan_proto, skb->vlan_tci);
			if (!skb)
				return -1;
		}

		/*set where we come from*/
		skb->vlan_proto = htons(ETH_P_8021Q);
		skb->vlan_tci =
			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));
		trace_printk(
			"%s: vlan_prot=0x%x, vlan_tci=%x, in->name=%s, skb->dev->name=%s\n",
			__func__, ntohs(skb->vlan_proto), skb->vlan_tci,
			in->name, hnat_priv->g_ppdev->name);
		skb->dev = hnat_priv->g_ppdev;
		dev_queue_xmit(skb);
		trace_printk("%s: called from %s successfully\n", __func__, func);
		return 0;
	}

	trace_printk("%s: called from %s fail\n", __func__, func);
	return -1;
}

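/* Second stage of external-interface learning: the packet comes back from the
 * PPD device, so restore its real ingress device from the VLAN tag and hand
 * it to the stack (or to the MAP-E WAN->LAN pingpong path).
 */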
unsigned int do_hnat_ext_to_ge2(struct sk_buff *skb, const char *func)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct net_device *dev;
	struct foe_entry *entry;

	trace_printk("%s: vlan_prot=0x%x, vlan_tci=%x\n", __func__,
		     ntohs(skb->vlan_proto), skb->vlan_tci);

	if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
	    skb_hnat_ppe(skb) >= CFG_PPE_NUM)
		return -1;

	dev = get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK);

	if (dev) {
		/*set where we to go*/
		skb->dev = dev;
		skb->vlan_proto = 0;
		skb->vlan_tci = 0;

		if (ntohs(eth->h_proto) == ETH_P_8021Q) {
			skb = skb_vlan_untag(skb);
			if (unlikely(!skb))
				return -1;
		}

		if (IS_BOND_MODE &&
		    (((hnat_priv->data->version == MTK_HNAT_V2 ||
		       hnat_priv->data->version == MTK_HNAT_V3) &&
		      (skb_hnat_entry(skb) != 0x7fff)) ||
		     ((hnat_priv->data->version != MTK_HNAT_V2 &&
		       hnat_priv->data->version != MTK_HNAT_V3) &&
		      (skb_hnat_entry(skb) != 0x3fff))))
			skb_set_hash(skb, skb_hnat_entry(skb) >> 1, PKT_HASH_TYPE_L4);

		set_from_extge(skb);
		fix_skb_packet_type(skb, skb->dev, eth);
		netif_rx(skb);
		trace_printk("%s: called from %s successfully\n", __func__,
			     func);
		return 0;
	} else {
		/* MapE WAN --> LAN/WLAN PingPong. */
		dev = get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK);
		if (mape_toggle && dev) {
			if (!mape_add_ipv6_hdr(skb, mape_w2l_v6h)) {
				skb_set_mac_header(skb, -ETH_HLEN);
				skb->dev = dev;
				set_from_mape(skb);
				skb->vlan_proto = 0;
				skb->vlan_tci = 0;
				fix_skb_packet_type(skb, skb->dev, eth_hdr(skb));
				entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
				entry->bfib1.pkt_type = IPV4_HNAPT;
				netif_rx(skb);
				return 0;
			}
		}
		trace_printk("%s: called from %s fail\n", __func__, func);
		return -1;
	}
}

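/* Fast transmit path from GMAC to an external interface: for flows the PPE
 * forced to the CPU, look up the egress device recorded in the FOE entry
 * (act_dp) and forward the packet directly to it.
 */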
unsigned int do_hnat_ge_to_ext(struct sk_buff *skb, const char *func)
{
	/*set where we to go*/
	u8 index;
	struct foe_entry *entry;
	struct net_device *dev;

	if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
	    skb_hnat_ppe(skb) >= CFG_PPE_NUM)
		return -1;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];

	if (IS_IPV4_GRP(entry))
		index = entry->ipv4_hnapt.act_dp;
	else
		index = entry->ipv6_5t_route.act_dp;

	dev = get_dev_from_index(index);
	if (!dev) {
		trace_printk("%s: called from %s. Get wifi interface fail\n",
			     __func__, func);
		return 0;
	}

	skb->dev = dev;

	if (IS_HQOS_MODE && eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb)
			return NF_ACCEPT;

		if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
			return NF_ACCEPT;

		skb_pull_rcsum(skb, VLAN_HLEN);

		memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - VLAN_HLEN,
			2 * ETH_ALEN);
	}

	if (skb->dev) {
		skb_set_network_header(skb, 0);
		skb_push(skb, ETH_HLEN);
		dev_queue_xmit(skb);
		trace_printk("%s: called from %s successfully\n", __func__,
			     func);
		return 0;
	} else {
		if (mape_toggle) {
			/* Add ipv6 header mape for lan/wlan -->wan */
			dev = get_wandev_from_index(index);
			if (dev) {
				if (!mape_add_ipv6_hdr(skb, mape_l2w_v6h)) {
					skb_set_network_header(skb, 0);
					skb_push(skb, ETH_HLEN);
					skb_set_mac_header(skb, 0);
					skb->dev = dev;
					dev_queue_xmit(skb);
					return 0;
				}
				trace_printk("%s: called from %s fail[MapE]\n", __func__,
					     func);
				return -1;
			}
		}
	}
	/*if external devices is down, invalidate related ppe entry*/
	if (entry_hnat_is_bound(entry)) {
		entry->bfib1.state = INVALID;
		if (IS_IPV4_GRP(entry))
			entry->ipv4_hnapt.act_dp = 0;
		else
			entry->ipv6_5t_route.act_dp = 0;

		/* clear HWNAT cache */
		hnat_cache_ebl(1);
	}
	trace_printk("%s: called from %s fail, index=%x\n", __func__,
		     func, index);
	return -1;
}

static void pre_routing_print(struct sk_buff *skb, const struct net_device *in,
			      const struct net_device *out, const char *func)
{
	trace_printk(
		"[%s]: %s(iif=0x%x CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
		__func__, in->name, skb_hnat_iface(skb),
		HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
		skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
		func);
}

static void post_routing_print(struct sk_buff *skb, const struct net_device *in,
			       const struct net_device *out, const char *func)
{
	trace_printk(
		"[%s]: %s(iif=0x%x, CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
		__func__, in->name, skb_hnat_iface(skb),
		HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
		skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
		func);
}

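/* Record the logical ingress interface type (LAN/LAN2/PPD/EXT/WAN/virtual)
 * into the HNAT tag stored in the skb headroom.
 */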
static inline void hnat_set_iif(const struct nf_hook_state *state,
				struct sk_buff *skb, int val)
{
	if (IS_WHNAT(state->in) && FROM_WED(skb)) {
		return;
	} else if (IS_LAN(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN;
	} else if (IS_LAN2(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN2;
	} else if (IS_PPD(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_PPD;
	} else if (IS_EXT(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_EXT;
	} else if (IS_WAN(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_WAN;
	} else if (!IS_BR(state->in)) {
		if (state->in->netdev_ops->ndo_flow_offload_check) {
			skb_hnat_iface(skb) = FOE_MAGIC_GE_VIRTUAL;
		} else {
			skb_hnat_iface(skb) = FOE_INVALID;

			if (is_magic_tag_valid(skb) &&
			    IS_SPACE_AVAILABLE_HEAD(skb))
				memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
		}
	}
}

static inline void hnat_set_alg(const struct nf_hook_state *state,
				struct sk_buff *skb, int val)
{
	skb_hnat_alg(skb) = val;
}

static inline void hnat_set_head_frags(const struct nf_hook_state *state,
				       struct sk_buff *head_skb, int val,
				       void (*fn)(const struct nf_hook_state *state,
						  struct sk_buff *skb, int val))
{
	struct sk_buff *segs = skb_shinfo(head_skb)->frag_list;

	fn(state, head_skb, val);
	while (segs) {
		fn(state, segs, val);
		segs = segs->next;
	}
}

static void ppe_fill_flow_lbl(struct foe_entry *entry, struct ipv6hdr *ip6h)
{
	entry->ipv4_dslite.flow_lbl[0] = ip6h->flow_lbl[2];
	entry->ipv4_dslite.flow_lbl[1] = ip6h->flow_lbl[1];
	entry->ipv4_dslite.flow_lbl[2] = ip6h->flow_lbl[0];
}

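/* MAP-E WAN->LAN fast path: strip the outer IPv6 header from an IPv4-in-IPv6
 * packet, remember it for the reverse direction, and bounce the inner IPv4
 * packet through the PPD device for PPE learning.
 */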
unsigned int do_hnat_mape_w2l_fast(struct sk_buff *skb, const struct net_device *in,
				   const char *func)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct iphdr _iphdr;
	struct iphdr *iph;
	struct ethhdr *eth;

	/* WAN -> LAN/WLAN MapE. */
	if (mape_toggle && (ip6h->nexthdr == NEXTHDR_IPIP)) {
		iph = skb_header_pointer(skb, IPV6_HDR_LEN, sizeof(_iphdr), &_iphdr);
		if (unlikely(!iph))
			return -1;

		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_TCP:
			break;
		default:
			return -1;
		}
		mape_w2l_v6h = *ip6h;

		/* Remove ipv6 header. */
		memcpy(skb->data + IPV6_HDR_LEN - ETH_HLEN,
		       skb->data - ETH_HLEN, ETH_HLEN);
		skb_pull(skb, IPV6_HDR_LEN - ETH_HLEN);
		skb_set_mac_header(skb, 0);
		skb_set_network_header(skb, ETH_HLEN);
		skb_set_transport_header(skb, ETH_HLEN + sizeof(_iphdr));

		eth = eth_hdr(skb);
		eth->h_proto = htons(ETH_P_IP);
		set_to_ppe(skb);

		skb->vlan_proto = htons(ETH_P_8021Q);
		skb->vlan_tci =
			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));

		if (!hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);

		skb->dev = hnat_priv->g_ppdev;
		skb->protocol = htons(ETH_P_IP);

		dev_queue_xmit(skb);

		return 0;
	}
	return -1;
}

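/* 464XLAT support: snapshot the HNAT descriptor of still-unbound flows into
 * the per-entry headroom cache, and drop the snapshot once the entry binds.
 */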
void mtk_464xlat_pre_process(struct sk_buff *skb)
{
	struct foe_entry *foe;

	if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
	    skb_hnat_ppe(skb) >= CFG_PPE_NUM)
		return;

	foe = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
	if (foe->bfib1.state != BIND &&
	    skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH)
		memcpy(&headroom[skb_hnat_entry(skb)], skb->head,
		       sizeof(struct hnat_desc));

	if (foe->bfib1.state == BIND)
		memset(&headroom[skb_hnat_entry(skb)], 0,
		       sizeof(struct hnat_desc));
}

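/* Return 1 if the packet is a candidate for PPE acceleration (non-broadcast
 * TCP/UDP over IPv4 or IPv6, IPIP-tunnelled traffic, or a VLAN-tagged frame),
 * 0 otherwise.
 */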
static unsigned int is_ppe_support_type(struct sk_buff *skb)
{
	struct ethhdr *eth = NULL;
	struct iphdr *iph = NULL;
	struct ipv6hdr *ip6h = NULL;
	struct iphdr _iphdr;

	eth = eth_hdr(skb);
	if (!is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb) ||
	    is_broadcast_ether_addr(eth->h_dest))
		return 0;

	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		iph = ip_hdr(skb);

		/* do not accelerate non tcp/udp traffic */
		if ((iph->protocol == IPPROTO_TCP) ||
		    (iph->protocol == IPPROTO_UDP) ||
		    (iph->protocol == IPPROTO_IPV6)) {
			return 1;
		}

		break;
	case ETH_P_IPV6:
		ip6h = ipv6_hdr(skb);

		if ((ip6h->nexthdr == NEXTHDR_TCP) ||
		    (ip6h->nexthdr == NEXTHDR_UDP)) {
			return 1;
		} else if (ip6h->nexthdr == NEXTHDR_IPIP) {
			iph = skb_header_pointer(skb, IPV6_HDR_LEN,
						 sizeof(_iphdr), &_iphdr);
			if (unlikely(!iph))
				return 0;

			if ((iph->protocol == IPPROTO_TCP) ||
			    (iph->protocol == IPPROTO_UDP)) {
				return 1;
			}

		}

		break;
	case ETH_P_8021Q:
		return 1;
	}

	return 0;
}

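/* NF_INET_PRE_ROUTING hook (IPv6): tag the ingress interface and steer
 * packets onto the EXT<->GE learning/fast paths before routing happens.
 * The IPv4 hook below mirrors this logic.
 */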
static unsigned int
mtk_hnat_ipv6_nf_pre_routing(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	if (!skb)
		goto drop;

	if (!is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
	if (do_ext2ge_fast_try(state->in, skb)) {
		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		return NF_ACCEPT;
	}

	/* packets form ge -> external device
	 * For standalone wan interface
	 */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

#if !(defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3))
	/* MapE need remove ipv6 header and pingpong. */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}

	if (is_from_mape(skb))
		clr_from_extge(skb);
#endif
	if (xlat_toggle)
		mtk_464xlat_pre_process(skb);

	return NF_ACCEPT;
drop:
	if (skb)
		printk_ratelimited(KERN_WARNING
				   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
				   "sport=0x%x, reason=0x%x, alg=0x%x)\n",
				   __func__, state->in->name, skb_hnat_iface(skb),
				   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
				   skb_hnat_sport(skb), skb_hnat_reason(skb),
				   skb_hnat_alg(skb));

	return NF_DROP;
}

static unsigned int
mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	if (!skb)
		goto drop;

	if (!is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
	if (do_ext2ge_fast_try(state->in, skb)) {
		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		return NF_ACCEPT;
	}

	/* packets form ge -> external device
	 * For standalone wan interface
	 */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}
	if (xlat_toggle)
		mtk_464xlat_pre_process(skb);

	return NF_ACCEPT;
drop:
	if (skb)
		printk_ratelimited(KERN_WARNING
				   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
				   "sport=0x%x, reason=0x%x, alg=0x%x)\n",
				   __func__, state->in->name, skb_hnat_iface(skb),
				   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
				   skb_hnat_sport(skb), skb_hnat_reason(skb),
				   skb_hnat_alg(skb));

	return NF_DROP;
}

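/* NF_BR_LOCAL_IN hook: same EXT<->GE learning and fast-path handling for
 * bridged traffic, plus restoring the PPE hash/reason from the HQoS magic
 * tag when hardware QoS is in use.
 */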
static unsigned int
mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct vlan_ethhdr *veth;

	if (!skb)
		goto drop;

	if (IS_HQOS_MODE && hnat_priv->data->whnat) {
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);

		if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
			skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
			skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
		}
	}

	if (!HAS_HQOS_MAGIC_TAG(skb) && !is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	if (unlikely(debug_level >= 7)) {
		hnat_cpu_reason_cnt(skb);
		if (skb_hnat_reason(skb) == dbg_cpu_reason)
			foe_dump_pkt(skb);
	}

	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
	if ((skb_hnat_iface(skb) == FOE_MAGIC_EXT) && !is_from_extge(skb) &&
	    !is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
		if (!hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);

		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		return NF_ACCEPT;
	}

	if (hnat_priv->data->whnat) {
		if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
			clr_from_extge(skb);

		/* packets from external devices -> xxx ,step 2, learning stage */
		if (do_ext2ge_fast_learn(state->in, skb) && (!qos_toggle ||
		    (qos_toggle && eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG))) {
			if (!do_hnat_ext_to_ge2(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}

		/* packets form ge -> external device */
		if (do_ge2ext_fast(state->in, skb)) {
			if (!do_hnat_ge_to_ext(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}
	}

#if !(defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3))
	/* MapE need remove ipv6 header and pingpong. (bridge mode) */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}
#endif
	return NF_ACCEPT;
drop:
	if (skb)
		printk_ratelimited(KERN_WARNING
				   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
				   "sport=0x%x, reason=0x%x, alg=0x%x)\n",
				   __func__, state->in->name, skb_hnat_iface(skb),
				   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
				   skb_hnat_sport(skb), skb_hnat_reason(skb),
				   skb_hnat_alg(skb));

	return NF_DROP;
}

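/* Resolve the next-hop MAC for an IPv6 (or IPv6-tunnelled) packet and rewrite
 * the Ethernet header accordingly; PPPoE paths take the MACs from the offload
 * path descriptor instead. The IPv4 variant follows.
 */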
static unsigned int hnat_ipv6_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	const struct in6_addr *ipv6_nexthop;
	struct neighbour *neigh = NULL;
	struct dst_entry *dst = skb_dst(skb);
	struct ethhdr *eth;

	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	rcu_read_lock_bh();
	ipv6_nexthop =
		rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, ipv6_nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI6)\n", __func__,
			   &ipv6_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get all zero ethernet address ? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP) {
		/*copy ether type for DS-Lite and MapE */
		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
		eth->h_proto = skb->protocol;
	} else {
		eth = eth_hdr(skb);
	}

	ether_addr_copy(eth->h_dest, neigh->ha);
	ether_addr_copy(eth->h_source, out->dev_addr);

	rcu_read_unlock_bh();

	return 0;
}

static unsigned int hnat_ipv4_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	u32 nexthop;
	struct neighbour *neigh;
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = (__force struct net_device *)out;

	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	rcu_read_lock_bh();
	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI4)\n", __func__,
			   &ip_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get all zero ethernet address ? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
	memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);

	rcu_read_unlock_bh();

	return 0;
}

static u16 ppe_get_chkbase(struct iphdr *iph)
{
	u16 org_chksum = ntohs(iph->check);
	u16 org_tot_len = ntohs(iph->tot_len);
	u16 org_id = ntohs(iph->id);
	u16 chksum_tmp, tot_len_tmp, id_tmp;
	u32 tmp = 0;
	u16 chksum_base = 0;

	chksum_tmp = ~(org_chksum);
	tot_len_tmp = ~(org_tot_len);
	id_tmp = ~(org_id);
	tmp = chksum_tmp + tot_len_tmp + id_tmp;
	tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
	tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
	chksum_base = tmp & 0xFFFF;

	return chksum_base;
}

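/* Copy the destination/source MACs and PPPoE session ID from the packet into
 * the FOE entry, using the field layout that matches the entry's packet type.
 */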
struct foe_entry ppe_fill_L2_info(struct ethhdr *eth, struct foe_entry entry,
				  struct flow_offload_hw_path *hw_path)
{
	switch ((int)entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		entry.ipv4_hnapt.dmac_hi = swab32(*((u32 *)eth->h_dest));
		entry.ipv4_hnapt.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
		entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
		entry.ipv4_hnapt.pppoe_id = hw_path->pppoe_sid;
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
	case IPV6_HNAPT:
	case IPV6_HNAT:
		entry.ipv6_5t_route.dmac_hi = swab32(*((u32 *)eth->h_dest));
		entry.ipv6_5t_route.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
		entry.ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv6_5t_route.smac_lo =
			swab16(*((u16 *)&eth->h_source[4]));
		entry.ipv6_5t_route.pppoe_id = hw_path->pppoe_sid;
		break;
	}
	return entry;
}

struct foe_entry ppe_fill_info_blk(struct ethhdr *eth, struct foe_entry entry,
				   struct flow_offload_hw_path *hw_path)
{
	entry.bfib1.psn = (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) ? 1 : 0;
	entry.bfib1.vlan_layer += (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN) ? 1 : 0;
	entry.bfib1.vpm = (entry.bfib1.vlan_layer) ? 1 : 0;
	entry.bfib1.cah = 1;
	entry.bfib1.time_stamp = (hnat_priv->data->version == MTK_HNAT_V2 ||
				  hnat_priv->data->version == MTK_HNAT_V3) ?
		readl(hnat_priv->fe_base + 0x0010) & (0xFF) :
		readl(hnat_priv->fe_base + 0x0010) & (0x7FFF);

	switch ((int)entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		if (hnat_priv->data->mcast &&
		    is_multicast_ether_addr(&eth->h_dest[0])) {
			entry.ipv4_hnapt.iblk2.mcast = 1;
			if (hnat_priv->data->version == MTK_HNAT_V1_3) {
				entry.bfib1.sta = 1;
				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
			}
		} else {
			entry.ipv4_hnapt.iblk2.mcast = 0;
		}

		entry.ipv4_hnapt.iblk2.port_ag =
			(hnat_priv->data->version == MTK_HNAT_V2 ||
			 hnat_priv->data->version == MTK_HNAT_V3) ? 0xf : 0x3f;
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
	case IPV6_HNAPT:
	case IPV6_HNAT:
		if (hnat_priv->data->mcast &&
		    is_multicast_ether_addr(&eth->h_dest[0])) {
			entry.ipv6_5t_route.iblk2.mcast = 1;
			if (hnat_priv->data->version == MTK_HNAT_V1_3) {
				entry.bfib1.sta = 1;
				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
			}
		} else {
			entry.ipv6_5t_route.iblk2.mcast = 0;
		}

		entry.ipv6_5t_route.iblk2.port_ag =
			(hnat_priv->data->version == MTK_HNAT_V2 ||
			 hnat_priv->data->version == MTK_HNAT_V3) ? 0xf : 0x3f;
		break;
	}
	return entry;
}

/* Build a complete FOE entry from the skb and its unbound PPE entry: L2/L3/L4
 * rewrite fields, VLAN/QoS information and the egress GMAC port.
 */
static unsigned int skb_to_hnat_info(struct sk_buff *skb,
				     const struct net_device *dev,
				     struct foe_entry *foe,
				     struct flow_offload_hw_path *hw_path)
{
	struct foe_entry entry = { 0 };
	int whnat = IS_WHNAT(dev);
	struct ethhdr *eth;
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct tcpudphdr _ports;
	const struct tcpudphdr *pptr;
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	u32 gmac = NR_DISCARD;
	int udp = 0;
	u32 qid = 0;
	u32 port_id = 0;
	int mape = 0;

	ct = nf_ct_get(skb, &ctinfo);

	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP)
		/* point to ethernet header for DS-Lite and MapE */
		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
	else
		eth = eth_hdr(skb);

	/*do not bind multicast if PPE mcast not enable*/
	if (!hnat_priv->data->mcast && is_multicast_ether_addr(eth->h_dest))
		return 0;

	entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
	entry.bfib1.state = foe->udib1.state;

#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
	entry.bfib1.sp = foe->udib1.sp;
#endif

	switch (ntohs(eth->h_proto)) {
	case ETH_P_IP:
		iph = ip_hdr(skb);
		switch (iph->protocol) {
		case IPPROTO_UDP:
			udp = 1;
			/* fallthrough */
		case IPPROTO_TCP:
			entry.ipv4_hnapt.etype = htons(ETH_P_IP);

			/* DS-Lite WAN->LAN */
			if (entry.ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE ||
			    entry.ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E) {
				entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
				entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
				entry.ipv4_dslite.sport =
					foe->ipv4_dslite.sport;
				entry.ipv4_dslite.dport =
					foe->ipv4_dslite.dport;

#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
				if (entry.bfib1.pkt_type == IPV4_MAP_E) {
					pptr = skb_header_pointer(skb,
								  iph->ihl * 4,
								  sizeof(_ports),
								  &_ports);
					if (unlikely(!pptr))
						return -1;

					entry.ipv4_mape.new_sip =
						ntohl(iph->saddr);
					entry.ipv4_mape.new_dip =
						ntohl(iph->daddr);
					entry.ipv4_mape.new_sport =
						ntohs(pptr->src);
					entry.ipv4_mape.new_dport =
						ntohs(pptr->dst);
				}
#endif

				entry.ipv4_dslite.tunnel_sipv6_0 =
					foe->ipv4_dslite.tunnel_sipv6_0;
				entry.ipv4_dslite.tunnel_sipv6_1 =
					foe->ipv4_dslite.tunnel_sipv6_1;
				entry.ipv4_dslite.tunnel_sipv6_2 =
					foe->ipv4_dslite.tunnel_sipv6_2;
				entry.ipv4_dslite.tunnel_sipv6_3 =
					foe->ipv4_dslite.tunnel_sipv6_3;

				entry.ipv4_dslite.tunnel_dipv6_0 =
					foe->ipv4_dslite.tunnel_dipv6_0;
				entry.ipv4_dslite.tunnel_dipv6_1 =
					foe->ipv4_dslite.tunnel_dipv6_1;
				entry.ipv4_dslite.tunnel_dipv6_2 =
					foe->ipv4_dslite.tunnel_dipv6_2;
				entry.ipv4_dslite.tunnel_dipv6_3 =
					foe->ipv4_dslite.tunnel_dipv6_3;

				entry.ipv4_dslite.bfib1.rmt = 1;
				entry.ipv4_dslite.iblk2.dscp = iph->tos;
				entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_dslite.iblk2.mibf = 1;

			} else {
				entry.ipv4_hnapt.iblk2.dscp = iph->tos;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_hnapt.iblk2.mibf = 1;

				entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;

				if (skb_vlan_tag_present(skb)) {
					entry.bfib1.vlan_layer += 1;

					if (entry.ipv4_hnapt.vlan1)
						entry.ipv4_hnapt.vlan2 =
							skb->vlan_tci;
					else
						entry.ipv4_hnapt.vlan1 =
							skb->vlan_tci;
				}

				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;

				entry.ipv4_hnapt.new_sip = ntohl(iph->saddr);
				entry.ipv4_hnapt.new_dip = ntohl(iph->daddr);
			}

			entry.ipv4_hnapt.bfib1.udp = udp;
			if (IS_IPV4_HNAPT(foe)) {
				pptr = skb_header_pointer(skb, iph->ihl * 4,
							  sizeof(_ports),
							  &_ports);
				if (unlikely(!pptr))
					return -1;

				entry.ipv4_hnapt.new_sport = ntohs(pptr->src);
				entry.ipv4_hnapt.new_dport = ntohs(pptr->dst);
			}

			break;

		default:
			return -1;
		}
		trace_printk(
			"[%s]skb->head=%p, skb->data=%p,ip_hdr=%p, skb->len=%d, skb->data_len=%d\n",
			__func__, skb->head, skb->data, iph, skb->len,
			skb->data_len);
		break;

	case ETH_P_IPV6:
		ip6h = ipv6_hdr(skb);
		switch (ip6h->nexthdr) {
		case NEXTHDR_UDP:
			udp = 1;
			/* fallthrough */
		case NEXTHDR_TCP: /* IPv6-5T or IPv6-3T */
			entry.ipv6_5t_route.etype = htons(ETH_P_IPV6);

			entry.ipv6_5t_route.vlan1 = hw_path->vlan_id;

			if (skb_vlan_tag_present(skb)) {
				entry.bfib1.vlan_layer += 1;

				if (entry.ipv6_5t_route.vlan1)
					entry.ipv6_5t_route.vlan2 =
						skb->vlan_tci;
				else
					entry.ipv6_5t_route.vlan1 =
						skb->vlan_tci;
			}

			if (hnat_priv->data->per_flow_accounting)
				entry.ipv6_5t_route.iblk2.mibf = 1;
			entry.ipv6_5t_route.bfib1.udp = udp;

			if (IS_IPV6_6RD(foe)) {
				entry.ipv6_5t_route.bfib1.rmt = 1;
				entry.ipv6_6rd.tunnel_sipv4 =
					foe->ipv6_6rd.tunnel_sipv4;
				entry.ipv6_6rd.tunnel_dipv4 =
					foe->ipv6_6rd.tunnel_dipv4;
			}

			entry.ipv6_3t_route.ipv6_sip0 =
				foe->ipv6_3t_route.ipv6_sip0;
			entry.ipv6_3t_route.ipv6_sip1 =
				foe->ipv6_3t_route.ipv6_sip1;
			entry.ipv6_3t_route.ipv6_sip2 =
				foe->ipv6_3t_route.ipv6_sip2;
			entry.ipv6_3t_route.ipv6_sip3 =
				foe->ipv6_3t_route.ipv6_sip3;

			entry.ipv6_3t_route.ipv6_dip0 =
				foe->ipv6_3t_route.ipv6_dip0;
			entry.ipv6_3t_route.ipv6_dip1 =
				foe->ipv6_3t_route.ipv6_dip1;
			entry.ipv6_3t_route.ipv6_dip2 =
				foe->ipv6_3t_route.ipv6_dip2;
			entry.ipv6_3t_route.ipv6_dip3 =
				foe->ipv6_3t_route.ipv6_dip3;

			if (IS_IPV6_3T_ROUTE(foe)) {
				entry.ipv6_3t_route.prot =
					foe->ipv6_3t_route.prot;
				entry.ipv6_3t_route.hph =
					foe->ipv6_3t_route.hph;
			}

			if (IS_IPV6_5T_ROUTE(foe) || IS_IPV6_6RD(foe)) {
				entry.ipv6_5t_route.sport =
					foe->ipv6_5t_route.sport;
				entry.ipv6_5t_route.dport =
					foe->ipv6_5t_route.dport;
			}

			if (ct && (ct->status & IPS_SRC_NAT)) {
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
				entry.bfib1.pkt_type = IPV6_HNAPT;

				if (IS_WAN(dev) || IS_DSA_WAN(dev)) {
					entry.ipv6_hnapt.eg_ipv6_dir =
						IPV6_SNAT;
					entry.ipv6_hnapt.new_ipv6_ip0 =
						ntohl(ip6h->saddr.s6_addr32[0]);
					entry.ipv6_hnapt.new_ipv6_ip1 =
						ntohl(ip6h->saddr.s6_addr32[1]);
					entry.ipv6_hnapt.new_ipv6_ip2 =
						ntohl(ip6h->saddr.s6_addr32[2]);
					entry.ipv6_hnapt.new_ipv6_ip3 =
						ntohl(ip6h->saddr.s6_addr32[3]);
				} else {
					entry.ipv6_hnapt.eg_ipv6_dir =
						IPV6_DNAT;
					entry.ipv6_hnapt.new_ipv6_ip0 =
						ntohl(ip6h->daddr.s6_addr32[0]);
					entry.ipv6_hnapt.new_ipv6_ip1 =
						ntohl(ip6h->daddr.s6_addr32[1]);
					entry.ipv6_hnapt.new_ipv6_ip2 =
						ntohl(ip6h->daddr.s6_addr32[2]);
					entry.ipv6_hnapt.new_ipv6_ip3 =
						ntohl(ip6h->daddr.s6_addr32[3]);
				}

				pptr = skb_header_pointer(skb, IPV6_HDR_LEN,
							  sizeof(_ports),
							  &_ports);
				if (unlikely(!pptr))
					return -1;

				entry.ipv6_hnapt.new_sport = ntohs(pptr->src);
				entry.ipv6_hnapt.new_dport = ntohs(pptr->dst);
#else
				return -1;
#endif
			}

			entry.ipv6_5t_route.iblk2.dscp =
				(ip6h->priority << 4 |
				 (ip6h->flow_lbl[0] >> 4));
			break;

		case NEXTHDR_IPIP:
			if ((!mape_toggle &&
			     entry.bfib1.pkt_type == IPV4_DSLITE) ||
			    (mape_toggle &&
			     entry.bfib1.pkt_type == IPV4_MAP_E)) {
				/* DS-Lite LAN->WAN */
				entry.ipv4_dslite.bfib1.udp =
					foe->ipv4_dslite.bfib1.udp;
				entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
				entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
				entry.ipv4_dslite.sport =
					foe->ipv4_dslite.sport;
				entry.ipv4_dslite.dport =
					foe->ipv4_dslite.dport;

				entry.ipv4_dslite.tunnel_sipv6_0 =
					ntohl(ip6h->saddr.s6_addr32[0]);
				entry.ipv4_dslite.tunnel_sipv6_1 =
					ntohl(ip6h->saddr.s6_addr32[1]);
				entry.ipv4_dslite.tunnel_sipv6_2 =
					ntohl(ip6h->saddr.s6_addr32[2]);
				entry.ipv4_dslite.tunnel_sipv6_3 =
					ntohl(ip6h->saddr.s6_addr32[3]);

				entry.ipv4_dslite.tunnel_dipv6_0 =
					ntohl(ip6h->daddr.s6_addr32[0]);
				entry.ipv4_dslite.tunnel_dipv6_1 =
					ntohl(ip6h->daddr.s6_addr32[1]);
				entry.ipv4_dslite.tunnel_dipv6_2 =
					ntohl(ip6h->daddr.s6_addr32[2]);
				entry.ipv4_dslite.tunnel_dipv6_3 =
					ntohl(ip6h->daddr.s6_addr32[3]);

				ppe_fill_flow_lbl(&entry, ip6h);

				entry.ipv4_dslite.priority = ip6h->priority;
				entry.ipv4_dslite.hop_limit = ip6h->hop_limit;
				entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_dslite.iblk2.mibf = 1;
				/* Map-E LAN->WAN record inner IPv4 header info. */
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
				if (mape_toggle) {
					entry.ipv4_dslite.iblk2.dscp = foe->ipv4_dslite.iblk2.dscp;
					entry.ipv4_mape.new_sip = foe->ipv4_mape.new_sip;
					entry.ipv4_mape.new_dip = foe->ipv4_mape.new_dip;
					entry.ipv4_mape.new_sport = foe->ipv4_mape.new_sport;
					entry.ipv4_mape.new_dport = foe->ipv4_mape.new_dport;
				}
#endif
			} else if (mape_toggle &&
				   entry.bfib1.pkt_type == IPV4_HNAPT) {
				/* MapE LAN -> WAN */
				mape = 1;
				entry.ipv4_hnapt.iblk2.dscp =
					foe->ipv4_hnapt.iblk2.dscp;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_hnapt.iblk2.mibf = 1;

				if (IS_GMAC1_MODE)
					entry.ipv4_hnapt.vlan1 = 1;
				else
					entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;

				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;

				entry.ipv4_hnapt.new_sip =
					foe->ipv4_hnapt.new_sip;
				entry.ipv4_hnapt.new_dip =
					foe->ipv4_hnapt.new_dip;
				entry.ipv4_hnapt.etype = htons(ETH_P_IP);

				if (IS_HQOS_MODE) {
					entry.ipv4_hnapt.iblk2.qid =
						(hnat_priv->data->version ==
						 MTK_HNAT_V2 ||
						 hnat_priv->data->version ==
						 MTK_HNAT_V3) ?
						skb->mark & 0x7f : skb->mark & 0xf;
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
					if ((IS_HQOS_UL_MODE && IS_WAN(dev)) ||
					    (IS_HQOS_DL_MODE &&
					     IS_LAN_GRP(dev)) ||
					    (IS_PPPQ_MODE &&
					     IS_PPPQ_PATH(dev, skb)))
						entry.ipv4_hnapt.tport_id = 1;
					else
						entry.ipv4_hnapt.tport_id = 0;
#else
					entry.ipv4_hnapt.iblk2.fqos = 1;
#endif
				}

				entry.ipv4_hnapt.bfib1.udp =
					foe->ipv4_hnapt.bfib1.udp;

				entry.ipv4_hnapt.new_sport =
					foe->ipv4_hnapt.new_sport;
				entry.ipv4_hnapt.new_dport =
					foe->ipv4_hnapt.new_dport;
				mape_l2w_v6h = *ip6h;
			}
			break;

		default:
			return -1;
		}

		trace_printk(
			"[%s]skb->head=%p, skb->data=%p,ipv6_hdr=%p, skb->len=%d, skb->data_len=%d\n",
			__func__, skb->head, skb->data, ip6h, skb->len,
			skb->data_len);
		break;

1534 default:
developerfd40db22021-04-29 10:08:25 +08001535 iph = ip_hdr(skb);
1536 switch (entry.bfib1.pkt_type) {
1537 case IPV6_6RD: /* 6RD LAN->WAN */
1538 entry.ipv6_6rd.ipv6_sip0 = foe->ipv6_6rd.ipv6_sip0;
1539 entry.ipv6_6rd.ipv6_sip1 = foe->ipv6_6rd.ipv6_sip1;
1540 entry.ipv6_6rd.ipv6_sip2 = foe->ipv6_6rd.ipv6_sip2;
1541 entry.ipv6_6rd.ipv6_sip3 = foe->ipv6_6rd.ipv6_sip3;
1542
1543 entry.ipv6_6rd.ipv6_dip0 = foe->ipv6_6rd.ipv6_dip0;
1544 entry.ipv6_6rd.ipv6_dip1 = foe->ipv6_6rd.ipv6_dip1;
1545 entry.ipv6_6rd.ipv6_dip2 = foe->ipv6_6rd.ipv6_dip2;
1546 entry.ipv6_6rd.ipv6_dip3 = foe->ipv6_6rd.ipv6_dip3;
1547
1548 entry.ipv6_6rd.sport = foe->ipv6_6rd.sport;
1549 entry.ipv6_6rd.dport = foe->ipv6_6rd.dport;
1550 entry.ipv6_6rd.tunnel_sipv4 = ntohl(iph->saddr);
1551 entry.ipv6_6rd.tunnel_dipv4 = ntohl(iph->daddr);
1552 entry.ipv6_6rd.hdr_chksum = ppe_get_chkbase(iph);
1553 entry.ipv6_6rd.flag = (ntohs(iph->frag_off) >> 13);
1554 entry.ipv6_6rd.ttl = iph->ttl;
1555 entry.ipv6_6rd.dscp = iph->tos;
1556 entry.ipv6_6rd.per_flow_6rd_id = 1;
1557 entry.ipv6_6rd.vlan1 = hw_path->vlan_id;
1558 if (hnat_priv->data->per_flow_accounting)
1559 entry.ipv6_6rd.iblk2.mibf = 1;
1560 break;
1561
1562 default:
1563 return -1;
1564 }
1565 }
1566
1567 /* Fill Layer2 Info.*/
1568 entry = ppe_fill_L2_info(eth, entry, hw_path);
1569
1570 /* Fill Info Blk*/
1571 entry = ppe_fill_info_blk(eth, entry, hw_path);
1572
1573 if (IS_LAN(dev)) {
1574 if (IS_DSA_LAN(dev))
developeraf07fad2021-11-19 17:53:42 +08001575 port_id = hnat_dsa_fill_stag(dev, &entry, hw_path,
1576 ntohs(eth->h_proto),
1577 mape);
developerfd40db22021-04-29 10:08:25 +08001578
1579 if (IS_BOND_MODE)
1580 gmac = ((skb_hnat_entry(skb) >> 1) % hnat_priv->gmac_num) ?
1581 NR_GMAC2_PORT : NR_GMAC1_PORT;
1582 else
1583 gmac = NR_GMAC1_PORT;
developerd35bbcc2022-09-28 22:46:01 +08001584 } else if (IS_LAN2(dev)) {
1585 gmac = NR_GMAC3_PORT;
developerfd40db22021-04-29 10:08:25 +08001586 } else if (IS_WAN(dev)) {
1587 if (IS_DSA_WAN(dev))
developeraf07fad2021-11-19 17:53:42 +08001588 port_id = hnat_dsa_fill_stag(dev,&entry, hw_path,
1589 ntohs(eth->h_proto),
1590 mape);
developerfd40db22021-04-29 10:08:25 +08001591 if (mape_toggle && mape == 1) {
1592 gmac = NR_PDMA_PORT;
1593 /* Set act_dp = wan_dev */
1594 entry.ipv4_hnapt.act_dp = dev->ifindex;
1595 } else {
1596 gmac = (IS_GMAC1_MODE) ? NR_GMAC1_PORT : NR_GMAC2_PORT;
1597 }
developerd35bbcc2022-09-28 22:46:01 +08001598 } else if (IS_EXT(dev) && (FROM_GE_PPD(skb) || FROM_GE_LAN_GRP(skb) ||
developer99506e52021-06-30 22:03:02 +08001599 FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb) || FROM_WED(skb))) {
developerfd40db22021-04-29 10:08:25 +08001600 if (!hnat_priv->data->whnat && IS_GMAC1_MODE) {
1601 entry.bfib1.vpm = 1;
1602 entry.bfib1.vlan_layer = 1;
1603
1604 if (FROM_GE_LAN(skb))
1605 entry.ipv4_hnapt.vlan1 = 1;
1606 else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
1607 entry.ipv4_hnapt.vlan1 = 2;
1608 }
1609
1610 trace_printk("learn of lan or wan(iif=%x) --> %s(ext)\n",
1611 skb_hnat_iface(skb), dev->name);
1612 /* To CPU then stolen by pre-routing hant hook of LAN/WAN
1613 * Current setting is PDMA RX.
1614 */
1615 gmac = NR_PDMA_PORT;
1616 if (IS_IPV4_GRP(foe))
1617 entry.ipv4_hnapt.act_dp = dev->ifindex;
1618 else
1619 entry.ipv6_5t_route.act_dp = dev->ifindex;
1620 } else {
1621 printk_ratelimited(KERN_WARNING
1622 "Unknown case of dp, iif=%x --> %s\n",
1623 skb_hnat_iface(skb), dev->name);
1624
1625 return 0;
1626 }
1627
developerafff5662022-06-29 10:09:56 +08001628 if (IS_HQOS_MODE || skb->mark >= MAX_PPPQ_PORT_NUM)
developeraf07fad2021-11-19 17:53:42 +08001629 qid = skb->mark & (MTK_QDMA_TX_MASK);
developer934756a2022-11-18 14:51:34 +08001630 else if (IS_PPPQ_MODE && IS_PPPQ_PATH(dev, skb))
developeraf07fad2021-11-19 17:53:42 +08001631 qid = port_id & MTK_QDMA_TX_MASK;
1632 else
1633 qid = 0;
developerfd40db22021-04-29 10:08:25 +08001634
1635 if (IS_IPV4_GRP(foe)) {
1636 entry.ipv4_hnapt.iblk2.dp = gmac;
1637 entry.ipv4_hnapt.iblk2.port_mg =
developer4164cfe2022-12-01 11:27:41 +08001638 (hnat_priv->data->version == MTK_HNAT_V1_1) ? 0x3f : 0;
developer24948202021-11-24 17:38:27 +08001639
developeraf07fad2021-11-19 17:53:42 +08001640 if (qos_toggle) {
developer4164cfe2022-12-01 11:27:41 +08001641 if (hnat_priv->data->version == MTK_HNAT_V2 ||
1642 hnat_priv->data->version == MTK_HNAT_V3) {
developeraf07fad2021-11-19 17:53:42 +08001643 entry.ipv4_hnapt.iblk2.qid = qid & 0x7f;
1644 } else {
1645 /* qid[5:0] = port_mg[1:0] + qid[3:0] */
1646 entry.ipv4_hnapt.iblk2.qid = qid & 0xf;
developer4164cfe2022-12-01 11:27:41 +08001647 if (hnat_priv->data->version != MTK_HNAT_V1_1)
developeraf07fad2021-11-19 17:53:42 +08001648 entry.ipv4_hnapt.iblk2.port_mg |=
1649 ((qid >> 4) & 0x3);
developerfd40db22021-04-29 10:08:25 +08001650
developerd35bbcc2022-09-28 22:46:01 +08001651 if (((IS_EXT(dev) && (FROM_GE_LAN_GRP(skb) ||
developeraf07fad2021-11-19 17:53:42 +08001652 FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) ||
1653 ((mape_toggle && mape == 1) && !FROM_EXT(skb))) &&
1654 (!whnat)) {
1655 entry.ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
1656 entry.ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
1657 entry.bfib1.vlan_layer = 1;
1658 }
developerfd40db22021-04-29 10:08:25 +08001659 }
developerfd40db22021-04-29 10:08:25 +08001660
developer34028fb2022-01-11 13:51:29 +08001661 if (FROM_EXT(skb) || skb_hnat_sport(skb) == NR_QDMA_PORT ||
1662 (IS_PPPQ_MODE && !IS_DSA_LAN(dev) && !IS_DSA_WAN(dev)))
developeraf07fad2021-11-19 17:53:42 +08001663 entry.ipv4_hnapt.iblk2.fqos = 0;
1664 else
developerd35bbcc2022-09-28 22:46:01 +08001665#if defined(CONFIG_MEDIATEK_NETSYS_V3)
developer934756a2022-11-18 14:51:34 +08001666 if ((IS_HQOS_UL_MODE && IS_WAN(dev)) ||
developer493adc32022-11-29 22:34:18 +08001667 (IS_HQOS_DL_MODE && IS_LAN_GRP(dev)) ||
developer934756a2022-11-18 14:51:34 +08001668 (IS_PPPQ_MODE &&
1669 IS_PPPQ_PATH(dev, skb)))
developer47545a32022-11-15 16:06:58 +08001670 entry.ipv4_hnapt.tport_id = 1;
1671 else
1672 entry.ipv4_hnapt.tport_id = 0;
developerd35bbcc2022-09-28 22:46:01 +08001673#else
developer399ec072022-06-24 16:07:41 +08001674 entry.ipv4_hnapt.iblk2.fqos =
developer934756a2022-11-18 14:51:34 +08001675 (!IS_PPPQ_MODE ||
1676 (IS_PPPQ_MODE &&
1677 IS_PPPQ_PATH(dev, skb)));
developerd35bbcc2022-09-28 22:46:01 +08001678#endif
developeraf07fad2021-11-19 17:53:42 +08001679 } else {
developerfd40db22021-04-29 10:08:25 +08001680 entry.ipv4_hnapt.iblk2.fqos = 0;
developeraf07fad2021-11-19 17:53:42 +08001681 }
developerfd40db22021-04-29 10:08:25 +08001682 } else {
1683 entry.ipv6_5t_route.iblk2.dp = gmac;
1684 entry.ipv6_5t_route.iblk2.port_mg =
developer4164cfe2022-12-01 11:27:41 +08001685 (hnat_priv->data->version == MTK_HNAT_V1_1) ? 0x3f : 0;
developer24948202021-11-24 17:38:27 +08001686
developeraf07fad2021-11-19 17:53:42 +08001687 if (qos_toggle) {
developer4164cfe2022-12-01 11:27:41 +08001688 if (hnat_priv->data->version == MTK_HNAT_V2 ||
1689 hnat_priv->data->version == MTK_HNAT_V3) {
developeraf07fad2021-11-19 17:53:42 +08001690 entry.ipv6_5t_route.iblk2.qid = qid & 0x7f;
1691 } else {
1692 /* qid[5:0] = port_mg[1:0] + qid[3:0] */
1693 entry.ipv6_5t_route.iblk2.qid = qid & 0xf;
developer4164cfe2022-12-01 11:27:41 +08001694 if (hnat_priv->data->version != MTK_HNAT_V1_1)
developeraf07fad2021-11-19 17:53:42 +08001695 entry.ipv6_5t_route.iblk2.port_mg |=
1696 ((qid >> 4) & 0x3);
developerfd40db22021-04-29 10:08:25 +08001697
developerd35bbcc2022-09-28 22:46:01 +08001698 if (IS_EXT(dev) && (FROM_GE_LAN_GRP(skb) ||
developeraf07fad2021-11-19 17:53:42 +08001699 FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) &&
1700 (!whnat)) {
1701 entry.ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
1702 entry.ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
1703 entry.bfib1.vlan_layer = 1;
1704 }
developerfd40db22021-04-29 10:08:25 +08001705 }
developerfd40db22021-04-29 10:08:25 +08001706
developer34028fb2022-01-11 13:51:29 +08001707 if (FROM_EXT(skb) ||
1708 (IS_PPPQ_MODE && !IS_DSA_LAN(dev) && !IS_DSA_WAN(dev)))
developeraf07fad2021-11-19 17:53:42 +08001709 entry.ipv6_5t_route.iblk2.fqos = 0;
1710 else
developerd35bbcc2022-09-28 22:46:01 +08001711#if defined(CONFIG_MEDIATEK_NETSYS_V3)
developer934756a2022-11-18 14:51:34 +08001712 if ((IS_HQOS_UL_MODE && IS_WAN(dev)) ||
developer493adc32022-11-29 22:34:18 +08001713 (IS_HQOS_DL_MODE && IS_LAN_GRP(dev)) ||
developer934756a2022-11-18 14:51:34 +08001714 (IS_PPPQ_MODE &&
1715 IS_PPPQ_PATH(dev, skb)))
developer47545a32022-11-15 16:06:58 +08001716 entry.ipv6_5t_route.tport_id = 1;
1717 else
1718 entry.ipv6_5t_route.tport_id = 0;
developerd35bbcc2022-09-28 22:46:01 +08001719#else
developer399ec072022-06-24 16:07:41 +08001720 entry.ipv6_5t_route.iblk2.fqos =
developer934756a2022-11-18 14:51:34 +08001721 (!IS_PPPQ_MODE ||
1722 (IS_PPPQ_MODE &&
1723 IS_PPPQ_PATH(dev, skb)));
developerd35bbcc2022-09-28 22:46:01 +08001724#endif
developeraf07fad2021-11-19 17:53:42 +08001725 } else {
developerfd40db22021-04-29 10:08:25 +08001726 entry.ipv6_5t_route.iblk2.fqos = 0;
developeraf07fad2021-11-19 17:53:42 +08001727 }
developerfd40db22021-04-29 10:08:25 +08001728 }
1729
developer60e60962021-06-15 21:05:07 +08001730 /* The INFO2.port_mg and 2nd VLAN ID fields of PPE entry are redefined
1731 * by Wi-Fi whnat engine. These data and INFO2.dp will be updated and
1732 * the entry is set to BIND state in mtk_sw_nat_hook_tx().
1733 */
developer7b36dca2022-05-19 18:29:10 +08001734 if (!whnat) {
1735 entry.bfib1.ttl = 1;
developer60e60962021-06-15 21:05:07 +08001736 entry.bfib1.state = BIND;
developer7b36dca2022-05-19 18:29:10 +08001737 }
developer60e60962021-06-15 21:05:07 +08001738
developerbc552cc2022-03-15 16:19:27 +08001739 wmb();
developerfd40db22021-04-29 10:08:25 +08001740 memcpy(foe, &entry, sizeof(entry));
1741 /* reset statistics for this entry */
developer577ad2f2022-11-28 10:33:36 +08001742 if (hnat_priv->data->per_flow_accounting &&
1743 skb_hnat_entry(skb) < hnat_priv->foe_etry_num &&
1744 skb_hnat_ppe(skb) < CFG_PPE_NUM)
developer471f6562021-05-10 20:48:34 +08001745 memset(&hnat_priv->acct[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
1746 0, sizeof(struct mib_entry));
developerfd40db22021-04-29 10:08:25 +08001747
developerfdfe1572021-09-13 16:56:33 +08001748 skb_hnat_filled(skb) = HNAT_INFO_FILLED;
developerfd40db22021-04-29 10:08:25 +08001749
1750 return 0;
1751}
1752
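/* TX hook for the Wi-Fi offload (whnat) path: complete the half-filled PPE
 * entry of an skb headed to WDMA by copying the source MAC, VLAN and Wi-Fi
 * info (bssid/wcid/rxid) into the FOE entry, then bind it.
 */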
1753int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
1754{
1755 struct foe_entry *entry;
1756 struct ethhdr *eth;
developerbc552cc2022-03-15 16:19:27 +08001757 struct hnat_bind_info_blk bfib1_tx;
developerfd40db22021-04-29 10:08:25 +08001758
developerfdfe1572021-09-13 16:56:33 +08001759 if (skb_hnat_alg(skb) || !is_hnat_info_filled(skb) ||
1760 !is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb))
developerfd40db22021-04-29 10:08:25 +08001761 return NF_ACCEPT;
1762
1763 trace_printk(
1764 "[%s]entry=%x reason=%x gmac_no=%x wdmaid=%x rxid=%x wcid=%x bssid=%x\n",
1765 __func__, skb_hnat_entry(skb), skb_hnat_reason(skb), gmac_no,
1766 skb_hnat_wdma_id(skb), skb_hnat_bss_id(skb),
1767 skb_hnat_wc_id(skb), skb_hnat_rx_id(skb));
1768
developer99506e52021-06-30 22:03:02 +08001769 if ((gmac_no != NR_WDMA0_PORT) && (gmac_no != NR_WDMA1_PORT) &&
1770 (gmac_no != NR_WHNAT_WDMA_PORT))
1771 return NF_ACCEPT;
1772
developerc0419aa2022-12-07 15:56:36 +08001773 if (unlikely(!skb_mac_header_was_set(skb)))
1774 return NF_ACCEPT;
1775
developerfd40db22021-04-29 10:08:25 +08001776 if (!skb_hnat_is_hashed(skb))
1777 return NF_ACCEPT;
1778
developer955a6f62021-07-26 10:54:39 +08001779 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
1780 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
1781 return NF_ACCEPT;
1782
developer471f6562021-05-10 20:48:34 +08001783 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08001784 if (entry_hnat_is_bound(entry))
1785 return NF_ACCEPT;
1786
1787 if (skb_hnat_reason(skb) != HIT_UNBIND_RATE_REACH)
1788 return NF_ACCEPT;
1789
1790 eth = eth_hdr(skb);
developerbc552cc2022-03-15 16:19:27 +08001791 memcpy(&bfib1_tx, &entry->bfib1, sizeof(entry->bfib1));
developer8116b0a2021-08-23 18:07:20 +08001792
1793 /*not bind multicast if PPE mcast not enable*/
developerfdfe1572021-09-13 16:56:33 +08001794 if (!hnat_priv->data->mcast) {
1795 if (is_multicast_ether_addr(eth->h_dest))
1796 return NF_ACCEPT;
1797
1798 if (IS_IPV4_GRP(entry))
1799 entry->ipv4_hnapt.iblk2.mcast = 0;
1800 else
1801 entry->ipv6_5t_route.iblk2.mcast = 0;
1802 }
developerfd40db22021-04-29 10:08:25 +08001803
1804 /* Some mt_wifi virtual interfaces, such as apcli,
1805 * will change the smac for special purposes.
1806 */
developer5ffc5f12022-10-25 18:51:46 +08001807 switch ((int)bfib1_tx.pkt_type) {
developerfd40db22021-04-29 10:08:25 +08001808 case IPV4_HNAPT:
1809 case IPV4_HNAT:
1810 entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
1811 entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
1812 break;
1813 case IPV4_DSLITE:
1814 case IPV4_MAP_E:
1815 case IPV6_6RD:
1816 case IPV6_5T_ROUTE:
1817 case IPV6_3T_ROUTE:
developer5ffc5f12022-10-25 18:51:46 +08001818 case IPV6_HNAPT:
1819 case IPV6_HNAT:
developerfd40db22021-04-29 10:08:25 +08001820 entry->ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
1821 entry->ipv6_5t_route.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
1822 break;
1823 }
1824
developer0ff76882021-10-26 10:54:13 +08001825 if (skb->vlan_tci) {
developerbc552cc2022-03-15 16:19:27 +08001826 bfib1_tx.vlan_layer = 1;
1827 bfib1_tx.vpm = 1;
developer0ff76882021-10-26 10:54:13 +08001828 if (IS_IPV4_GRP(entry)) {
1829 entry->ipv4_hnapt.etype = htons(ETH_P_8021Q);
developer00a07372022-03-11 16:04:34 +08001830 entry->ipv4_hnapt.vlan1 = skb->vlan_tci;
developer0ff76882021-10-26 10:54:13 +08001831 } else if (IS_IPV6_GRP(entry)) {
1832 entry->ipv6_5t_route.etype = htons(ETH_P_8021Q);
developer00a07372022-03-11 16:04:34 +08001833 entry->ipv6_5t_route.vlan1 = skb->vlan_tci;
developer0ff76882021-10-26 10:54:13 +08001834 }
1835 } else {
developerbc552cc2022-03-15 16:19:27 +08001836 bfib1_tx.vpm = 0;
1837 bfib1_tx.vlan_layer = 0;
developer0ff76882021-10-26 10:54:13 +08001838 }
developer60e60962021-06-15 21:05:07 +08001839
developerfd40db22021-04-29 10:08:25 +08001840 /* MT7622 wifi hw_nat does not support QoS */
1841 if (IS_IPV4_GRP(entry)) {
1842 entry->ipv4_hnapt.iblk2.fqos = 0;
developer4164cfe2022-12-01 11:27:41 +08001843 if ((hnat_priv->data->version == MTK_HNAT_V1_2 &&
developere567ad32021-05-25 17:16:17 +08001844 gmac_no == NR_WHNAT_WDMA_PORT) ||
developer4164cfe2022-12-01 11:27:41 +08001845 ((hnat_priv->data->version == MTK_HNAT_V2 ||
1846 hnat_priv->data->version == MTK_HNAT_V3) &&
developere567ad32021-05-25 17:16:17 +08001847 (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
developerfd40db22021-04-29 10:08:25 +08001848 entry->ipv4_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
1849 entry->ipv4_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
developerd35bbcc2022-09-28 22:46:01 +08001850#if defined(CONFIG_MEDIATEK_NETSYS_V3)
developer47545a32022-11-15 16:06:58 +08001851 entry->ipv4_hnapt.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
developerd35bbcc2022-09-28 22:46:01 +08001852 entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
1853 entry->ipv4_hnapt.iblk2.winfoi = 1;
1854 entry->ipv4_hnapt.winfo_pao.usr_info =
1855 skb_hnat_usr_info(skb);
1856 entry->ipv4_hnapt.winfo_pao.tid = skb_hnat_tid(skb);
1857 entry->ipv4_hnapt.winfo_pao.is_fixedrate =
1858 skb_hnat_is_fixedrate(skb);
1859 entry->ipv4_hnapt.winfo_pao.is_prior =
1860 skb_hnat_is_prior(skb);
1861 entry->ipv4_hnapt.winfo_pao.is_sp = skb_hnat_is_sp(skb);
1862 entry->ipv4_hnapt.winfo_pao.hf = skb_hnat_hf(skb);
1863 entry->ipv4_hnapt.winfo_pao.amsdu = skb_hnat_amsdu(skb);
1864#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
developerfd40db22021-04-29 10:08:25 +08001865 entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
1866 entry->ipv4_hnapt.iblk2.winfoi = 1;
1867#else
1868 entry->ipv4_hnapt.winfo.rxid = skb_hnat_rx_id(skb);
1869 entry->ipv4_hnapt.iblk2w.winfoi = 1;
1870 entry->ipv4_hnapt.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
1871#endif
1872 } else {
1873 if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
developerbc552cc2022-03-15 16:19:27 +08001874 bfib1_tx.vpm = 1;
1875 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001876
developerd35bbcc2022-09-28 22:46:01 +08001877 if (FROM_GE_LAN_GRP(skb))
developerfd40db22021-04-29 10:08:25 +08001878 entry->ipv4_hnapt.vlan1 = 1;
1879 else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
1880 entry->ipv4_hnapt.vlan1 = 2;
1881 }
1882
developer34028fb2022-01-11 13:51:29 +08001883 if (IS_HQOS_MODE &&
developerd35bbcc2022-09-28 22:46:01 +08001884 (FROM_GE_LAN_GRP(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
developerbc552cc2022-03-15 16:19:27 +08001885 bfib1_tx.vpm = 0;
1886 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001887 entry->ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
1888 entry->ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
1889 entry->ipv4_hnapt.iblk2.fqos = 1;
1890 }
developerfd40db22021-04-29 10:08:25 +08001891 }
1892 entry->ipv4_hnapt.iblk2.dp = gmac_no;
developer5ffc5f12022-10-25 18:51:46 +08001893#if defined(CONFIG_MEDIATEK_NETSYS_V3)
1894 } else if (IS_IPV6_HNAPT(entry) || IS_IPV6_HNAT(entry)) {
1895 entry->ipv6_hnapt.iblk2.dp = gmac_no;
1896 entry->ipv6_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
1897 entry->ipv6_hnapt.iblk2.winfoi = 1;
1898
1899 entry->ipv6_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
1900 entry->ipv6_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
1901 entry->ipv6_hnapt.winfo_pao.usr_info = skb_hnat_usr_info(skb);
1902 entry->ipv6_hnapt.winfo_pao.tid = skb_hnat_tid(skb);
1903 entry->ipv6_hnapt.winfo_pao.is_fixedrate =
1904 skb_hnat_is_fixedrate(skb);
1905 entry->ipv6_hnapt.winfo_pao.is_prior = skb_hnat_is_prior(skb);
1906 entry->ipv6_hnapt.winfo_pao.is_sp = skb_hnat_is_sp(skb);
1907 entry->ipv6_hnapt.winfo_pao.hf = skb_hnat_hf(skb);
1908 entry->ipv6_hnapt.winfo_pao.amsdu = skb_hnat_amsdu(skb);
developer47545a32022-11-15 16:06:58 +08001909 entry->ipv6_hnapt.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
developer5ffc5f12022-10-25 18:51:46 +08001910#endif
developerfd40db22021-04-29 10:08:25 +08001911 } else {
1912 entry->ipv6_5t_route.iblk2.fqos = 0;
developer4164cfe2022-12-01 11:27:41 +08001913 if ((hnat_priv->data->version == MTK_HNAT_V1_2 &&
developere567ad32021-05-25 17:16:17 +08001914 gmac_no == NR_WHNAT_WDMA_PORT) ||
developer4164cfe2022-12-01 11:27:41 +08001915 ((hnat_priv->data->version == MTK_HNAT_V2 ||
1916 hnat_priv->data->version == MTK_HNAT_V3) &&
developere567ad32021-05-25 17:16:17 +08001917 (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
developerfd40db22021-04-29 10:08:25 +08001918 entry->ipv6_5t_route.winfo.bssid = skb_hnat_bss_id(skb);
1919 entry->ipv6_5t_route.winfo.wcid = skb_hnat_wc_id(skb);
developerd35bbcc2022-09-28 22:46:01 +08001920#if defined(CONFIG_MEDIATEK_NETSYS_V3)
developer47545a32022-11-15 16:06:58 +08001921 entry->ipv6_5t_route.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
developerfd40db22021-04-29 10:08:25 +08001922 entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
1923 entry->ipv6_5t_route.iblk2.winfoi = 1;
developerd35bbcc2022-09-28 22:46:01 +08001924 entry->ipv6_5t_route.winfo_pao.usr_info =
1925 skb_hnat_usr_info(skb);
1926 entry->ipv6_5t_route.winfo_pao.tid =
1927 skb_hnat_tid(skb);
1928 entry->ipv6_5t_route.winfo_pao.is_fixedrate =
1929 skb_hnat_is_fixedrate(skb);
1930 entry->ipv6_5t_route.winfo_pao.is_prior =
1931 skb_hnat_is_prior(skb);
1932 entry->ipv6_5t_route.winfo_pao.is_sp =
1933 skb_hnat_is_sp(skb);
1934 entry->ipv6_5t_route.winfo_pao.hf =
1935 skb_hnat_hf(skb);
1936 entry->ipv6_5t_route.winfo_pao.amsdu =
1937 skb_hnat_amsdu(skb);
1938#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
1939 entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
1940 entry->ipv6_5t_route.iblk2.winfoi = 1;
developerfd40db22021-04-29 10:08:25 +08001941#else
1942 entry->ipv6_5t_route.winfo.rxid = skb_hnat_rx_id(skb);
1943 entry->ipv6_5t_route.iblk2w.winfoi = 1;
1944 entry->ipv6_5t_route.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
1945#endif
1946 } else {
1947 if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
developerbc552cc2022-03-15 16:19:27 +08001948 bfib1_tx.vpm = 1;
1949 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001950
developerd35bbcc2022-09-28 22:46:01 +08001951 if (FROM_GE_LAN_GRP(skb))
developerfd40db22021-04-29 10:08:25 +08001952 entry->ipv6_5t_route.vlan1 = 1;
1953 else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
1954 entry->ipv6_5t_route.vlan1 = 2;
1955 }
1956
developer34028fb2022-01-11 13:51:29 +08001957 if (IS_HQOS_MODE &&
developerd35bbcc2022-09-28 22:46:01 +08001958 (FROM_GE_LAN_GRP(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
developerbc552cc2022-03-15 16:19:27 +08001959 bfib1_tx.vpm = 0;
1960 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001961 entry->ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
1962 entry->ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
1963 entry->ipv6_5t_route.iblk2.fqos = 1;
1964 }
developerfd40db22021-04-29 10:08:25 +08001965 }
1966 entry->ipv6_5t_route.iblk2.dp = gmac_no;
1967 }
1968
developer7b36dca2022-05-19 18:29:10 +08001969 bfib1_tx.ttl = 1;
developerbc552cc2022-03-15 16:19:27 +08001970 bfib1_tx.state = BIND;
1971 wmb();
1972 memcpy(&entry->bfib1, &bfib1_tx, sizeof(bfib1_tx));
developerfd40db22021-04-29 10:08:25 +08001973
1974 return NF_ACCEPT;
1975}
1976
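/* RX hook for packets received from WED: reset the per-skb HNAT state and
 * record the WDMA source port so the flow can be learned by later hooks.
 */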
1977int mtk_sw_nat_hook_rx(struct sk_buff *skb)
1978{
developer99506e52021-06-30 22:03:02 +08001979 if (!IS_SPACE_AVAILABLE_HEAD(skb) || !FROM_WED(skb)) {
1980 skb_hnat_magic_tag(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001981 return NF_ACCEPT;
developer99506e52021-06-30 22:03:02 +08001982 }
developerfd40db22021-04-29 10:08:25 +08001983
1984 skb_hnat_alg(skb) = 0;
developerfdfe1572021-09-13 16:56:33 +08001985 skb_hnat_filled(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001986 skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
1987
1988 if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
1989 skb_hnat_sport(skb) = NR_WDMA0_PORT;
1990 else if (skb_hnat_iface(skb) == FOE_MAGIC_WED1)
1991 skb_hnat_sport(skb) = NR_WDMA1_PORT;
1992
1993 return NF_ACCEPT;
1994}
1995
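/* Register a Wi-Fi hook interface: remember the net_device in the
 * wifi_hook_if table and, if it is not yet known, add it to the extif list.
 */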
1996void mtk_ppe_dev_register_hook(struct net_device *dev)
1997{
1998 int i, number = 0;
1999 struct extdev_entry *ext_entry;
2000
developerfd40db22021-04-29 10:08:25 +08002001 for (i = 1; i < MAX_IF_NUM; i++) {
2002 if (hnat_priv->wifi_hook_if[i] == dev) {
2003 pr_info("%s : %s has been registered in wifi_hook_if table[%d]\n",
2004 __func__, dev->name, i);
2005 return;
2006 }
developera7e6c242022-12-05 13:52:40 +08002007 }
2008
2009 for (i = 1; i < MAX_IF_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002010 if (!hnat_priv->wifi_hook_if[i]) {
2011 if (find_extif_from_devname(dev->name)) {
2012 extif_set_dev(dev);
2013 goto add_wifi_hook_if;
2014 }
2015
2016 number = get_ext_device_number();
2017 if (number >= MAX_EXT_DEVS) {
2018 pr_info("%s : extdev array is full. %s is not registered\n",
2019 __func__, dev->name);
2020 return;
2021 }
2022
2023 ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL);
2024 if (!ext_entry)
2025 return;
2026
developer4c32b7a2021-11-13 16:46:43 +08002027 strncpy(ext_entry->name, dev->name, IFNAMSIZ - 1);
developerfd40db22021-04-29 10:08:25 +08002028 dev_hold(dev);
2029 ext_entry->dev = dev;
2030 ext_if_add(ext_entry);
2031
2032add_wifi_hook_if:
2033 dev_hold(dev);
2034 hnat_priv->wifi_hook_if[i] = dev;
2035
2036 break;
2037 }
2038 }
2039 pr_info("%s : interface %s registered (%d)\n", __func__, dev->name, i);
2040}
2041
2042void mtk_ppe_dev_unregister_hook(struct net_device *dev)
2043{
2044 int i;
2045
2046 for (i = 1; i < MAX_IF_NUM; i++) {
2047 if (hnat_priv->wifi_hook_if[i] == dev) {
2048 hnat_priv->wifi_hook_if[i] = NULL;
2049 dev_put(dev);
2050
2051 break;
2052 }
2053 }
2054
2055 extif_put_dev(dev);
2056 pr_info("%s : interface %s set null (%d)\n", __func__, dev->name, i);
2057}
2058
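/* Return 1 if the flow may be hardware accelerated, 0 if it must stay in
 * software (xfrm transform present or a conntrack helper attached).
 */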
2059static unsigned int mtk_hnat_accel_type(struct sk_buff *skb)
2060{
2061 struct dst_entry *dst;
2062 struct nf_conn *ct;
2063 enum ip_conntrack_info ctinfo;
2064 const struct nf_conn_help *help;
2065
2066 /* Do not accelerate the 1st round of an xfrm flow; the 2nd round of an
2067 * xfrm flow comes from local_out, which is also filtered in the sanity check.
2068 */
2069 dst = skb_dst(skb);
2070 if (dst && dst_xfrm(dst))
2071 return 0;
2072
2073 ct = nf_ct_get(skb, &ctinfo);
2074 if (!ct)
2075 return 1;
2076
2077 /* rcu_read_lock()ed by nf_hook_slow */
2078 help = nfct_help(ct);
2079 if (help && rcu_dereference(help->helper))
2080 return 0;
2081
2082 return 1;
2083}
2084
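/* If the DSCP of a keepalive packet no longer matches the bound FOE entry,
 * delete the entry so the flow can be re-learned with the new DSCP.
 */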
developer6f4a0c72021-10-19 10:04:22 +08002085static void mtk_hnat_dscp_update(struct sk_buff *skb, struct foe_entry *entry)
2086{
2087 struct iphdr *iph;
2088 struct ethhdr *eth;
2089 struct ipv6hdr *ip6h;
2090 bool flag = false;
2091
2092 eth = eth_hdr(skb);
2093 switch (ntohs(eth->h_proto)) {
2094 case ETH_P_IP:
2095 iph = ip_hdr(skb);
developer001e7be2021-12-09 15:00:27 +08002096 if (IS_IPV4_GRP(entry) && entry->ipv4_hnapt.iblk2.dscp != iph->tos)
developer6f4a0c72021-10-19 10:04:22 +08002097 flag = true;
2098 break;
2099 case ETH_P_IPV6:
2100 ip6h = ipv6_hdr(skb);
developer001e7be2021-12-09 15:00:27 +08002101 if ((IS_IPV6_3T_ROUTE(entry) || IS_IPV6_5T_ROUTE(entry)) &&
2102 (entry->ipv6_5t_route.iblk2.dscp !=
2103 (ip6h->priority << 4 | (ip6h->flow_lbl[0] >> 4))))
developer6f4a0c72021-10-19 10:04:22 +08002104 flag = true;
2105 break;
2106 default:
2107 return;
2108 }
2109
2110 if (flag) {
developer1080dd82022-03-07 19:31:04 +08002111 if (debug_level >= 2)
2112 pr_info("Delete entry idx=%d.\n", skb_hnat_entry(skb));
developer6f4a0c72021-10-19 10:04:22 +08002113 memset(entry, 0, sizeof(struct foe_entry));
2114 hnat_cache_ebl(1);
2115 }
2116}
2117
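/* Fold the per-flow hardware counters reported by hnat_get_count() back into
 * the nf_conntrack accounting of the owning connection.
 */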
developer30a47682021-11-02 17:06:14 +08002118static void mtk_hnat_nf_update(struct sk_buff *skb)
2119{
2120 struct nf_conn *ct;
2121 struct nf_conn_acct *acct;
2122 struct nf_conn_counter *counter;
2123 enum ip_conntrack_info ctinfo;
2124 struct hnat_accounting diff;
2125
2126 ct = nf_ct_get(skb, &ctinfo);
2127 if (ct) {
2128 if (!hnat_get_count(hnat_priv, skb_hnat_ppe(skb), skb_hnat_entry(skb), &diff))
2129 return;
2130
2131 acct = nf_conn_acct_find(ct);
2132 if (acct) {
2133 counter = acct->counter;
2134 atomic64_add(diff.packets, &counter[CTINFO2DIR(ctinfo)].packets);
2135 atomic64_add(diff.bytes, &counter[CTINFO2DIR(ctinfo)].bytes);
2136 }
2137 }
developere8b7dfa2023-04-20 10:16:44 +08002138}
2139
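/* Resolve the next-hop neighbour on the egress device and fill the source and
 * destination MAC addresses of the 464XLAT entry.
 */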
2140int mtk_464xlat_fill_mac(struct foe_entry *entry, struct sk_buff *skb,
2141 const struct net_device *out, bool l2w)
2142{
2143 const struct in6_addr *ipv6_nexthop;
2144 struct dst_entry *dst = skb_dst(skb);
2145 struct neighbour *neigh = NULL;
2146 struct rtable *rt = (struct rtable *)dst;
2147 u32 nexthop;
2148
2149 rcu_read_lock_bh();
2150 if (l2w) {
2151 ipv6_nexthop = rt6_nexthop((struct rt6_info *)dst,
2152 &ipv6_hdr(skb)->daddr);
2153 neigh = __ipv6_neigh_lookup_noref(dst->dev, ipv6_nexthop);
2154 if (unlikely(!neigh)) {
2155 dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI6)\n",
2156 __func__, &ipv6_hdr(skb)->daddr);
2157 rcu_read_unlock_bh();
2158 return -1;
2159 }
2160 } else {
2161 nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
2162 neigh = __ipv4_neigh_lookup_noref(dst->dev, nexthop);
2163 if (unlikely(!neigh)) {
2164 dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI4)\n",
2165 __func__, &ip_hdr(skb)->daddr);
2166 rcu_read_unlock_bh();
2167 return -1;
2168 }
2169 }
2170 rcu_read_unlock_bh();
2171
2172 entry->ipv4_dslite.dmac_hi = swab32(*((u32 *)neigh->ha));
2173 entry->ipv4_dslite.dmac_lo = swab16(*((u16 *)&neigh->ha[4]));
2174 entry->ipv4_dslite.smac_hi = swab32(*((u32 *)out->dev_addr));
2175 entry->ipv4_dslite.smac_lo = swab16(*((u16 *)&out->dev_addr[4]));
2176
2177 return 0;
2178}
2179
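/* Build a temporary FOE entry from the translated 5-tuple and use it to
 * compute the PPE hash index of the flow.
 */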
2180int mtk_464xlat_get_hash(struct sk_buff *skb, u32 *hash, bool l2w)
2181{
2182 struct in6_addr addr_v6, prefix;
2183 struct ipv6hdr *ip6h;
2184 struct iphdr *iph;
2185 struct tcpudphdr *pptr, _ports;
2186 struct foe_entry tmp;
2187 u32 addr, protoff;
2188
2189 if (l2w) {
2190 ip6h = ipv6_hdr(skb);
2191 if (mtk_ppe_get_xlat_v4_by_v6(&ip6h->daddr, &addr))
2192 return -1;
2193 protoff = IPV6_HDR_LEN;
2194
2195 tmp.bfib1.pkt_type = IPV4_HNAPT;
2196 tmp.ipv4_hnapt.sip = ntohl(ip6h->saddr.s6_addr32[3]);
2197 tmp.ipv4_hnapt.dip = ntohl(addr);
2198 } else {
2199 iph = ip_hdr(skb);
2200 if (mtk_ppe_get_xlat_v6_by_v4(&iph->saddr, &addr_v6, &prefix))
2201 return -1;
2202
2203 protoff = iph->ihl * 4;
2204
2205 tmp.bfib1.pkt_type = IPV6_5T_ROUTE;
2206 tmp.ipv6_5t_route.ipv6_sip0 = ntohl(addr_v6.s6_addr32[0]);
2207 tmp.ipv6_5t_route.ipv6_sip1 = ntohl(addr_v6.s6_addr32[1]);
2208 tmp.ipv6_5t_route.ipv6_sip2 = ntohl(addr_v6.s6_addr32[2]);
2209 tmp.ipv6_5t_route.ipv6_sip3 = ntohl(addr_v6.s6_addr32[3]);
2210 tmp.ipv6_5t_route.ipv6_dip0 = ntohl(prefix.s6_addr32[0]);
2211 tmp.ipv6_5t_route.ipv6_dip1 = ntohl(prefix.s6_addr32[1]);
2212 tmp.ipv6_5t_route.ipv6_dip2 = ntohl(prefix.s6_addr32[2]);
2213 tmp.ipv6_5t_route.ipv6_dip3 = ntohl(iph->daddr);
2214 }
2215
2216 pptr = skb_header_pointer(skb, protoff,
2217 sizeof(_ports), &_ports);
2218 if (unlikely(!pptr))
2219 return -1;
2220
2221 if (l2w) {
2222 tmp.ipv4_hnapt.sport = ntohs(pptr->src);
2223 tmp.ipv4_hnapt.dport = ntohs(pptr->dst);
2224 } else {
2225 tmp.ipv6_5t_route.sport = ntohs(pptr->src);
2226 tmp.ipv6_5t_route.dport = ntohs(pptr->dst);
2227 }
2228
2229 *hash = hnat_get_ppe_hash(&tmp);
2230
2231 return 0;
2232}
2233
2234void mtk_464xlat_fill_info1(struct foe_entry *entry,
2235 struct sk_buff *skb, bool l2w)
2236{
2237 entry->bfib1.cah = 1;
2238 entry->bfib1.ttl = 1;
2239 entry->bfib1.state = BIND;
2240 entry->bfib1.time_stamp = readl(hnat_priv->fe_base + 0x0010) & (0xFF);
2241 if (l2w) {
2242 entry->bfib1.pkt_type = IPV4_DSLITE;
2243 entry->bfib1.udp = ipv6_hdr(skb)->nexthdr ==
2244 IPPROTO_UDP ? 1 : 0;
2245 } else {
2246 entry->bfib1.pkt_type = IPV6_6RD;
2247 entry->bfib1.udp = ip_hdr(skb)->protocol ==
2248 IPPROTO_UDP ? 1 : 0;
2249 }
2250}
2251
2252void mtk_464xlat_fill_info2(struct foe_entry *entry, bool l2w)
2253{
2254 entry->ipv4_dslite.iblk2.mibf = 1;
2255 entry->ipv4_dslite.iblk2.port_ag = 0xF;
2256
2257 if (l2w)
2258 entry->ipv4_dslite.iblk2.dp = NR_GMAC2_PORT;
2259 else
2260 entry->ipv6_6rd.iblk2.dp = NR_GMAC1_PORT;
2261}
2262
2263void mtk_464xlat_fill_ipv4(struct foe_entry *entry, struct sk_buff *skb,
2264 struct foe_entry *foe, bool l2w)
2265{
2266 struct iphdr *iph;
2267
2268 if (l2w) {
2269 entry->ipv4_dslite.sip = foe->ipv4_dslite.sip;
2270 entry->ipv4_dslite.dip = foe->ipv4_dslite.dip;
2271 entry->ipv4_dslite.sport = foe->ipv4_dslite.sport;
2272 entry->ipv4_dslite.dport = foe->ipv4_dslite.dport;
2273 } else {
2274 iph = ip_hdr(skb);
2275 entry->ipv6_6rd.tunnel_sipv4 = ntohl(iph->saddr);
2276 entry->ipv6_6rd.tunnel_dipv4 = ntohl(iph->daddr);
2277 entry->ipv6_6rd.sport = foe->ipv6_6rd.sport;
2278 entry->ipv6_6rd.dport = foe->ipv6_6rd.dport;
2279 entry->ipv6_6rd.hdr_chksum = ppe_get_chkbase(iph);
2280 entry->ipv6_6rd.ttl = iph->ttl;
2281 entry->ipv6_6rd.dscp = iph->tos;
2282 entry->ipv6_6rd.flag = (ntohs(iph->frag_off) >> 13);
2283 }
2284}
2285
2286int mtk_464xlat_fill_ipv6(struct foe_entry *entry, struct sk_buff *skb,
2287 struct foe_entry *foe, bool l2w)
2288{
2289 struct ipv6hdr *ip6h;
2290 struct in6_addr addr_v6, prefix;
2291 u32 addr;
2292
2293 if (l2w) {
2294 ip6h = ipv6_hdr(skb);
2295
2296 if (mtk_ppe_get_xlat_v4_by_v6(&ip6h->daddr, &addr))
2297 return -1;
2298
2299 if (mtk_ppe_get_xlat_v6_by_v4(&addr, &addr_v6, &prefix))
2300 return -1;
2301
2302 entry->ipv4_dslite.tunnel_sipv6_0 =
2303 ntohl(prefix.s6_addr32[0]);
2304 entry->ipv4_dslite.tunnel_sipv6_1 =
2305 ntohl(ip6h->saddr.s6_addr32[1]);
2306 entry->ipv4_dslite.tunnel_sipv6_2 =
2307 ntohl(ip6h->saddr.s6_addr32[2]);
2308 entry->ipv4_dslite.tunnel_sipv6_3 =
2309 ntohl(ip6h->saddr.s6_addr32[3]);
2310 entry->ipv4_dslite.tunnel_dipv6_0 =
2311 ntohl(ip6h->daddr.s6_addr32[0]);
2312 entry->ipv4_dslite.tunnel_dipv6_1 =
2313 ntohl(ip6h->daddr.s6_addr32[1]);
2314 entry->ipv4_dslite.tunnel_dipv6_2 =
2315 ntohl(ip6h->daddr.s6_addr32[2]);
2316 entry->ipv4_dslite.tunnel_dipv6_3 =
2317 ntohl(ip6h->daddr.s6_addr32[3]);
2318
2319 ppe_fill_flow_lbl(entry, ip6h);
2320 entry->ipv4_dslite.priority = ip6h->priority;
2321 entry->ipv4_dslite.hop_limit = ip6h->hop_limit;
2322
2323 } else {
2324 entry->ipv6_6rd.ipv6_sip0 = foe->ipv6_6rd.ipv6_sip0;
2325 entry->ipv6_6rd.ipv6_sip1 = foe->ipv6_6rd.ipv6_sip1;
2326 entry->ipv6_6rd.ipv6_sip2 = foe->ipv6_6rd.ipv6_sip2;
2327 entry->ipv6_6rd.ipv6_sip3 = foe->ipv6_6rd.ipv6_sip3;
2328 entry->ipv6_6rd.ipv6_dip0 = foe->ipv6_6rd.ipv6_dip0;
2329 entry->ipv6_6rd.ipv6_dip1 = foe->ipv6_6rd.ipv6_dip1;
2330 entry->ipv6_6rd.ipv6_dip2 = foe->ipv6_6rd.ipv6_dip2;
2331 entry->ipv6_6rd.ipv6_dip3 = foe->ipv6_6rd.ipv6_dip3;
2332 }
2333
2334 return 0;
2335}
2336
2337int mtk_464xlat_fill_l2(struct foe_entry *entry, struct sk_buff *skb,
2338 const struct net_device *dev, bool l2w)
2339{
2340 const unsigned int *port_reg;
2341 int port_index;
2342 u16 sp_tag;
2343
2344 if (l2w)
2345 entry->ipv4_dslite.etype = ETH_P_IP;
2346 else {
2347 if (IS_DSA_LAN(dev)) {
2348 port_reg = of_get_property(dev->dev.of_node,
2349 "reg", NULL);
2350 if (unlikely(!port_reg))
2351 return -1;
2352
2353 port_index = be32_to_cpup(port_reg);
2354 sp_tag = BIT(port_index);
2355
2356 entry->bfib1.vlan_layer = 1;
2357 entry->bfib1.vpm = 0;
2358 entry->ipv6_6rd.etype = sp_tag;
2359 } else
2360 entry->ipv6_6rd.etype = ETH_P_IPV6;
2361 }
2362
2363 if (mtk_464xlat_fill_mac(entry, skb, dev, l2w))
2364 return -1;
2365
2366 return 0;
developer30a47682021-11-02 17:06:14 +08002367}
2368
developere8b7dfa2023-04-20 10:16:44 +08002369
2370int mtk_464xlat_fill_l3(struct foe_entry *entry, struct sk_buff *skb,
2371 struct foe_entry *foe, bool l2w)
2372{
2373 mtk_464xlat_fill_ipv4(entry, skb, foe, l2w);
2374
2375 if (mtk_464xlat_fill_ipv6(entry, skb, foe, l2w))
2376 return -1;
2377
2378 return 0;
2379}
2380
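/* 464XLAT post-routing: look up the unbound FOE entry by hash, then fill and
 * bind it for either direction (l2w selects the IPv6-ingress case).
 */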
2381int mtk_464xlat_post_process(struct sk_buff *skb, const struct net_device *out)
2382{
2383 struct foe_entry *foe, entry = {};
2384 u32 hash;
2385 bool l2w;
2386
2387 if (skb->protocol == htons(ETH_P_IPV6))
2388 l2w = true;
2389 else if (skb->protocol == htons(ETH_P_IP))
2390 l2w = false;
2391 else
2392 return -1;
2393
2394 if (mtk_464xlat_get_hash(skb, &hash, l2w))
2395 return -1;
2396
2397 if (hash >= hnat_priv->foe_etry_num)
2398 return -1;
2399
2400 if (headroom[hash].crsn != HIT_UNBIND_RATE_REACH)
2401 return -1;
2402
2403 foe = &hnat_priv->foe_table_cpu[headroom_ppe(headroom[hash])][hash];
2404
2405 mtk_464xlat_fill_info1(&entry, skb, l2w);
2406
2407 if (mtk_464xlat_fill_l3(&entry, skb, foe, l2w))
2408 return -1;
2409
2410 mtk_464xlat_fill_info2(&entry, l2w);
2411
2412 if (mtk_464xlat_fill_l2(&entry, skb, out, l2w))
2413 return -1;
2414
2415 /* We must ensure all info has been updated before it is written to hw */
2416 wmb();
2417 memcpy(foe, &entry, sizeof(struct foe_entry));
2418
2419 return 0;
2420}
2421
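/* Common post-routing handler: learn and bind new flows on
 * HIT_UNBIND_RATE_REACH, refresh counters/DSCP/mcast state on keepalive, and
 * skip entries the PPE has already handled.
 */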
developerfd40db22021-04-29 10:08:25 +08002422static unsigned int mtk_hnat_nf_post_routing(
2423 struct sk_buff *skb, const struct net_device *out,
2424 unsigned int (*fn)(struct sk_buff *, const struct net_device *,
2425 struct flow_offload_hw_path *),
2426 const char *func)
2427{
2428 struct foe_entry *entry;
2429 struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
developere5763512021-05-21 01:04:58 +08002430 .virt_dev = (struct net_device*)out };
developerfd40db22021-04-29 10:08:25 +08002431 const struct net_device *arp_dev = out;
2432
developere8b7dfa2023-04-20 10:16:44 +08002433 if (xlat_toggle && !mtk_464xlat_post_process(skb, out))
2434 return 0;
2435
developerfd40db22021-04-29 10:08:25 +08002436 if (skb_hnat_alg(skb) || unlikely(!is_magic_tag_valid(skb) ||
2437 !IS_SPACE_AVAILABLE_HEAD(skb)))
2438 return 0;
2439
developerc0419aa2022-12-07 15:56:36 +08002440 if (unlikely(!skb_mac_header_was_set(skb)))
2441 return 0;
2442
developerfd40db22021-04-29 10:08:25 +08002443 if (unlikely(!skb_hnat_is_hashed(skb)))
2444 return 0;
2445
2446 if (out->netdev_ops->ndo_flow_offload_check) {
developere5763512021-05-21 01:04:58 +08002447 out->netdev_ops->ndo_flow_offload_check(&hw_path);
developerfd40db22021-04-29 10:08:25 +08002448 out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
2449 }
2450
developerd35bbcc2022-09-28 22:46:01 +08002451 if (!IS_LAN_GRP(out) && !IS_WAN(out) && !IS_EXT(out))
developerfd40db22021-04-29 10:08:25 +08002452 return 0;
2453
2454 trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
2455 skb_hnat_iface(skb), out->name, skb_hnat_reason(skb));
2456
developer577ad2f2022-11-28 10:33:36 +08002457 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
2458 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
2459 return -1;
2460
developer471f6562021-05-10 20:48:34 +08002461 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08002462
2463 switch (skb_hnat_reason(skb)) {
2464 case HIT_UNBIND_RATE_REACH:
2465 if (entry_hnat_is_bound(entry))
2466 break;
2467
2468 if (fn && !mtk_hnat_accel_type(skb))
2469 break;
2470
2471 if (fn && fn(skb, arp_dev, &hw_path))
2472 break;
2473
2474 skb_to_hnat_info(skb, out, entry, &hw_path);
2475 break;
2476 case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
developer30a47682021-11-02 17:06:14 +08002477 /* update hnat count to nf_conntrack by keepalive */
2478 if (hnat_priv->data->per_flow_accounting && hnat_priv->nf_stat_en)
2479 mtk_hnat_nf_update(skb);
2480
developerfd40db22021-04-29 10:08:25 +08002481 if (fn && !mtk_hnat_accel_type(skb))
2482 break;
2483
developer6f4a0c72021-10-19 10:04:22 +08002484 /* update dscp for qos */
2485 mtk_hnat_dscp_update(skb, entry);
2486
developerfd40db22021-04-29 10:08:25 +08002487 /* update mcast timestamp */
developer4164cfe2022-12-01 11:27:41 +08002488 if (hnat_priv->data->version == MTK_HNAT_V1_3 &&
developerfd40db22021-04-29 10:08:25 +08002489 hnat_priv->data->mcast && entry->bfib1.sta == 1)
2490 entry->ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
2491
2492 if (entry_hnat_is_bound(entry)) {
2493 memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
2494
2495 return -1;
2496 }
2497 break;
2498 case HIT_BIND_MULTICAST_TO_CPU:
2499 case HIT_BIND_MULTICAST_TO_GMAC_CPU:
2500 /* do not forward to gdma again if ppe has already done it */
developerd35bbcc2022-09-28 22:46:01 +08002501 if (IS_LAN_GRP(out) || IS_WAN(out))
developerfd40db22021-04-29 10:08:25 +08002502 return -1;
2503 break;
2504 }
2505
2506 return 0;
2507}
2508
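/* IPv6 LOCAL_OUT hook: for IPIP-encapsulated flows, mark the entry as DS-Lite
 * or, when MAP-E is enabled, record the inner IPv4 header info before binding.
 */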
2509static unsigned int
2510mtk_hnat_ipv6_nf_local_out(void *priv, struct sk_buff *skb,
2511 const struct nf_hook_state *state)
2512{
2513 struct foe_entry *entry;
2514 struct ipv6hdr *ip6h;
2515 struct iphdr _iphdr;
2516 const struct iphdr *iph;
2517 struct tcpudphdr _ports;
2518 const struct tcpudphdr *pptr;
2519 int udp = 0;
2520
2521 if (unlikely(!skb_hnat_is_hashed(skb)))
2522 return NF_ACCEPT;
2523
developer577ad2f2022-11-28 10:33:36 +08002524 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
2525 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
2526 return NF_ACCEPT;
2527
developer471f6562021-05-10 20:48:34 +08002528 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08002529 if (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH) {
2530 ip6h = ipv6_hdr(skb);
2531 if (ip6h->nexthdr == NEXTHDR_IPIP) {
2532 /* Map-E LAN->WAN: need to record orig info before fn. */
2533 if (mape_toggle) {
2534 iph = skb_header_pointer(skb, IPV6_HDR_LEN,
2535 sizeof(_iphdr), &_iphdr);
developer4c32b7a2021-11-13 16:46:43 +08002536 if (unlikely(!iph))
2537 return NF_ACCEPT;
2538
developerfd40db22021-04-29 10:08:25 +08002539 switch (iph->protocol) {
2540 case IPPROTO_UDP:
2541 udp = 1;
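				/* fall through */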
2542 case IPPROTO_TCP:
2543 break;
2544
2545 default:
2546 return NF_ACCEPT;
2547 }
2548
2549 pptr = skb_header_pointer(skb, IPV6_HDR_LEN + iph->ihl * 4,
2550 sizeof(_ports), &_ports);
developer4c32b7a2021-11-13 16:46:43 +08002551 if (unlikely(!pptr))
2552 return NF_ACCEPT;
2553
developerfd40db22021-04-29 10:08:25 +08002554 entry->bfib1.udp = udp;
2555
developer25fc8c02022-05-06 16:24:02 +08002556 /* Map-E LAN->WAN record inner IPv4 header info. */
developerd35bbcc2022-09-28 22:46:01 +08002557#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
developerfd40db22021-04-29 10:08:25 +08002558 entry->bfib1.pkt_type = IPV4_MAP_E;
2559 entry->ipv4_dslite.iblk2.dscp = iph->tos;
developerd35bbcc2022-09-28 22:46:01 +08002560 entry->ipv4_mape.new_sip = ntohl(iph->saddr);
2561 entry->ipv4_mape.new_dip = ntohl(iph->daddr);
2562 entry->ipv4_mape.new_sport = ntohs(pptr->src);
2563 entry->ipv4_mape.new_dport = ntohs(pptr->dst);
developerfd40db22021-04-29 10:08:25 +08002564#else
2565 entry->ipv4_hnapt.iblk2.dscp = iph->tos;
2566 entry->ipv4_hnapt.new_sip = ntohl(iph->saddr);
2567 entry->ipv4_hnapt.new_dip = ntohl(iph->daddr);
2568 entry->ipv4_hnapt.new_sport = ntohs(pptr->src);
2569 entry->ipv4_hnapt.new_dport = ntohs(pptr->dst);
2570#endif
2571 } else {
2572 entry->bfib1.pkt_type = IPV4_DSLITE;
2573 }
2574 }
2575 }
2576 return NF_ACCEPT;
2577}
2578
2579static unsigned int
2580mtk_hnat_ipv6_nf_post_routing(void *priv, struct sk_buff *skb,
2581 const struct nf_hook_state *state)
2582{
developer577ad2f2022-11-28 10:33:36 +08002583 if (!skb)
2584 goto drop;
2585
developerfd40db22021-04-29 10:08:25 +08002586 post_routing_print(skb, state->in, state->out, __func__);
2587
2588 if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv6_get_nexthop,
2589 __func__))
2590 return NF_ACCEPT;
2591
developer577ad2f2022-11-28 10:33:36 +08002592drop:
2593 if (skb)
2594 trace_printk(
2595 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x,\n"
2596 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2597 __func__, skb_hnat_iface(skb), state->out->name,
2598 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2599 skb_hnat_sport(skb), skb_hnat_reason(skb),
2600 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002601
2602 return NF_DROP;
2603}
2604
2605static unsigned int
2606mtk_hnat_ipv4_nf_post_routing(void *priv, struct sk_buff *skb,
2607 const struct nf_hook_state *state)
2608{
developer577ad2f2022-11-28 10:33:36 +08002609 if (!skb)
2610 goto drop;
2611
developerfd40db22021-04-29 10:08:25 +08002612 post_routing_print(skb, state->in, state->out, __func__);
2613
2614 if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv4_get_nexthop,
2615 __func__))
2616 return NF_ACCEPT;
2617
developer577ad2f2022-11-28 10:33:36 +08002618drop:
2619 if (skb)
2620 trace_printk(
2621 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x,\n"
2622 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2623 __func__, skb_hnat_iface(skb), state->out->name,
2624 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2625 skb_hnat_sport(skb), skb_hnat_reason(skb),
2626 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002627
2628 return NF_DROP;
2629}
2630
2631static unsigned int
2632mtk_pong_hqos_handler(void *priv, struct sk_buff *skb,
2633 const struct nf_hook_state *state)
2634{
developer659fdeb2022-12-01 23:03:07 +08002635 struct vlan_ethhdr *veth;
2636
2637 if (!skb)
2638 goto drop;
2639
2640 veth = (struct vlan_ethhdr *)skb_mac_header(skb);
developerfd40db22021-04-29 10:08:25 +08002641
developer34028fb2022-01-11 13:51:29 +08002642 if (IS_HQOS_MODE && eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
developerfd40db22021-04-29 10:08:25 +08002643 skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
2644 skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
2645 }
developerfd40db22021-04-29 10:08:25 +08002646
2647 if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
2648 clr_from_extge(skb);
2649
2650 /* packets from external devices -> xxx, step 2, learning stage */
developeraf07fad2021-11-19 17:53:42 +08002651 if (do_ext2ge_fast_learn(state->in, skb) && (!qos_toggle ||
2652 (qos_toggle && eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG))) {
developerfd40db22021-04-29 10:08:25 +08002653 if (!do_hnat_ext_to_ge2(skb, __func__))
2654 return NF_STOLEN;
2655 goto drop;
2656 }
2657
2658 /* packets from ge -> external device */
2659 if (do_ge2ext_fast(state->in, skb)) {
2660 if (!do_hnat_ge_to_ext(skb, __func__))
2661 return NF_STOLEN;
2662 goto drop;
2663 }
2664
2665 return NF_ACCEPT;
developer577ad2f2022-11-28 10:33:36 +08002666
developerfd40db22021-04-29 10:08:25 +08002667drop:
developer577ad2f2022-11-28 10:33:36 +08002668 if (skb)
2669 printk_ratelimited(KERN_WARNING
2670 "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
2671 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2672 __func__, state->in->name, skb_hnat_iface(skb),
2673 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2674 skb_hnat_sport(skb), skb_hnat_reason(skb),
2675 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002676
2677 return NF_DROP;
2678}
2679
2680static unsigned int
2681mtk_hnat_br_nf_local_out(void *priv, struct sk_buff *skb,
2682 const struct nf_hook_state *state)
2683{
developer577ad2f2022-11-28 10:33:36 +08002684 if (!skb)
2685 goto drop;
2686
developerfd40db22021-04-29 10:08:25 +08002687 post_routing_print(skb, state->in, state->out, __func__);
2688
2689 if (!mtk_hnat_nf_post_routing(skb, state->out, 0, __func__))
2690 return NF_ACCEPT;
2691
developer577ad2f2022-11-28 10:33:36 +08002692drop:
2693 if (skb)
2694 trace_printk(
2695 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x,\n"
2696 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2697 __func__, skb_hnat_iface(skb), state->out->name,
2698 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2699 skb_hnat_sport(skb), skb_hnat_reason(skb),
2700 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002701
2702 return NF_DROP;
2703}
2704
2705static unsigned int
2706mtk_hnat_ipv4_nf_local_out(void *priv, struct sk_buff *skb,
2707 const struct nf_hook_state *state)
2708{
2709 struct sk_buff *new_skb;
2710 struct foe_entry *entry;
2711 struct iphdr *iph;
2712
2713 if (!skb_hnat_is_hashed(skb))
2714 return NF_ACCEPT;
2715
developer577ad2f2022-11-28 10:33:36 +08002716 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
2717 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
2718 return NF_ACCEPT;
2719
developer471f6562021-05-10 20:48:34 +08002720 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08002721
2722 if (unlikely(skb_headroom(skb) < FOE_INFO_LEN)) {
2723 new_skb = skb_realloc_headroom(skb, FOE_INFO_LEN);
2724 if (!new_skb) {
2725 dev_info(hnat_priv->dev, "%s:drop\n", __func__);
2726 return NF_DROP;
2727 }
2728 dev_kfree_skb(skb);
2729 skb = new_skb;
2730 }
2731
2732 /* Prevent flows originating from the local stack from being bound. */
2733 iph = ip_hdr(skb);
2734 if (iph->protocol == IPPROTO_IPV6) {
2735 entry->udib1.pkt_type = IPV6_6RD;
2736 hnat_set_head_frags(state, skb, 0, hnat_set_alg);
2737 } else {
2738 hnat_set_head_frags(state, skb, 1, hnat_set_alg);
2739 }
2740
2741 return NF_ACCEPT;
2742}
2743
2744static unsigned int mtk_hnat_br_nf_forward(void *priv,
2745 struct sk_buff *skb,
2746 const struct nf_hook_state *state)
2747{
developer4164cfe2022-12-01 11:27:41 +08002748 if ((hnat_priv->data->version == MTK_HNAT_V1_2) &&
developer99506e52021-06-30 22:03:02 +08002749 unlikely(IS_EXT(state->in) && IS_EXT(state->out)))
developerfd40db22021-04-29 10:08:25 +08002750 hnat_set_head_frags(state, skb, 1, hnat_set_alg);
2751
2752 return NF_ACCEPT;
2753}
2754
2755static struct nf_hook_ops mtk_hnat_nf_ops[] __read_mostly = {
2756 {
2757 .hook = mtk_hnat_ipv4_nf_pre_routing,
2758 .pf = NFPROTO_IPV4,
2759 .hooknum = NF_INET_PRE_ROUTING,
2760 .priority = NF_IP_PRI_FIRST + 1,
2761 },
2762 {
2763 .hook = mtk_hnat_ipv6_nf_pre_routing,
2764 .pf = NFPROTO_IPV6,
2765 .hooknum = NF_INET_PRE_ROUTING,
2766 .priority = NF_IP_PRI_FIRST + 1,
2767 },
2768 {
2769 .hook = mtk_hnat_ipv6_nf_post_routing,
2770 .pf = NFPROTO_IPV6,
2771 .hooknum = NF_INET_POST_ROUTING,
2772 .priority = NF_IP_PRI_LAST,
2773 },
2774 {
2775 .hook = mtk_hnat_ipv6_nf_local_out,
2776 .pf = NFPROTO_IPV6,
2777 .hooknum = NF_INET_LOCAL_OUT,
2778 .priority = NF_IP_PRI_LAST,
2779 },
2780 {
2781 .hook = mtk_hnat_ipv4_nf_post_routing,
2782 .pf = NFPROTO_IPV4,
2783 .hooknum = NF_INET_POST_ROUTING,
2784 .priority = NF_IP_PRI_LAST,
2785 },
2786 {
2787 .hook = mtk_hnat_ipv4_nf_local_out,
2788 .pf = NFPROTO_IPV4,
2789 .hooknum = NF_INET_LOCAL_OUT,
2790 .priority = NF_IP_PRI_LAST,
2791 },
2792 {
2793 .hook = mtk_hnat_br_nf_local_in,
2794 .pf = NFPROTO_BRIDGE,
2795 .hooknum = NF_BR_LOCAL_IN,
2796 .priority = NF_BR_PRI_FIRST,
2797 },
2798 {
2799 .hook = mtk_hnat_br_nf_local_out,
2800 .pf = NFPROTO_BRIDGE,
2801 .hooknum = NF_BR_LOCAL_OUT,
2802 .priority = NF_BR_PRI_LAST - 1,
2803 },
2804 {
2805 .hook = mtk_pong_hqos_handler,
2806 .pf = NFPROTO_BRIDGE,
2807 .hooknum = NF_BR_PRE_ROUTING,
developer2b85f652021-08-19 16:09:50 +08002808 .priority = NF_BR_PRI_FIRST + 1,
developerfd40db22021-04-29 10:08:25 +08002809 },
2810};
2811
2812int hnat_register_nf_hooks(void)
2813{
2814 return nf_register_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
2815}
2816
2817void hnat_unregister_nf_hooks(void)
2818{
2819 nf_unregister_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
2820}
2821
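/* When the Wi-Fi whnat driver takes over, rehome the bridge hooks: local_in
 * moves to pre-routing, local_out moves to post-routing, and the HQoS pong
 * handler is replaced by the plain bridge-forward handler.
 */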
2822int whnat_adjust_nf_hooks(void)
2823{
2824 struct nf_hook_ops *hook = mtk_hnat_nf_ops;
2825 unsigned int n = ARRAY_SIZE(mtk_hnat_nf_ops);
2826
developerfd40db22021-04-29 10:08:25 +08002827 while (n-- > 0) {
2828 if (hook[n].hook == mtk_hnat_br_nf_local_in) {
2829 hook[n].hooknum = NF_BR_PRE_ROUTING;
developer2b85f652021-08-19 16:09:50 +08002830 hook[n].priority = NF_BR_PRI_FIRST + 1;
developerfd40db22021-04-29 10:08:25 +08002831 } else if (hook[n].hook == mtk_hnat_br_nf_local_out) {
2832 hook[n].hooknum = NF_BR_POST_ROUTING;
2833 } else if (hook[n].hook == mtk_pong_hqos_handler) {
2834 hook[n].hook = mtk_hnat_br_nf_forward;
2835 hook[n].hooknum = NF_BR_FORWARD;
2836 hook[n].priority = NF_BR_PRI_LAST - 1;
2837 }
2838 }
2839
2840 return 0;
2841}
2842
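/* packet_type callback for HQoS magic-tagged frames: recover the FOE entry
 * index from the VLAN TCI, mark the skb as force-to-CPU and push it through
 * the GE->EXT fast path.
 */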
developerfd40db22021-04-29 10:08:25 +08002843int mtk_hqos_ptype_cb(struct sk_buff *skb, struct net_device *dev,
2844 struct packet_type *pt, struct net_device *unused)
2845{
2846 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
2847
2848 skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
2849 skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
2850
developer659fdeb2022-12-01 23:03:07 +08002851 if (do_hnat_ge_to_ext(skb, __func__) == -1)
2852 return 1;
developerfd40db22021-04-29 10:08:25 +08002853
2854 return 0;
2855}
developerfd40db22021-04-29 10:08:25 +08002856